diff --git a/README.md b/README.md
index d2f34e90d..b559970c2 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@

----------------
+-----------------
# SKALED – SKALE C++ Client
diff --git a/libbatched-io/batched_io.cpp b/libbatched-io/batched_io.cpp
index 9051dda5c..e7edc1f72 100644
--- a/libbatched-io/batched_io.cpp
+++ b/libbatched-io/batched_io.cpp
@@ -13,9 +13,9 @@ std::string test_crash_at;
namespace batched_io {
void test_crash_before_commit( const std::string& id ) {
if ( !test_crash_at.empty() ) {
- cnote << "test_crash_before_commit: " << id << std::endl;
+ cnote << "test_crash_before_commit: " << id;
if ( id == test_crash_at ) {
- cerror << "test_crash_before_commit: crashing at " << test_crash_at << std::endl;
+ cerror << "test_crash_before_commit: crashing at " << test_crash_at;
_exit( 33 );
}
} // if 1
diff --git a/libdevcore/BMPBN_tests.h b/libdevcore/BMPBN_tests.h
index fc513ab52..b7d1a66ea 100644
--- a/libdevcore/BMPBN_tests.h
+++ b/libdevcore/BMPBN_tests.h
@@ -50,18 +50,18 @@ inline bool test1( const std::string& s, bool bIsVerbose ) {
ss << std::hex << std::setw( 2 ) << std::setfill( '0' ) << int( buffer[0] );
if ( bIsVerbose ) {
while ( --length > 0 )
- ss << cc::debug( ":" ) << std::setw( 2 ) << std::setfill( '0' )
- << int( buffer[++index] );
+ ss << ":" << std::setw( 2 ) << std::setfill( '0' ) << int( buffer[++index] );
}
if ( bIsVerbose )
- ss << cc::debug( " = " ) << std::dec << std::setw( 1 ) << x << cc::debug( ", " );
+ ss << " = " << std::dec << std::setw( 1 ) << x << ", ";
if ( x != y ) {
if ( bIsVerbose )
- std::cout << " " << ss.str() << cc::fatal( "FAILED" ) << "\n";
+ std::cout << " " << ss.str() << "FAILED"
+ << "\n";
return false;
}
// if ( bIsVerbose )
- // std::cout << " " << cc::success( "OK for " ) << cc::info( s ) << "\n";
+ // std::cout << " " << "OK for " << s << "\n";
return true;
} // namespace rpc
@@ -69,8 +69,8 @@ template < class T >
inline bool test(
bool bIncludeNegative, bool bIncludeHuge, bool bIsVerbose, const char* strTypeDescription ) {
if ( bIsVerbose && strTypeDescription && strTypeDescription[0] )
- std::cout << cc::debug( "Testing conversion of " ) << cc::info( strTypeDescription )
- << cc::debug( "..." ) << "\n";
+ std::cout << "Testing conversion of " << strTypeDescription << "..."
+ << "\n";
bool bOKay = true;
//
if ( !test1< T >( std::string( "0" ), bIsVerbose ) )
@@ -159,11 +159,12 @@ inline bool test(
}
if ( bIsVerbose && strTypeDescription && strTypeDescription[0] ) {
if ( bOKay )
- std::cout << cc::success( "Successful conversion test of " )
- << cc::info( strTypeDescription ) << cc::success( "." ) << "\n";
+ std::cout << "Successful conversion test of " << strTypeDescription << "."
+ << "\n";
else
- std::cout << cc::fatal( "FAILED" ) << cc::error( " conversion test of " )
- << cc::warn( strTypeDescription ) << cc::error( "." ) << "\n";
+ std::cout << "FAILED"
+ << " conversion test of " << strTypeDescription << "."
+ << "\n";
}
return bOKay;
} // namespace BMPBN
@@ -172,8 +173,8 @@ template < class T >
inline bool test_limit_limbs_and_halves(
const char* strStartValue, size_t nBits, bool bIsVerbose ) {
if ( bIsVerbose )
- std::cout << cc::debug( "Testing limit margin of " ) << cc::size10( nBits )
- << cc::debug( " bit values..." ) << "\n";
+ std::cout << "Testing limit margin of " << nBits << " bit values..."
+ << "\n";
bool bOKay = true;
if ( !test1< T >( std::string( strStartValue ), bIsVerbose ) )
bOKay = false;
@@ -187,11 +188,12 @@ inline bool test_limit_limbs_and_halves(
}
if ( bIsVerbose ) {
if ( bOKay )
- std::cout << cc::success( "Successful conversion test of " ) << cc::size10( nBits )
- << cc::success( " bit values." ) << "\n";
+ std::cout << "Successful conversion test of " << nBits << " bit values."
+ << "\n";
else
- std::cout << cc::fatal( "FAILED" ) << cc::error( " conversion test of " )
- << cc::size10( nBits ) << cc::debug( " bit values" ) << "\n";
+ std::cout << "FAILED"
+ << " conversion test of " << nBits << " bit values"
+ << "\n";
}
return bOKay;
}
diff --git a/libdevcore/Common.cpp b/libdevcore/Common.cpp
index e15bb6ae7..689551cdf 100644
--- a/libdevcore/Common.cpp
+++ b/libdevcore/Common.cpp
@@ -58,7 +58,7 @@ void ExitHandler::exitHandler( int nSignalNo, ExitHandler::exit_code_t ec ) {
string( "\nInternal exit requested while already exiting. " ) :
"\nInternal exit initiated. ";
}
- std::cerr << strMessagePrefix << skutils::signal::signal2str( nSignalNo ) << "\n\n";
+ cerror << strMessagePrefix << skutils::signal::signal2str( nSignalNo ) << "\n\n";
switch ( nSignalNo ) {
case SIGINT:
@@ -85,10 +85,10 @@ void ExitHandler::exitHandler( int nSignalNo, ExitHandler::exit_code_t ec ) {
case SIGFPE:
case SIGSEGV:
// abort signals
- std::cout << "\n" << skutils::signal::generate_stack_trace() << "\n";
- std::cout.flush();
- std::cout << skutils::signal::read_maps() << "\n";
- std::cout.flush();
+ cerror << "\n" << skutils::signal::generate_stack_trace();
+ cerror.flush();
+ cerror << skutils::signal::read_maps();
+ cerror.flush();
_exit( nSignalNo + 128 );
@@ -106,10 +106,9 @@ void ExitHandler::exitHandler( int nSignalNo, ExitHandler::exit_code_t ec ) {
auto start_time = std::chrono::steady_clock::now();
std::thread( [nSignalNo, start_time]() {
- std::cerr << ( "\n" + string( "SELF-KILL:" ) + " " + "Will sleep " +
- cc::size10( ExitHandler::KILL_TIMEOUT ) +
- " seconds before force exit..." ) +
- "\n\n";
+ cerror << "\nSELF-KILL: "
+ << "Will sleep " << ExitHandler::KILL_TIMEOUT
+ << " seconds before force exit...";
clog( VerbosityInfo, "exit" ) << "THREADS timer started";
@@ -125,10 +124,9 @@ void ExitHandler::exitHandler( int nSignalNo, ExitHandler::exit_code_t ec ) {
threads = new_threads;
if ( threads_diff.size() ) {
- cerr << seconds << " THREADS " << threads.size() << ":";
+ cerror << seconds << " THREADS " << threads.size() << ":";
for ( const string& t : threads_diff )
- cerr << " " << t;
- cerr << endl;
+ cerror << " " << t;
}
} catch ( ... ) {
// swallow it
@@ -137,10 +135,9 @@ void ExitHandler::exitHandler( int nSignalNo, ExitHandler::exit_code_t ec ) {
std::this_thread::sleep_for( 100ms );
}
- std::cerr << ( "\n" + string( "SELF-KILL:" ) + " " +
- "Will force exit after sleeping " +
- cc::size10( ExitHandler::KILL_TIMEOUT ) + cc::error( " second(s)" ) +
- "\n\n" );
+ cerror << "\nSELF-KILL: "
+ << "Will force exit after sleeping " << ExitHandler::KILL_TIMEOUT
+ << " second(s)";
// TODO deduplicate this with main() before return
ExitHandler::exit_code_t ec = ExitHandler::requestedExitCode();
@@ -158,7 +155,7 @@ void ExitHandler::exitHandler( int nSignalNo, ExitHandler::exit_code_t ec ) {
// TODO deduplicate with first if()
if ( ExitHandler::shouldExit() && s_nStopSignal > 0 && nSignalNo > 0 ) {
- std::cerr << ( "\n" + string( "SIGNAL-HANDLER:" ) + " " + "Will force exit now...\n\n" );
+ cerror << "\nSIGNAL-HANDLER: Will force exit now...";
_exit( 13 );
}
diff --git a/libdevcore/Exceptions.cpp b/libdevcore/Exceptions.cpp
index 2ccd2d909..5df044d02 100644
--- a/libdevcore/Exceptions.cpp
+++ b/libdevcore/Exceptions.cpp
@@ -56,8 +56,6 @@ std::string innermost_exception_what( const std::exception& ex ) {
}
void rethrow_most_nested( const std::exception& ex ) {
- // std::cerr << nested_exception_what(ex) << std::endl;
-
const std::nested_exception* nested_ptr = dynamic_cast< const std::nested_exception* >( &ex );
if ( nested_ptr == nullptr ) {
throw; // TODO can we make this func to be called without arguments? what does it really
diff --git a/libdevcore/Log.cpp b/libdevcore/Log.cpp
index 4e0ca878e..a5d8a1d1f 100644
--- a/libdevcore/Log.cpp
+++ b/libdevcore/Log.cpp
@@ -123,12 +123,10 @@ void setupLogging( LoggingOptions const& _options ) {
ss << channel;
strChannel = ss.str();
} // block
- sink->set_formatter( expr::stream
- << timestamp << " " << cc::info( strThreadName ) << " "
- << cc::warn( strChannel )
- << expr::if_( expr::has_attr(
- context ) )[expr::stream << " " << cc::warn( strChannel )]
- << " " << expr::smessage );
+ sink->set_formatter(
+ expr::stream << timestamp << " " << strThreadName << " " << strChannel
+ << expr::if_( expr::has_attr( context ) )[expr::stream << " " << strChannel]
+ << " " << expr::smessage );
boost::log::core::get()->add_sink( sink );
boost::log::core::get()->add_global_attribute(
diff --git a/libdevcore/Log.h b/libdevcore/Log.h
index da18916f9..0cfd6f936 100644
--- a/libdevcore/Log.h
+++ b/libdevcore/Log.h
@@ -146,7 +146,7 @@ inline boost::log::formatting_ostream& operator<<(
boost::log::formatting_ostream& _strm, bigint const& _value ) {
std::stringstream ss;
ss << _value;
- _strm.stream() << cc::info( ss.str() );
+ _strm.stream() << ss.str();
return _strm;
}
inline boost::log::formatting_ostream& operator<<(
@@ -154,7 +154,7 @@ inline boost::log::formatting_ostream& operator<<(
auto const& constValue = _value;
std::stringstream ss;
ss << constValue;
- _strm.stream() << cc::info( ss.str() );
+ _strm.stream() << ss.str();
return _strm;
}
@@ -162,7 +162,7 @@ inline boost::log::formatting_ostream& operator<<(
boost::log::formatting_ostream& _strm, u256 const& _value ) {
std::stringstream ss;
ss << _value;
- _strm.stream() << cc::info( ss.str() );
+ _strm.stream() << ss.str();
return _strm;
}
inline boost::log::formatting_ostream& operator<<(
@@ -170,7 +170,7 @@ inline boost::log::formatting_ostream& operator<<(
auto const& constValue = _value;
std::stringstream ss;
ss << constValue;
- _strm.stream() << cc::info( ss.str() );
+ _strm.stream() << ss.str();
return _strm;
}
@@ -178,7 +178,7 @@ inline boost::log::formatting_ostream& operator<<(
boost::log::formatting_ostream& _strm, u160 const& _value ) {
std::stringstream ss;
ss << _value;
- _strm.stream() << cc::info( ss.str() );
+ _strm.stream() << ss.str();
return _strm;
}
inline boost::log::formatting_ostream& operator<<(
@@ -186,7 +186,7 @@ inline boost::log::formatting_ostream& operator<<(
auto const& constValue = _value;
std::stringstream ss;
ss << constValue;
- _strm.stream() << cc::info( ss.str() );
+ _strm.stream() << ss.str();
return _strm;
}
@@ -195,7 +195,7 @@ inline boost::log::formatting_ostream& operator<<(
boost::log::formatting_ostream& _strm, FixedHash< N > const& _value ) {
std::stringstream ss;
ss << _value.abridged();
- _strm.stream() << cc::warn( "#" ) << cc::info( ss.str() );
+ _strm.stream() << "#" << ss.str();
return _strm;
}
template < unsigned N >
@@ -204,7 +204,7 @@ inline boost::log::formatting_ostream& operator<<(
auto const& constValue = _value;
std::stringstream ss;
ss << constValue;
- _strm.stream() << cc::info( ss.str() );
+ _strm.stream() << ss.str();
return _strm;
}
@@ -212,7 +212,7 @@ inline boost::log::formatting_ostream& operator<<(
boost::log::formatting_ostream& _strm, h160 const& _value ) {
std::stringstream ss;
ss << _value.abridged();
- _strm.stream() << cc::warn( "#" ) << cc::error( ss.str() );
+ _strm.stream() << "#" << ss.str();
return _strm;
}
inline boost::log::formatting_ostream& operator<<(
@@ -220,7 +220,7 @@ inline boost::log::formatting_ostream& operator<<(
auto const& constValue = _value;
std::stringstream ss;
ss << constValue;
- _strm.stream() << cc::error( ss.str() );
+ _strm.stream() << ss.str();
return _strm;
}
@@ -228,7 +228,7 @@ inline boost::log::formatting_ostream& operator<<(
boost::log::formatting_ostream& _strm, h256 const& _value ) {
std::stringstream ss;
ss << _value.abridged();
- _strm.stream() << cc::warn( "#" ) << cc::info( ss.str() );
+ _strm.stream() << "#" << ss.str();
return _strm;
}
inline boost::log::formatting_ostream& operator<<(
@@ -236,7 +236,7 @@ inline boost::log::formatting_ostream& operator<<(
auto const& constValue = _value;
std::stringstream ss;
ss << constValue;
- _strm.stream() << cc::info( ss.str() );
+ _strm.stream() << ss.str();
return _strm;
}
@@ -244,7 +244,7 @@ inline boost::log::formatting_ostream& operator<<(
boost::log::formatting_ostream& _strm, h512 const& _value ) {
std::stringstream ss;
ss << _value.abridged();
- _strm.stream() << cc::warn( "##" ) << cc::info( ss.str() );
+ _strm.stream() << "##" << ss.str();
return _strm;
}
inline boost::log::formatting_ostream& operator<<(
@@ -252,7 +252,7 @@ inline boost::log::formatting_ostream& operator<<(
auto const& constValue = _value;
std::stringstream ss;
ss << constValue;
- _strm.stream() << cc::info( ss.str() );
+ _strm.stream() << ss.str();
return _strm;
}
@@ -271,7 +271,7 @@ namespace boost {
namespace log {
inline boost::log::formatting_ostream& operator<<(
boost::log::formatting_ostream& _strm, dev::bytes const& _value ) {
- _strm.stream() << cc::warn( "%" ) << cc::c( dev::toHex( _value ) );
+ _strm.stream() << "%" << dev::toHex( _value );
return _strm;
}
inline boost::log::formatting_ostream& operator<<(
@@ -284,21 +284,21 @@ inline boost::log::formatting_ostream& operator<<(
template < typename T >
inline boost::log::formatting_ostream& operator<<(
boost::log::formatting_ostream& _strm, std::vector< T > const& _value ) {
- _strm.stream() << cc::attention( "[" );
+ _strm.stream() << "[";
size_t idxWalk = 0;
int n = 0;
for ( T const& i : _value ) {
- _strm.stream() << ( n++ ? ( cc::debug( ", " ) ) : std::string( "" ) );
+ _strm.stream() << ( n++ ? ", " : "" );
std::stringstream ss;
ss << i;
- _strm << cc::notice( ss.str() );
+ _strm << ss.str();
if ( cc::_max_value_size_ != std::string::npos && idxWalk > cc::_max_value_size_ ) {
_strm << cc::trimmed_str( _value.size() );
break;
}
++idxWalk;
}
- _strm.stream() << cc::attention( "]" );
+ _strm.stream() << "]";
return _strm;
}
template < typename T >
@@ -307,28 +307,28 @@ inline boost::log::formatting_ostream& operator<<(
auto const& constValue = _value;
std::stringstream ss;
ss << constValue;
- _strm.stream() << cc::info( ss.str() );
+ _strm.stream() << ss.str();
return _strm;
}
template < typename T >
inline boost::log::formatting_ostream& operator<<(
boost::log::formatting_ostream& _strm, std::set< T > const& _value ) {
- _strm.stream() << cc::attention( "{" );
+ _strm.stream() << "{";
size_t idxWalk = 0;
int n = 0;
for ( T const& i : _value ) {
- _strm.stream() << ( n++ ? cc::debug( ", " ) : std::string( "" ) );
+ _strm.stream() << ( n++ ? ", " : std::string( "" ) );
std::stringstream ss;
ss << i;
- _strm << cc::notice( ss.str() );
+ _strm << ss.str();
if ( cc::_max_value_size_ != std::string::npos && idxWalk > cc::_max_value_size_ ) {
_strm << cc::trimmed_str( _value.size() );
break;
}
++idxWalk;
}
- _strm.stream() << cc::attention( "}" );
+ _strm.stream() << "}";
return _strm;
}
template < typename T >
@@ -337,28 +337,28 @@ inline boost::log::formatting_ostream& operator<<(
auto const& constValue = _value;
std::stringstream ss;
ss << constValue;
- _strm.stream() << cc::info( ss.str() );
+ _strm.stream() << ss.str();
return _strm;
}
template < typename T >
inline boost::log::formatting_ostream& operator<<(
boost::log::formatting_ostream& _strm, std::unordered_set< T > const& _value ) {
- _strm.stream() << cc::attention( "{" );
+ _strm.stream() << "{";
size_t idxWalk = 0;
int n = 0;
for ( T const& i : _value ) {
- _strm.stream() << ( n++ ? cc::debug( ", " ) : std::string( "" ) );
+ _strm.stream() << ( n++ ? ", " : std::string( "" ) );
std::stringstream ss;
ss << i;
- _strm << cc::notice( ss.str() );
+ _strm << ss.str();
if ( cc::_max_value_size_ != std::string::npos && idxWalk > cc::_max_value_size_ ) {
_strm << cc::trimmed_str( _value.size() );
break;
}
++idxWalk;
}
- _strm.stream() << cc::attention( "}" );
+ _strm.stream() << "}";
return _strm;
}
template < typename T >
@@ -367,30 +367,30 @@ inline boost::log::formatting_ostream& operator<<(
auto const& constValue = _value;
std::stringstream ss;
ss << constValue;
- _strm.stream() << cc::info( ss.str() );
+ _strm.stream() << ss.str();
return _strm;
}
template < typename T, typename U >
inline boost::log::formatting_ostream& operator<<(
boost::log::formatting_ostream& _strm, std::map< T, U > const& _value ) {
- _strm.stream() << cc::attention( "{" );
+ _strm.stream() << "{";
int n = 0;
for ( auto const& i : _value ) {
- _strm << ( n++ ? cc::debug( ", " ) : std::string( "" ) );
+ _strm << ( n++ ? ", " : std::string( "" ) );
{ // block
std::stringstream ss;
ss << i.first;
- _strm << cc::notice( ss.str() );
+ _strm << ss.str();
} // block
- _strm << ( n++ ? cc::debug( ", " ) : std::string( "" ) );
+ _strm << ( n++ ? ", " : std::string( "" ) );
{ // block
std::stringstream ss;
ss << i.second;
- _strm << cc::notice( ss.str() );
+ _strm << ss.str();
} // block
}
- _strm.stream() << cc::attention( "}" );
+ _strm.stream() << "}";
return _strm;
}
template < typename T, typename U >
@@ -399,30 +399,30 @@ inline boost::log::formatting_ostream& operator<<(
auto const& constValue = _value;
std::stringstream ss;
ss << constValue;
- _strm << cc::info( ss.str() );
+ _strm << ss.str();
return _strm;
}
template < typename T, typename U >
inline boost::log::formatting_ostream& operator<<(
boost::log::formatting_ostream& _strm, std::unordered_map< T, U > const& _value ) {
- _strm << cc::attention( "{" );
+ _strm << "{";
int n = 0;
for ( auto const& i : _value ) {
- _strm.stream() << ( n++ ? cc::debug( ", " ) : std::string( "" ) );
+ _strm.stream() << ( n++ ? ", " : std::string( "" ) );
{ // block
std::stringstream ss;
ss << i.first;
- _strm << cc::notice( ss.str() );
+ _strm << ss.str();
} // block
- _strm.stream() << ( n++ ? cc::debug( ", " ) : std::string( "" ) );
+ _strm.stream() << ( n++ ? ", " : std::string( "" ) );
{ // block
std::stringstream ss;
ss << i.second;
- _strm << cc::notice( ss.str() );
+ _strm << ss.str();
} // block
}
- _strm << cc::attention( "}" );
+ _strm << "}";
return _strm;
}
template < typename T, typename U >
@@ -431,18 +431,18 @@ inline boost::log::formatting_ostream& operator<<(
auto const& constValue = _value;
std::stringstream ss;
ss << constValue;
- _strm.stream() << cc::info( ss.str() );
+ _strm.stream() << ss.str();
return _strm;
}
template < typename T, typename U >
inline boost::log::formatting_ostream& operator<<(
boost::log::formatting_ostream& _strm, std::pair< T, U > const& _value ) {
- _strm.stream() << cc::attention( "(" );
+ _strm.stream() << "(";
_strm << _value.first;
- _strm.stream() << cc::debug( ", " );
+ _strm.stream() << ", ";
_strm << _value.second;
- _strm.stream() << cc::attention( ")" );
+ _strm.stream() << ")";
return _strm;
}
template < typename T, typename U >
@@ -451,7 +451,7 @@ inline boost::log::formatting_ostream& operator<<(
auto const& constValue = _value;
std::stringstream ss;
ss << constValue;
- _strm.stream() << cc::info( ss.str() );
+ _strm.stream() << ss.str();
return _strm;
}
} // namespace log
diff --git a/libdevcore/TrieDB.h b/libdevcore/TrieDB.h
index 32f5bcbfc..42f980ae8 100644
--- a/libdevcore/TrieDB.h
+++ b/libdevcore/TrieDB.h
@@ -573,7 +573,7 @@ void GenericTrieDB< DB >::iterator::next( NibbleSlice _key ) {
}
if ( !rlp.isList() || ( rlp.itemCount() != 2 && rlp.itemCount() != 17 ) ) {
#if ETH_PARANOIA
- cwarn << "BIG FAT ERROR. STATE TRIE CORRUPTED!!!!!";
+ cwarn << "ERROR. STATE TRIE CORRUPTED";
cwarn << b.rlp.size() << toHex( b.rlp );
cwarn << rlp;
auto c = rlp.itemCount();
@@ -687,7 +687,7 @@ void GenericTrieDB< DB >::iterator::next() {
}
if ( !( rlp.isList() && ( rlp.itemCount() == 2 || rlp.itemCount() == 17 ) ) ) {
#if ETH_PARANOIA
- cwarn << "BIG FAT ERROR. STATE TRIE CORRUPTED!!!!!";
+ cwarn << "ERROR. STATE TRIE CORRUPTED";
cwarn << b.rlp.size() << toHex( b.rlp );
cwarn << rlp;
auto c = rlp.itemCount();
@@ -894,7 +894,7 @@ void GenericTrieDB< DB >::mergeAtAux(
bool isRemovable = false;
if ( !r.isList() && !r.isEmpty() ) {
h256 h = _orig.toHash< h256 >();
- // std::cerr << "going down non-inline node " << h << "\n";
+
s = node( h );
r = RLP( s );
assert( !r.isNull() );
diff --git a/libdevcore/Worker.cpp b/libdevcore/Worker.cpp
index be3c6e4a1..f183d72c5 100644
--- a/libdevcore/Worker.cpp
+++ b/libdevcore/Worker.cpp
@@ -67,7 +67,7 @@ void Worker::startWorking() {
cwarn << "Exception thrown in Worker thread: " << _e.what();
} catch ( ... ) {
cerror << "CRITICAL unknown exception thrown in Worker thread";
- cerror << "\n" << skutils::signal::generate_stack_trace() << "\n" << std::endl;
+ cerror << "\n" << skutils::signal::generate_stack_trace() << "\n";
}
// ex = WorkerState::Stopping;
diff --git a/libdevcore/microprofile.cpp b/libdevcore/microprofile.cpp
index b54078a8b..18b9cebfd 100644
--- a/libdevcore/microprofile.cpp
+++ b/libdevcore/microprofile.cpp
@@ -6922,7 +6922,7 @@ void MicroProfileGpuShutdown() {
#include "microprofile_xboxone.h"
#endif
-#endif //#if MICROPROFILE_ENABLED
+#endif // #if MICROPROFILE_ENABLED
#include "microprofile_html.h"
diff --git a/libdevcore/system_usage.cpp b/libdevcore/system_usage.cpp
index fb5c4318b..b22e10d21 100644
--- a/libdevcore/system_usage.cpp
+++ b/libdevcore/system_usage.cpp
@@ -5,7 +5,6 @@
#include
#include "sys/times.h"
-//#include "sys/vtimes.h"
int parseLine( char* line ) {
// This assumes that a digit will be found and the line ends in " Kb".
diff --git a/libethcore/BlockHeader.cpp b/libethcore/BlockHeader.cpp
index ecf76d5c3..433d32bb5 100644
--- a/libethcore/BlockHeader.cpp
+++ b/libethcore/BlockHeader.cpp
@@ -229,7 +229,7 @@ void BlockHeader::verify( Strictness _s, BlockHeader const& _parent, bytesConstR
txList.itemCount(), [&]( unsigned i ) { return rlp( i ); },
[&]( unsigned i ) { return txList[i].data().toBytes(); } );
- LOG( m_logger ) << "Expected trie root: " << toString( expectedRoot );
+ LOG( m_loggerDebug ) << "Expected trie root: " << toString( expectedRoot );
if ( m_transactionsRoot != expectedRoot ) {
MemoryDB tm;
GenericTrieDB< MemoryDB > transactionsTrie( &tm );
@@ -244,19 +244,19 @@ void BlockHeader::verify( Strictness _s, BlockHeader const& _parent, bytesConstR
transactionsTrie.insert( &k.out(), txList[i].data() );
txs.push_back( txList[i].data() );
- cdebug << toHex( k.out() ) << toHex( txList[i].data() );
+ LOG( m_loggerDebug ) << toHex( k.out() ) << toHex( txList[i].data() );
}
- cdebug << "trieRootOver" << expectedRoot;
- cdebug << "orderedTrieRoot" << orderedTrieRoot( txs );
- cdebug << "TrieDB" << transactionsTrie.root();
- cdebug << "Contents:";
+ LOG( m_loggerDebug ) << "trieRootOver" << expectedRoot;
+ LOG( m_loggerDebug ) << "orderedTrieRoot" << orderedTrieRoot( txs );
+ LOG( m_loggerDebug ) << "TrieDB" << transactionsTrie.root();
+ LOG( m_loggerDebug ) << "Contents:";
for ( auto const& t : txs )
- cdebug << toHex( t );
+ LOG( m_loggerDebug ) << toHex( t );
BOOST_THROW_EXCEPTION( InvalidTransactionsRoot()
<< Hash256RequirementError( expectedRoot, m_transactionsRoot ) );
}
- LOG( m_logger ) << "Expected uncle hash: " << toString( sha3( root[2].data() ) );
+ LOG( m_loggerDebug ) << "Expected uncle hash: " << toString( sha3( root[2].data() ) );
if ( m_sha3Uncles != sha3( root[2].data() ) )
BOOST_THROW_EXCEPTION( InvalidUnclesHash() << Hash256RequirementError(
sha3( root[2].data() ), m_sha3Uncles ) );
diff --git a/libethcore/BlockHeader.h b/libethcore/BlockHeader.h
index 749b53cae..1b21a7e03 100644
--- a/libethcore/BlockHeader.h
+++ b/libethcore/BlockHeader.h
@@ -246,7 +246,7 @@ class BlockHeader {
mutable h256 m_hashWithout; ///< (Memoised) SHA3 hash of the block header without seal.
mutable Mutex m_hashLock; ///< A lock for both m_hash and m_hashWithout.
- mutable Logger m_logger{ createLogger( VerbosityDebug, "blockhdr" ) };
+ mutable Logger m_loggerDebug{ createLogger( VerbosityDebug, "blockhdr" ) };
Counter< BlockHeader > c;
diff --git a/libethcore/Common.cpp b/libethcore/Common.cpp
index cb889a51a..65e9b38b9 100644
--- a/libethcore/Common.cpp
+++ b/libethcore/Common.cpp
@@ -134,9 +134,9 @@ bytes getMultitransactionCallData() {
}
static void badBlockInfo( BlockHeader const& _bi, string const& _err ) {
- string const c_line = cc::debug( string( 80, ' ' ) );
- string const c_border = cc::debug( string( 2, ' ' ) );
- string const c_space = cc::debug( string( 76, ' ' ) ) + c_border;
+ string const c_line = string( 80, ' ' );
+ string const c_border = string( 2, ' ' );
+ string const c_space = string( 76, ' ' ) + c_border;
stringstream ss;
ss << c_line << "\n";
ss << c_space << "\n";
diff --git a/libethereum/Block.cpp b/libethereum/Block.cpp
index 848acf2ee..da0048f19 100644
--- a/libethereum/Block.cpp
+++ b/libethereum/Block.cpp
@@ -86,7 +86,7 @@ Block::Block( const BlockChain& _bc, h256 const& _hash, const State& _state, Bas
if ( !_bc.isKnown( _hash ) ) {
// Might be worth throwing here.
- cwarn << "Invalid block given for state population: " << _hash;
+ LOG( m_loggerWarning ) << "Invalid block given for state population: " << _hash;
BOOST_THROW_EXCEPTION( BlockNotFound() << errinfo_target( _hash ) );
}
@@ -207,7 +207,7 @@ PopulationStatistics Block::populateFromChain(
if ( !_bc.isKnown( _h ) ) {
// Might be worth throwing here.
- cwarn << "Invalid block given for state population: " << _h;
+ LOG( m_loggerWarning ) << "Invalid block given for state population: " << _h;
BOOST_THROW_EXCEPTION( BlockNotFound() << errinfo_target( _h ) );
}
@@ -273,14 +273,14 @@ bool Block::sync( BlockChain const& _bc, h256 const& _block, BlockHeader const&
break;
} catch ( Exception const& _e ) {
// TODO: Slightly nicer handling? :-)
- cerr << "ERROR: Corrupt block-chain! Delete your block-chain DB and restart."
- << endl;
- cerr << diagnostic_information( _e ) << endl;
+ LOG( m_loggerError )
+ << "ERROR: Corrupt block-chain! Delete your block-chain DB and restart.";
+ LOG( m_loggerError ) << diagnostic_information( _e );
} catch ( std::exception const& _e ) {
// TODO: Slightly nicer handling? :-)
- cerr << "ERROR: Corrupt block-chain! Delete your block-chain DB and restart."
- << endl;
- cerr << _e.what() << endl;
+ LOG( m_loggerError )
+ << "ERROR: Corrupt block-chain! Delete your block-chain DB and restart.";
+ LOG( m_loggerError ) << _e.what();
}
}
#endif
@@ -298,14 +298,6 @@ bool Block::sync( BlockChain const& _bc, h256 const& _block, BlockHeader const&
// Find most recent state dump and replay what's left.
// (Most recent state dump might end up being genesis.)
- // if (m_state.db().lookup(bi.stateRoot()).empty()) // TODO: API in State for this?
- // {
- // cwarn << "Unable to sync to" << bi.hash() << "; state root" << bi.stateRoot()
- // << "not found in database.";
- // cwarn << "Database corrupt: contains block without stateRoot:" << bi;
- // cwarn << "Try rescuing the database by running: eth --rescue";
- // BOOST_THROW_EXCEPTION(InvalidStateRoot() << errinfo_target(bi.stateRoot()));
- // }
m_previousBlock = bi;
resetCurrent();
ret = true;
@@ -337,8 +329,9 @@ bool Block::sync( BlockChain const& _bc, h256 const& _block, BlockHeader const&
}
} catch ( ... ) {
// TODO: Slightly nicer handling? :-)
- cerr << "ERROR: Corrupt block-chain! Delete your block-chain DB and restart." << endl;
- cerr << boost::current_exception_diagnostic_information() << endl;
+ LOG( m_loggerError )
+ << "ERROR: Corrupt block-chain! Delete your block-chain DB and restart.";
+ LOG( m_loggerError ) << boost::current_exception_diagnostic_information();
exit( 1 );
}
@@ -388,7 +381,7 @@ pair< TransactionReceipts, bool > Block::sync(
++goodTxs;
// cnote << "TX took:" << t.elapsed() * 1000;
} else if ( t.gasPrice() < _gp.ask( *this ) * 9 / 10 ) {
- LOG( m_logger )
+ LOG( m_loggerDebug )
<< t.sha3() << " Dropping El Cheapo transaction (<90% of ask price)";
_tq.drop( t.sha3() );
}
@@ -398,11 +391,12 @@ pair< TransactionReceipts, bool > Block::sync(
if ( req > got ) {
// too old
- LOG( m_logger ) << t.sha3() << " Dropping old transaction (nonce too low)";
+ LOG( m_loggerDebug )
+ << t.sha3() << " Dropping old transaction (nonce too low)";
_tq.drop( t.sha3() );
} else if ( got > req + _tq.waiting( t.sender() ) ) {
// too new
- LOG( m_logger )
+ LOG( m_loggerDebug )
<< t.sha3() << " Dropping new transaction (too many nonces ahead)";
_tq.drop( t.sha3() );
} else
@@ -410,16 +404,17 @@ pair< TransactionReceipts, bool > Block::sync(
} catch ( BlockGasLimitReached const& e ) {
bigint const& got = *boost::get_error_info< errinfo_got >( e );
if ( got > m_currentBlock.gasLimit() ) {
- LOG( m_logger )
+ LOG( m_loggerDebug )
<< t.sha3()
<< " Dropping over-gassy transaction (gas > block's gas limit)";
- LOG( m_logger )
+ LOG( m_loggerDebug )
<< "got: " << got << " required: " << m_currentBlock.gasLimit();
_tq.drop( t.sha3() );
} else {
- LOG( m_logger ) << t.sha3()
- << " Temporarily no gas left in current block (txs gas > "
- "block's gas limit)";
+ LOG( m_loggerDebug )
+ << t.sha3()
+ << " Temporarily no gas left in current block (txs gas > "
+ "block's gas limit)";
//_tq.drop(t.sha3());
// Temporarily no gas left in current block.
// OPTIMISE: could note this and then we don't evaluate until a block that
@@ -427,14 +422,15 @@ pair< TransactionReceipts, bool > Block::sync(
}
} catch ( Exception const& _e ) {
// Something else went wrong - drop it.
- LOG( m_logger )
+ LOG( m_loggerDebug )
<< t.sha3()
<< " Dropping invalid transaction: " << diagnostic_information( _e );
_tq.drop( t.sha3() );
} catch ( std::exception const& ) {
// Something else went wrong - drop it.
_tq.drop( t.sha3() );
- cwarn << t.sha3() << "Transaction caused low-level exception :(";
+ LOG( m_loggerWarning )
+ << t.sha3() << " Transaction caused low-level exception :(";
}
}
if ( chrono::steady_clock::now() > deadline ) {
@@ -519,8 +515,9 @@ tuple< TransactionReceipts, unsigned > Block::syncEveryone( BlockChain const& _b
if ( !tr.isInvalid() && !tr.hasExternalGas() && tr.gasPrice() < _gasPrice ) {
- LOG( m_logger ) << "Transaction " << tr.sha3() << " WouldNotBeInBlock: gasPrice "
- << tr.gasPrice() << " < " << _gasPrice;
+ LOG( m_loggerDebug )
+ << "Transaction " << tr.sha3() << " WouldNotBeInBlock: gasPrice "
+ << tr.gasPrice() << " < " << _gasPrice;
if ( SkipInvalidTransactionsPatch::isEnabledInWorkingBlock() ) {
// Add to the user-originated transactions that we've executed.
@@ -562,7 +559,7 @@ tuple< TransactionReceipts, unsigned > Block::syncEveryone( BlockChain const& _b
ex << errinfo_transactionIndex( i );
// throw;
// just ignore invalid transactions
- clog( VerbosityError, "block" ) << "FAILED transaction after consensus! " << ex.what();
+ LOG( m_loggerError ) << "FAILED transaction after consensus! " << ex.what();
}
}
@@ -637,8 +634,8 @@ u256 Block::enactOn( VerifiedBlockRef const& _block, BlockChain const& _bc ) {
#if ETH_TIMED_ENACTMENTS
enactment = t.elapsed();
if ( populateVerify + populateGrand + syncReset + enactment > 0.5 )
- LOG( m_logger ) << "popVer/popGrand/syncReset/enactment = " << populateVerify << " / "
- << populateGrand << " / " << syncReset << " / " << enactment;
+ LOG( m_loggerDebug ) << "popVer/popGrand/syncReset/enactment = " << populateVerify << " / "
+ << populateGrand << " / " << syncReset << " / " << enactment;
#endif
return ret;
}
@@ -695,7 +692,7 @@ u256 Block::enact( VerifiedBlockRef const& _block, BlockChain const& _bc ) {
// ex << errinfo_vmtrace(vmTrace(_block.block, _bc, ImportRequirements::None));
for ( auto const& receipt : m_receipts ) {
if ( !receipt.hasStatusCode() ) {
- cwarn << "Skale does not support state root in receipt";
+ LOG( m_loggerWarning ) << "Skale does not support state root in receipt";
break;
}
}
@@ -922,17 +919,17 @@ ExecutionResult Block::execute( LastBlockHashesFace const& _lh, Transaction cons
// use fake receipt created above if execution throws!!
} catch ( const TransactionException& ex ) {
// shoul not happen as exception in execute() means that tx should not be in block
- cerror << DETAILED_ERROR;
+ LOG( m_loggerError ) << DETAILED_ERROR;
assert( false );
} catch ( const std::exception& ex ) {
h256 sha = _t.hasSignature() ? _t.sha3() : _t.sha3( WithoutSignature );
- LOG( m_logger ) << "Transaction " << sha << " WouldNotBeInBlock: " << ex.what();
+ LOG( m_loggerDebug ) << "Transaction " << sha << " WouldNotBeInBlock: " << ex.what();
if ( _p != Permanence::Reverted ) // if it is not call
_p = Permanence::CommittedWithoutState;
resultReceipt.first.excepted = TransactionException::WouldNotBeInBlock;
} catch ( ... ) {
h256 sha = _t.hasSignature() ? _t.sha3() : _t.sha3( WithoutSignature );
- LOG( m_logger ) << "Transaction " << sha << " WouldNotBeInBlock: ...";
+ LOG( m_loggerDebug ) << "Transaction " << sha << " WouldNotBeInBlock: ...";
if ( _p != Permanence::Reverted ) // if it is not call
_p = Permanence::CommittedWithoutState;
resultReceipt.first.excepted = TransactionException::WouldNotBeInBlock;
@@ -1085,9 +1082,9 @@ void Block::commitToSeal(
// m_state.commit(removeEmptyAccounts ? State::CommitBehaviour::RemoveEmptyAccounts :
// State::CommitBehaviour::KeepEmptyAccounts);
- LOG( m_loggerDetailed ) << cc::debug( "Post-reward stateRoot: " )
- << cc::notice( "is not calculated in Skale state" );
- LOG( m_loggerDetailed ) << m_state;
+ LOG( m_loggerTrace ) << "Post-reward stateRoot: "
+ << "is not calculated in Skale state";
+ LOG( m_loggerTrace ) << m_state;
m_currentBlock.setLogBloom( logBloom() );
m_currentBlock.setGasUsed( gasUsed() );
@@ -1160,30 +1157,15 @@ LogBloom Block::logBloom() const {
void Block::cleanup() {
MICROPROFILE_SCOPEI( "Block", "cleanup", MP_BEIGE );
- // Commit the new trie to disk.
- // LOG(m_logger) << "Committing to disk: stateRoot " << m_currentBlock.stateRoot() << " = "
- // << rootHash() << " = " << toHex(asBytes(db().lookup(globalRoot())));
-
- // try
- // {
- // EnforceRefs er(db(), true);
- // globalRoot();
- // }
- // catch (BadRoot const&)
- // {
- // cwarn << "Trie corrupt! :-(";
- // throw;
- // }
-
m_state.commit(); // TODO: State API for this?
- LOG( m_logger ) << "Committed: stateRoot is not calculated in Skale state";
+ LOG( m_loggerDebug ) << "Committed: stateRoot is not calculated in Skale state";
m_previousBlock = m_currentBlock;
sealEngine()->populateFromParent( m_currentBlock, m_previousBlock );
- LOG( m_logger ) << "finalising enactment. current -> previous, hash is "
- << m_previousBlock.hash();
+ LOG( m_loggerDebug ) << "finalising enactment. current -> previous, hash is "
+ << m_previousBlock.hash();
resetCurrent();
}
diff --git a/libethereum/Block.h b/libethereum/Block.h
index 249f9932d..d33a86b0b 100644
--- a/libethereum/Block.h
+++ b/libethereum/Block.h
@@ -353,8 +353,10 @@ class Block {
SealEngineFace* m_sealEngine = nullptr; ///< The chain's seal engine.
- Logger m_logger{ createLogger( VerbosityDebug, "block" ) };
- Logger m_loggerDetailed{ createLogger( VerbosityTrace, "block" ) };
+ Logger m_loggerDebug{ createLogger( VerbosityDebug, "block" ) };
+ Logger m_loggerTrace{ createLogger( VerbosityTrace, "block" ) };
+ Logger m_loggerWarning{ createLogger( VerbosityWarning, "block" ) };
+ Logger m_loggerError{ createLogger( VerbosityError, "block" ) };
Counter< Block > c;
;
diff --git a/libethereum/BlockChain.cpp b/libethereum/BlockChain.cpp
index 788c5454d..5b1d90ef1 100644
--- a/libethereum/BlockChain.cpp
+++ b/libethereum/BlockChain.cpp
@@ -82,7 +82,7 @@ std::ostream& dev::eth::operator<<( std::ostream& _out, BlockChain const& _bc )
try {
BlockHeader d( bytesConstRef{ _value } );
_out << toHex( key ) << ": " << d.number() << " @ " << d.parentHash()
- << ( cmp == key ? " BEST" : "" ) << std::endl;
+ << ( cmp == key ? " BEST" : "" ) << "\n";
} catch ( ... ) {
cwarn << "Invalid DB entry:" << toHex( key ) << " -> "
<< toHex( bytesConstRef( _value ) );
@@ -227,7 +227,7 @@ void BlockChain::open( fs::path const& _path, bool _applyPatches, WithExisting _
DEV_IGNORE_EXCEPTIONS( fs::permissions( extrasPath, fs::owner_all ) );
if ( _we == WithExisting::Kill ) {
- cnote << "Killing blockchain & extras database (WithExisting::Kill).";
+ LOG( m_loggerInfo ) << "Killing blockchain & extras database (WithExisting::Kill).";
fs::remove_all( chainPath / fs::path( "blocks_and_extras" ) );
}
@@ -251,14 +251,16 @@ void BlockChain::open( fs::path const& _path, bool _applyPatches, WithExisting _
throw;
if ( fs::space( chainPath / fs::path( "blocks_and_extras" ) ).available < 1024 ) {
- cwarn << "Not enough available space found on hard drive. Please free some up and then "
- "re-run. Bailing.";
+ LOG( m_loggerWarning )
+ << "Not enough available space found on hard drive. Please free some up and then "
+ "re-run. Bailing.";
BOOST_THROW_EXCEPTION( NotEnoughAvailableSpace() );
} else {
- cwarn << "Database " << ( chainPath / fs::path( "blocks_and_extras" ) ) << "or "
- << ( extrasPath / fs::path( "extras" ) )
- << "already open. You appear to have another instance of ethereum running. "
- "Bailing.";
+ LOG( m_loggerWarning )
+ << "Database " << ( chainPath / fs::path( "blocks_and_extras" ) ) << "or "
+ << ( extrasPath / fs::path( "extras" ) )
+ << "already open. You appear to have another instance of ethereum running. "
+ "Bailing.";
BOOST_THROW_EXCEPTION( DatabaseAlreadyOpen() );
}
}
@@ -304,8 +306,8 @@ void BlockChain::open( fs::path const& _path, bool _applyPatches, WithExisting _
m_lastBlockNumber = number( m_lastBlockHash );
- cdebug << cc::info( "Opened blockchain DB. Latest: " ) << currentHash() << ' '
- << m_lastBlockNumber;
+ LOG( m_loggerDebug ) << "Opened blockchain DB. Latest: " << currentHash() << ' '
+ << m_lastBlockNumber;
// dump_blocks_and_extras_db( *this, 0 );
@@ -320,7 +322,7 @@ void BlockChain::reopen( ChainParams const& _p, bool _applyPatches, WithExisting
}
void BlockChain::close() {
- ctrace << "Closing blockchain DB";
+ LOG( m_loggerTrace ) << "Closing blockchain DB";
// Not thread safe...
m_extrasDB = nullptr;
m_blocksDB = nullptr;
@@ -430,27 +432,29 @@ tuple< ImportRoute, bool, unsigned > BlockChain::sync(
std::back_inserter( goodTransactions ) );
++count;
} catch ( dev::eth::AlreadyHaveBlock const& ) {
- cwarn << "ODD: Import queue contains already imported block";
+ LOG( m_loggerWarning ) << "ODD: Import queue contains already imported block";
continue;
} catch ( dev::eth::UnknownParent const& ) {
- cwarn << "ODD: Import queue contains block with unknown parent."; // <<
- // LogTag::Error
+ LOG( m_loggerWarning )
+ << "ODD: Import queue contains block with unknown parent."; // <<
+ // LogTag::Error
// <<
// boost::current_exception_diagnostic_information();
// NOTE: don't reimport since the queue should guarantee everything in the right
// order. Can't continue - chain bad.
badBlocks.push_back( block.verified.info.hash() );
} catch ( dev::eth::FutureTime const& ) {
- cwarn << "ODD: Import queue contains a block with future time.";
+ LOG( m_loggerWarning ) << "ODD: Import queue contains a block with future time.";
this_thread::sleep_for( chrono::seconds( 1 ) );
continue;
} catch ( dev::eth::TransientError const& ) {
this_thread::sleep_for( chrono::milliseconds( 100 ) );
continue;
} catch ( Exception& ex ) {
- cerror << "Exception while importing block. Someone (Jeff? That you?) seems to be "
- << "giving us dodgy blocks !";
- cerror << diagnostic_information( ex );
+ LOG( m_loggerError )
+ << "Exception while importing block. Someone (Jeff? That you?) seems to be "
+ << "giving us dodgy blocks !";
+ LOG( m_loggerError ) << diagnostic_information( ex );
if ( m_onBad )
m_onBad( ex );
// NOTE: don't reimport since the queue should guarantee everything in the right
@@ -486,8 +490,7 @@ ImportRoute BlockChain::import( bytes const& _block, State& _state, bool _mustBe
// VERIFY: populates from the block and checks the block is internally coherent.
VerifiedBlockRef const block =
verifyBlock( &_block, m_onBad, ImportRequirements::OutOfOrderChecks );
- // cerr << "Import block #" << block.info.number() << " with hash = " << block.info.hash() <<
- // endl;
+
return import( block, _state, _mustBeNew );
}
@@ -504,7 +507,8 @@ ImportRoute BlockChain::import( VerifiedBlockRef const& _block, State& _state, b
// Work out its number as the parent's number + 1
if ( !isKnown( _block.info.parentHash(), false ) ) // doesn't have to be current.
{
- LOG( m_logger ) << _block.info.hash() << " : Unknown parent " << _block.info.parentHash();
+ LOG( m_loggerDebug ) << _block.info.hash() << " : Unknown parent "
+ << _block.info.parentHash();
// We don't know the parent (yet) - discard for now. It'll get resent to us if we find out
// about its ancestry later on.
BOOST_THROW_EXCEPTION( UnknownParent() << errinfo_hash256( _block.info.parentHash() ) );
@@ -521,7 +525,7 @@ ImportRoute BlockChain::import( VerifiedBlockRef const& _block, State& _state, b
LOG( m_loggerError ) << "Block: " << BlockHeader( &parentBlock );
LOG( m_loggerError ) << "RLP: " << RLP( parentBlock );
LOG( m_loggerError ) << "DATABASE CORRUPTION: CRITICAL FAILURE";
- cerror << DETAILED_ERROR;
+ LOG( m_loggerError ) << DETAILED_ERROR;
exit( -1 );
}
@@ -530,7 +534,7 @@ ImportRoute BlockChain::import( VerifiedBlockRef const& _block, State& _state, b
// Verify parent-critical parts
verifyBlock( _block.block, m_onBad, ImportRequirements::InOrderChecks );
- LOG( m_loggerDetail ) << "Attempting import of " << _block.info.hash() << " ...";
+ LOG( m_loggerTrace ) << "Attempting import of " << _block.info.hash() << " ...";
performanceLogger.onStageFinished( "preliminaryChecks" );
@@ -559,9 +563,9 @@ ImportRoute BlockChain::import( VerifiedBlockRef const& _block, State& _state, b
checkConsistency();
#endif // ETH_PARANOIA
} catch ( BadRoot& ex ) {
- cwarn << "*** BadRoot error! Trying to import" << _block.info.hash() << "needed root"
- << *boost::get_error_info< errinfo_hash256 >( ex );
- cwarn << _block.info;
+ LOG( m_loggerWarning ) << "*** BadRoot error! Trying to import" << _block.info.hash()
+ << "needed root" << *boost::get_error_info< errinfo_hash256 >( ex );
+ LOG( m_loggerWarning ) << _block.info;
// Attempt in import later.
BOOST_THROW_EXCEPTION( TransientError() );
} catch ( Exception& ex ) {
@@ -625,7 +629,7 @@ ImportRoute BlockChain::import( const Block& _block ) {
void BlockChain::checkBlockIsNew( VerifiedBlockRef const& _block ) const {
if ( isKnown( _block.info.hash() ) ) {
- LOG( m_logger ) << _block.info.hash() << " : Not new.";
+ LOG( m_loggerDebug ) << _block.info.hash() << " : Not new.";
BOOST_THROW_EXCEPTION( AlreadyHaveBlock() << errinfo_block( _block.block.toBytes() ) );
}
}
@@ -633,8 +637,8 @@ void BlockChain::checkBlockIsNew( VerifiedBlockRef const& _block ) const {
void BlockChain::checkBlockTimestamp( BlockHeader const& _header ) const {
// Check it's not crazy
if ( _header.timestamp() > utcTime() && !m_params.allowFutureBlocks ) {
- LOG( m_loggerDetail ) << _header.hash() << " : Future time " << _header.timestamp()
- << " (now at " << utcTime() << ")";
+ LOG( m_loggerTrace ) << _header.hash() << " : Future time " << _header.timestamp()
+ << " (now at " << utcTime() << ")";
// Block has a timestamp in the future. This is no good.
BOOST_THROW_EXCEPTION( FutureTime() );
}
@@ -649,8 +653,7 @@ bool BlockChain::rotateDBIfNeeded( uint64_t pieceUsageBytes ) {
true :
false;
if ( isRotate ) {
- clog( VerbosityTrace, "BlockChain" )
- << ( cc::debug( "Will perform " ) + cc::notice( "storage-based block rotation" ) );
+ LOG( m_loggerTrace ) << "Will perform storage-based block rotation";
}
}
if ( clockLastDbRotation_ == 0 )
@@ -660,8 +663,7 @@ bool BlockChain::rotateDBIfNeeded( uint64_t pieceUsageBytes ) {
clock_t clockNow = clock();
if ( ( clockNow - clockLastDbRotation_ ) >= clockDbRotationPeriod_ ) {
isRotate = true;
- clog( VerbosityTrace, "BlockChain" )
- << ( cc::debug( "Will perform " ) + cc::notice( "timer-based block rotation" ) );
+ LOG( m_loggerTrace ) << "Will perform timer-based block rotation";
}
}
if ( !isRotate )
@@ -839,7 +841,7 @@ void BlockChain::recomputeExistingOccupiedSpaceForBlockRotation() try {
size_t blocksBatchSize = 0;
size_t extrasBatchSize = 0;
- LOG( m_logger ) << "Recomputing old blocks sizes...";
+ LOG( m_loggerDebug ) << "Recomputing old blocks sizes...";
// HACK 34 is key size + extra size + db prefix (blocks or extras)
for ( unsigned i = 1; i <= number; ++i ) {
@@ -896,8 +898,8 @@ void BlockChain::recomputeExistingOccupiedSpaceForBlockRotation() try {
// HACK Since blooms are often re-used, let's adjust size for them
extrasBatchSize +=
( 4147 + 34 ) / 16 + ( 4147 + 34 ) / 256 + 2; // 1+1/16th big bloom per block
- LOG( m_loggerDetail ) << "Computed block " << i
- << " DB usage = " << blocksBatchSize + extrasBatchSize;
+ LOG( m_loggerTrace ) << "Computed block " << i
+ << " DB usage = " << blocksBatchSize + extrasBatchSize;
} // for block
uint64_t pieceUsageBytes = 0;
@@ -905,8 +907,8 @@ void BlockChain::recomputeExistingOccupiedSpaceForBlockRotation() try {
pieceUsageBytes = std::stoull( this->m_db->lookup( ( db::Slice ) "pieceUsageBytes" ) );
}
- LOG( m_logger ) << "pieceUsageBytes from DB = " << pieceUsageBytes
- << " computed = " << blocksBatchSize + extrasBatchSize;
+ LOG( m_loggerDebug ) << "pieceUsageBytes from DB = " << pieceUsageBytes
+ << " computed = " << blocksBatchSize + extrasBatchSize;
if ( pieceUsageBytes == 0 ) {
pieceUsageBytes = blocksBatchSize + extrasBatchSize;
@@ -967,11 +969,10 @@ ImportRoute BlockChain::insertBlockAndExtras( VerifiedBlockRef const& _block,
newLastBlockHash = _block.info.hash();
newLastBlockNumber = ( unsigned ) _block.info.number();
- LOG( m_loggerDetail ) << cc::debug( " Imported and best " ) << _totalDifficulty
- << cc::debug( " (" ) << cc::warn( "#" )
- << cc::num10( _block.info.number() ) << cc::debug( "). Has " )
- << ( details( _block.info.parentHash() ).children.size() - 1 )
- << cc::debug( " siblings." );
+ LOG( m_loggerTrace ) << " Imported and best " << _totalDifficulty << " ("
+ << "#" << _block.info.number() << "). Has "
+ << ( details( _block.info.parentHash() ).children.size() - 1 )
+ << " siblings.";
#if ETH_PARANOIA
if ( isKnown( _block.info.hash() ) && !details( _block.info.hash() ) ) {
@@ -1009,12 +1010,13 @@ ImportRoute BlockChain::insertBlockAndExtras( VerifiedBlockRef const& _block,
db::Slice( ( char const* ) &m_lastBlockHash, 32 ) );
m_db->commit( "insertBlockAndExtras" );
} catch ( boost::exception const& ex ) {
- cwarn << "Error writing to blocks_and_extras database: "
- << boost::diagnostic_information( ex );
- cwarn << "Put" << toHex( bytesConstRef( db::Slice( "best" ) ) ) << "=>"
- << toHex( bytesConstRef( db::Slice( ( char const* ) &m_lastBlockHash, 32 ) ) );
- cwarn << "Fail writing to blocks_and_extras database. Bombing out.";
- cerror << DETAILED_ERROR;
+ LOG( m_loggerWarning ) << "Error writing to blocks_and_extras database: "
+ << boost::diagnostic_information( ex );
+ LOG( m_loggerWarning )
+ << "Put" << toHex( bytesConstRef( db::Slice( "best" ) ) ) << "=>"
+ << toHex( bytesConstRef( db::Slice( ( char const* ) &m_lastBlockHash, 32 ) ) );
+ LOG( m_loggerWarning ) << "Fail writing to blocks_and_extras database. Bombing out.";
+ LOG( m_loggerError ) << DETAILED_ERROR;
exit( -1 );
}
}
@@ -1042,9 +1044,8 @@ ImportRoute BlockChain::insertBlockAndExtras( VerifiedBlockRef const& _block,
h256s fresh;
fresh.push_back( tbi.hash() );
- clog( VerbosityTrace, "BlockChain" )
- << cc::debug( "Insterted block with " ) << _block.transactions.size()
- << cc::debug( " transactions" );
+ LOG( m_loggerTrace ) << "Inserted block with " << _block.transactions.size()
+ << " transactions";
return ImportRoute{ dead, fresh, _block.transactions };
}
@@ -1089,7 +1090,7 @@ void BlockChain::clearBlockBlooms( unsigned _begin, unsigned _end ) {
}
void BlockChain::rescue( State const& /*_state*/ ) {
- clog( VerbosityInfo, "BlockChain" ) << "Rescuing database...";
+ LOG( m_loggerInfo ) << "Rescuing database...";
throw std::logic_error( "Rescueing is not implemented" );
unsigned u = 1;
@@ -1104,35 +1105,32 @@ void BlockChain::rescue( State const& /*_state*/ ) {
}
}
unsigned l = u / 2;
- clog( VerbosityInfo, "BlockChain" ) << cc::debug( "Finding last likely block number..." );
+ LOG( m_loggerTrace ) << "Finding last likely block number...";
while ( u - l > 1 ) {
unsigned m = ( u + l ) / 2;
- clog( VerbosityInfo, "BlockChain" ) << " " << m << flush;
+ LOG( m_loggerTrace ) << " " << m << flush;
if ( isKnown( numberHash( m ) ) )
l = m;
else
u = m;
}
- clog( VerbosityInfo, "BlockChain" ) << " lowest is " << l;
+ LOG( m_loggerTrace ) << " lowest is " << l;
for ( ; l > 0; --l ) {
h256 h = numberHash( l );
- clog( VerbosityInfo, "BlockChain" )
- << cc::debug( "Checking validity of " ) << l << cc::debug( " (" ) << h
- << cc::debug( ")..." ) << flush;
+ LOG( m_loggerTrace ) << "Checking validity of " << l << " (" << h << ")..." << flush;
try {
- clog( VerbosityInfo, "BlockChain" ) << cc::debug( "block..." ) << flush;
+ LOG( m_loggerTrace ) << "block..." << flush;
BlockHeader bi( block( h ) );
- clog( VerbosityInfo, "BlockChain" ) << cc::debug( "extras..." ) << flush;
+ LOG( m_loggerTrace ) << "extras..." << flush;
details( h );
- clog( VerbosityInfo, "BlockChain" ) << cc::debug( "state..." ) << flush;
- clog( VerbosityInfo, "BlockChain" )
- << cc::warn( "STATE VALIDITY CHECK IS NOT SUPPORTED" ) << flush;
+ LOG( m_loggerTrace ) << "state..." << flush;
+ LOG( m_loggerTrace ) << "STATE VALIDITY CHECK IS NOT SUPPORTED" << flush;
// if (_db.exists(bi.stateRoot()))
// break;
} catch ( ... ) {
}
}
- clog( VerbosityInfo, "BlockChain" ) << "OK.";
+ LOG( m_loggerTrace ) << "OK.";
rewind( l );
}
@@ -1147,11 +1145,13 @@ void BlockChain::rewind( unsigned _newHead ) {
m_extrasDB->insert(
db::Slice( "best" ), db::Slice( ( char const* ) &m_lastBlockHash, 32 ) );
} catch ( boost::exception const& ex ) {
- cwarn << "Error writing to extras database: " << boost::diagnostic_information( ex );
- cwarn << "Put" << toHex( bytesConstRef( db::Slice( "best" ) ) ) << "=>"
- << toHex( bytesConstRef( db::Slice( ( char const* ) &m_lastBlockHash, 32 ) ) );
- cwarn << "Fail writing to extras database. Bombing out.";
- cerror << DETAILED_ERROR;
+ LOG( m_loggerWarning )
+ << "Error writing to extras database: " << boost::diagnostic_information( ex );
+ LOG( m_loggerWarning )
+ << "Put" << toHex( bytesConstRef( db::Slice( "best" ) ) ) << "=>"
+ << toHex( bytesConstRef( db::Slice( ( char const* ) &m_lastBlockHash, 32 ) ) );
+ LOG( m_loggerWarning ) << "Fail writing to extras database. Bombing out.";
+ LOG( m_loggerError ) << DETAILED_ERROR;
exit( -1 );
}
noteCanonChanged();
@@ -1332,7 +1332,7 @@ void BlockChain::garbageCollect( bool _force ) {
case ExtraBlockHash: {
// m_cacheUsage should not contain ExtraBlockHash elements currently. See the
// second noteUsed() in BlockChain.h, which is a no-op.
- cerror << DETAILED_ERROR;
+ LOG( m_loggerError ) << DETAILED_ERROR;
assert( false );
break;
}
@@ -1438,11 +1438,13 @@ void BlockChain::checkConsistency() {
{
auto dp = details( p );
if ( asserts( contains( dp.children, h ) ) )
- cnote << "Apparently the database is corrupt. Not much we can do at this "
- "stage...";
+ LOG( m_loggerInfo )
+ << "Apparently the database is corrupt. Not much we can do at this "
+ "stage...";
if ( assertsEqual( dp.number, dh.number - 1 ) )
- cnote << "Apparently the database is corrupt. Not much we can do at this "
- "stage...";
+ LOG( m_loggerInfo )
+ << "Apparently the database is corrupt. Not much we can do at this "
+ "stage...";
}
}
return true;
@@ -1590,7 +1592,7 @@ bytes BlockChain::block( h256 const& _hash ) const {
string d = m_blocksDB->lookup( toSlice( _hash ) );
if ( d.empty() ) {
- cwarn << "Couldn't find requested block:" << _hash;
+ LOG( m_loggerWarning ) << "Couldn't find requested block:" << _hash;
return bytes();
}
@@ -1616,7 +1618,7 @@ bytes BlockChain::headerData( h256 const& _hash ) const {
string d = m_blocksDB->lookup( toSlice( _hash ) );
if ( d.empty() ) {
- cwarn << "Couldn't find requested block:" << _hash;
+ LOG( m_loggerWarning ) << "Couldn't find requested block:" << _hash;
return bytes();
}
diff --git a/libethereum/BlockChain.h b/libethereum/BlockChain.h
index 7a7ae5f5a..9d3345f42 100644
--- a/libethereum/BlockChain.h
+++ b/libethereum/BlockChain.h
@@ -281,9 +281,6 @@ class BlockChain {
BlocksBlooms blocksBlooms( h256 const& _chunkId ) const {
auto res = queryExtras< BlocksBlooms, ExtraBlocksBlooms >(
_chunkId, m_blocksBlooms, x_blocksBlooms, NullBlocksBlooms );
- // std::cerr << "Queried " << _chunkId.hex() << "->" << std::endl;
- // for ( size_t i = 0; i < 16; ++i )
- // std::cerr << "\t" << i << " = " << res.blooms[i].hex() << std::endl;
return res;
}
LogBloom blockBloom( unsigned _number ) const {
@@ -617,10 +614,11 @@ class BlockChain {
boost::filesystem::path m_dbPath;
- mutable Logger m_loggerInfo{ createLogger( VerbosityInfo, "chain" ) };
- mutable Logger m_logger{ createLogger( VerbosityDebug, "chain" ) };
- mutable Logger m_loggerDetail{ createLogger( VerbosityTrace, "chain" ) };
- mutable Logger m_loggerError{ createLogger( VerbosityError, "chain" ) };
+ mutable Logger m_loggerInfo{ createLogger( VerbosityInfo, "Blockchain" ) };
+ mutable Logger m_loggerDebug{ createLogger( VerbosityDebug, "Blockchain" ) };
+ mutable Logger m_loggerTrace{ createLogger( VerbosityTrace, "Blockchain" ) };
+ mutable Logger m_loggerError{ createLogger( VerbosityError, "Blockchain" ) };
+ mutable Logger m_loggerWarning{ createLogger( VerbosityWarning, "Blockchain" ) };
friend std::ostream& operator<<( std::ostream& _out, BlockChain const& _bc );
};
diff --git a/libethereum/BlockQueue.cpp b/libethereum/BlockQueue.cpp
index 5c69b0505..dbb74d929 100644
--- a/libethereum/BlockQueue.cpp
+++ b/libethereum/BlockQueue.cpp
@@ -135,7 +135,8 @@ void BlockQueue::verifierBody() try {
m_readySet.erase( work.hash );
m_knownBad.insert( work.hash );
if ( !m_verifying.remove( work.hash ) )
- cwarn << "Unexpected exception when verifying block: " << _ex.what();
+ LOG( m_loggerWarning )
+ << "Unexpected exception when verifying block: " << _ex.what();
drainVerified_WITH_BOTH_LOCKS();
continue;
}
@@ -158,18 +159,18 @@ void BlockQueue::verifierBody() try {
ready = true;
} else {
if ( !m_verifying.replace( work.hash, move( res ) ) )
- cwarn << "BlockQueue missing our job: was there a GM?";
+ LOG( m_loggerWarning ) << "BlockQueue missing our job: was there a GM?";
}
}
if ( ready )
m_onReady();
}
} catch ( const std::exception& ex ) {
- cerror << "CRITICAL " << ex.what();
- cerror << "\n" << skutils::signal::generate_stack_trace() << "\n" << std::endl;
+ LOG( m_loggerError ) << "CRITICAL " << ex.what();
+ LOG( m_loggerError ) << "\n" << skutils::signal::generate_stack_trace() << "\n";
} catch ( ... ) {
- cerror << "CRITICAL unknown exception";
- cerror << "\n" << skutils::signal::generate_stack_trace() << "\n" << std::endl;
+ LOG( m_loggerError ) << "CRITICAL unknown exception";
+ LOG( m_loggerError ) << "\n" << skutils::signal::generate_stack_trace() << "\n";
}
void BlockQueue::drainVerified_WITH_BOTH_LOCKS() {
@@ -189,14 +190,14 @@ ImportResult BlockQueue::import( bytesConstRef _block, bool _isOurs ) {
// Check if we already know this block.
h256 h = BlockHeader::headerHashFromBlock( _block );
- LOG( m_loggerDetail ) << "Queuing block " << h << " for import...";
+ LOG( m_loggerTrace ) << "Queuing block " << h << " for import...";
UpgradableGuard l( m_lock );
if ( contains( m_readySet, h ) || contains( m_drainingSet, h ) || contains( m_unknownSet, h ) ||
contains( m_knownBad, h ) || contains( m_futureSet, h ) ) {
// Already know about this one.
- LOG( m_loggerDetail ) << "Already known.";
+ LOG( m_loggerTrace ) << "Already known.";
return ImportResult::AlreadyKnown;
}
@@ -206,16 +207,16 @@ ImportResult BlockQueue::import( bytesConstRef _block, bool _isOurs ) {
// VERIFY: populates from the block and checks the block is internally coherent.
bi = m_bc->verifyBlock( _block, m_onBad, ImportRequirements::PostGenesis ).info;
} catch ( Exception const& _e ) {
- cwarn << "Ignoring malformed block: " << diagnostic_information( _e );
+ LOG( m_loggerWarning ) << "Ignoring malformed block: " << diagnostic_information( _e );
return ImportResult::Malformed;
}
- LOG( m_loggerDetail ) << "Block " << h << " is " << bi.number() << " parent is "
- << bi.parentHash();
+ LOG( m_loggerTrace ) << "Block " << h << " is " << bi.number() << " parent is "
+ << bi.parentHash();
// Check block doesn't already exist first!
if ( m_bc->isKnown( h ) ) {
- LOG( m_logger ) << "Already known in chain.";
+ LOG( m_loggerDebug ) << "Already known in chain.";
return ImportResult::AlreadyInChain;
}
@@ -230,8 +231,8 @@ ImportResult BlockQueue::import( bytesConstRef _block, bool _isOurs ) {
time_t bit = static_cast< time_t >( bi.timestamp() );
if ( strftime( buf, 24, "%X", localtime( &bit ) ) == 0 )
buf[0] = '\0'; // empty if case strftime fails
- LOG( m_loggerDetail ) << "OK - queued for future [" << bi.timestamp() << " vs " << utcTime()
- << "] - will wait until " << buf;
+ LOG( m_loggerTrace ) << "OK - queued for future [" << bi.timestamp() << " vs " << utcTime()
+ << "] - will wait until " << buf;
m_difficulty += bi.difficulty();
h256 const parentHash = bi.parentHash();
bool const unknown = !contains( m_readySet, parentHash ) &&
@@ -249,7 +250,7 @@ ImportResult BlockQueue::import( bytesConstRef _block, bool _isOurs ) {
!m_drainingSet.count( bi.parentHash() ) && !m_bc->isKnown( bi.parentHash() ) ) {
// We don't know the parent (yet) - queue it up for later. It'll get resent to us if we
// find out about its ancestry later on.
- LOG( m_loggerDetail ) << "OK - queued as unknown parent: " << bi.parentHash();
+ LOG( m_loggerTrace ) << "OK - queued as unknown parent: " << bi.parentHash();
m_unknown.insert( bi.parentHash(), h, _block.toBytes() );
m_unknownSet.insert( h );
m_difficulty += bi.difficulty();
@@ -257,7 +258,7 @@ ImportResult BlockQueue::import( bytesConstRef _block, bool _isOurs ) {
return ImportResult::UnknownParent;
} else {
// If valid, append to blocks.
- LOG( m_loggerDetail ) << "OK - ready for chain insertion.";
+ LOG( m_loggerTrace ) << "OK - ready for chain insertion.";
DEV_GUARDED( m_verification )
m_unverified.enqueue( UnverifiedBlock{ h, bi.parentHash(), _block.toBytes() } );
m_moreToVerify.notify_one();
@@ -354,13 +355,13 @@ void BlockQueue::tick() {
if ( m_future.isEmpty() )
return;
- LOG( m_logger ) << cc::debug( "Checking past-future blocks..." );
+ LOG( m_loggerDebug ) << "Checking past-future blocks...";
time_t t = utcTime();
if ( t < m_future.firstKey() )
return;
- LOG( m_logger ) << cc::debug( "Past-future blocks ready." );
+ LOG( m_loggerDebug ) << "Past-future blocks ready.";
{
UpgradeGuard l2( l );
@@ -370,8 +371,7 @@ void BlockQueue::tick() {
m_futureSet.erase( hash.first );
}
}
- LOG( m_logger ) << cc::debug( "Importing " ) << todo.size()
- << cc::debug( " past-future blocks." );
+ LOG( m_loggerDebug ) << "Importing " << todo.size() << " past-future blocks.";
for ( auto const& b : todo )
import( &b.second );
diff --git a/libethereum/BlockQueue.h b/libethereum/BlockQueue.h
index 41d0feadb..a62dec7c2 100644
--- a/libethereum/BlockQueue.h
+++ b/libethereum/BlockQueue.h
@@ -329,8 +329,10 @@ class BlockQueue : HasInvariants {
u256 m_difficulty; ///< Total difficulty of blocks in the queue
u256 m_drainingDifficulty; ///< Total difficulty of blocks in draining
- Logger m_logger{ createLogger( VerbosityDebug, "bq" ) };
- Logger m_loggerDetail{ createLogger( VerbosityTrace, "bq" ) };
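+ // One logger per verbosity level on the "bq" channel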
+ Logger m_loggerDebug{ createLogger( VerbosityDebug, "bq" ) };
+ Logger m_loggerTrace{ createLogger( VerbosityTrace, "bq" ) };
+ Logger m_loggerWarning{ createLogger( VerbosityWarning, "bq" ) };
+ Logger m_loggerError{ createLogger( VerbosityError, "bq" ) };
Counter< BlockQueue > c;
diff --git a/libethereum/ChainParams.cpp b/libethereum/ChainParams.cpp
index 4b2be0789..c90b629dc 100644
--- a/libethereum/ChainParams.cpp
+++ b/libethereum/ChainParams.cpp
@@ -483,10 +483,10 @@ void ChainParams::populateFromGenesis( bytes const& _genesisRLP, AccountMap cons
auto b = genesisBlock();
if ( b != _genesisRLP ) {
- cdebug << "Block passed:" << bi.hash() << bi.hash( WithoutSeal );
- cdebug << "Genesis now:" << BlockHeader::headerHashFromBlock( b );
- cdebug << RLP( b );
- cdebug << RLP( _genesisRLP );
+ LOG( m_loggerDebug ) << "Block passed:" << bi.hash() << bi.hash( WithoutSeal );
+ LOG( m_loggerDebug ) << "Genesis now:" << BlockHeader::headerHashFromBlock( b );
+ LOG( m_loggerDebug ) << RLP( b );
+ LOG( m_loggerDebug ) << RLP( _genesisRLP );
throw 0;
}
}
diff --git a/libethereum/ChainParams.h b/libethereum/ChainParams.h
index 60c763398..c5b9c2665 100644
--- a/libethereum/ChainParams.h
+++ b/libethereum/ChainParams.h
@@ -84,6 +84,8 @@ struct ChainParams : public ChainOperationParams {
ChainParams loadGenesis( std::string const& _json ) const;
mutable std::string originalJSON;
+
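+ // Debug logger used by populateFromGenesis() to dump mismatching genesis data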
+ Logger m_loggerDebug{ createLogger( VerbosityDebug, "ChainParams" ) };
};
} // namespace dev::eth
diff --git a/libethereum/Client.cpp b/libethereum/Client.cpp
index b266145df..902cd8ce3 100644
--- a/libethereum/Client.cpp
+++ b/libethereum/Client.cpp
@@ -145,8 +145,8 @@ Client::Client( ChainParams const& _params, int _networkID,
#endif /// (defined __HAVE_SKALED_LOCK_FILE_INDICATING_CRITICAL_STOP__)
m_debugTracer.call_on_tracepoint( [this]( const std::string& name ) {
- clog( VerbosityTrace, "client" )
- << "TRACEPOINT " << name << " " << m_debugTracer.get_tracepoint_count( name );
+ LOG( m_loggerTrace ) << "TRACEPOINT " << name << " "
+ << m_debugTracer.get_tracepoint_count( name );
} );
m_debugHandler = [this]( const std::string& arg ) -> std::string {
@@ -172,7 +172,7 @@ void Client::stopWorking() {
m_skaleHost->stopWorking(); // TODO Find and document a systematic way to start/stop all
// workers
else
- cerror << "Instance of SkaleHost was not properly created.";
+ LOG( m_loggerError ) << "Instance of SkaleHost was not properly created.";
m_snapshotAgent->terminate();
@@ -185,23 +185,25 @@ void Client::stopWorking() {
m_bq.stop(); // l_sergiy: added to stop block queue processing
m_bc.close();
- LOG( m_logger ) << cc::success( "Blockchain is closed" );
+ LOG( m_loggerInfo ) << "Blockchain is closed";
#if ( defined __HAVE_SKALED_LOCK_FILE_INDICATING_CRITICAL_STOP__ )
bool isForcefulExit =
( !m_skaleHost || m_skaleHost->exitedForcefully() == false ) ? false : true;
if ( !isForcefulExit ) {
delete_lock_file( m_dbPath );
- LOG( m_logger ) << cc::success( "Deleted lock file " )
- << cc::p( boost::filesystem::canonical( m_dbPath ).string() +
- std::string( "/skaled.lock" ) );
+ LOG( m_loggerInfo ) << "Deleted lock file "
+ << boost::filesystem::canonical( m_dbPath ).string() +
+ std::string( "/skaled.lock" );
} else {
- LOG( m_logger ) << cc::fatal( "ATTENTION:" ) << " " << cc::error( "Deleted lock file " )
- << cc::p( boost::filesystem::canonical( m_dbPath ).string() +
- std::string( "/skaled.lock" ) )
- << cc::error( " after forceful exit" );
+ LOG( m_loggerInfo ) << "ATTENTION:"
+ << " "
+ << "Deleted lock file "
+ << boost::filesystem::canonical( m_dbPath ).string() +
+ std::string( "/skaled.lock" )
+ << " after forceful exit";
}
- LOG( m_logger ).flush();
+ LOG( m_loggerInfo ).flush();
#endif /// (defined __HAVE_SKALED_LOCK_FILE_INDICATING_CRITICAL_STOP__)
terminate();
@@ -306,8 +308,8 @@ void Client::init( WithExisting _forceAction, u256 _networkId ) {
if ( chainParams().sChain.nodeGroups.size() > 0 ) {
initHistoricGroupIndex();
} else {
- LOG( m_logger ) << "Empty node groups in config. "
- "This is OK in tests but not OK in production";
+ LOG( m_loggerInfo ) << "Empty node groups in config. "
+ "This is OK in tests but not OK in production";
}
// init snapshots for not newly created chains
@@ -346,11 +348,12 @@ tuple< ImportRoute, bool, unsigned > Client::syncQueue( unsigned _max ) {
}
void Client::onBadBlock( Exception& _ex ) const {
- // BAD BLOCK!!!
+ // BAD BLOCK
bytes const* block = boost::get_error_info< errinfo_block >( _ex );
if ( !block ) {
- cwarn << "ODD: onBadBlock called but exception (" << _ex.what() << ") has no block in it.";
- cwarn << boost::diagnostic_information( _ex );
+ LOG( m_loggerWarning ) << "ODD: onBadBlock called but exception (" << _ex.what()
+ << ") has no block in it.";
+ LOG( m_loggerWarning ) << boost::diagnostic_information( _ex );
return;
}
@@ -395,7 +398,7 @@ bool Client::isMajorSyncing() const {
void Client::startedWorking() {
// Synchronise the state according to the head of the block chain.
// TODO: currently it contains keys for *all* blocks. Make it remove old ones.
- LOG( m_loggerDetail ) << cc::debug( "startedWorking()" );
+ LOG( m_loggerTrace ) << "startedWorking()";
DEV_GUARDED( m_blockImportMutex ) {
DEV_WRITE_GUARDED( x_preSeal )
@@ -500,8 +503,8 @@ void Client::syncBlockQueue() {
double elapsed = t.elapsed();
if ( count ) {
- LOG( m_logger ) << count << " blocks imported in " << unsigned( elapsed * 1000 ) << " ms ("
- << ( count / elapsed ) << " blocks/s) in #" << bc().number();
+ LOG( m_loggerInfo ) << count << " blocks imported in " << unsigned( elapsed * 1000 )
+ << " ms (" << ( count / elapsed ) << " blocks/s) in #" << bc().number();
}
if ( elapsed > c_targetDuration * 1.1 && count > c_syncMin )
@@ -514,7 +517,6 @@ void Client::syncBlockQueue() {
onChainChanged( ir );
}
-
size_t Client::importTransactionsAsBlock(
const Transactions& _transactions, u256 _gasPrice, uint64_t _timestamp ) {
// on schain creation, SnapshotAgent needs timestamp of block 1
@@ -534,14 +536,13 @@ size_t Client::importTransactionsAsBlock(
SchainPatch::useLatestBlockTimestamp( blockChain().info().timestamp() );
if ( !UnsafeRegion::isActive() ) {
- LOG( m_loggerDetail ) << "Total unsafe time so far = "
- << std::chrono::duration_cast< std::chrono::seconds >(
- UnsafeRegion::getTotalTime() )
- .count()
- << " seconds";
+ LOG( m_loggerTrace ) << "Total unsafe time so far = "
+ << std::chrono::duration_cast< std::chrono::seconds >(
+ UnsafeRegion::getTotalTime() )
+ .count()
+ << " seconds";
} else
- cwarn << "Warning: UnsafeRegion still active!";
-
+ LOG( m_loggerWarning ) << "Warning: UnsafeRegion still active!";
if ( chainParams().sChain.nodeGroups.size() > 0 )
updateHistoricGroupIndex();
@@ -558,7 +559,7 @@ size_t Client::syncTransactions(
assert( m_skaleHost );
while ( m_working.isSealed() ) {
- cnote << "m_working.isSealed. sleeping";
+ LOG( m_loggerInfo ) << "m_working.isSealed. sleeping";
usleep( 1000 );
}
@@ -591,12 +592,12 @@ size_t Client::syncTransactions(
// Tell network about the new transactions.
m_skaleHost->noteNewTransactions();
- ctrace << "Processed " << newPendingReceipts.size() << " transactions in "
- << timer.elapsed() * 1000 << "(" << ( bool ) m_syncTransactionQueue << ")";
+ LOG( m_loggerTrace ) << "Processed " << newPendingReceipts.size() << " transactions in "
+ << timer.elapsed() * 1000 << " ms (" << ( bool ) m_syncTransactionQueue << ")";
#ifdef HISTORIC_STATE
- LOG( m_logger ) << "HSCT: "
- << m_working.mutableState().mutableHistoricState().getAndResetBlockCommitTime();
+ LOG( m_loggerInfo )
+ << "HSCT: " << m_working.mutableState().mutableHistoricState().getAndResetBlockCommitTime();
#endif
return goodReceipts;
}
@@ -604,12 +605,12 @@ size_t Client::syncTransactions(
void Client::onDeadBlocks( h256s const& _blocks, h256Hash& io_changed ) {
// insert transactions that we are declaring the dead part of the chain
for ( auto const& h : _blocks ) {
- LOG( m_loggerDetail ) << cc::warn( "Dead block: " ) << h;
+ LOG( m_loggerTrace ) << "Dead block: " << h;
for ( auto const& t : bc().transactions( h ) ) {
- LOG( m_loggerDetail ) << cc::debug( "Resubmitting dead-block transaction " )
- << Transaction( t, CheckTransaction::None );
- ctrace << cc::debug( "Resubmitting dead-block transaction " )
- << Transaction( t, CheckTransaction::None );
+ LOG( m_loggerTrace ) << "Resubmitting dead-block transaction "
+ << Transaction( t, CheckTransaction::None );
+ LOG( m_loggerTrace ) << "Resubmitting dead-block transaction "
+ << Transaction( t, CheckTransaction::None );
m_tq.import( t, IfDropped::Retry );
}
}
@@ -652,7 +653,7 @@ void Client::restartMining() {
DEV_READ_GUARDED( x_postSeal )
if ( !m_postSeal.isSealed() || m_postSeal.info().hash() != newPreMine.info().parentHash() )
for ( auto const& t : m_postSeal.pending() ) {
- LOG( m_loggerDetail ) << "Resubmitting post-seal transaction " << t;
+ LOG( m_loggerTrace ) << "Resubmitting post-seal transaction " << t;
// ctrace << "Resubmitting post-seal transaction " << t;
auto ir = m_tq.import( t, IfDropped::Retry );
if ( ir != ImportResult::Success )
@@ -700,7 +701,7 @@ bool Client::remoteActive() const {
}
void Client::onPostStateChanged() {
- LOG( m_loggerDetail ) << cc::notice( "Post state changed." );
+ LOG( m_loggerTrace ) << "Post state changed.";
m_signalled.notify_all();
m_remoteWorking = false;
}
@@ -708,12 +709,12 @@ void Client::onPostStateChanged() {
void Client::startSealing() {
if ( m_wouldSeal == true )
return;
- LOG( m_logger ) << "Client::startSealing: " << author();
+ LOG( m_loggerInfo ) << "Client::startSealing: " << author();
if ( author() ) {
m_wouldSeal = true;
m_signalled.notify_all();
} else
- LOG( m_logger ) << "You need to set an author in order to seal!";
+ LOG( m_loggerInfo ) << "You need to set an author in order to seal!";
}
void Client::rejigSealing() {
@@ -721,15 +722,15 @@ void Client::rejigSealing() {
if ( sealEngine()->shouldSeal( this ) ) {
m_wouldButShouldnot = false;
- LOG( m_loggerDetail ) << "Rejigging seal engine...";
+ LOG( m_loggerTrace ) << "Rejigging seal engine...";
DEV_WRITE_GUARDED( x_working ) {
if ( m_working.isSealed() ) {
- LOG( m_logger ) << "Tried to seal sealed block...";
+ LOG( m_loggerInfo ) << "Tried to seal sealed block...";
return;
}
// TODO is that needed? we have "Generating seal on" below
- LOG( m_loggerDetail ) << "Starting to seal block"
- << " #" << m_working.info().number();
+ LOG( m_loggerTrace ) << "Starting to seal block"
+ << " #" << m_working.info().number();
// TODO Deduplicate code
dev::h256 stateRootToSet;
@@ -756,15 +757,15 @@ void Client::rejigSealing() {
if ( wouldSeal() ) {
sealEngine()->onSealGenerated( [=]( bytes const& _header ) {
- LOG( m_logger ) << "Block sealed"
- << " #" << BlockHeader( _header, HeaderData ).number();
+ LOG( m_loggerInfo ) << "Block sealed"
+ << " #" << BlockHeader( _header, HeaderData ).number();
if ( this->submitSealed( _header ) )
m_onBlockSealed( _header );
else
- LOG( m_logger ) << "Submitting block failed...";
+ LOG( m_loggerInfo ) << "Submitting block failed...";
} );
- ctrace << "Generating seal on " << m_sealingInfo.hash( WithoutSeal ) << " #"
- << m_sealingInfo.number();
+ LOG( m_loggerTrace ) << "Generating seal on " << m_sealingInfo.hash( WithoutSeal )
+ << " #" << m_sealingInfo.number();
sealEngine()->generateSeal( m_sealingInfo );
}
} else
@@ -777,15 +778,15 @@ void Client::rejigSealing() {
void Client::sealUnconditionally( bool submitToBlockChain ) {
m_wouldButShouldnot = false;
- LOG( m_loggerDetail ) << "Rejigging seal engine...";
+ LOG( m_loggerTrace ) << "Rejigging seal engine...";
DEV_WRITE_GUARDED( x_working ) {
if ( m_working.isSealed() ) {
- LOG( m_logger ) << "Tried to seal sealed block...";
+ LOG( m_loggerInfo ) << "Tried to seal sealed block...";
return;
}
// TODO is that needed? we have "Generating seal on" below
- LOG( m_loggerDetail ) << "Starting to seal block"
- << " #" << m_working.info().number();
+ LOG( m_loggerTrace ) << "Starting to seal block"
+ << " #" << m_working.info().number();
// latest hash is really updated after NEXT snapshot already started hash computation
// TODO Deduplicate code
dev::h256 stateRootToSet;
@@ -819,13 +820,13 @@ void Client::sealUnconditionally( bool submitToBlockChain ) {
m_sealingInfo.streamRLP( headerRlp );
const bytes& header = headerRlp.out();
BlockHeader header_struct( header, HeaderData );
- LOG( m_logger ) << cc::success( "Block sealed" ) << " #" << cc::num10( header_struct.number() )
- << " (" << header_struct.hash() << ")";
+ LOG( m_loggerInfo ) << "Block sealed"
+ << " #" << header_struct.number() << " (" << header_struct.hash() << ")";
std::stringstream ssBlockStats;
- ssBlockStats << cc::success( "Block stats:" ) << "BN:" << number()
- << ":BTS:" << bc().info().timestamp() << ":TXS:" << TransactionBase::howMany()
- << ":HDRS:" << BlockHeader::howMany() << ":LOGS:" << LogEntry::howMany()
- << ":SENGS:" << SealEngineBase::howMany()
+ ssBlockStats << "Block stats:"
+ << "BN:" << number() << ":BTS:" << bc().info().timestamp()
+ << ":TXS:" << TransactionBase::howMany() << ":HDRS:" << BlockHeader::howMany()
+ << ":LOGS:" << LogEntry::howMany() << ":SENGS:" << SealEngineBase::howMany()
<< ":TXRS:" << TransactionReceipt::howMany() << ":BLCKS:" << Block::howMany()
<< ":ACCS:" << Account::howMany() << ":BQS:" << BlockQueue::howMany()
<< ":BDS:" << BlockDetails::howMany() << ":TSS:" << TransactionSkeleton::howMany()
@@ -837,14 +838,14 @@ void Client::sealUnconditionally( bool submitToBlockChain ) {
ssBlockStats << ":RAM:" << getRAMUsage();
ssBlockStats << ":CPU:" << getCPUUsage();
}
- LOG( m_logger ) << ssBlockStats.str();
+ LOG( m_loggerInfo ) << ssBlockStats.str();
if ( submitToBlockChain ) {
if ( this->submitSealed( header ) )
m_onBlockSealed( header );
else
- LOG( m_logger ) << cc::error( "Submitting block failed..." );
+ LOG( m_loggerInfo ) << "Submitting block failed...";
} else {
UpgradableGuard l( x_working );
{
@@ -852,7 +853,7 @@ void Client::sealUnconditionally( bool submitToBlockChain ) {
if ( m_working.sealBlock( header ) ) {
m_onBlockSealed( header );
} else {
- LOG( m_logger ) << cc::error( "Sealing block failed..." );
+ LOG( m_loggerInfo ) << "Sealing block failed...";
}
}
DEV_WRITE_GUARDED( x_postSeal )
@@ -870,16 +871,16 @@ void Client::importWorkingBlock() {
void Client::noteChanged( h256Hash const& _filters ) {
Guard l( x_filtersWatches );
if ( _filters.size() )
- LOG( m_loggerWatch ) << cc::notice( "noteChanged: " ) << filtersToString( _filters );
+ LOG( m_loggerWatch ) << "noteChanged: " << filtersToString( _filters );
// accrue all changes left in each filter into the watches.
for ( auto& w : m_watches )
if ( _filters.count( w.second.id ) ) {
if ( m_filters.count( w.second.id ) ) {
- LOG( m_loggerWatch ) << "!!! " << w.first << " " << w.second.id.abridged();
+ LOG( m_loggerWatch ) << w.first << " " << w.second.id.abridged();
w.second.append_changes( m_filters.at( w.second.id ).changes_ );
} else if ( m_specialFilters.count( w.second.id ) )
for ( h256 const& hash : m_specialFilters.at( w.second.id ) ) {
- LOG( m_loggerWatch ) << "!!! " << w.first << " "
+ LOG( m_loggerWatch ) << w.first << " "
<< ( w.second.id == PendingChangedFilter ? "pending" :
w.second.id == ChainChangedFilter ? "chain" :
"???" );
@@ -938,7 +939,7 @@ void Client::tick() {
m_bq.tick();
m_lastTick = chrono::system_clock::now();
if ( m_report.ticks == 15 )
- LOG( m_loggerDetail ) << activityReport();
+ LOG( m_loggerTrace ) << activityReport();
}
}
@@ -954,11 +955,11 @@ void Client::checkWatchGarbage() {
chrono::seconds( 20 ) ) // NB Was 200 for debugging. Normal value is 20!
{
toUninstall.push_back( key );
- LOG( m_loggerDetail ) << "GC: Uninstall " << key << " ("
- << chrono::duration_cast< chrono::seconds >(
- chrono::system_clock::now() - m_watches[key].lastPoll )
- .count()
- << " s old)";
+ LOG( m_loggerTrace ) << "GC: Uninstall " << key << " ("
+ << chrono::duration_cast< chrono::seconds >(
+ chrono::system_clock::now() - m_watches[key].lastPoll )
+ .count()
+ << " s old)";
}
for ( auto i : toUninstall )
uninstallWatch( i );
@@ -1176,12 +1177,12 @@ ExecutionResult Client::call( Address const& _from, u256 _value, Address _dest,
temp.mutableState().addBalance( _from, ( u256 )( t.gas() * t.gasPrice() + t.value() ) );
ret = temp.execute( bc().lastBlockHashes(), t, skale::Permanence::Reverted );
} catch ( InvalidNonce const& in ) {
- LOG( m_logger ) << "exception in client call(1):"
- << boost::current_exception_diagnostic_information() << std::endl;
+ LOG( m_loggerInfo ) << "exception in client call(1):"
+ << boost::current_exception_diagnostic_information();
throw std::runtime_error( "call with invalid nonce" );
} catch ( ... ) {
- LOG( m_logger ) << "exception in client call(2):"
- << boost::current_exception_diagnostic_information() << std::endl;
+ LOG( m_loggerInfo ) << "exception in client call(2):"
+ << boost::current_exception_diagnostic_information();
throw;
}
return ret;
diff --git a/libethereum/Client.h b/libethereum/Client.h
index e9f423bbc..89f6c2dcd 100644
--- a/libethereum/Client.h
+++ b/libethereum/Client.h
@@ -563,8 +563,10 @@ class Client : public ClientBase, protected Worker {
///< the DB
Signal< bytes const& > m_onBlockSealed; ///< Called if we have sealed a new block
- Logger m_logger{ createLogger( VerbosityInfo, "client" ) };
- Logger m_loggerDetail{ createLogger( VerbosityTrace, "client" ) };
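+ // mutable so that const methods ( e.g. onBadBlock() ) can still log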
+ mutable Logger m_loggerInfo{ createLogger( VerbosityInfo, "client" ) };
+ mutable Logger m_loggerTrace{ createLogger( VerbosityTrace, "client" ) };
+ mutable Logger m_loggerWarning{ createLogger( VerbosityWarning, "client" ) };
+ mutable Logger m_loggerError{ createLogger( VerbosityError, "client" ) };
SkaleDebugTracer m_debugTracer;
SkaleDebugInterface::handler m_debugHandler;
diff --git a/libethereum/ClientTest.cpp b/libethereum/ClientTest.cpp
index 85e9ebe58..1a44813f5 100644
--- a/libethereum/ClientTest.cpp
+++ b/libethereum/ClientTest.cpp
@@ -81,7 +81,7 @@ void ClientTest::modifyTimestamp( int64_t _timestamp ) {
}
bool ClientTest::mineBlocks( unsigned _count ) noexcept {
- std::cout << "mineBlocks begin " << _count << std::endl;
+ LOG( m_loggerDebug ) << "mineBlocks begin " << _count << "\n";
if ( wouldSeal() )
return false;
try {
@@ -97,10 +97,10 @@ bool ClientTest::mineBlocks( unsigned _count ) noexcept {
startSealing();
future_status ret = allBlocksImported.get_future().wait_for(
std::chrono::seconds( m_singleBlockMaxMiningTimeInSeconds * _count ) );
- std::cout << "mineBlocks end 0 is OK:" << ( int ) ret << std::endl;
+ LOG( m_loggerDebug ) << "mineBlocks end 0 is OK:" << ( int ) ret << "\n";
return ( ret == future_status::ready );
} catch ( std::exception const& ) {
- LOG( m_logger ) << boost::current_exception_diagnostic_information();
+ LOG( m_loggerDebug ) << boost::current_exception_diagnostic_information();
return false;
}
}
diff --git a/libethereum/ClientTest.h b/libethereum/ClientTest.h
index c58aeb76f..9dace0311 100644
--- a/libethereum/ClientTest.h
+++ b/libethereum/ClientTest.h
@@ -51,6 +51,9 @@ class ClientTest : public Client {
void rewindToBlock( unsigned _number );
h256 importRawBlock( std::string const& _blockRLP );
+private:
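+ // Debug logger for ClientTest diagnostics ( channel "ClientTest" )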
+ Logger m_loggerDebug{ createLogger( VerbosityDebug, "ClientTest" ) };
+
protected:
unsigned const m_singleBlockMaxMiningTimeInSeconds = 10;
};
diff --git a/libethereum/ConsensusStub.cpp b/libethereum/ConsensusStub.cpp
index 23215a836..d08a786e5 100644
--- a/libethereum/ConsensusStub.cpp
+++ b/libethereum/ConsensusStub.cpp
@@ -88,8 +88,8 @@ void ConsensusStub::doWork() {
if ( txns.size() == 0 ) // check for exit
return;
- std::cout << cc::debug( "Taken " ) << txns.size() << cc::debug( " transactions for consensus" )
- << std::endl;
+ LOG( m_loggerDebug ) << "Taken " << txns.size() << " transactions for consensus"
+ << "\n";
size_t txns_in_block = txns.size(); // rand()%txns.size();
// any subset but not zero
@@ -109,9 +109,10 @@ void ConsensusStub::doWork() {
++blockCounter;
m_extFace.createBlock( out_vector, time( NULL ), 0, blockCounter,
getPriceForBlockId( blockCounter ), stateRoot, -1 );
- std::cout << cc::debug( "createBlock" ) << std::endl;
+ LOG( m_loggerDebug ) << "createBlock"
+ << "\n";
} catch ( const dev::Exception& x ) {
- std::cout << x.what() << std::endl;
+ LOG( m_loggerDebug ) << x.what();
} // catch
}
diff --git a/libethereum/ConsensusStub.h b/libethereum/ConsensusStub.h
index d102b78fc..a72c5953b 100644
--- a/libethereum/ConsensusStub.h
+++ b/libethereum/ConsensusStub.h
@@ -29,6 +29,7 @@ using namespace std;
#include
#include
#include
+#include
#include
#include