From 7cdf18acde243fa3a89134ad58cd1f115390daa2 Mon Sep 17 00:00:00 2001 From: Tudor Malene Date: Tue, 26 Sep 2023 13:12:01 +0100 Subject: [PATCH] improve logging (#1556) * improve logging * address pr comments --- go/enclave/components/batch_executor.go | 14 +- go/enclave/components/batch_registry.go | 2 +- go/enclave/components/block_processor.go | 6 +- go/enclave/components/rollup_consumer.go | 5 +- go/enclave/container/enclave_container.go | 2 +- go/enclave/core/utils.go | 66 ++++---- .../crosschain/block_message_extractor.go | 6 +- go/enclave/crosschain/message_bus_manager.go | 6 +- go/enclave/enclave.go | 22 +-- go/enclave/events/subscription_manager.go | 12 +- go/enclave/nodetype/sequencer.go | 5 +- go/enclave/storage/storage.go | 141 ++++++------------ go/host/enclave/guardian.go | 43 +++--- go/host/enclave/state.go | 2 +- go/host/events/logs.go | 14 +- go/host/host.go | 4 +- go/host/l1/blockrepository.go | 2 +- go/host/l1/publisher.go | 4 +- go/host/l2/batchrepository.go | 2 +- go/host/p2p/p2p.go | 19 ++- go/host/rpc/enclaverpc/enclave_client.go | 8 +- 21 files changed, 178 insertions(+), 207 deletions(-) diff --git a/go/enclave/components/batch_executor.go b/go/enclave/components/batch_executor.go index 71bda70edd..0b390dfc26 100644 --- a/go/enclave/components/batch_executor.go +++ b/go/enclave/components/batch_executor.go @@ -72,14 +72,14 @@ func (executor *batchExecutor) payL1Fees(stateDB *state.StateDB, context *BatchE for _, tx := range context.Transactions { sender, err := core.GetAuthenticatedSender(context.ChainConfig.ChainID.Int64(), tx) if err != nil { - executor.logger.Warn("Unable to extract sender for tx", log.TxKey, tx.Hash()) + executor.logger.Error("Unable to extract sender for tx. Should not happen at this point.", log.TxKey, tx.Hash(), log.ErrKey, err) continue } accBalance := stateDB.GetBalance(*sender) cost, err := executor.gasOracle.EstimateL1StorageGasCost(tx, block) if err != nil { - executor.logger.Warn("Unable to get gas cost for tx", log.TxKey, tx.Hash(), log.ErrKey, err) + executor.logger.Error("Unable to get gas cost for tx. Should not happen at this point.", log.TxKey, tx.Hash(), log.ErrKey, err) continue } @@ -108,7 +108,7 @@ func (executor *batchExecutor) payL1Fees(stateDB *state.StateDB, context *BatchE } func (executor *batchExecutor) ComputeBatch(context *BatchExecutionContext) (*ComputedBatch, error) { - defer executor.logger.Info("Batch context processed", log.DurationKey, measure.NewStopwatch()) + defer core.LogMethodDuration(executor.logger, measure.NewStopwatch(), "Batch context processed") // sanity check that the l1 block exists. We don't have to execute batches of forks. block, err := executor.storage.FetchBlock(context.BlockPtr) @@ -204,14 +204,14 @@ func (executor *batchExecutor) ComputeBatch(context *BatchExecutionContext) (*Co return gethcommon.Hash{}, fmt.Errorf("commit failure for batch %d. 
Cause: %w", batch.SeqNo(), err) } trieDB := executor.storage.TrieDB() - err = trieDB.Commit(h, true) + err = trieDB.Commit(h, false) return h, err }, }, nil } func (executor *batchExecutor) ExecuteBatch(batch *core.Batch) (types.Receipts, error) { - defer executor.logger.Info("Executed batch", log.BatchHashKey, batch.Hash(), log.DurationKey, measure.NewStopwatch()) + defer core.LogMethodDuration(executor.logger, measure.NewStopwatch(), "Executed batch", log.BatchHashKey, batch.Hash()) // Validators recompute the entire batch using the same batch context // if they have all necessary prerequisites like having the l1 block processed @@ -316,8 +316,8 @@ func (executor *batchExecutor) populateOutboundCrossChainData(batch *core.Batch, valueTransferMessages, err := executor.crossChainProcessors.Local.ExtractOutboundTransfers(receipts) if err != nil { - executor.logger.Error("Extracting messages L2->L1 failed", log.ErrKey, err, log.CmpKey, log.CrossChainCmp) - return fmt.Errorf("could not extract cross chain messages. Cause: %w", err) + executor.logger.Error("Failed extracting L2->L1 messages value transfers", log.ErrKey, err, log.CmpKey, log.CrossChainCmp) + return fmt.Errorf("could not extract cross chain value transfers. Cause: %w", err) } transfersHash := types.DeriveSha(ValueTransfers(valueTransferMessages), &trie.StackTrie{}) diff --git a/go/enclave/components/batch_registry.go b/go/enclave/components/batch_registry.go index 148f98685c..da212ac713 100644 --- a/go/enclave/components/batch_registry.go +++ b/go/enclave/components/batch_registry.go @@ -70,7 +70,7 @@ func (br *batchRegistry) OnBatchExecuted(batch *core.Batch, receipts types.Recei br.callbackMutex.RLock() defer br.callbackMutex.RUnlock() - defer br.logger.Debug("Sending batch and events", log.BatchHashKey, batch.Hash(), log.DurationKey, measure.NewStopwatch()) + defer core.LogMethodDuration(br.logger, measure.NewStopwatch(), "Sending batch and events", log.BatchHashKey, batch.Hash()) br.headBatchSeq = batch.SeqNo() if br.batchesCallback != nil { diff --git a/go/enclave/components/block_processor.go b/go/enclave/components/block_processor.go index 6b73e93194..e8f27c54a8 100644 --- a/go/enclave/components/block_processor.go +++ b/go/enclave/components/block_processor.go @@ -4,6 +4,8 @@ import ( "errors" "fmt" + "github.com/obscuronet/go-obscuro/go/enclave/core" + "github.com/obscuronet/go-obscuro/go/enclave/gas" "github.com/obscuronet/go-obscuro/go/enclave/storage" @@ -34,7 +36,7 @@ func NewBlockProcessor(storage storage.Storage, cc *crosschain.Processors, gasOr } func (bp *l1BlockProcessor) Process(br *common.BlockAndReceipts) (*BlockIngestionType, error) { - defer bp.logger.Info("L1 block processed", log.BlockHashKey, br.Block.Hash(), log.DurationKey, measure.NewStopwatch()) + defer core.LogMethodDuration(bp.logger, measure.NewStopwatch(), "L1 block processed", log.BlockHashKey, br.Block.Hash()) ingestion, err := bp.tryAndInsertBlock(br) if err != nil { @@ -78,7 +80,7 @@ func (bp *l1BlockProcessor) tryAndInsertBlock(br *common.BlockAndReceipts) (*Blo // Do not store the block if the L1 chain insertion failed return nil, err } - bp.logger.Trace("block inserted successfully", + bp.logger.Trace("Block inserted successfully", log.BlockHeightKey, block.NumberU64(), log.BlockHashKey, block.Hash(), "ingestionType", ingestionType) err = bp.storage.StoreBlock(block, ingestionType.ChainFork) diff --git a/go/enclave/components/rollup_consumer.go b/go/enclave/components/rollup_consumer.go index 8cc644798d..398939a84f 100644 --- 
a/go/enclave/components/rollup_consumer.go +++ b/go/enclave/components/rollup_consumer.go @@ -3,6 +3,8 @@ package components import ( "fmt" + "github.com/obscuronet/go-obscuro/go/enclave/core" + "github.com/obscuronet/go-obscuro/go/enclave/storage" "github.com/obscuronet/go-obscuro/go/common/measure" @@ -45,8 +47,7 @@ func NewRollupConsumer( } func (rc *rollupConsumerImpl) ProcessRollupsInBlock(b *common.BlockAndReceipts) error { - stopwatch := measure.NewStopwatch() - defer rc.logger.Info("Rollup consumer processed block", log.BlockHashKey, b.Block.Hash(), log.DurationKey, stopwatch) + defer core.LogMethodDuration(rc.logger, measure.NewStopwatch(), "Rollup consumer processed block", log.BlockHashKey, b.Block.Hash()) rollups := rc.extractRollups(b) if len(rollups) == 0 { diff --git a/go/enclave/container/enclave_container.go b/go/enclave/container/enclave_container.go index 72b259415c..8302eaaa6c 100644 --- a/go/enclave/container/enclave_container.go +++ b/go/enclave/container/enclave_container.go @@ -37,7 +37,7 @@ func (e *EnclaveContainer) Start() error { func (e *EnclaveContainer) Stop() error { _, err := e.RPCServer.Stop(context.Background(), nil) if err != nil { - e.Logger.Warn("unable to cleanly stop enclave", log.ErrKey, err) + e.Logger.Error("Unable to cleanly stop enclave", log.ErrKey, err) return err } return nil diff --git a/go/enclave/core/utils.go b/go/enclave/core/utils.go index 37fef7fb30..0bd8844fda 100644 --- a/go/enclave/core/utils.go +++ b/go/enclave/core/utils.go @@ -4,41 +4,13 @@ import ( "fmt" "math/big" - "github.com/ethereum/go-ethereum/core/types" - - "github.com/obscuronet/go-obscuro/go/common" - gethcommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + gethlog "github.com/ethereum/go-ethereum/log" + "github.com/obscuronet/go-obscuro/go/common/log" + "github.com/obscuronet/go-obscuro/go/common/measure" ) -func MakeMap(txs []*common.L2Tx) map[gethcommon.Hash]*common.L2Tx { - m := make(map[gethcommon.Hash]*common.L2Tx) - for _, tx := range txs { - m[tx.Hash()] = tx - } - return m -} - -func ToMap(txs []*common.L2Tx) map[gethcommon.Hash]gethcommon.Hash { - m := make(map[gethcommon.Hash]gethcommon.Hash) - for _, tx := range txs { - m[tx.Hash()] = tx.Hash() - } - return m -} - -func PrintTxs(txs []*common.L2Tx) (txsString []string) { - for _, t := range txs { - txsString = printTx(t, txsString) - } - return txsString -} - -func printTx(t *common.L2Tx, txsString []string) []string { - txsString = append(txsString, fmt.Sprintf("%s,", t.Hash().Hex())) - return txsString -} - // VerifySignature - Checks that the L2Tx has a valid signature. 
func VerifySignature(chainID int64, tx *types.Transaction) error { signer := types.NewLondonSigner(big.NewInt(chainID)) @@ -55,3 +27,33 @@ func GetAuthenticatedSender(chainID int64, tx *types.Transaction) (*gethcommon.A } return &sender, nil } + +const ( + // log level for requests that take longer than this threshold in millis + _errorThreshold = 500 + _warnThreshold = 200 + _infoThreshold = 100 + _debugThreshold = 50 +) + +// LogMethodDuration - call only with "defer" +func LogMethodDuration(logger gethlog.Logger, stopWatch *measure.Stopwatch, msg string, args ...any) { + var f func(msg string, ctx ...interface{}) + durationMillis := stopWatch.Measure().Milliseconds() + + // we adjust the logging level based on the time + switch { + case durationMillis > _errorThreshold: + f = logger.Error + case durationMillis > _warnThreshold: + f = logger.Warn + case durationMillis > _infoThreshold: + f = logger.Info + case durationMillis > _debugThreshold: + f = logger.Debug + default: + f = logger.Trace + } + newArgs := append([]any{log.DurationKey, stopWatch}, args...) + f(fmt.Sprintf("LogMethodDuration::%s", msg), newArgs...) +} diff --git a/go/enclave/crosschain/block_message_extractor.go b/go/enclave/crosschain/block_message_extractor.go index 7b3c173e5a..ad5d09df2d 100644 --- a/go/enclave/crosschain/block_message_extractor.go +++ b/go/enclave/crosschain/block_message_extractor.go @@ -3,6 +3,8 @@ package crosschain import ( "fmt" + "github.com/obscuronet/go-obscuro/go/enclave/core" + "github.com/obscuronet/go-obscuro/go/enclave/storage" gethcommon "github.com/ethereum/go-ethereum/common" @@ -35,7 +37,7 @@ func (m *blockMessageExtractor) Enabled() bool { } func (m *blockMessageExtractor) StoreCrossChainValueTransfers(block *common.L1Block, receipts common.L1Receipts) error { - defer m.logger.Info("Block value transfer messages processed", log.BlockHashKey, block.Hash(), log.DurationKey, measure.NewStopwatch()) + defer core.LogMethodDuration(m.logger, measure.NewStopwatch(), "Block value transfer messages processed", log.BlockHashKey, block.Hash()) /*areReceiptsValid := common.VerifyReceiptHash(block, receipts) @@ -74,7 +76,7 @@ func (m *blockMessageExtractor) StoreCrossChainValueTransfers(block *common.L1Bl // block - the L1 block for which events are extracted. // receipts - all of the receipts for the corresponding block. This is validated. 
func (m *blockMessageExtractor) StoreCrossChainMessages(block *common.L1Block, receipts common.L1Receipts) error { - defer m.logger.Info("Block cross chain messages processed", log.BlockHashKey, block.Hash(), log.DurationKey, measure.NewStopwatch()) + defer core.LogMethodDuration(m.logger, measure.NewStopwatch(), "Block cross chain messages processed", log.BlockHashKey, block.Hash()) if len(receipts) == 0 { return nil diff --git a/go/enclave/crosschain/message_bus_manager.go b/go/enclave/crosschain/message_bus_manager.go index 533369b7d2..e0eddfbe89 100644 --- a/go/enclave/crosschain/message_bus_manager.go +++ b/go/enclave/crosschain/message_bus_manager.go @@ -183,7 +183,11 @@ func (m *MessageBusManager) RetrieveInboundMessages(fromBlock *common.L1Block, t b = p } - m.logger.Info(fmt.Sprintf("Extracted cross chain messages for block height %d ->%d: %d.", fromBlock.NumberU64(), toBlock.NumberU64(), len(messages))) + logf := m.logger.Info + if len(messages)+len(transfers) == 0 { + logf = m.logger.Debug + } + logf(fmt.Sprintf("Extracted cross chain messages for block height %d ->%d", fromBlock.NumberU64(), toBlock.NumberU64()), "no_msgs", len(messages), "no_value_transfers", len(transfers)) return messages, transfers } diff --git a/go/enclave/enclave.go b/go/enclave/enclave.go index 1d07fa3755..7e6d578e25 100644 --- a/go/enclave/enclave.go +++ b/go/enclave/enclave.go @@ -10,6 +10,8 @@ import ( "sync" "time" + "github.com/obscuronet/go-obscuro/go/common/measure" + "github.com/obscuronet/go-obscuro/go/enclave/gas" "github.com/obscuronet/go-obscuro/go/enclave/storage" @@ -363,9 +365,10 @@ func (e *enclaveImpl) StopClient() common.SystemError { } func (e *enclaveImpl) sendBatch(batch *core.Batch, outChannel chan common.StreamL2UpdatesResponse) { - e.logger.Info("Streaming batch to client", log.BatchHashKey, batch.Hash()) + e.logger.Info("Streaming batch to host", log.BatchHashKey, batch.Hash(), log.BatchSeqNoKey, batch.SeqNo()) extBatch, err := batch.ToExtBatch(e.dataEncryptionService, e.dataCompressionService) if err != nil { + // this error is unrecoverable e.logger.Crit("failed to convert batch", log.ErrKey, err) } resp := common.StreamL2UpdatesResponse{ @@ -377,7 +380,7 @@ func (e *enclaveImpl) sendBatch(batch *core.Batch, outChannel chan common.Stream // this function is only called when the executed batch is the new head func (e *enclaveImpl) streamEventsForNewHeadBatch(batch *core.Batch, receipts types.Receipts, outChannel chan common.StreamL2UpdatesResponse) { logs, err := e.subscriptionManager.GetSubscribedLogsForBatch(batch, receipts) - e.logger.Info("Stream Events for", log.BatchHashKey, batch.Hash(), "nr_events", len(logs)) + e.logger.Debug("Stream Events for", log.BatchHashKey, batch.Hash(), "nr_events", len(logs)) if err != nil { e.logger.Error("Error while getting subscription logs", log.ErrKey, err) return @@ -449,7 +452,7 @@ func (e *enclaveImpl) ingestL1Block(br *common.BlockAndReceipts) (*components.Bl if err != nil { // only warn for unexpected errors if errors.Is(err, errutil.ErrBlockAncestorNotFound) || errors.Is(err, errutil.ErrBlockAlreadyProcessed) { - e.logger.Debug("Failed ingesting block", log.ErrKey, err, log.BlockHashKey, br.Block.Hash()) + e.logger.Debug("Did not ingest block", log.ErrKey, err, log.BlockHashKey, br.Block.Hash()) } else { e.logger.Warn("Failed ingesting block", log.ErrKey, err, log.BlockHashKey, br.Block.Hash()) } @@ -547,12 +550,9 @@ func (e *enclaveImpl) SubmitBatch(extBatch *common.ExtBatch) common.SystemError return 
responses.ToInternalError(fmt.Errorf("requested SubmitBatch with the enclave stopping")) } - callStart := time.Now() - defer func() { - e.logger.Info("SubmitBatch call completed.", "start", callStart, log.DurationKey, time.Since(callStart), log.BatchHashKey, extBatch.Hash()) - }() + core.LogMethodDuration(e.logger, measure.NewStopwatch(), "SubmitBatch call completed.", log.BatchHashKey, extBatch.Hash()) - e.logger.Info("SubmitBatch", log.BatchHeightKey, extBatch.Header.Number, log.BatchHashKey, extBatch.Hash(), "l1", extBatch.Header.L1Proof) + e.logger.Info("Received new p2p batch", log.BatchHeightKey, extBatch.Header.Number, log.BatchHashKey, extBatch.Hash(), "l1", extBatch.Header.L1Proof) batch, err := core.ToBatch(extBatch, e.dataEncryptionService, e.dataCompressionService) if err != nil { return responses.ToInternalError(fmt.Errorf("could not convert batch. Cause: %w", err)) @@ -988,7 +988,7 @@ func (e *enclaveImpl) Stop() common.SystemError { if e.profiler != nil { if err := e.profiler.Stop(); err != nil { - e.logger.Error("Could not profiler", log.ErrKey, err) + e.logger.Error("Could not stop profiler", log.ErrKey, err) return err } } @@ -1212,14 +1212,14 @@ func (e *enclaveImpl) DoEstimateGas(args *gethapi.TransactionArgs, blkNumber *ge if transfer == nil { transfer = new(hexutil.Big) } - e.logger.Warn("Gas estimation capped by limited funds", "original", hi, "balance", balance, + e.logger.Debug("Gas estimation capped by limited funds", "original", hi, "balance", balance, "sent", transfer.ToInt(), "maxFeePerGas", feeCap, "fundable", allowance) hi = allowance.Uint64() } } // Recap the highest gas allowance with specified gascap. if gasCap != 0 && hi > gasCap { - e.logger.Warn("Caller gas above allowance, capping", "requested", hi, "cap", gasCap) + e.logger.Debug("Caller gas above allowance, capping", "requested", hi, "cap", gasCap) hi = gasCap } cap = hi //nolint: revive diff --git a/go/enclave/events/subscription_manager.go b/go/enclave/events/subscription_manager.go index c35db7f0b3..a66c9bf35f 100644 --- a/go/enclave/events/subscription_manager.go +++ b/go/enclave/events/subscription_manager.go @@ -151,7 +151,7 @@ func (s *SubscriptionManager) GetSubscribedLogsForBatch(batch *core.Batch, recei if relevant { relevantLogsForSub = append(relevantLogsForSub, logItem) } - s.logger.Info(fmt.Sprintf("Subscription %s. Account %s. Log %v. Extracted addresses: %v. Relevant: %t", id, sub.Account, logItem, userAddrs, relevant)) + s.logger.Debug(fmt.Sprintf("Subscription %s. Account %s. Log %v. Extracted addresses: %v. Relevant: %t", id, sub.Account, logItem, userAddrs, relevant)) } if len(relevantLogsForSub) > 0 { relevantLogsPerSubscription[id] = relevantLogsForSub @@ -237,21 +237,21 @@ func filterLogs(logs []*types.Log, fromBlock, toBlock *big.Int, addresses []geth Logs: for _, logItem := range logs { if fromBlock != nil && fromBlock.Int64() >= 0 && fromBlock.Uint64() > logItem.BlockNumber { - logger.Info(fmt.Sprintf("Skipping log = %v", logItem), "reason", "In the past. The starting block num for filter is bigger than log") + logger.Debug(fmt.Sprintf("Skipping log = %v", logItem), "reason", "In the past. The starting block num for filter is bigger than log") continue } if toBlock != nil && toBlock.Int64() > 0 && toBlock.Uint64() < logItem.BlockNumber { - logger.Info(fmt.Sprintf("Skipping log = %v", logItem), "reason", "In the future. The ending block num for filter is smaller than log") + logger.Debug(fmt.Sprintf("Skipping log = %v", logItem), "reason", "In the future. 
The ending block num for filter is smaller than log") continue } if len(addresses) > 0 && !includes(addresses, logItem.Address) { - logger.Info(fmt.Sprintf("Skipping log = %v", logItem), "reason", "The contract address of the log is not an address of interest") + logger.Debug(fmt.Sprintf("Skipping log = %v", logItem), "reason", "The contract address of the log is not an address of interest") continue } // If the to filtered topics is greater than the amount of topics in logs, skip. if len(topics) > len(logItem.Topics) { - logger.Info(fmt.Sprintf("Skipping log = %v", logItem), "reason", "Insufficient topics. The log has less topics than the required one to satisfy the query") + logger.Debug(fmt.Sprintf("Skipping log = %v", logItem), "reason", "Insufficient topics. The log has less topics than the required one to satisfy the query") continue } for i, sub := range topics { @@ -263,7 +263,7 @@ Logs: } } if !match { - logger.Info(fmt.Sprintf("Skipping log = %v", logItem), "reason", "Topics do not match.") + logger.Debug(fmt.Sprintf("Skipping log = %v", logItem), "reason", "Topics do not match.") continue Logs } } diff --git a/go/enclave/nodetype/sequencer.go b/go/enclave/nodetype/sequencer.go index e50dc5a10f..48207eabee 100644 --- a/go/enclave/nodetype/sequencer.go +++ b/go/enclave/nodetype/sequencer.go @@ -230,7 +230,7 @@ func (s *sequencer) produceBatch(sequencerNo *big.Int, l1Hash common.L1BlockHash // StoreExecutedBatch - stores an executed batch in one go. This can be done for the sequencer because it is guaranteed // that all dependencies are in place for the execution to be successful. func (s *sequencer) StoreExecutedBatch(batch *core.Batch, receipts types.Receipts) error { - defer s.logger.Info("Registry StoreBatch() exit", log.BatchHashKey, batch.Hash(), log.DurationKey, measure.NewStopwatch()) + defer core.LogMethodDuration(s.logger, measure.NewStopwatch(), "Registry StoreBatch() exit", log.BatchHashKey, batch.Hash()) // Check if this batch is already stored. 
if _, err := s.storage.FetchBatchHeader(batch.Hash()); err == nil { @@ -268,8 +268,6 @@ func (s *sequencer) CreateRollup(lastBatchNo uint64) (*common.ExtRollup, error) return nil, fmt.Errorf("failed to sign created rollup: %w", err) } - s.logger.Info("Created new head rollup", log.RollupHashKey, rollup.Hash(), "numBatches", len(rollup.Batches)) - return s.rollupCompression.CreateExtRollup(rollup) } @@ -339,6 +337,7 @@ func (s *sequencer) OnL1Fork(fork *common.ChainFork) error { rollup, err := s.storage.FetchReorgedRollup(fork.NonCanonicalPath) if err == nil { s.logger.Error("Reissue rollup", log.RollupHashKey, rollup) + // todo - tudor - finalise the logic to reissue a rollup when the block used for compression was reorged return nil } if !errors.Is(err, errutil.ErrNotFound) { diff --git a/go/enclave/storage/storage.go b/go/enclave/storage/storage.go index e8d627c65b..6560781007 100644 --- a/go/enclave/storage/storage.go +++ b/go/enclave/storage/storage.go @@ -9,6 +9,8 @@ import ( "math/big" "time" + "github.com/obscuronet/go-obscuro/go/common/measure" + "github.com/allegro/bigcache/v3" "github.com/eko/gocache/lib/v4/cache" bigcache_store "github.com/eko/gocache/store/bigcache/v4" @@ -107,28 +109,24 @@ func (s *storageImpl) Close() error { } func (s *storageImpl) FetchHeadBatch() (*core.Batch, error) { - callStart := time.Now() - defer s.logDuration("FetchHeadBatch", callStart) + defer s.logDuration("FetchHeadBatch", measure.NewStopwatch()) return enclavedb.ReadCurrentHeadBatch(s.db.GetSQLDB()) } func (s *storageImpl) FetchCurrentSequencerNo() (*big.Int, error) { - callStart := time.Now() - defer s.logDuration("FetchCurrentSequencerNo", callStart) + defer s.logDuration("FetchCurrentSequencerNo", measure.NewStopwatch()) return enclavedb.ReadCurrentSequencerNo(s.db.GetSQLDB()) } func (s *storageImpl) FetchBatch(hash common.L2BatchHash) (*core.Batch, error) { - callStart := time.Now() - defer s.logDuration("FetchBatch", callStart) + defer s.logDuration("FetchBatch", measure.NewStopwatch()) return getCachedValue(s.batchCache, s.logger, hash, func(v any) (*core.Batch, error) { return enclavedb.ReadBatchByHash(s.db.GetSQLDB(), v.(common.L2BatchHash)) }) } func (s *storageImpl) FetchBatchHeader(hash common.L2BatchHash) (*common.BatchHeader, error) { - callStart := time.Now() - defer s.logDuration("FetchBatchHeader", callStart) + defer s.logDuration("FetchBatchHeader", measure.NewStopwatch()) b, err := s.FetchBatch(hash) if err != nil { return nil, err @@ -137,14 +135,12 @@ func (s *storageImpl) FetchBatchHeader(hash common.L2BatchHash) (*common.BatchHe } func (s *storageImpl) FetchBatchByHeight(height uint64) (*core.Batch, error) { - callStart := time.Now() - defer s.logDuration("FetchBatchByHeight", callStart) + defer s.logDuration("FetchBatchByHeight", measure.NewStopwatch()) return enclavedb.ReadCanonicalBatchByHeight(s.db.GetSQLDB(), height) } func (s *storageImpl) StoreBlock(b *types.Block, chainFork *common.ChainFork) error { - callStart := time.Now() - defer s.logDuration("StoreBlock", callStart) + defer s.logDuration("StoreBlock", measure.NewStopwatch()) dbTransaction := s.db.NewDBTransaction() if chainFork != nil && chainFork.IsFork() { s.logger.Info(fmt.Sprintf("Fork. 
%s", chainFork)) @@ -168,16 +164,14 @@ func (s *storageImpl) StoreBlock(b *types.Block, chainFork *common.ChainFork) er } func (s *storageImpl) FetchBlock(blockHash common.L1BlockHash) (*types.Block, error) { - callStart := time.Now() - defer s.logDuration("FetchBlock", callStart) + defer s.logDuration("FetchBlock", measure.NewStopwatch()) return getCachedValue(s.blockCache, s.logger, blockHash, func(hash any) (*types.Block, error) { return enclavedb.FetchBlock(s.db.GetSQLDB(), hash.(common.L1BlockHash)) }) } func (s *storageImpl) FetchCanonicaBlockByHeight(height *big.Int) (*types.Block, error) { - callStart := time.Now() - defer s.logDuration("FetchCanonicaBlockByHeight", callStart) + defer s.logDuration("FetchCanonicaBlockByHeight", measure.NewStopwatch()) header, err := enclavedb.FetchBlockHeaderByHeight(s.db.GetSQLDB(), height) if err != nil { return nil, err @@ -189,14 +183,12 @@ func (s *storageImpl) FetchCanonicaBlockByHeight(height *big.Int) (*types.Block, } func (s *storageImpl) FetchHeadBlock() (*types.Block, error) { - callStart := time.Now() - defer s.logDuration("FetchHeadBlock", callStart) + defer s.logDuration("FetchHeadBlock", measure.NewStopwatch()) return enclavedb.FetchHeadBlock(s.db.GetSQLDB()) } func (s *storageImpl) StoreSecret(secret crypto.SharedEnclaveSecret) error { - callStart := time.Now() - defer s.logDuration("StoreSecret", callStart) + defer s.logDuration("StoreSecret", measure.NewStopwatch()) enc, err := rlp.EncodeToBytes(secret) if err != nil { return fmt.Errorf("could not encode shared secret. Cause: %w", err) @@ -209,8 +201,7 @@ func (s *storageImpl) StoreSecret(secret crypto.SharedEnclaveSecret) error { } func (s *storageImpl) FetchSecret() (*crypto.SharedEnclaveSecret, error) { - callStart := time.Now() - defer s.logDuration("FetchSecret", callStart) + defer s.logDuration("FetchSecret", measure.NewStopwatch()) var ss crypto.SharedEnclaveSecret cfg, err := enclavedb.FetchConfig(s.db.GetSQLDB(), masterSeedCfg) @@ -225,8 +216,7 @@ func (s *storageImpl) FetchSecret() (*crypto.SharedEnclaveSecret, error) { } func (s *storageImpl) IsAncestor(block *types.Block, maybeAncestor *types.Block) bool { - callStart := time.Now() - defer s.logDuration("IsAncestor", callStart) + defer s.logDuration("IsAncestor", measure.NewStopwatch()) if bytes.Equal(maybeAncestor.Hash().Bytes(), block.Hash().Bytes()) { return true } @@ -245,8 +235,7 @@ func (s *storageImpl) IsAncestor(block *types.Block, maybeAncestor *types.Block) } func (s *storageImpl) IsBlockAncestor(block *types.Block, maybeAncestor common.L1BlockHash) bool { - callStart := time.Now() - defer s.logDuration("IsBlockAncestor", callStart) + defer s.logDuration("IsBlockAncestor", measure.NewStopwatch()) resolvedBlock, err := s.FetchBlock(maybeAncestor) if err != nil { return false @@ -255,8 +244,7 @@ func (s *storageImpl) IsBlockAncestor(block *types.Block, maybeAncestor common.L } func (s *storageImpl) HealthCheck() (bool, error) { - callStart := time.Now() - defer s.logDuration("HealthCheck", callStart) + defer s.logDuration("HealthCheck", measure.NewStopwatch()) headBatch, err := s.FetchHeadBatch() if err != nil { s.logger.Info("HealthCheck failed for enclave storage", log.ErrKey, err) @@ -266,14 +254,12 @@ func (s *storageImpl) HealthCheck() (bool, error) { } func (s *storageImpl) FetchHeadBatchForBlock(blockHash common.L1BlockHash) (*core.Batch, error) { - callStart := time.Now() - defer s.logDuration("FetchHeadBatchForBlock", callStart) + defer s.logDuration("FetchHeadBatchForBlock", measure.NewStopwatch()) 
return enclavedb.ReadHeadBatchForBlock(s.db.GetSQLDB(), blockHash) } func (s *storageImpl) CreateStateDB(hash common.L2BatchHash) (*state.StateDB, error) { - callStart := time.Now() - defer s.logDuration("CreateStateDB", callStart) + defer s.logDuration("CreateStateDB", measure.NewStopwatch()) batch, err := s.FetchBatch(hash) if err != nil { return nil, err @@ -288,8 +274,7 @@ func (s *storageImpl) CreateStateDB(hash common.L2BatchHash) (*state.StateDB, er } func (s *storageImpl) EmptyStateDB() (*state.StateDB, error) { - callStart := time.Now() - defer s.logDuration("EmptyStateDB", callStart) + defer s.logDuration("EmptyStateDB", measure.NewStopwatch()) statedb, err := state.New(types.EmptyRootHash, s.stateDB, nil) if err != nil { return nil, fmt.Errorf("could not create state DB. Cause: %w", err) @@ -299,32 +284,27 @@ func (s *storageImpl) EmptyStateDB() (*state.StateDB, error) { // GetReceiptsByBatchHash retrieves the receipts for all transactions in a given batch. func (s *storageImpl) GetReceiptsByBatchHash(hash gethcommon.Hash) (types.Receipts, error) { - callStart := time.Now() - defer s.logDuration("GetReceiptsByBatchHash", callStart) + defer s.logDuration("GetReceiptsByBatchHash", measure.NewStopwatch()) return enclavedb.ReadReceiptsByBatchHash(s.db.GetSQLDB(), hash, s.chainConfig) } func (s *storageImpl) GetTransaction(txHash gethcommon.Hash) (*types.Transaction, gethcommon.Hash, uint64, uint64, error) { - callStart := time.Now() - defer s.logDuration("GetTransaction", callStart) + defer s.logDuration("GetTransaction", measure.NewStopwatch()) return enclavedb.ReadTransaction(s.db.GetSQLDB(), txHash) } func (s *storageImpl) GetContractCreationTx(address gethcommon.Address) (*gethcommon.Hash, error) { - callStart := time.Now() - defer s.logDuration("GetContractCreationTx", callStart) + defer s.logDuration("GetContractCreationTx", measure.NewStopwatch()) return enclavedb.GetContractCreationTx(s.db.GetSQLDB(), address) } func (s *storageImpl) GetTransactionReceipt(txHash gethcommon.Hash) (*types.Receipt, error) { - callStart := time.Now() - defer s.logDuration("GetTransactionReceipt", callStart) + defer s.logDuration("GetTransactionReceipt", measure.NewStopwatch()) return enclavedb.ReadReceipt(s.db.GetSQLDB(), txHash, s.chainConfig) } func (s *storageImpl) FetchAttestedKey(address gethcommon.Address) (*ecdsa.PublicKey, error) { - callStart := time.Now() - defer s.logDuration("FetchAttestedKey", callStart) + defer s.logDuration("FetchAttestedKey", measure.NewStopwatch()) key, err := enclavedb.FetchAttKey(s.db.GetSQLDB(), address) if err != nil { return nil, fmt.Errorf("could not retrieve attestation key for address %s. 
Cause: %w", address, err) @@ -339,29 +319,25 @@ func (s *storageImpl) FetchAttestedKey(address gethcommon.Address) (*ecdsa.Publi } func (s *storageImpl) StoreAttestedKey(aggregator gethcommon.Address, key *ecdsa.PublicKey) error { - callStart := time.Now() - defer s.logDuration("StoreAttestedKey", callStart) + defer s.logDuration("StoreAttestedKey", measure.NewStopwatch()) _, err := enclavedb.WriteAttKey(s.db.GetSQLDB(), aggregator, gethcrypto.CompressPubkey(key)) return err } func (s *storageImpl) FetchBatchBySeqNo(seqNum uint64) (*core.Batch, error) { - callStart := time.Now() - defer s.logDuration("FetchBatchBySeqNo", callStart) + defer s.logDuration("FetchBatchBySeqNo", measure.NewStopwatch()) return getCachedValue(s.batchCache, s.logger, seqNum, func(seq any) (*core.Batch, error) { return enclavedb.ReadBatchBySeqNo(s.db.GetSQLDB(), seq.(uint64)) }) } func (s *storageImpl) FetchBatchesByBlock(block common.L1BlockHash) ([]*core.Batch, error) { - callStart := time.Now() - defer s.logDuration("FetchBatchesByBlock", callStart) + defer s.logDuration("FetchBatchesByBlock", measure.NewStopwatch()) return enclavedb.ReadBatchesByBlock(s.db.GetSQLDB(), block) } func (s *storageImpl) StoreBatch(batch *core.Batch) error { - callStart := time.Now() - defer s.logDuration("StoreBatch", callStart) + defer s.logDuration("StoreBatch", measure.NewStopwatch()) // sanity check that this is not overlapping existingBatchWithSameSequence, _ := s.FetchBatchBySeqNo(batch.SeqNo().Uint64()) if existingBatchWithSameSequence != nil && existingBatchWithSameSequence.Hash() != batch.Hash() { @@ -390,8 +366,7 @@ func (s *storageImpl) StoreBatch(batch *core.Batch) error { } func (s *storageImpl) StoreExecutedBatch(batch *core.Batch, receipts []*types.Receipt) error { - callStart := time.Now() - defer s.logDuration("StoreExecutedBatch", callStart) + defer s.logDuration("StoreExecutedBatch", measure.NewStopwatch()) executed, err := enclavedb.BatchWasExecuted(s.db.GetSQLDB(), batch.Hash()) if err != nil { return err @@ -430,14 +405,12 @@ func (s *storageImpl) StoreValueTransfers(blockHash common.L1BlockHash, transfer } func (s *storageImpl) StoreL1Messages(blockHash common.L1BlockHash, messages common.CrossChainMessages) error { - callStart := time.Now() - defer s.logDuration("StoreL1Messages", callStart) + defer s.logDuration("StoreL1Messages", measure.NewStopwatch()) return enclavedb.WriteL1Messages(s.db.GetSQLDB(), blockHash, messages, false) } func (s *storageImpl) GetL1Messages(blockHash common.L1BlockHash) (common.CrossChainMessages, error) { - callStart := time.Now() - defer s.logDuration("GetL1Messages", callStart) + defer s.logDuration("GetL1Messages", measure.NewStopwatch()) return enclavedb.FetchL1Messages[common.CrossChainMessage](s.db.GetSQLDB(), blockHash, false) } @@ -448,8 +421,7 @@ func (s *storageImpl) GetL1Transfers(blockHash common.L1BlockHash) (common.Value const enclaveKeyKey = "ek" func (s *storageImpl) StoreEnclaveKey(enclaveKey *ecdsa.PrivateKey) error { - callStart := time.Now() - defer s.logDuration("StoreEnclaveKey", callStart) + defer s.logDuration("StoreEnclaveKey", measure.NewStopwatch()) if enclaveKey == nil { return errors.New("enclaveKey cannot be nil") } @@ -460,8 +432,7 @@ func (s *storageImpl) StoreEnclaveKey(enclaveKey *ecdsa.PrivateKey) error { } func (s *storageImpl) GetEnclaveKey() (*ecdsa.PrivateKey, error) { - callStart := time.Now() - defer s.logDuration("GetEnclaveKey", callStart) + defer s.logDuration("GetEnclaveKey", measure.NewStopwatch()) keyBytes, err := 
enclavedb.FetchConfig(s.db.GetSQLDB(), enclaveKeyKey) if err != nil { return nil, err @@ -474,8 +445,7 @@ func (s *storageImpl) GetEnclaveKey() (*ecdsa.PrivateKey, error) { } func (s *storageImpl) StoreRollup(rollup *common.ExtRollup, internalHeader *common.CalldataRollupHeader) error { - callStart := time.Now() - defer s.logDuration("StoreRollup", callStart) + defer s.logDuration("StoreRollup", measure.NewStopwatch()) dbBatch := s.db.NewDBTransaction() if err := enclavedb.WriteRollup(dbBatch, rollup.Header, internalHeader); err != nil { @@ -493,8 +463,7 @@ func (s *storageImpl) FetchReorgedRollup(reorgedBlocks []common.L1BlockHash) (*c } func (s *storageImpl) DebugGetLogs(txHash common.TxHash) ([]*tracers.DebugLogs, error) { - callStart := time.Now() - defer s.logDuration("DebugGetLogs", callStart) + defer s.logDuration("DebugGetLogs", measure.NewStopwatch()) return enclavedb.DebugGetLogs(s.db.GetSQLDB(), txHash) } @@ -505,65 +474,45 @@ func (s *storageImpl) FilterLogs( addresses []gethcommon.Address, topics [][]gethcommon.Hash, ) ([]*types.Log, error) { - callStart := time.Now() - defer s.logDuration("FilterLogs", callStart) + defer s.logDuration("FilterLogs", measure.NewStopwatch()) return enclavedb.FilterLogs(s.db.GetSQLDB(), requestingAccount, fromBlock, toBlock, blockHash, addresses, topics) } func (s *storageImpl) GetContractCount() (*big.Int, error) { - callStart := time.Now() - defer s.logDuration("GetContractCount", callStart) + defer s.logDuration("GetContractCount", measure.NewStopwatch()) return enclavedb.ReadContractCreationCount(s.db.GetSQLDB()) } func (s *storageImpl) FetchCanonicalUnexecutedBatches(from *big.Int) ([]*core.Batch, error) { - callStart := time.Now() - defer s.logDuration("FetchCanonicalUnexecutedBatches", callStart) + defer s.logDuration("FetchCanonicalUnexecutedBatches", measure.NewStopwatch()) return enclavedb.ReadUnexecutedBatches(s.db.GetSQLDB(), from) } func (s *storageImpl) BatchWasExecuted(hash common.L2BatchHash) (bool, error) { - callStart := time.Now() - defer s.logDuration("BatchWasExecuted", callStart) + defer s.logDuration("BatchWasExecuted", measure.NewStopwatch()) return enclavedb.BatchWasExecuted(s.db.GetSQLDB(), hash) } func (s *storageImpl) GetReceiptsPerAddress(address *gethcommon.Address, pagination *common.QueryPagination) (types.Receipts, error) { - callStart := time.Now() - defer s.logDuration("GetReceiptsPerAddress", callStart) + defer s.logDuration("GetReceiptsPerAddress", measure.NewStopwatch()) return enclavedb.GetReceiptsPerAddress(s.db.GetSQLDB(), s.chainConfig, address, pagination) } func (s *storageImpl) GetReceiptsPerAddressCount(address *gethcommon.Address) (uint64, error) { - callStart := time.Now() - defer s.logDuration("GetReceiptsPerAddressCount", callStart) + defer s.logDuration("GetReceiptsPerAddressCount", measure.NewStopwatch()) return enclavedb.GetReceiptsPerAddressCount(s.db.GetSQLDB(), address) } func (s *storageImpl) GetPublicTransactionData(pagination *common.QueryPagination) ([]common.PublicTransaction, error) { - callStart := time.Now() - defer s.logDuration("GetPublicTransactionData", callStart) + defer s.logDuration("GetPublicTransactionData", measure.NewStopwatch()) return enclavedb.GetPublicTransactionData(s.db.GetSQLDB(), pagination) } func (s *storageImpl) GetPublicTransactionCount() (uint64, error) { - callStart := time.Now() - defer s.logDuration("GetPublicTransactionCount", callStart) + defer s.logDuration("GetPublicTransactionCount", measure.NewStopwatch()) return 
enclavedb.GetPublicTransactionCount(s.db.GetSQLDB()) } -func (s *storageImpl) logDuration(method string, callStart time.Time) { - durationMillis := time.Since(callStart).Milliseconds() - msg := fmt.Sprintf("Storage::%s completed", method) - // we only log 'slow' calls to reduce noise - switch { - case durationMillis > _slowCallErrorThresholdMillis: - s.logger.Error(msg, log.DurationMilliKey, durationMillis) - case durationMillis > _slowCallWarnThresholdMillis: - s.logger.Warn(msg, log.DurationMilliKey, durationMillis) - case durationMillis > _slowCallInfoThresholdMillis: - s.logger.Info(msg, log.DurationMilliKey, durationMillis) - case durationMillis > _slowCallDebugThresholdMillis: - s.logger.Debug(msg, log.DurationMilliKey, durationMillis) - } +func (s *storageImpl) logDuration(method string, stopWatch *measure.Stopwatch) { + core.LogMethodDuration(s.logger, stopWatch, fmt.Sprintf("Storage::%s completed", method)) } diff --git a/go/host/enclave/guardian.go b/go/host/enclave/guardian.go index 83387ebdde..6a897feaf9 100644 --- a/go/host/enclave/guardian.go +++ b/go/host/enclave/guardian.go @@ -111,12 +111,12 @@ func (g *Guardian) Start() error { func (g *Guardian) Stop() error { err := g.enclaveClient.Stop() if err != nil { - g.logger.Warn("error stopping enclave", log.ErrKey, err) + g.logger.Error("error stopping enclave", log.ErrKey, err) } err = g.enclaveClient.StopClient() if err != nil { - g.logger.Warn("error stopping enclave client", log.ErrKey, err) + g.logger.Error("error stopping enclave client", log.ErrKey, err) } return nil @@ -163,7 +163,7 @@ func (g *Guardian) HandleBlock(block *types.Block) { // Note: this should only be called for validators, sequencers produce their own batches func (g *Guardian) HandleBatch(batch *common.ExtBatch) { if g.hostData.IsSequencer { - g.logger.Error("repo received batch but we are a sequencer, ignoring") + g.logger.Error("Repo received batch but we are a sequencer, ignoring") return } g.logger.Debug("Received L2 block", log.BatchHashKey, batch.Hash(), log.BatchSeqNoKey, batch.Header.SequencerOrderNo) @@ -174,7 +174,7 @@ func (g *Guardian) HandleBatch(batch *common.ExtBatch) { } err := g.submitL2Batch(batch) if err != nil { - g.logger.Warn("error submitting batch to enclave", log.ErrKey, err) + g.logger.Error("Error submitting batch to enclave", log.ErrKey, err) } } @@ -238,7 +238,7 @@ func (g *Guardian) mainLoop() { func (g *Guardian) checkEnclaveStatus() { s, err := g.enclaveClient.Status() if err != nil { - g.logger.Error("could not get enclave status", log.ErrKey, err) + g.logger.Error("Could not get enclave status", log.ErrKey, err) // we record this as a disconnection, we can't get any more info from the enclave about status currently g.state.OnDisconnected() return @@ -281,7 +281,7 @@ func (g *Guardian) provideSecret() error { if scrt.RequesterID.Hex() == g.hostData.ID.Hex() { err = g.enclaveClient.InitEnclave(scrt.Secret) if err != nil { - g.logger.Error("could not initialize enclave with received secret response", log.ErrKey, err) + g.logger.Error("Could not initialize enclave with received secret response", log.ErrKey, err) continue // try the next secret response in the block if there are more } return nil // successfully initialized enclave with secret, break out of retry loop function @@ -424,7 +424,7 @@ func (g *Guardian) submitL1Block(block *common.L1Block, isLatest bool) (bool, er // todo: make sure this doesn't respond to old requests (once we have a proper protocol for that) err = 
g.publishSharedSecretResponses(resp.ProducedSecretResponses) if err != nil { - g.logger.Error("failed to publish response to secret request", log.ErrKey, err) + g.logger.Error("Failed to publish response to secret request", log.ErrKey, err) } return true, nil } @@ -441,14 +441,14 @@ func (g *Guardian) processL1BlockTransactions(block *common.L1Block) { for _, rollup := range rollupTxs { r, err := common.DecodeRollup(rollup.Rollup) if err != nil { - g.logger.Error("could not decode rollup.", log.ErrKey, err) + g.logger.Error("Could not decode rollup.", log.ErrKey, err) } err = g.db.AddRollupHeader(r, block) if err != nil { if errors.Is(err, errutil.ErrAlreadyExists) { - g.logger.Info("rollup already stored", log.RollupHashKey, r.Hash()) + g.logger.Info("Rollup already stored", log.RollupHashKey, r.Hash()) } else { - g.logger.Error("could not store rollup.", log.ErrKey, err) + g.logger.Error("Could not store rollup.", log.ErrKey, err) } } } @@ -503,13 +503,13 @@ func (g *Guardian) periodicBatchProduction() { case <-batchProdTicker.C: if !g.state.InSyncWithL1() { // if we're behind the L1, we don't want to produce batches - g.logger.Debug("skipping batch production because L1 is not up to date") + g.logger.Debug("Skipping batch production because L1 is not up to date") continue } - g.logger.Debug("create batch") + g.logger.Debug("Create batch") err := g.enclaveClient.CreateBatch() if err != nil { - g.logger.Error("unable to produce batch", log.ErrKey, err) + g.logger.Error("Unable to produce batch", log.ErrKey, err) } case <-g.hostInterrupter.Done(): // interrupted - end periodic process @@ -537,13 +537,15 @@ func (g *Guardian) periodicRollupProduction() { fromBatch, err := g.getLatestBatchNo() if err != nil { - g.logger.Warn("encountered error while trying to retrieve latest sequence number", log.ErrKey, err) + g.logger.Error("encountered error while trying to retrieve latest sequence number", log.ErrKey, err) continue } availBatchesSumSize, err := g.calculateNonRolledupBatchesSize(fromBatch) if err != nil { - g.logger.Error("unable to GetBatchesAfterSize rollup", log.ErrKey, err) + g.logger.Error("Unable to estimate the size of the current rollup", log.ErrKey, err) + // todo - this should not happen. Is it worth continuing? + availBatchesSumSize = 0 } // produce and issue rollup when either: @@ -552,7 +554,7 @@ func (g *Guardian) periodicRollupProduction() { if time.Since(lastSuccessfulRollup) > g.rollupInterval || availBatchesSumSize >= g.maxRollupSize { producedRollup, err := g.enclaveClient.CreateRollup(fromBatch) if err != nil { - g.logger.Error("unable to create rollup", "batchSeqNo", fromBatch) + g.logger.Error("Unable to create rollup", log.BatchSeqNoKey, fromBatch) continue } // this method waits until the receipt is received @@ -586,7 +588,7 @@ func (g *Guardian) streamEnclaveData() { continue } - if resp.Batch != nil { + if resp.Batch != nil { //nolint:nestif lastBatch = resp.Batch g.logger.Trace("Received batch from stream", log.BatchHashKey, lastBatch.Hash()) err := g.sl.L2Repo().AddBatch(resp.Batch) @@ -596,14 +598,15 @@ func (g *Guardian) streamEnclaveData() { } if g.hostData.IsSequencer { // if we are the sequencer we need to broadcast this new batch to the network - g.logger.Info("Batch produced", log.BatchHeightKey, resp.Batch.Header.Number, log.BatchHashKey, resp.Batch.Hash()) + g.logger.Info("Batch produced. 
Sending to peers..", log.BatchHeightKey, resp.Batch.Header.Number, log.BatchHashKey, resp.Batch.Hash()) err = g.sl.P2P().BroadcastBatches([]*common.ExtBatch{resp.Batch}) if err != nil { - g.logger.Error("failed to broadcast batch", log.BatchHashKey, resp.Batch.Hash(), log.ErrKey, err) + g.logger.Error("Failed to broadcast batch", log.BatchHashKey, resp.Batch.Hash(), log.ErrKey, err) } + } else { + g.logger.Debug("Received batch from enclave", log.BatchSeqNoKey, resp.Batch.Header.SequencerOrderNo, log.BatchHashKey, resp.Batch.Hash()) } - g.logger.Info("Received batch from enclave", log.BatchSeqNoKey, resp.Batch.Header.SequencerOrderNo, log.BatchHashKey, resp.Batch.Hash()) g.state.OnProcessedBatch(resp.Batch.Header.SequencerOrderNo) } diff --git a/go/host/enclave/state.go b/go/host/enclave/state.go index 9ffd80c195..c2ec1875f8 100644 --- a/go/host/enclave/state.go +++ b/go/host/enclave/state.go @@ -156,7 +156,7 @@ func (s *StateTracker) calculateStatus() Status { return Live default: // this shouldn't happen - s.logger.Error("unknown enclave status code - this should not happen", "code", s.enclaveStatusCode) + s.logger.Error("Unknown enclave status code - this should not happen", "code", s.enclaveStatusCode) return Unavailable } } diff --git a/go/host/events/logs.go b/go/host/events/logs.go index 34307decd2..82bb1b089e 100644 --- a/go/host/events/logs.go +++ b/go/host/events/logs.go @@ -60,9 +60,10 @@ func (l *LogEventManager) Subscribe(id rpc.ID, encryptedLogSubscription common.E } func (l *LogEventManager) Unsubscribe(id rpc.ID) { - err := l.sl.Enclaves().Unsubscribe(id) - if err != nil { - l.logger.Warn("could not terminate enclave subscription", log.ErrKey, err) + enclaveUnsubErr := l.sl.Enclaves().Unsubscribe(id) + if enclaveUnsubErr != nil { + // this can happen when the client passes a invalid subscription id + l.logger.Debug("Could not terminate enclave subscription", log.SubIDKey, id, log.ErrKey, enclaveUnsubErr) } l.subscriptionMutex.Lock() defer l.subscriptionMutex.Unlock() @@ -71,6 +72,9 @@ func (l *LogEventManager) Unsubscribe(id rpc.ID) { if found { close(logSubscription.ch) delete(l.subscriptions, id) + if enclaveUnsubErr != nil { + l.logger.Error("The subscription management between the host and the enclave is out of sync", log.SubIDKey, id, log.ErrKey, enclaveUnsubErr) + } } } @@ -88,7 +92,7 @@ func (l *LogEventManager) SendLogsToSubscribers(result *common.EncryptedSubscrip } } -// Pairs the latest seen rollup for a log subscription with the channel on which new logs should be sent. +// Simple wrapper over the channel that logs for this subscription are sent to. type subscription struct { - ch chan []byte // The channel that logs for this subscription are sent to. 
+ ch chan []byte } diff --git a/go/host/host.go b/go/host/host.go index af70d26155..1dacf03d54 100644 --- a/go/host/host.go +++ b/go/host/host.go @@ -164,12 +164,12 @@ func (h *host) Stop() error { // stop all registered services for name, service := range h.services.All() { if err := service.Stop(); err != nil { - h.logger.Error("failed to stop service", "service", name, log.ErrKey, err) + h.logger.Error("Failed to stop service", "service", name, log.ErrKey, err) } } if err := h.db.Stop(); err != nil { - h.logger.Error("failed to stop DB", log.ErrKey, err) + h.logger.Error("Failed to stop DB", log.ErrKey, err) } h.logger.Info("Host shut down complete.") diff --git a/go/host/l1/blockrepository.go b/go/host/l1/blockrepository.go index ae6ebac168..2118e403be 100644 --- a/go/host/l1/blockrepository.go +++ b/go/host/l1/blockrepository.go @@ -176,7 +176,7 @@ func (r *Repository) streamLiveBlocks() { r.head = header.Hash() block, err := r.ethClient.BlockByHash(header.Hash()) if err != nil { - r.logger.Error("error fetching new block", log.BlockHashKey, header.Hash(), + r.logger.Error("Error fetching new block", log.BlockHashKey, header.Hash(), log.BlockHeightKey, header.Number, log.ErrKey, err) continue } diff --git a/go/host/l1/publisher.go b/go/host/l1/publisher.go index 8faafa022c..06c7d9b972 100644 --- a/go/host/l1/publisher.go +++ b/go/host/l1/publisher.go @@ -138,7 +138,7 @@ func (p *Publisher) PublishSecretResponse(secretResponse *common.ProducedSecretR go func() { err := p.publishTransaction(respondSecretTx) if err != nil { - p.logger.Error("could not broadcast secret response L1 tx", log.ErrKey, err) + p.logger.Error("Could not broadcast secret response L1 tx", log.ErrKey, err) } }() @@ -201,7 +201,7 @@ func (p *Publisher) PublishRollup(producedRollup *common.ExtRollup) { err = p.publishTransaction(rollupTx) if err != nil { - p.logger.Error("could not issue rollup tx", log.ErrKey, err) + p.logger.Error("Could not issue rollup tx", log.RollupHashKey, producedRollup.Hash(), log.ErrKey, err) } else { p.logger.Info("Rollup included in L1", log.RollupHashKey, producedRollup.Hash()) } diff --git a/go/host/l2/batchrepository.go b/go/host/l2/batchrepository.go index 5607126449..bb3cd89f1b 100644 --- a/go/host/l2/batchrepository.go +++ b/go/host/l2/batchrepository.go @@ -174,7 +174,7 @@ func (r *Repository) FetchBatchBySeqNo(seqNo *big.Int) (*common.ExtBatch, error) // - when the node is a validator to store batches read from roll-ups // If the repository already has the batch it returns an AlreadyExists error which is typically ignored. 
func (r *Repository) AddBatch(batch *common.ExtBatch) error { - r.logger.Info("Saving batch", log.BatchSeqNoKey, batch.Header.SequencerOrderNo, log.BatchHashKey, batch.Hash()) + r.logger.Debug("Saving batch", log.BatchSeqNoKey, batch.Header.SequencerOrderNo, log.BatchHashKey, batch.Hash()) err := r.db.AddBatch(batch) if err != nil { return err diff --git a/go/host/p2p/p2p.go b/go/host/p2p/p2p.go index 6097d27c27..8c49c727b1 100644 --- a/go/host/p2p/p2p.go +++ b/go/host/p2p/p2p.go @@ -9,6 +9,8 @@ import ( "sync/atomic" "time" + "github.com/obscuronet/go-obscuro/go/enclave/core" + "github.com/obscuronet/go-obscuro/go/common/measure" "github.com/obscuronet/go-obscuro/go/common/retry" "github.com/obscuronet/go-obscuro/go/common/subscription" @@ -238,7 +240,7 @@ func (p *Service) RequestBatchesFromSequencer(fromSeqNo *big.Int) error { Requester: p.ourPublicAddress, FromSeqNo: fromSeqNo, } - defer p.logger.Info("Requested batches from sequencer", "fromSeqNo", batchRequest.FromSeqNo, log.DurationKey, measure.NewStopwatch()) + defer core.LogMethodDuration(p.logger, measure.NewStopwatch(), "Requested batches from sequencer", "fromSeqNo", batchRequest.FromSeqNo) encodedBatchRequest, err := rlp.EncodeToBytes(batchRequest) if err != nil { @@ -307,7 +309,7 @@ func (p *Service) handleConnections() { conn, err := p.listener.Accept() if err != nil { if p.running.Load() { - p.logger.Warn("host could not form P2P connection", log.ErrKey, err) + p.logger.Debug("Could not form P2P connection", log.ErrKey, err) } return } @@ -323,21 +325,21 @@ func (p *Service) handle(conn net.Conn) { encodedMsg, err := io.ReadAll(conn) if err != nil { - p.logger.Warn("failed to read message from peer", log.ErrKey, err) + p.logger.Debug("Failed to read message from peer", log.ErrKey, err) return } msg := message{} err = rlp.DecodeBytes(encodedMsg, &msg) if err != nil { - p.logger.Warn("failed to decode message received from peer: ", log.ErrKey, err) + p.logger.Debug("Failed to decode message received from peer: ", log.ErrKey, err) return } switch msg.Type { case msgTypeTx: if !p.isSequencer { - p.logger.Error("received transaction from peer, but not a sequencer node") + p.logger.Error("Received transaction from peer, but not a sequencer node") return } // The transaction is encrypted, so we cannot check that it's correctly formed. 
@@ -356,6 +358,7 @@ func (p *Service) handle(conn net.Conn) { // nothing to send to subscribers break } + // todo - check the batch signature for _, batchSubs := range p.batchSubscribers.Subscribers() { go batchSubs.HandleBatches(batchMsg.Batches, batchMsg.IsLive) } @@ -388,7 +391,7 @@ func (p *Service) broadcast(msg message) error { go func() { err := p.sendBytesWithRetry(closureAddr, msgEncoded) if err != nil { - p.logger.Error("unsuccessful broadcast", log.ErrKey, err) + p.logger.Debug("Could not send message to peer", "peer", closureAddr, log.ErrKey, err) } }() } @@ -437,13 +440,13 @@ func (p *Service) sendBytes(address string, tx []byte) error { defer conn.Close() } if err != nil { - p.logger.Warn(fmt.Sprintf("could not connect to peer on address %s", address), log.ErrKey, err) + p.logger.Debug(fmt.Sprintf("could not connect to peer on address %s", address), log.ErrKey, err) return err } _, err = conn.Write(tx) if err != nil { - p.logger.Warn(fmt.Sprintf("could not send message to peer on address %s", address), log.ErrKey, err) + p.logger.Debug(fmt.Sprintf("could not send message to peer on address %s", address), log.ErrKey, err) return err } return nil diff --git a/go/host/rpc/enclaverpc/enclave_client.go b/go/host/rpc/enclaverpc/enclave_client.go index 2756d1ca46..2643e84382 100644 --- a/go/host/rpc/enclaverpc/enclave_client.go +++ b/go/host/rpc/enclaverpc/enclave_client.go @@ -8,6 +8,8 @@ import ( "math/big" "time" + "github.com/obscuronet/go-obscuro/go/enclave/core" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rlp" "github.com/obscuronet/go-obscuro/go/common" @@ -186,7 +188,7 @@ func (c *Client) SubmitTx(tx common.EncryptedTx) (*responses.RawTx, common.Syste } func (c *Client) SubmitBatch(batch *common.ExtBatch) common.SystemError { - defer c.logger.Debug("SubmitBatch rpc call", log.DurationKey, measure.NewStopwatch()) + defer core.LogMethodDuration(c.logger, measure.NewStopwatch(), "SubmitBatch rpc call") timeoutCtx, cancel := context.WithTimeout(context.Background(), c.config.EnclaveRPCTimeout) defer cancel() @@ -398,7 +400,7 @@ func (c *Client) HealthCheck() (bool, common.SystemError) { } func (c *Client) CreateBatch() common.SystemError { - defer c.logger.Debug("CreateBatch rpc call", log.DurationKey, measure.NewStopwatch()) + defer core.LogMethodDuration(c.logger, measure.NewStopwatch(), "CreateBatch rpc call") timeoutCtx, cancel := context.WithTimeout(context.Background(), c.config.EnclaveRPCTimeout) defer cancel() @@ -414,7 +416,7 @@ func (c *Client) CreateBatch() common.SystemError { } func (c *Client) CreateRollup(fromSeqNo uint64) (*common.ExtRollup, common.SystemError) { - defer c.logger.Debug("CreateRollup rpc call", log.DurationKey, measure.NewStopwatch()) + defer core.LogMethodDuration(c.logger, measure.NewStopwatch(), "CreateRollup rpc call") timeoutCtx, cancel := context.WithTimeout(context.Background(), c.config.EnclaveRPCTimeout) defer cancel()