improve logging (#1556)
* improve logging

* address pr comments
tudor-malene authored Sep 26, 2023
1 parent a4172f3 commit 7cdf18a
Showing 21 changed files with 178 additions and 207 deletions.
14 changes: 7 additions & 7 deletions go/enclave/components/batch_executor.go
@@ -72,14 +72,14 @@ func (executor *batchExecutor) payL1Fees(stateDB *state.StateDB, context *BatchE
for _, tx := range context.Transactions {
sender, err := core.GetAuthenticatedSender(context.ChainConfig.ChainID.Int64(), tx)
if err != nil {
executor.logger.Warn("Unable to extract sender for tx", log.TxKey, tx.Hash())
executor.logger.Error("Unable to extract sender for tx. Should not happen at this point.", log.TxKey, tx.Hash(), log.ErrKey, err)
continue
}
accBalance := stateDB.GetBalance(*sender)

cost, err := executor.gasOracle.EstimateL1StorageGasCost(tx, block)
if err != nil {
executor.logger.Warn("Unable to get gas cost for tx", log.TxKey, tx.Hash(), log.ErrKey, err)
executor.logger.Error("Unable to get gas cost for tx. Should not happen at this point.", log.TxKey, tx.Hash(), log.ErrKey, err)
continue
}

@@ -108,7 +108,7 @@ func (executor *batchExecutor) payL1Fees(stateDB *state.StateDB, context *BatchE
}

func (executor *batchExecutor) ComputeBatch(context *BatchExecutionContext) (*ComputedBatch, error) {
defer executor.logger.Info("Batch context processed", log.DurationKey, measure.NewStopwatch())
defer core.LogMethodDuration(executor.logger, measure.NewStopwatch(), "Batch context processed")

// sanity check that the l1 block exists. We don't have to execute batches of forks.
block, err := executor.storage.FetchBlock(context.BlockPtr)
@@ -204,14 +204,14 @@ func (executor *batchExecutor) ComputeBatch(context *BatchExecutionContext) (*Co
return gethcommon.Hash{}, fmt.Errorf("commit failure for batch %d. Cause: %w", batch.SeqNo(), err)
}
trieDB := executor.storage.TrieDB()
err = trieDB.Commit(h, true)
err = trieDB.Commit(h, false)
return h, err
},
}, nil
}

func (executor *batchExecutor) ExecuteBatch(batch *core.Batch) (types.Receipts, error) {
defer executor.logger.Info("Executed batch", log.BatchHashKey, batch.Hash(), log.DurationKey, measure.NewStopwatch())
defer core.LogMethodDuration(executor.logger, measure.NewStopwatch(), "Executed batch", log.BatchHashKey, batch.Hash())

// Validators recompute the entire batch using the same batch context
// if they have all necessary prerequisites like having the l1 block processed
@@ -316,8 +316,8 @@ func (executor *batchExecutor) populateOutboundCrossChainData(batch *core.Batch,

valueTransferMessages, err := executor.crossChainProcessors.Local.ExtractOutboundTransfers(receipts)
if err != nil {
executor.logger.Error("Extracting messages L2->L1 failed", log.ErrKey, err, log.CmpKey, log.CrossChainCmp)
return fmt.Errorf("could not extract cross chain messages. Cause: %w", err)
executor.logger.Error("Failed extracting L2->L1 messages value transfers", log.ErrKey, err, log.CmpKey, log.CrossChainCmp)
return fmt.Errorf("could not extract cross chain value transfers. Cause: %w", err)
}

transfersHash := types.DeriveSha(ValueTransfers(valueTransferMessages), &trie.StackTrie{})
2 changes: 1 addition & 1 deletion go/enclave/components/batch_registry.go
@@ -70,7 +70,7 @@ func (br *batchRegistry) OnBatchExecuted(batch *core.Batch, receipts types.Recei
br.callbackMutex.RLock()
defer br.callbackMutex.RUnlock()

defer br.logger.Debug("Sending batch and events", log.BatchHashKey, batch.Hash(), log.DurationKey, measure.NewStopwatch())
defer core.LogMethodDuration(br.logger, measure.NewStopwatch(), "Sending batch and events", log.BatchHashKey, batch.Hash())

br.headBatchSeq = batch.SeqNo()
if br.batchesCallback != nil {
6 changes: 4 additions & 2 deletions go/enclave/components/block_processor.go
@@ -4,6 +4,8 @@ import (
"errors"
"fmt"

"github.com/obscuronet/go-obscuro/go/enclave/core"

"github.com/obscuronet/go-obscuro/go/enclave/gas"
"github.com/obscuronet/go-obscuro/go/enclave/storage"

@@ -34,7 +36,7 @@ func NewBlockProcessor(storage storage.Storage, cc *crosschain.Processors, gasOr
}

func (bp *l1BlockProcessor) Process(br *common.BlockAndReceipts) (*BlockIngestionType, error) {
defer bp.logger.Info("L1 block processed", log.BlockHashKey, br.Block.Hash(), log.DurationKey, measure.NewStopwatch())
defer core.LogMethodDuration(bp.logger, measure.NewStopwatch(), "L1 block processed", log.BlockHashKey, br.Block.Hash())

ingestion, err := bp.tryAndInsertBlock(br)
if err != nil {
@@ -78,7 +80,7 @@ func (bp *l1BlockProcessor) tryAndInsertBlock(br *common.BlockAndReceipts) (*Blo
// Do not store the block if the L1 chain insertion failed
return nil, err
}
bp.logger.Trace("block inserted successfully",
bp.logger.Trace("Block inserted successfully",
log.BlockHeightKey, block.NumberU64(), log.BlockHashKey, block.Hash(), "ingestionType", ingestionType)

err = bp.storage.StoreBlock(block, ingestionType.ChainFork)
5 changes: 3 additions & 2 deletions go/enclave/components/rollup_consumer.go
@@ -3,6 +3,8 @@ package components
import (
"fmt"

"github.com/obscuronet/go-obscuro/go/enclave/core"

"github.com/obscuronet/go-obscuro/go/enclave/storage"

"github.com/obscuronet/go-obscuro/go/common/measure"
@@ -45,8 +47,7 @@ func NewRollupConsumer(
}

func (rc *rollupConsumerImpl) ProcessRollupsInBlock(b *common.BlockAndReceipts) error {
stopwatch := measure.NewStopwatch()
defer rc.logger.Info("Rollup consumer processed block", log.BlockHashKey, b.Block.Hash(), log.DurationKey, stopwatch)
defer core.LogMethodDuration(rc.logger, measure.NewStopwatch(), "Rollup consumer processed block", log.BlockHashKey, b.Block.Hash())

rollups := rc.extractRollups(b)
if len(rollups) == 0 {
2 changes: 1 addition & 1 deletion go/enclave/container/enclave_container.go
@@ -37,7 +37,7 @@ func (e *EnclaveContainer) Start() error {
func (e *EnclaveContainer) Stop() error {
_, err := e.RPCServer.Stop(context.Background(), nil)
if err != nil {
e.Logger.Warn("unable to cleanly stop enclave", log.ErrKey, err)
e.Logger.Error("Unable to cleanly stop enclave", log.ErrKey, err)
return err
}
return nil
66 changes: 34 additions & 32 deletions go/enclave/core/utils.go
@@ -4,41 +4,13 @@ import (
"fmt"
"math/big"

"github.com/ethereum/go-ethereum/core/types"

"github.com/obscuronet/go-obscuro/go/common"

gethcommon "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
gethlog "github.com/ethereum/go-ethereum/log"
"github.com/obscuronet/go-obscuro/go/common/log"
"github.com/obscuronet/go-obscuro/go/common/measure"
)

func MakeMap(txs []*common.L2Tx) map[gethcommon.Hash]*common.L2Tx {
m := make(map[gethcommon.Hash]*common.L2Tx)
for _, tx := range txs {
m[tx.Hash()] = tx
}
return m
}

func ToMap(txs []*common.L2Tx) map[gethcommon.Hash]gethcommon.Hash {
m := make(map[gethcommon.Hash]gethcommon.Hash)
for _, tx := range txs {
m[tx.Hash()] = tx.Hash()
}
return m
}

func PrintTxs(txs []*common.L2Tx) (txsString []string) {
for _, t := range txs {
txsString = printTx(t, txsString)
}
return txsString
}

func printTx(t *common.L2Tx, txsString []string) []string {
txsString = append(txsString, fmt.Sprintf("%s,", t.Hash().Hex()))
return txsString
}

// VerifySignature - Checks that the L2Tx has a valid signature.
func VerifySignature(chainID int64, tx *types.Transaction) error {
signer := types.NewLondonSigner(big.NewInt(chainID))
@@ -55,3 +27,33 @@ func GetAuthenticatedSender(chainID int64, tx *types.Transaction) (*gethcommon.A
}
return &sender, nil
}

const (
// log level for requests that take longer than this threshold in millis
_errorThreshold = 500
_warnThreshold = 200
_infoThreshold = 100
_debugThreshold = 50
)

// LogMethodDuration - call only with "defer"
func LogMethodDuration(logger gethlog.Logger, stopWatch *measure.Stopwatch, msg string, args ...any) {
var f func(msg string, ctx ...interface{})
durationMillis := stopWatch.Measure().Milliseconds()

// we adjust the logging level based on the time
switch {
case durationMillis > _errorThreshold:
f = logger.Error
case durationMillis > _warnThreshold:
f = logger.Warn
case durationMillis > _infoThreshold:
f = logger.Info
case durationMillis > _debugThreshold:
f = logger.Debug
default:
f = logger.Trace
}
newArgs := append([]any{log.DurationKey, stopWatch}, args...)
f(fmt.Sprintf("LogMethodDuration::%s", msg), newArgs...)
}
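
For readers skimming the call-site changes in this diff, here is a minimal usage sketch of the new helper (the ExampleService type, method name and log keys are hypothetical; the signature and import paths are taken from this commit). Because a deferred call's arguments are evaluated at the defer statement, the stopwatch starts there and LogMethodDuration runs when the method returns, picking Trace, Debug, Info, Warn or Error based on the 50/100/200/500 ms thresholds above.

// Hypothetical usage sketch, not part of this commit.
package example

import (
	gethlog "github.com/ethereum/go-ethereum/log"

	"github.com/obscuronet/go-obscuro/go/common/measure"
	"github.com/obscuronet/go-obscuro/go/enclave/core"
)

type ExampleService struct {
	logger gethlog.Logger
}

func (s *ExampleService) DoWork() error {
	// The deferred call's arguments are evaluated here, so the stopwatch starts now
	// and LogMethodDuration fires when DoWork returns, logging the elapsed time at a
	// level that escalates with the duration.
	defer core.LogMethodDuration(s.logger, measure.NewStopwatch(), "DoWork completed", "someKey", "someValue")

	// ... work whose duration is being measured ...
	return nil
}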
6 changes: 4 additions & 2 deletions go/enclave/crosschain/block_message_extractor.go
@@ -3,6 +3,8 @@ package crosschain
import (
"fmt"

"github.com/obscuronet/go-obscuro/go/enclave/core"

"github.com/obscuronet/go-obscuro/go/enclave/storage"

gethcommon "github.com/ethereum/go-ethereum/common"
@@ -35,7 +37,7 @@ func (m *blockMessageExtractor) Enabled() bool {
}

func (m *blockMessageExtractor) StoreCrossChainValueTransfers(block *common.L1Block, receipts common.L1Receipts) error {
defer m.logger.Info("Block value transfer messages processed", log.BlockHashKey, block.Hash(), log.DurationKey, measure.NewStopwatch())
defer core.LogMethodDuration(m.logger, measure.NewStopwatch(), "Block value transfer messages processed", log.BlockHashKey, block.Hash())

/*areReceiptsValid := common.VerifyReceiptHash(block, receipts)
@@ -74,7 +76,7 @@ func (m *blockMessageExtractor) StoreCrossChainValueTransfers(block *common.L1Bl
// block - the L1 block for which events are extracted.
// receipts - all of the receipts for the corresponding block. This is validated.
func (m *blockMessageExtractor) StoreCrossChainMessages(block *common.L1Block, receipts common.L1Receipts) error {
defer m.logger.Info("Block cross chain messages processed", log.BlockHashKey, block.Hash(), log.DurationKey, measure.NewStopwatch())
defer core.LogMethodDuration(m.logger, measure.NewStopwatch(), "Block cross chain messages processed", log.BlockHashKey, block.Hash())

if len(receipts) == 0 {
return nil
6 changes: 5 additions & 1 deletion go/enclave/crosschain/message_bus_manager.go
@@ -183,7 +183,7 @@ func (m *MessageBusManager) RetrieveInboundMessages(fromBlock *common.L1Block, t
b = p
}

m.logger.Info(fmt.Sprintf("Extracted cross chain messages for block height %d ->%d: %d.", fromBlock.NumberU64(), toBlock.NumberU64(), len(messages)))
logf := m.logger.Info
if len(messages)+len(transfers) == 0 {
logf = m.logger.Debug
}
logf(fmt.Sprintf("Extracted cross chain messages for block height %d ->%d", fromBlock.NumberU64(), toBlock.NumberU64()), "no_msgs", len(messages), "no_value_transfers", len(transfers))

return messages, transfers
}
22 changes: 11 additions & 11 deletions go/enclave/enclave.go
@@ -10,6 +10,8 @@ import (
"sync"
"time"

"github.com/obscuronet/go-obscuro/go/common/measure"

"github.com/obscuronet/go-obscuro/go/enclave/gas"
"github.com/obscuronet/go-obscuro/go/enclave/storage"

@@ -363,9 +365,10 @@ func (e *enclaveImpl) StopClient() common.SystemError {
}

func (e *enclaveImpl) sendBatch(batch *core.Batch, outChannel chan common.StreamL2UpdatesResponse) {
e.logger.Info("Streaming batch to client", log.BatchHashKey, batch.Hash())
e.logger.Info("Streaming batch to host", log.BatchHashKey, batch.Hash(), log.BatchSeqNoKey, batch.SeqNo())
extBatch, err := batch.ToExtBatch(e.dataEncryptionService, e.dataCompressionService)
if err != nil {
// this error is unrecoverable
e.logger.Crit("failed to convert batch", log.ErrKey, err)
}
resp := common.StreamL2UpdatesResponse{
@@ -377,7 +380,7 @@
// this function is only called when the executed batch is the new head
func (e *enclaveImpl) streamEventsForNewHeadBatch(batch *core.Batch, receipts types.Receipts, outChannel chan common.StreamL2UpdatesResponse) {
logs, err := e.subscriptionManager.GetSubscribedLogsForBatch(batch, receipts)
e.logger.Info("Stream Events for", log.BatchHashKey, batch.Hash(), "nr_events", len(logs))
e.logger.Debug("Stream Events for", log.BatchHashKey, batch.Hash(), "nr_events", len(logs))
if err != nil {
e.logger.Error("Error while getting subscription logs", log.ErrKey, err)
return
@@ -449,7 +452,7 @@ func (e *enclaveImpl) ingestL1Block(br *common.BlockAndReceipts) (*components.Bl
if err != nil {
// only warn for unexpected errors
if errors.Is(err, errutil.ErrBlockAncestorNotFound) || errors.Is(err, errutil.ErrBlockAlreadyProcessed) {
e.logger.Debug("Failed ingesting block", log.ErrKey, err, log.BlockHashKey, br.Block.Hash())
e.logger.Debug("Did not ingest block", log.ErrKey, err, log.BlockHashKey, br.Block.Hash())
} else {
e.logger.Warn("Failed ingesting block", log.ErrKey, err, log.BlockHashKey, br.Block.Hash())
}
@@ -547,12 +550,9 @@ func (e *enclaveImpl) SubmitBatch(extBatch *common.ExtBatch) common.SystemError
return responses.ToInternalError(fmt.Errorf("requested SubmitBatch with the enclave stopping"))
}

callStart := time.Now()
defer func() {
e.logger.Info("SubmitBatch call completed.", "start", callStart, log.DurationKey, time.Since(callStart), log.BatchHashKey, extBatch.Hash())
}()
core.LogMethodDuration(e.logger, measure.NewStopwatch(), "SubmitBatch call completed.", log.BatchHashKey, extBatch.Hash())

e.logger.Info("SubmitBatch", log.BatchHeightKey, extBatch.Header.Number, log.BatchHashKey, extBatch.Hash(), "l1", extBatch.Header.L1Proof)
e.logger.Info("Received new p2p batch", log.BatchHeightKey, extBatch.Header.Number, log.BatchHashKey, extBatch.Hash(), "l1", extBatch.Header.L1Proof)
batch, err := core.ToBatch(extBatch, e.dataEncryptionService, e.dataCompressionService)
if err != nil {
return responses.ToInternalError(fmt.Errorf("could not convert batch. Cause: %w", err))
@@ -988,7 +988,7 @@ func (e *enclaveImpl) Stop() common.SystemError {

if e.profiler != nil {
if err := e.profiler.Stop(); err != nil {
e.logger.Error("Could not profiler", log.ErrKey, err)
e.logger.Error("Could not stop profiler", log.ErrKey, err)
return err
}
}
@@ -1212,14 +1212,14 @@ func (e *enclaveImpl) DoEstimateGas(args *gethapi.TransactionArgs, blkNumber *ge
if transfer == nil {
transfer = new(hexutil.Big)
}
e.logger.Warn("Gas estimation capped by limited funds", "original", hi, "balance", balance,
e.logger.Debug("Gas estimation capped by limited funds", "original", hi, "balance", balance,
"sent", transfer.ToInt(), "maxFeePerGas", feeCap, "fundable", allowance)
hi = allowance.Uint64()
}
}
// Recap the highest gas allowance with specified gascap.
if gasCap != 0 && hi > gasCap {
e.logger.Warn("Caller gas above allowance, capping", "requested", hi, "cap", gasCap)
e.logger.Debug("Caller gas above allowance, capping", "requested", hi, "cap", gasCap)
hi = gasCap
}
cap = hi //nolint: revive
12 changes: 6 additions & 6 deletions go/enclave/events/subscription_manager.go
@@ -151,7 +151,7 @@ func (s *SubscriptionManager) GetSubscribedLogsForBatch(batch *core.Batch, recei
if relevant {
relevantLogsForSub = append(relevantLogsForSub, logItem)
}
s.logger.Info(fmt.Sprintf("Subscription %s. Account %s. Log %v. Extracted addresses: %v. Relevant: %t", id, sub.Account, logItem, userAddrs, relevant))
s.logger.Debug(fmt.Sprintf("Subscription %s. Account %s. Log %v. Extracted addresses: %v. Relevant: %t", id, sub.Account, logItem, userAddrs, relevant))
}
if len(relevantLogsForSub) > 0 {
relevantLogsPerSubscription[id] = relevantLogsForSub
@@ -237,21 +237,21 @@ func filterLogs(logs []*types.Log, fromBlock, toBlock *big.Int, addresses []geth
Logs:
for _, logItem := range logs {
if fromBlock != nil && fromBlock.Int64() >= 0 && fromBlock.Uint64() > logItem.BlockNumber {
logger.Info(fmt.Sprintf("Skipping log = %v", logItem), "reason", "In the past. The starting block num for filter is bigger than log")
logger.Debug(fmt.Sprintf("Skipping log = %v", logItem), "reason", "In the past. The starting block num for filter is bigger than log")
continue
}
if toBlock != nil && toBlock.Int64() > 0 && toBlock.Uint64() < logItem.BlockNumber {
logger.Info(fmt.Sprintf("Skipping log = %v", logItem), "reason", "In the future. The ending block num for filter is smaller than log")
logger.Debug(fmt.Sprintf("Skipping log = %v", logItem), "reason", "In the future. The ending block num for filter is smaller than log")
continue
}

if len(addresses) > 0 && !includes(addresses, logItem.Address) {
logger.Info(fmt.Sprintf("Skipping log = %v", logItem), "reason", "The contract address of the log is not an address of interest")
logger.Debug(fmt.Sprintf("Skipping log = %v", logItem), "reason", "The contract address of the log is not an address of interest")
continue
}
// If the to filtered topics is greater than the amount of topics in logs, skip.
if len(topics) > len(logItem.Topics) {
logger.Info(fmt.Sprintf("Skipping log = %v", logItem), "reason", "Insufficient topics. The log has less topics than the required one to satisfy the query")
logger.Debug(fmt.Sprintf("Skipping log = %v", logItem), "reason", "Insufficient topics. The log has less topics than the required one to satisfy the query")
continue
}
for i, sub := range topics {
@@ -263,7 +263,7 @@
}
}
if !match {
logger.Info(fmt.Sprintf("Skipping log = %v", logItem), "reason", "Topics do not match.")
logger.Debug(fmt.Sprintf("Skipping log = %v", logItem), "reason", "Topics do not match.")
continue Logs
}
}
5 changes: 2 additions & 3 deletions go/enclave/nodetype/sequencer.go
@@ -230,7 +230,7 @@ func (s *sequencer) produceBatch(sequencerNo *big.Int, l1Hash common.L1BlockHash
// StoreExecutedBatch - stores an executed batch in one go. This can be done for the sequencer because it is guaranteed
// that all dependencies are in place for the execution to be successful.
func (s *sequencer) StoreExecutedBatch(batch *core.Batch, receipts types.Receipts) error {
defer s.logger.Info("Registry StoreBatch() exit", log.BatchHashKey, batch.Hash(), log.DurationKey, measure.NewStopwatch())
defer core.LogMethodDuration(s.logger, measure.NewStopwatch(), "Registry StoreBatch() exit", log.BatchHashKey, batch.Hash())

// Check if this batch is already stored.
if _, err := s.storage.FetchBatchHeader(batch.Hash()); err == nil {
@@ -268,8 +268,6 @@ func (s *sequencer) CreateRollup(lastBatchNo uint64) (*common.ExtRollup, error)
return nil, fmt.Errorf("failed to sign created rollup: %w", err)
}

s.logger.Info("Created new head rollup", log.RollupHashKey, rollup.Hash(), "numBatches", len(rollup.Batches))

return s.rollupCompression.CreateExtRollup(rollup)
}

@@ -339,6 +337,7 @@ func (s *sequencer) OnL1Fork(fork *common.ChainFork) error {
rollup, err := s.storage.FetchReorgedRollup(fork.NonCanonicalPath)
if err == nil {
s.logger.Error("Reissue rollup", log.RollupHashKey, rollup)
// todo - tudor - finalise the logic to reissue a rollup when the block used for compression was reorged
return nil
}
if !errors.Is(err, errutil.ErrNotFound) {
(diffs for the remaining changed files were not loaded)
