From 4519370b2119d4a867a6f1ec9db0f15ea9a1c892 Mon Sep 17 00:00:00 2001 From: Mateusz Sekara Date: Mon, 16 Oct 2023 15:52:32 +0200 Subject: [PATCH] Support for finality tags in LogPoller (#10762) * Support for finality in the LogPoller * Post review fixes * Post rebase fixes --- core/chains/evm/chain.go | 11 +- .../evm/client/simulated_backend_client.go | 48 +- .../evm/forwarders/forwarder_manager_test.go | 4 +- core/chains/evm/logpoller/disabled.go | 4 - core/chains/evm/logpoller/helper_test.go | 7 +- core/chains/evm/logpoller/log_poller.go | 207 +-- .../evm/logpoller/log_poller_internal_test.go | 83 +- core/chains/evm/logpoller/log_poller_test.go | 1145 ++++++++++++----- core/chains/evm/logpoller/mocks/log_poller.go | 33 - core/chains/evm/logpoller/models.go | 7 +- core/chains/evm/logpoller/observability.go | 10 +- .../evm/logpoller/observability_test.go | 91 +- core/chains/evm/logpoller/orm.go | 70 +- core/chains/evm/logpoller/orm_test.go | 204 +-- core/chains/evm/logpoller/query.go | 33 +- core/chains/evm/txmgr/txmgr_test.go | 2 +- .../evm21/logprovider/integration_test.go | 2 +- core/services/pg/q.go | 1 + core/services/relay/evm/config_poller_test.go | 2 +- .../relay/evm/functions/config_poller_test.go | 2 +- .../relay/evm/mercury/helpers_test.go | 2 +- core/store/migrate/migrate_test.go | 142 ++ .../0201_add_finalized_block_number.sql | 11 + ...lt_values_for_last_finalized_block.sql.sql | 33 + 24 files changed, 1440 insertions(+), 714 deletions(-) create mode 100644 core/store/migrate/migrations/0201_add_finalized_block_number.sql create mode 100644 core/store/migrate/migrations/0202_default_values_for_last_finalized_block.sql.sql diff --git a/core/chains/evm/chain.go b/core/chains/evm/chain.go index 58c793cc646..2a8f8a2e840 100644 --- a/core/chains/evm/chain.go +++ b/core/chains/evm/chain.go @@ -248,7 +248,16 @@ func newChain(ctx context.Context, cfg *evmconfig.ChainScoped, nodes []*toml.Nod if opts.GenLogPoller != nil { logPoller = opts.GenLogPoller(chainID) } else { - logPoller = logpoller.NewLogPoller(logpoller.NewObservedORM(chainID, db, l, cfg.Database()), client, l, cfg.EVM().LogPollInterval(), int64(cfg.EVM().FinalityDepth()), int64(cfg.EVM().LogBackfillBatchSize()), int64(cfg.EVM().RPCDefaultBatchSize()), int64(cfg.EVM().LogKeepBlocksDepth())) + logPoller = logpoller.NewLogPoller( + logpoller.NewObservedORM(chainID, db, l, cfg.Database()), + client, + l, + cfg.EVM().LogPollInterval(), + cfg.EVM().FinalityTagEnabled(), + int64(cfg.EVM().FinalityDepth()), + int64(cfg.EVM().LogBackfillBatchSize()), + int64(cfg.EVM().RPCDefaultBatchSize()), + int64(cfg.EVM().LogKeepBlocksDepth())) } } diff --git a/core/chains/evm/client/simulated_backend_client.go b/core/chains/evm/client/simulated_backend_client.go index dd79c549bfe..abab2046620 100644 --- a/core/chains/evm/client/simulated_backend_client.go +++ b/core/chains/evm/client/simulated_backend_client.go @@ -545,7 +545,7 @@ func (c *SimulatedBackendClient) BatchCallContext(ctx context.Context, b []rpc.B if len(elem.Args) != 2 { return fmt.Errorf("SimulatedBackendClient expected 2 args, got %d for eth_getBlockByNumber", len(elem.Args)) } - blockNum, is := elem.Args[0].(string) + blockNumOrTag, is := elem.Args[0].(string) if !is { return fmt.Errorf("SimulatedBackendClient expected first arg to be a string for eth_getBlockByNumber, got: %T", elem.Args[0]) } @@ -553,31 +553,24 @@ func (c *SimulatedBackendClient) BatchCallContext(ctx context.Context, b []rpc.B if !is { return fmt.Errorf("SimulatedBackendClient expected second arg to be a 
boolean for eth_getBlockByNumber, got: %T", elem.Args[1]) } - n, ok := new(big.Int).SetString(blockNum, 0) - if !ok { - return fmt.Errorf("error while converting block number string: %s to big.Int ", blockNum) - } - header, err := c.b.HeaderByNumber(ctx, n) + header, err := c.fetchHeader(ctx, blockNumOrTag) if err != nil { return err } - switch v := elem.Result.(type) { + switch res := elem.Result.(type) { case *evmtypes.Head: - b[i].Result = &evmtypes.Head{ - Number: header.Number.Int64(), - Hash: header.Hash(), - Timestamp: time.Unix(int64(header.Time), 0).UTC(), - } + res.Number = header.Number.Int64() + res.Hash = header.Hash() + res.ParentHash = header.ParentHash + res.Timestamp = time.Unix(int64(header.Time), 0).UTC() case *evmtypes.Block: - b[i].Result = &evmtypes.Block{ - Number: header.Number.Int64(), - Hash: header.Hash(), - Timestamp: time.Unix(int64(header.Time), 0), - } + res.Number = header.Number.Int64() + res.Hash = header.Hash() + res.ParentHash = header.ParentHash + res.Timestamp = time.Unix(int64(header.Time), 0).UTC() default: - return fmt.Errorf("SimulatedBackendClient Unexpected Type %T", v) + return fmt.Errorf("SimulatedBackendClient Unexpected Type %T", elem.Result) } - b[i].Error = err case "eth_call": if len(elem.Args) != 2 { @@ -718,3 +711,20 @@ func toCallMsg(params map[string]interface{}) ethereum.CallMsg { func (c *SimulatedBackendClient) IsL2() bool { return false } + +func (c *SimulatedBackendClient) fetchHeader(ctx context.Context, blockNumOrTag string) (*types.Header, error) { + switch blockNumOrTag { + case rpc.SafeBlockNumber.String(): + return c.b.Blockchain().CurrentSafeBlock(), nil + case rpc.LatestBlockNumber.String(): + return c.b.Blockchain().CurrentHeader(), nil + case rpc.FinalizedBlockNumber.String(): + return c.b.Blockchain().CurrentFinalBlock(), nil + default: + blockNum, ok := new(big.Int).SetString(blockNumOrTag, 0) + if !ok { + return nil, fmt.Errorf("error while converting block number string: %s to big.Int ", blockNumOrTag) + } + return c.b.HeaderByNumber(ctx, blockNum) + } +} diff --git a/core/chains/evm/forwarders/forwarder_manager_test.go b/core/chains/evm/forwarders/forwarder_manager_test.go index bcbe43ef9b6..0117c2f2c07 100644 --- a/core/chains/evm/forwarders/forwarder_manager_test.go +++ b/core/chains/evm/forwarders/forwarder_manager_test.go @@ -58,7 +58,7 @@ func TestFwdMgr_MaybeForwardTransaction(t *testing.T) { t.Log(authorized) evmClient := client.NewSimulatedBackendClient(t, ec, testutils.FixtureChainID) - lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), evmClient, lggr, 100*time.Millisecond, 2, 3, 2, 1000) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), evmClient, lggr, 100*time.Millisecond, false, 2, 3, 2, 1000) fwdMgr := forwarders.NewFwdMgr(db, evmClient, lp, lggr, evmcfg.EVM(), evmcfg.Database()) fwdMgr.ORM = forwarders.NewORM(db, logger.TestLogger(t), cfg.Database()) @@ -111,7 +111,7 @@ func TestFwdMgr_AccountUnauthorizedToForward_SkipsForwarding(t *testing.T) { ec.Commit() evmClient := client.NewSimulatedBackendClient(t, ec, testutils.FixtureChainID) - lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), evmClient, lggr, 100*time.Millisecond, 2, 3, 2, 1000) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), evmClient, lggr, 100*time.Millisecond, false, 2, 3, 2, 1000) fwdMgr := 
forwarders.NewFwdMgr(db, evmClient, lp, lggr, evmcfg.EVM(), evmcfg.Database()) fwdMgr.ORM = forwarders.NewORM(db, logger.TestLogger(t), cfg.Database()) diff --git a/core/chains/evm/logpoller/disabled.go b/core/chains/evm/logpoller/disabled.go index 06f0b9200a3..4bcf1c50863 100644 --- a/core/chains/evm/logpoller/disabled.go +++ b/core/chains/evm/logpoller/disabled.go @@ -104,7 +104,3 @@ func (d disabled) IndexedLogsCreatedAfter(eventSig common.Hash, address common.A func (d disabled) LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations, qopts ...pg.QOpt) (int64, error) { return 0, ErrDisabled } - -func (d disabled) LogsUntilBlockHashDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, untilBlockHash common.Hash, qopts ...pg.QOpt) ([]Log, error) { - return nil, ErrDisabled -} diff --git a/core/chains/evm/logpoller/helper_test.go b/core/chains/evm/logpoller/helper_test.go index 447a4673588..86208f2c5b4 100644 --- a/core/chains/evm/logpoller/helper_test.go +++ b/core/chains/evm/logpoller/helper_test.go @@ -45,7 +45,7 @@ type TestHarness struct { EthDB ethdb.Database } -func SetupTH(t testing.TB, finalityDepth, backfillBatchSize, rpcBatchSize int64) TestHarness { +func SetupTH(t testing.TB, useFinalityTag bool, finalityDepth, backfillBatchSize, rpcBatchSize int64) TestHarness { lggr := logger.TestLogger(t) chainID := testutils.NewRandomEVMChainID() chainID2 := testutils.NewRandomEVMChainID() @@ -63,7 +63,10 @@ func SetupTH(t testing.TB, finalityDepth, backfillBatchSize, rpcBatchSize int64) // Poll period doesn't matter, we intend to call poll and save logs directly in the test. // Set it to some insanely high value to not interfere with any tests. 
esc := client.NewSimulatedBackendClient(t, ec, chainID) - lp := logpoller.NewLogPoller(o, esc, lggr, 1*time.Hour, finalityDepth, backfillBatchSize, rpcBatchSize, 1000) + // Mark genesis block as finalized to avoid any nulls in the tests + head := esc.Backend().Blockchain().CurrentHeader() + esc.Backend().Blockchain().SetFinalized(head) + lp := logpoller.NewLogPoller(o, esc, lggr, 1*time.Hour, useFinalityTag, finalityDepth, backfillBatchSize, rpcBatchSize, 1000) emitterAddress1, _, emitter1, err := log_emitter.DeployLogEmitter(owner, ec) require.NoError(t, err) emitterAddress2, _, emitter2, err := log_emitter.DeployLogEmitter(owner, ec) diff --git a/core/chains/evm/logpoller/log_poller.go b/core/chains/evm/logpoller/log_poller.go index 9ce296e1a0b..95039f59cb0 100644 --- a/core/chains/evm/logpoller/log_poller.go +++ b/core/chains/evm/logpoller/log_poller.go @@ -58,11 +58,15 @@ type LogPoller interface { IndexedLogsWithSigsExcluding(address common.Address, eventSigA, eventSigB common.Hash, topicIndex int, fromBlock, toBlock int64, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) LogsDataWordRange(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) LogsDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) - LogsUntilBlockHashDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, untilBlockHash common.Hash, qopts ...pg.QOpt) ([]Log, error) } type Confirmations int +const ( + Finalized = Confirmations(-1) + Unconfirmed = Confirmations(0) +) + type LogPollerTest interface { LogPoller PollAndSaveLogs(ctx context.Context, currentBlockNumber int64) @@ -88,15 +92,16 @@ var ( type logPoller struct { utils.StartStopOnce - ec Client - orm ORM - lggr logger.Logger - pollPeriod time.Duration // poll period set by block production rate - finalityDepth int64 // finality depth is taken to mean that block (head - finality) is finalized - keepBlocksDepth int64 // the number of blocks behind the head for which we keep the blocks. Must be greater than finality depth + 1. - backfillBatchSize int64 // batch size to use when backfilling finalized logs - rpcBatchSize int64 // batch size to use for fallback RPC calls made in GetBlocks - backupPollerNextBlock int64 + ec Client + orm ORM + lggr logger.Logger + pollPeriod time.Duration // poll period set by block production rate + useFinalityTag bool // indicates whether logPoller should use chain's finality or pick a fixed depth for finality + finalityDepth int64 // finality depth is taken to mean that block (head - finality) is finalized. 
If `useFinalityTag` is set to true, this value is ignored, because the last finalized block is fetched from the chain instead + keepFinalizedBlocksDepth int64 // the number of blocks behind the last finalized block we keep in database + backfillBatchSize int64 // batch size to use when backfilling finalized logs + rpcBatchSize int64 // batch size to use for fallback RPC calls made in GetBlocks + backupPollerNextBlock int64 filterMu sync.RWMutex filters map[string]Filter @@ -122,21 +127,22 @@ type logPoller struct { // How fast that can be done depends largely on network speed and DB, but even for the fastest // support chain, polygon, which has 2s block times, we need RPCs roughly with <= 500ms latency func NewLogPoller(orm ORM, ec Client, lggr logger.Logger, pollPeriod time.Duration, - finalityDepth int64, backfillBatchSize int64, rpcBatchSize int64, keepBlocksDepth int64) *logPoller { + useFinalityTag bool, finalityDepth int64, backfillBatchSize int64, rpcBatchSize int64, keepFinalizedBlocksDepth int64) *logPoller { return &logPoller{ - ec: ec, - orm: orm, - lggr: lggr.Named("LogPoller"), - replayStart: make(chan int64), - replayComplete: make(chan error), - pollPeriod: pollPeriod, - finalityDepth: finalityDepth, - backfillBatchSize: backfillBatchSize, - rpcBatchSize: rpcBatchSize, - keepBlocksDepth: keepBlocksDepth, - filters: make(map[string]Filter), - filterDirty: true, // Always build Filter on first call to cache an empty filter if nothing registered yet. + ec: ec, + orm: orm, + lggr: lggr.Named("LogPoller"), + replayStart: make(chan int64), + replayComplete: make(chan error), + pollPeriod: pollPeriod, + finalityDepth: finalityDepth, + useFinalityTag: useFinalityTag, + backfillBatchSize: backfillBatchSize, + rpcBatchSize: rpcBatchSize, + keepFinalizedBlocksDepth: keepFinalizedBlocksDepth, + filters: make(map[string]Filter), + filterDirty: true, // Always build Filter on first call to cache an empty filter if nothing registered yet. } } @@ -371,11 +377,6 @@ func (lp *logPoller) ReplayAsync(fromBlock int64) { } func (lp *logPoller) Start(parentCtx context.Context) error { - if lp.keepBlocksDepth < (lp.finalityDepth + 1) { - // We add 1 since for reorg detection on the first unfinalized block - // we need to keep 1 finalized block. - return errors.Errorf("keepBlocksDepth %d must be greater than finality %d + 1", lp.keepBlocksDepth, lp.finalityDepth) - } return lp.StartOnce("LogPoller", func() error { ctx, cancel := context.WithCancel(parentCtx) lp.ctx = ctx @@ -497,21 +498,20 @@ func (lp *logPoller) run() { } // Otherwise this is the first poll _ever_ on a new chain. // Only safe thing to do is to start at the first finalized block. - latest, err := lp.ec.HeadByNumber(lp.ctx, nil) + latestBlock, latestFinalizedBlockNumber, err := lp.latestBlocks(lp.ctx) if err != nil { lp.lggr.Warnw("Unable to get latest for first poll", "err", err) continue } - latestNum := latest.Number // Do not support polling chains which don't even have finality depth worth of blocks. // Could conceivably support this but not worth the effort. - // Need finality depth + 1, no block 0.
- if latestNum <= lp.finalityDepth { - lp.lggr.Warnw("Insufficient number of blocks on chain, waiting for finality depth", "err", err, "latest", latestNum, "finality", lp.finalityDepth) + // Need last finalized block number to be higher than 0 + if latestFinalizedBlockNumber <= 0 { + lp.lggr.Warnw("Insufficient number of blocks on chain, waiting for finality depth", "err", err, "latest", latestBlock.Number) continue } // Starting at the first finalized block. We do not backfill the first finalized block. - start = latestNum - lp.finalityDepth + start = latestFinalizedBlockNumber } else { start = lastProcessed.BlockNumber + 1 } @@ -558,22 +558,19 @@ func (lp *logPoller) BackupPollAndSaveLogs(ctx context.Context, backupPollerBloc } return } - - // If this is our first run, start max(finalityDepth+1, backupPollerBlockDelay) blocks behind the last processed + // If this is our first run, start from block min(lastProcessed.FinalizedBlockNumber-1, lastProcessed.BlockNumber-backupPollerBlockDelay) + backupStartBlock := mathutil.Min(lastProcessed.FinalizedBlockNumber-1, lastProcessed.BlockNumber-backupPollerBlockDelay) // (or at block 0 if whole blockchain is too short) - lp.backupPollerNextBlock = lastProcessed.BlockNumber - mathutil.Max(lp.finalityDepth+1, backupPollerBlockDelay) - if lp.backupPollerNextBlock < 0 { - lp.backupPollerNextBlock = 0 - } + lp.backupPollerNextBlock = mathutil.Max(backupStartBlock, 0) } - latestBlock, err := lp.ec.HeadByNumber(ctx, nil) + _, latestFinalizedBlockNumber, err := lp.latestBlocks(ctx) if err != nil { lp.lggr.Warnw("Backup logpoller failed to get latest block", "err", err) return } - lastSafeBackfillBlock := latestBlock.Number - lp.finalityDepth - 1 + lastSafeBackfillBlock := latestFinalizedBlockNumber - 1 if lastSafeBackfillBlock >= lp.backupPollerNextBlock { lp.lggr.Infow("Backup poller backfilling logs", "start", lp.backupPollerNextBlock, "end", lastSafeBackfillBlock) if err = lp.backfill(ctx, lp.backupPollerNextBlock, lastSafeBackfillBlock); err != nil { @@ -735,7 +732,7 @@ func (lp *logPoller) getCurrentBlockMaybeHandleReorg(ctx context.Context, curren // There can be another reorg while we're finding the LCA. // That is ok, since we'll detect it on the next iteration. // Since we go currentBlock by currentBlock for unfinalized logs, the mismatch starts at currentBlockNumber - 1. - blockAfterLCA, err2 := lp.findBlockAfterLCA(ctx, currentBlock) + blockAfterLCA, err2 := lp.findBlockAfterLCA(ctx, currentBlock, expectedParent.FinalizedBlockNumber) if err2 != nil { lp.lggr.Warnw("Unable to find LCA after reorg, retrying", "err", err2) return nil, errors.New("Unable to find LCA after reorg, retrying") @@ -780,7 +777,9 @@ func (lp *logPoller) getCurrentBlockMaybeHandleReorg(ctx context.Context, curren // conditions this would be equal to lastProcessed.BlockNumber + 1. 
func (lp *logPoller) PollAndSaveLogs(ctx context.Context, currentBlockNumber int64) { lp.lggr.Debugw("Polling for logs", "currentBlockNumber", currentBlockNumber) - latestBlock, err := lp.ec.HeadByNumber(ctx, nil) + // Intentionally not using logPoller.finalityDepth directly but the latestFinalizedBlockNumber returned from lp.latestBlocks() + // latestBlocks knows how to pick a proper latestFinalizedBlockNumber based on the logPoller's configuration + latestBlock, latestFinalizedBlockNumber, err := lp.latestBlocks(ctx) if err != nil { lp.lggr.Warnw("Unable to get latestBlockNumber block", "err", err, "currentBlockNumber", currentBlockNumber) return @@ -813,7 +812,7 @@ func (lp *logPoller) PollAndSaveLogs(ctx context.Context, currentBlockNumber int // E.g. 1<-2<-3(currentBlockNumber)<-4<-5<-6<-7(latestBlockNumber), finality is 2. So 3,4 can be batched. // Although 5 is finalized, we still need to save it to the db for reorg detection if 6 is a reorg. // start = currentBlockNumber = 3, end = latestBlockNumber - finality - 1 = 7-2-1 = 4 (inclusive range). - lastSafeBackfillBlock := latestBlockNumber - lp.finalityDepth - 1 + lastSafeBackfillBlock := latestFinalizedBlockNumber - 1 if lastSafeBackfillBlock >= currentBlockNumber { lp.lggr.Infow("Backfilling logs", "start", currentBlockNumber, "end", lastSafeBackfillBlock) if err = lp.backfill(ctx, currentBlockNumber, lastSafeBackfillBlock); err != nil { @@ -847,7 +846,7 @@ func (lp *logPoller) PollAndSaveLogs(ctx context.Context, currentBlockNumber int } lp.lggr.Debugw("Unfinalized log query", "logs", len(logs), "currentBlockNumber", currentBlockNumber, "blockHash", currentBlock.Hash, "timestamp", currentBlock.Timestamp.Unix()) err = lp.orm.Q().WithOpts(pg.WithParentCtx(ctx)).Transaction(func(tx pg.Queryer) error { - if err2 := lp.orm.InsertBlock(h, currentBlockNumber, currentBlock.Timestamp, pg.WithQueryer(tx)); err2 != nil { + if err2 := lp.orm.InsertBlock(h, currentBlockNumber, currentBlock.Timestamp, latestFinalizedBlockNumber, pg.WithQueryer(tx)); err2 != nil { return err2 } if len(logs) == 0 { @@ -881,9 +880,37 @@ func (lp *logPoller) PollAndSaveLogs(ctx context.Context, currentBlockNumber int } } +// Returns information about latestBlock, latestFinalizedBlockNumber +// If finality tag is not enabled, latestFinalizedBlockNumber is calculated as latestBlockNumber - lp.finalityDepth (configured param) +// Otherwise, we return last finalized block number returned from chain +func (lp *logPoller) latestBlocks(ctx context.Context) (*evmtypes.Head, int64, error) { + // If finality is not enabled, we can only fetch the latest block + if !lp.useFinalityTag { + // Example: + // finalityDepth = 2 + // Blocks: 1->2->3->4->5(latestBlock) + // latestFinalizedBlockNumber would be 3 + latestBlock, err := lp.ec.HeadByNumber(ctx, nil) + if err != nil { + return nil, 0, err + } + // If chain has fewer blocks than finalityDepth, return 0 + return latestBlock, mathutil.Max(latestBlock.Number-lp.finalityDepth, 0), nil + } + + // If finality is enabled, we need to get the latest and finalized blocks. + blocks, err := lp.batchFetchBlocks(ctx, []string{rpc.LatestBlockNumber.String(), rpc.FinalizedBlockNumber.String()}, 2) + if err != nil { + return nil, 0, err + } + latest := blocks[0] + finalized := blocks[1] + return latest, finalized.Number, nil +} + // Find the first place where our chain and their chain have the same block, // that block number is the LCA. Return the block after that, where we want to resume polling. 
-func (lp *logPoller) findBlockAfterLCA(ctx context.Context, current *evmtypes.Head) (*evmtypes.Head, error) { +func (lp *logPoller) findBlockAfterLCA(ctx context.Context, current *evmtypes.Head, latestFinalizedBlockNumber int64) (*evmtypes.Head, error) { // Current is where the mismatch starts. // Check its parent to see if its the same as ours saved. parent, err := lp.ec.HeadByHash(ctx, current.ParentHash) @@ -891,12 +918,11 @@ func (lp *logPoller) findBlockAfterLCA(ctx context.Context, current *evmtypes.He return nil, err } blockAfterLCA := *current - reorgStart := parent.Number - // We expect reorgs up to the block after (current - finalityDepth), - // since the block at (current - finalityDepth) is finalized. + // We expect reorgs up to the block after latestFinalizedBlock // We loop via parent instead of current so current always holds the LCA+1. // If the parent block number becomes < the first finalized block our reorg is too deep. - for parent.Number >= (reorgStart - lp.finalityDepth) { + // This can happen only if finalityTag is not enabled and fixed finalityDepth is provided via config. + for parent.Number >= latestFinalizedBlockNumber { ourParentBlockHash, err := lp.orm.SelectBlockByNumber(parent.Number, pg.WithParentCtx(ctx)) if err != nil { return nil, err } @@ -912,28 +938,25 @@ func (lp *logPoller) findBlockAfterLCA(ctx context.Context, current *evmtypes.He return nil, err } } - lp.lggr.Criticalw("Reorg greater than finality depth detected", "max reorg depth", lp.finalityDepth-1) + lp.lggr.Criticalw("Reorg greater than finality depth detected", "finalityTag", lp.useFinalityTag, "current", current.Number, "latestFinalized", latestFinalizedBlockNumber) rerr := errors.New("Reorg greater than finality depth") lp.SvcErrBuffer.Append(rerr) return nil, rerr } -// pruneOldBlocks removes blocks that are > lp.ancientBlockDepth behind the head. +// pruneOldBlocks removes blocks that are > lp.keepFinalizedBlocksDepth behind the latest finalized block. func (lp *logPoller) pruneOldBlocks(ctx context.Context) error { - latest, err := lp.ec.HeadByNumber(ctx, nil) + _, latestFinalizedBlock, err := lp.latestBlocks(ctx) if err != nil { return err } - if latest == nil { - return errors.Errorf("received nil block from RPC") - } - if latest.Number <= lp.keepBlocksDepth { + if latestFinalizedBlock <= lp.keepFinalizedBlocksDepth { // No-op, keep all blocks return nil } - // 1-2-3-4-5(latest), keepBlocksDepth=3 // Remove <= 2 - return lp.orm.DeleteBlocksBefore(latest.Number-lp.keepBlocksDepth, pg.WithParentCtx(ctx)) + // 1-2-3-4-5(finalized)-6-7(latest), keepFinalizedBlocksDepth=3 // Remove <= 2 + return lp.orm.DeleteBlocksBefore(latestFinalizedBlock-lp.keepFinalizedBlocksDepth, pg.WithParentCtx(ctx)) } // Logs returns logs matching topics and address (exactly) in the given block range, @@ -984,12 +1007,6 @@ func (lp *logPoller) IndexedLogsTopicGreaterThan(eventSig common.Hash, address c return lp.orm.SelectIndexedLogsTopicGreaterThan(address, eventSig, topicIndex, topicValueMin, confs, qopts...) } -// LogsUntilBlockHashDataWordGreaterThan note index is 0 based. -// If the blockhash is not found (i.e. a stale fork) it will error. -func (lp *logPoller) LogsUntilBlockHashDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, untilBlockHash common.Hash, qopts ...pg.QOpt) ([]Log, error) { - return lp.orm.SelectLogsUntilBlockHashDataWordGreaterThan(address, eventSig, wordIndex, wordValueMin, untilBlockHash, qopts...)
-} - func (lp *logPoller) IndexedLogsTopicRange(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, topicValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { return lp.orm.SelectIndexedLogsTopicRange(address, eventSig, topicIndex, topicValueMin, topicValueMax, confs, qopts...) } @@ -1043,7 +1060,7 @@ func (lp *logPoller) GetBlocksRange(ctx context.Context, numbers []uint64, qopts qopts = append(qopts, pg.WithParentCtx(ctx)) minRequestedBlock := int64(mathutil.Min(numbers[0], numbers[1:]...)) maxRequestedBlock := int64(mathutil.Max(numbers[0], numbers[1:]...)) - lpBlocks, err := lp.orm.GetBlocksRange(minRequestedBlock, maxRequestedBlock, qopts...) + lpBlocks, err := lp.orm.GetBlocksRange(int64(minRequestedBlock), int64(maxRequestedBlock), qopts...) if err != nil { lp.lggr.Warnw("Error while retrieving blocks from log pollers blocks table. Falling back to RPC...", "requestedBlocks", numbers, "err", err) } else { @@ -1086,17 +1103,10 @@ func (lp *logPoller) fillRemainingBlocksFromRPC( blocksRequested map[uint64]struct{}, blocksFound map[uint64]LogPollerBlock, ) (map[uint64]LogPollerBlock, error) { - var reqs []rpc.BatchElem - var remainingBlocks []uint64 + var remainingBlocks []string for num := range blocksRequested { if _, ok := blocksFound[num]; !ok { - req := rpc.BatchElem{ - Method: "eth_getBlockByNumber", - Args: []interface{}{hexutil.EncodeBig(big.NewInt(0).SetUint64(num)), false}, - Result: &evmtypes.Head{}, - } - reqs = append(reqs, req) - remainingBlocks = append(remainingBlocks, num) + remainingBlocks = append(remainingBlocks, hexutil.EncodeBig(new(big.Int).SetUint64(num))) } } @@ -1105,8 +1115,37 @@ func (lp *logPoller) fillRemainingBlocksFromRPC( "remainingBlocks", remainingBlocks) } - for i := 0; i < len(reqs); i += int(lp.rpcBatchSize) { - j := i + int(lp.rpcBatchSize) + evmBlocks, err := lp.batchFetchBlocks(ctx, remainingBlocks, lp.rpcBatchSize) + if err != nil { + return nil, err + } + + logPollerBlocks := make(map[uint64]LogPollerBlock) + for _, head := range evmBlocks { + logPollerBlocks[uint64(head.Number)] = LogPollerBlock{ + EvmChainId: head.EVMChainID, + BlockHash: head.Hash, + BlockNumber: head.Number, + BlockTimestamp: head.Timestamp, + CreatedAt: head.Timestamp, + } + } + return logPollerBlocks, nil +} + +func (lp *logPoller) batchFetchBlocks(ctx context.Context, blocksRequested []string, batchSize int64) ([]*evmtypes.Head, error) { + reqs := make([]rpc.BatchElem, 0, len(blocksRequested)) + for _, num := range blocksRequested { + req := rpc.BatchElem{ + Method: "eth_getBlockByNumber", + Args: []interface{}{num, false}, + Result: &evmtypes.Head{}, + } + reqs = append(reqs, req) + } + + for i := 0; i < len(reqs); i += int(batchSize) { + j := i + int(batchSize) if j > len(reqs) { j = len(reqs) } @@ -1117,7 +1156,7 @@ func (lp *logPoller) fillRemainingBlocksFromRPC( } } - var blocksFoundFromRPC = make(map[uint64]LogPollerBlock) + var blocks = make([]*evmtypes.Head, 0, len(reqs)) for _, r := range reqs { if r.Error != nil { return nil, r.Error @@ -1136,16 +1175,10 @@ func (lp *logPoller) fillRemainingBlocksFromRPC( if block.Number < 0 { return nil, errors.Errorf("expected block number to be >= to 0, got %d", block.Number) } - blocksFoundFromRPC[uint64(block.Number)] = LogPollerBlock{ - EvmChainId: block.EVMChainID, - BlockHash: block.Hash, - BlockNumber: block.Number, - BlockTimestamp: block.Timestamp, - CreatedAt: block.Timestamp, - } + blocks = append(blocks, block) } - return blocksFoundFromRPC, nil + return 
blocks, nil } // IndexedLogsWithSigsExcluding returns the set difference(A-B) of logs with signature sigA and sigB, matching is done on the topics index diff --git a/core/chains/evm/logpoller/log_poller_internal_test.go b/core/chains/evm/logpoller/log_poller_internal_test.go index 271d8c2a582..b9474158a6b 100644 --- a/core/chains/evm/logpoller/log_poller_internal_test.go +++ b/core/chains/evm/logpoller/log_poller_internal_test.go @@ -2,7 +2,9 @@ package logpoller import ( "context" + "fmt" "math/big" + "reflect" "strings" "sync" "testing" @@ -11,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -24,6 +27,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/pg" + "github.com/smartcontractkit/chainlink/v2/core/utils" ) var ( @@ -57,7 +61,7 @@ func TestLogPoller_RegisterFilter(t *testing.T) { orm := NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) // Set up a test chain with a log emitting contract deployed. - lp := NewLogPoller(orm, nil, lggr, time.Hour, 1, 1, 2, 1000) + lp := NewLogPoller(orm, nil, lggr, time.Hour, false, 1, 1, 2, 1000) // We expect a zero Filter if nothing registered yet. f := lp.Filter(nil, nil, nil) @@ -211,7 +215,7 @@ func TestLogPoller_BackupPollerStartup(t *testing.T) { ctx := testutils.Context(t) - lp := NewLogPoller(orm, ec, lggr, 1*time.Hour, 2, 3, 2, 1000) + lp := NewLogPoller(orm, ec, lggr, 1*time.Hour, false, 2, 3, 2, 1000) lp.BackupPollAndSaveLogs(ctx, 100) assert.Equal(t, int64(0), lp.backupPollerNextBlock) assert.Equal(t, 1, observedLogs.FilterMessageSnippet("ran before first successful log poller run").Len()) @@ -252,7 +256,7 @@ func TestLogPoller_Replay(t *testing.T) { ec.On("HeadByNumber", mock.Anything, mock.Anything).Return(&head, nil) ec.On("FilterLogs", mock.Anything, mock.Anything).Return([]types.Log{log1}, nil).Once() ec.On("ConfiguredChainID").Return(chainID, nil) - lp := NewLogPoller(orm, ec, lggr, time.Hour, 3, 3, 3, 20) + lp := NewLogPoller(orm, ec, lggr, time.Hour, false, 3, 3, 3, 20) // process 1 log in block 3 lp.PollAndSaveLogs(tctx, 4) @@ -456,9 +460,80 @@ func TestLogPoller_Replay(t *testing.T) { }) } +func Test_latestBlockAndFinalityDepth(t *testing.T) { + tctx := testutils.Context(t) + lggr, _ := logger.TestLoggerObserved(t, zapcore.ErrorLevel) + chainID := testutils.FixtureChainID + db := pgtest.NewSqlxDB(t) + orm := NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) + + t.Run("pick latest block from chain and use finality from config with finality disabled", func(t *testing.T) { + head := evmtypes.Head{Number: 4} + finalityDepth := int64(3) + ec := evmclimocks.NewClient(t) + ec.On("HeadByNumber", mock.Anything, mock.Anything).Return(&head, nil) + + lp := NewLogPoller(orm, ec, lggr, time.Hour, false, finalityDepth, 3, 3, 20) + latestBlock, lastFinalizedBlockNumber, err := lp.latestBlocks(tctx) + require.NoError(t, err) + require.Equal(t, latestBlock.Number, head.Number) + require.Equal(t, finalityDepth, latestBlock.Number-lastFinalizedBlockNumber) + }) + + t.Run("finality tags in use", func(t *testing.T) { + t.Run("client returns data properly", func(t *testing.T) { + expectedLatestBlockNumber := int64(20) + expectedLastFinalizedBlockNumber := int64(12) + 
ec := evmclimocks.NewClient(t) + ec.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 2 && + reflect.DeepEqual(b[0].Args, []interface{}{"latest", false}) && + reflect.DeepEqual(b[1].Args, []interface{}{"finalized", false}) + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + // Latest block details + *(elems[0].Result.(*evmtypes.Head)) = evmtypes.Head{Number: expectedLatestBlockNumber, Hash: utils.RandomBytes32()} + // Finalized block details + *(elems[1].Result.(*evmtypes.Head)) = evmtypes.Head{Number: expectedLastFinalizedBlockNumber, Hash: utils.RandomBytes32()} + }) + + lp := NewLogPoller(orm, ec, lggr, time.Hour, true, 3, 3, 3, 20) + + latestBlock, lastFinalizedBlockNumber, err := lp.latestBlocks(tctx) + require.NoError(t, err) + require.Equal(t, expectedLatestBlockNumber, latestBlock.Number) + require.Equal(t, expectedLastFinalizedBlockNumber, lastFinalizedBlockNumber) + }) + + t.Run("client returns error for at least one of the calls", func(t *testing.T) { + ec := evmclimocks.NewClient(t) + ec.On("BatchCallContext", mock.Anything, mock.Anything).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + // Latest block details + *(elems[0].Result.(*evmtypes.Head)) = evmtypes.Head{Number: 10} + // Finalized block details + elems[1].Error = fmt.Errorf("some error") + }) + + lp := NewLogPoller(orm, ec, lggr, time.Hour, true, 3, 3, 3, 20) + _, _, err := lp.latestBlocks(tctx) + require.Error(t, err) + }) + + t.Run("BatchCall returns an error", func(t *testing.T) { + ec := evmclimocks.NewClient(t) + ec.On("BatchCallContext", mock.Anything, mock.Anything).Return(fmt.Errorf("some error")) + + lp := NewLogPoller(orm, ec, lggr, time.Hour, true, 3, 3, 3, 20) + _, _, err := lp.latestBlocks(tctx) + require.Error(t, err) + }) + }) +} + func benchmarkFilter(b *testing.B, nFilters, nAddresses, nEvents int) { lggr := logger.TestLogger(b) - lp := NewLogPoller(nil, nil, lggr, 1*time.Hour, 2, 3, 2, 1000) + lp := NewLogPoller(nil, nil, lggr, 1*time.Hour, false, 2, 3, 2, 1000) for i := 0; i < nFilters; i++ { var addresses []common.Address var events []common.Hash diff --git a/core/chains/evm/logpoller/log_poller_test.go b/core/chains/evm/logpoller/log_poller_test.go index f6be57aa6e7..f6fc333ff73 100644 --- a/core/chains/evm/logpoller/log_poller_test.go +++ b/core/chains/evm/logpoller/log_poller_test.go @@ -76,7 +76,7 @@ func populateDatabase(t testing.TB, o *logpoller.DbORM, chainID *big.Int) (commo } require.NoError(t, o.InsertLogs(logs)) - require.NoError(t, o.InsertBlock(utils.RandomAddress().Hash(), int64((j+1)*1000-1), startDate.Add(time.Duration(j*1000)*time.Hour))) + require.NoError(t, o.InsertBlock(utils.RandomAddress().Hash(), int64((j+1)*1000-1), startDate.Add(time.Duration(j*1000)*time.Hour), 0)) } return event1, address1, address2 @@ -120,7 +120,7 @@ func TestPopulateLoadedDB(t *testing.T) { }() // Confirm all the logs. - require.NoError(t, o.InsertBlock(common.HexToHash("0x10"), 1000000, time.Now())) + require.NoError(t, o.InsertBlock(common.HexToHash("0x10"), 1000000, time.Now(), 0)) func() { defer logRuntime(t, time.Now()) lgs, err1 := o.SelectLogsDataWordRange(address1, event1, 0, logpoller.EvmWord(500000), logpoller.EvmWord(500020), 0) @@ -145,7 +145,7 @@ func TestPopulateLoadedDB(t *testing.T) { } func TestLogPoller_Integration(t *testing.T) { - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) th.Client.Commit() // Block 2. 
Ensure we have finality number of blocks err := th.LogPoller.RegisterFilter(logpoller.Filter{"Integration test", []common.Hash{EmitterABI.Events["Log1"].ID}, []common.Address{th.EmitterAddress1}, 0}) @@ -221,149 +221,344 @@ func TestLogPoller_Integration(t *testing.T) { // for the same block hash. We should be able to handle this without missing any logs, as // long as the logs returned for finalized blocks are consistent. func Test_BackupLogPoller(t *testing.T) { - th := SetupTH(t, 2, 3, 2) - // later, we will need at least 32 blocks filled with logs for cache invalidation - for i := int64(0); i < 32; i++ { - // to invalidate geth's internal read-cache, a matching log must be found in the bloom Filter - // for each of the 32 blocks - tx, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(i + 7)}) - require.NoError(t, err) - require.NotNil(t, tx) - th.Client.Commit() + tests := []struct { + name string + finalityDepth int64 + finalityTag bool + }{ + { + name: "fixed finality depth without finality tag", + finalityDepth: 2, + finalityTag: false, + }, + { + name: "chain finality in use", + finalityDepth: 0, + finalityTag: true, + }, } - ctx := testutils.Context(t) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + th := SetupTH(t, tt.finalityTag, tt.finalityDepth, 3, 2) + // later, we will need at least 32 blocks filled with logs for cache invalidation + for i := int64(0); i < 32; i++ { + // to invalidate geth's internal read-cache, a matching log must be found in the bloom Filter + // for each of the 32 blocks + tx, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(i + 7)}) + require.NoError(t, err) + require.NotNil(t, tx) + th.Client.Commit() + } - filter1 := logpoller.Filter{"filter1", []common.Hash{ - EmitterABI.Events["Log1"].ID, - EmitterABI.Events["Log2"].ID}, - []common.Address{th.EmitterAddress1}, - 0} - err := th.LogPoller.RegisterFilter(filter1) - require.NoError(t, err) + ctx := testutils.Context(t) - filters, err := th.ORM.LoadFilters(pg.WithParentCtx(testutils.Context(t))) - require.NoError(t, err) - require.Equal(t, 1, len(filters)) - require.Equal(t, filter1, filters["filter1"]) + filter1 := logpoller.Filter{"filter1", []common.Hash{ + EmitterABI.Events["Log1"].ID, + EmitterABI.Events["Log2"].ID}, + []common.Address{th.EmitterAddress1}, + 0} + err := th.LogPoller.RegisterFilter(filter1) + require.NoError(t, err) - err = th.LogPoller.RegisterFilter( - logpoller.Filter{"filter2", - []common.Hash{EmitterABI.Events["Log1"].ID}, - []common.Address{th.EmitterAddress2}, 0}) - require.NoError(t, err) + filters, err := th.ORM.LoadFilters(pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + require.Equal(t, 1, len(filters)) + require.Equal(t, filter1, filters["filter1"]) - defer func() { - assert.NoError(t, th.LogPoller.UnregisterFilter("filter1")) - }() - defer func() { - assert.NoError(t, th.LogPoller.UnregisterFilter("filter2")) - }() + err = th.LogPoller.RegisterFilter( + logpoller.Filter{"filter2", + []common.Hash{EmitterABI.Events["Log1"].ID}, + []common.Address{th.EmitterAddress2}, 0}) + require.NoError(t, err) - // generate some tx's with logs - tx1, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(1)}) - require.NoError(t, err) - require.NotNil(t, tx1) + defer func() { + assert.NoError(t, th.LogPoller.UnregisterFilter("filter1")) + }() + defer func() { + assert.NoError(t, th.LogPoller.UnregisterFilter("filter2")) + }() - tx2, err := th.Emitter1.EmitLog2(th.Owner, []*big.Int{big.NewInt(2)}) - require.NoError(t, err) 
- require.NotNil(t, tx2) + // generate some tx's with logs + tx1, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(1)}) + require.NoError(t, err) + require.NotNil(t, tx1) - tx3, err := th.Emitter2.EmitLog1(th.Owner, []*big.Int{big.NewInt(3)}) - require.NoError(t, err) - require.NotNil(t, tx3) + tx2, err := th.Emitter1.EmitLog2(th.Owner, []*big.Int{big.NewInt(2)}) + require.NoError(t, err) + require.NotNil(t, tx2) + + tx3, err := th.Emitter2.EmitLog1(th.Owner, []*big.Int{big.NewInt(3)}) + require.NoError(t, err) + require.NotNil(t, tx3) + + th.Client.Commit() // commit block 34 with 3 tx's included + + h := th.Client.Blockchain().CurrentHeader() // get latest header + require.Equal(t, uint64(34), h.Number.Uint64()) + + // save these 3 receipts for later + receipts := rawdb.ReadReceipts(th.EthDB, h.Hash(), h.Number.Uint64(), uint64(time.Now().Unix()), params.AllEthashProtocolChanges) + require.NotZero(t, receipts.Len()) + + // Simulate a situation where the rpc server has a block, but no logs available for it yet + // this can't happen with geth itself, but can with other clients. + rawdb.WriteReceipts(th.EthDB, h.Hash(), h.Number.Uint64(), types.Receipts{}) // wipes out all logs for block 34 + + body := rawdb.ReadBody(th.EthDB, h.Hash(), h.Number.Uint64()) + require.Equal(t, 3, len(body.Transactions)) + txs := body.Transactions // save transactions for later + body.Transactions = types.Transactions{} // number of tx's must match # of logs for GetLogs() to succeed + rawdb.WriteBody(th.EthDB, h.Hash(), h.Number.Uint64(), body) + + currentBlock := th.PollAndSaveLogs(ctx, 1) + assert.Equal(t, int64(35), currentBlock) + + // simulate logs becoming available + rawdb.WriteReceipts(th.EthDB, h.Hash(), h.Number.Uint64(), receipts) + require.True(t, rawdb.HasReceipts(th.EthDB, h.Hash(), h.Number.Uint64())) + body.Transactions = txs + rawdb.WriteBody(th.EthDB, h.Hash(), h.Number.Uint64(), body) + + // flush out cached block 34 by reading logs from first 32 blocks + query := ethereum.FilterQuery{ + FromBlock: big.NewInt(int64(2)), + ToBlock: big.NewInt(int64(33)), + Addresses: []common.Address{th.EmitterAddress1}, + Topics: [][]common.Hash{{EmitterABI.Events["Log1"].ID}}, + } + fLogs, err := th.Client.FilterLogs(ctx, query) + require.NoError(t, err) + require.Equal(t, 32, len(fLogs)) - th.Client.Commit() // commit block 34 with 3 tx's included + // logs shouldn't show up yet + logs, err := th.LogPoller.Logs(34, 34, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + assert.Equal(t, 0, len(logs)) + + th.Client.Commit() + th.Client.Commit() + markBlockAsFinalized(t, th, 34) - h := th.Client.Blockchain().CurrentHeader() // get latest header - require.Equal(t, uint64(34), h.Number.Uint64()) + // Run ordinary poller + backup poller at least once + currentBlock, _ = th.LogPoller.LatestBlock(pg.WithParentCtx(testutils.Context(t))) + th.LogPoller.PollAndSaveLogs(ctx, currentBlock+1) + th.LogPoller.BackupPollAndSaveLogs(ctx, 100) + currentBlock, _ = th.LogPoller.LatestBlock(pg.WithParentCtx(testutils.Context(t))) - // save these 3 receipts for later - receipts := rawdb.ReadReceipts(th.EthDB, h.Hash(), h.Number.Uint64(), uint64(time.Now().Unix()), params.AllEthashProtocolChanges) - require.NotZero(t, receipts.Len()) + require.Equal(t, int64(37), currentBlock+1) - // Simulate a situation where the rpc server has a block, but no logs available for it yet - // this can't happen with geth itself, but can with other clients. 
- rawdb.WriteReceipts(th.EthDB, h.Hash(), h.Number.Uint64(), types.Receipts{}) // wipes out all logs for block 34 + // logs still shouldn't show up, because we don't want to backfill the last finalized log + // to help with reorg detection + logs, err = th.LogPoller.Logs(34, 34, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + assert.Equal(t, 0, len(logs)) + th.Client.Commit() + markBlockAsFinalized(t, th, 35) - body := rawdb.ReadBody(th.EthDB, h.Hash(), h.Number.Uint64()) - require.Equal(t, 3, len(body.Transactions)) - txs := body.Transactions // save transactions for later - body.Transactions = types.Transactions{} // number of tx's must match # of logs for GetLogs() to succeed - rawdb.WriteBody(th.EthDB, h.Hash(), h.Number.Uint64(), body) + // Run ordinary poller + backup poller at least once more + th.LogPoller.PollAndSaveLogs(ctx, currentBlock+1) + th.LogPoller.BackupPollAndSaveLogs(ctx, 100) + currentBlock, _ = th.LogPoller.LatestBlock(pg.WithParentCtx(testutils.Context(t))) + require.Equal(t, int64(38), currentBlock+1) + + // all 3 logs in block 34 should show up now, thanks to backup logger + logs, err = th.LogPoller.Logs(30, 37, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + assert.Equal(t, 5, len(logs)) + logs, err = th.LogPoller.Logs(34, 34, EmitterABI.Events["Log2"].ID, th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + assert.Equal(t, 1, len(logs)) + logs, err = th.LogPoller.Logs(32, 36, EmitterABI.Events["Log1"].ID, th.EmitterAddress2, + pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + assert.Equal(t, 1, len(logs)) + }) + } +} + +func TestLogPoller_BackupPollAndSaveLogsWithPollerNotWorking(t *testing.T) { + emittedLogs := 30 + // Intentionally use very low backupLogPollerDelay to verify if finality is used properly + backupLogPollerDelay := int64(0) + ctx := testutils.Context(t) + th := SetupTH(t, true, 0, 3, 2) + + header, err := th.Client.HeaderByNumber(ctx, nil) + require.NoError(t, err) + + // Emit some logs in blocks + for i := 0; i < emittedLogs; i++ { + _, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err) + th.Client.Commit() + } + + // First PollAndSave, no filters are registered + // 0 (finalized) -> 1 -> 2 -> ... currentBlock := th.PollAndSaveLogs(ctx, 1) - assert.Equal(t, int64(35), currentBlock) + // currentBlock should be blockChain start + number of emitted logs + 1 + assert.Equal(t, int64(emittedLogs)+header.Number.Int64()+1, currentBlock) - // simulate logs becoming available - rawdb.WriteReceipts(th.EthDB, h.Hash(), h.Number.Uint64(), receipts) - require.True(t, rawdb.HasReceipts(th.EthDB, h.Hash(), h.Number.Uint64())) - body.Transactions = txs - rawdb.WriteBody(th.EthDB, h.Hash(), h.Number.Uint64(), body) + // LogPoller not working, but chain in the meantime has progressed + // 0 -> 1 -> 2 -> ... -> currentBlock - 10 (finalized) -> .. 
-> currentBlock + markBlockAsFinalized(t, th, currentBlock-10) - // flush out cached block 34 by reading logs from first 32 blocks - query := ethereum.FilterQuery{ - FromBlock: big.NewInt(int64(2)), - ToBlock: big.NewInt(int64(33)), + err = th.LogPoller.RegisterFilter(logpoller.Filter{ + Name: "Test Emitter", + EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, Addresses: []common.Address{th.EmitterAddress1}, - Topics: [][]common.Hash{{EmitterABI.Events["Log1"].ID}}, - } - fLogs, err := th.Client.FilterLogs(ctx, query) + }) require.NoError(t, err) - require.Equal(t, 32, len(fLogs)) - // logs shouldn't show up yet - logs, err := th.LogPoller.Logs(34, 34, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, - pg.WithParentCtx(testutils.Context(t))) + // LogPoller should backfill starting from the last finalized block stored in db (genesis block) + // till the latest finalized block reported by chain. + th.LogPoller.BackupPollAndSaveLogs(ctx, backupLogPollerDelay) + require.NoError(t, err) + + logs, err := th.LogPoller.Logs( + 0, + currentBlock, + EmitterABI.Events["Log1"].ID, + th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t)), + ) require.NoError(t, err) - assert.Equal(t, 0, len(logs)) + require.Len(t, logs, emittedLogs-10) + // Progressing even more, move blockchain forward by 1 block and mark it as finalized th.Client.Commit() + markBlockAsFinalized(t, th, currentBlock) + th.LogPoller.BackupPollAndSaveLogs(ctx, backupLogPollerDelay) + + // All emitted logs should be backfilled + logs, err = th.LogPoller.Logs( + 0, + currentBlock+1, + EmitterABI.Events["Log1"].ID, + th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t)), + ) + require.NoError(t, err) + require.Len(t, logs, emittedLogs) +} + +func TestLogPoller_BackupPollAndSaveLogsWithDeepBlockDelay(t *testing.T) { + emittedLogs := 30 + ctx := testutils.Context(t) + th := SetupTH(t, true, 0, 3, 2) + + // Emit some logs in blocks + for i := 0; i < emittedLogs; i++ { + _, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err) + th.Client.Commit() + } + // Emit one more empty block th.Client.Commit() - // Run ordinary poller + backup poller at least once - currentBlock, _ = th.LogPoller.LatestBlock(pg.WithParentCtx(testutils.Context(t))) - th.LogPoller.PollAndSaveLogs(ctx, currentBlock+1) - th.LogPoller.BackupPollAndSaveLogs(ctx, 100) - currentBlock, _ = th.LogPoller.LatestBlock(pg.WithParentCtx(testutils.Context(t))) + header, err := th.Client.HeaderByNumber(ctx, nil) + require.NoError(t, err) + // Mark everything as finalized + markBlockAsFinalized(t, th, header.Number.Int64()) - require.Equal(t, int64(37), currentBlock+1) + // First PollAndSave, no filters are registered, but finalization is the same as the latest block + // 1 -> 2 -> ... 
+ th.PollAndSaveLogs(ctx, 1) - // logs still shouldn't show up, because we don't want to backfill the last finalized log - // to help with reorg detection - logs, err = th.LogPoller.Logs(34, 34, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, - pg.WithParentCtx(testutils.Context(t))) + // Register filter + err = th.LogPoller.RegisterFilter(logpoller.Filter{ + Name: "Test Emitter", + EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, + Addresses: []common.Address{th.EmitterAddress1}, + }) require.NoError(t, err) - assert.Equal(t, 0, len(logs)) - th.Client.Commit() + // Should fallback to the backupPollerBlockDelay when finalization was very high in a previous PollAndSave + th.LogPoller.BackupPollAndSaveLogs(ctx, int64(emittedLogs)) + require.NoError(t, err) - // Run ordinary poller + backup poller at least once more - th.LogPoller.PollAndSaveLogs(ctx, currentBlock+1) - th.LogPoller.BackupPollAndSaveLogs(ctx, 100) - currentBlock, _ = th.LogPoller.LatestBlock(pg.WithParentCtx(testutils.Context(t))) + // All emitted logs should be backfilled + logs, err := th.LogPoller.Logs( + 0, + header.Number.Int64()+1, + EmitterABI.Events["Log1"].ID, + th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t)), + ) + require.NoError(t, err) + require.Len(t, logs, emittedLogs) +} + +func TestLogPoller_BackupPollAndSaveLogsSkippingLogsThatAreTooOld(t *testing.T) { + logsBatch := 10 + // Intentionally use very low backupLogPollerDelay to verify if finality is used properly + ctx := testutils.Context(t) + th := SetupTH(t, true, 0, 3, 2) - require.Equal(t, int64(38), currentBlock+1) + //header, err := th.Client.HeaderByNumber(ctx, nil) + //require.NoError(t, err) - // all 3 logs in block 34 should show up now, thanks to backup logger - logs, err = th.LogPoller.Logs(30, 37, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, - pg.WithParentCtx(testutils.Context(t))) + // Emit some logs in blocks + for i := 1; i <= logsBatch; i++ { + _, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err) + th.Client.Commit() + } + + // First PollAndSave, no filters are registered, but finalization is the same as the latest block + // 1 -> 2 -> ... -> firstBatchBlock + firstBatchBlock := th.PollAndSaveLogs(ctx, 1) + // Mark current tip of the chain as finalized (after emitting 10 logs) + markBlockAsFinalized(t, th, firstBatchBlock) + + // Emit 2nd batch of block + for i := 1; i <= logsBatch; i++ { + _, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(100 + i))}) + require.NoError(t, err) + th.Client.Commit() + } + + // 1 -> 2 -> ... -> firstBatchBlock (finalized) -> .. 
-> firstBatchBlock + emitted logs + secondBatchBlock := th.PollAndSaveLogs(ctx, firstBatchBlock) + // Mark current tip of the block as finalized (after emitting 20 logs) + markBlockAsFinalized(t, th, secondBatchBlock) + + // Register filter + err := th.LogPoller.RegisterFilter(logpoller.Filter{ + Name: "Test Emitter", + EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, + Addresses: []common.Address{th.EmitterAddress1}, + }) require.NoError(t, err) - assert.Equal(t, 5, len(logs)) - logs, err = th.LogPoller.Logs(34, 34, EmitterABI.Events["Log2"].ID, th.EmitterAddress1, - pg.WithParentCtx(testutils.Context(t))) + + // Should pick logs starting from one block behind the latest finalized block + th.LogPoller.BackupPollAndSaveLogs(ctx, 0) require.NoError(t, err) - assert.Equal(t, 1, len(logs)) - logs, err = th.LogPoller.Logs(32, 36, EmitterABI.Events["Log1"].ID, th.EmitterAddress2, - pg.WithParentCtx(testutils.Context(t))) + + // Only the 2nd batch + 1 log from a previous batch should be backfilled, because we perform backfill starting + // from one block behind the latest finalized block + logs, err := th.LogPoller.Logs( + 0, + secondBatchBlock, + EmitterABI.Events["Log1"].ID, + th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t)), + ) require.NoError(t, err) - assert.Equal(t, 1, len(logs)) + require.Len(t, logs, logsBatch+1) + require.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000009`), logs[0].Data) } func TestLogPoller_BlockTimestamps(t *testing.T) { t.Parallel() ctx := testutils.Context(t) - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) addresses := []common.Address{th.EmitterAddress1, th.EmitterAddress2} topics := []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID} @@ -468,7 +663,7 @@ func TestLogPoller_SynchronizedWithGeth(t *testing.T) { }, 10e6) _, _, emitter1, err := log_emitter.DeployLogEmitter(owner, ec) require.NoError(t, err) - lp := logpoller.NewLogPoller(orm, client.NewSimulatedBackendClient(t, ec, chainID), lggr, 15*time.Second, int64(finalityDepth), 3, 2, 1000) + lp := logpoller.NewLogPoller(orm, client.NewSimulatedBackendClient(t, ec, chainID), lggr, 15*time.Second, false, int64(finalityDepth), 3, 2, 1000) for i := 0; i < finalityDepth; i++ { // Have enough blocks that we could reorg the full finalityDepth-1. ec.Commit() } @@ -535,223 +730,335 @@ func TestLogPoller_SynchronizedWithGeth(t *testing.T) { func TestLogPoller_PollAndSaveLogs(t *testing.T) { t.Parallel() - th := SetupTH(t, 2, 3, 2) - // Set up a log poller listening for log emitter logs. - err := th.LogPoller.RegisterFilter(logpoller.Filter{ - "Test Emitter 1 & 2", []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, - []common.Address{th.EmitterAddress1, th.EmitterAddress2}, 0, - }) - require.NoError(t, err) + tests := []struct { + name string + finalityDepth int64 + finalityTag bool + }{ + { + name: "fixed finality depth without finality tag", + finalityDepth: 3, + finalityTag: false, + }, + { + name: "chain finality in use", + finalityDepth: 0, + finalityTag: true, + }, + } - b, err := th.Client.BlockByNumber(testutils.Context(t), nil) - require.NoError(t, err) - require.Equal(t, uint64(1), b.NumberU64()) - require.Equal(t, uint64(10), b.Time()) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + th := SetupTH(t, tt.finalityTag, tt.finalityDepth, 3, 2) - // Test scenario: single block in chain, no logs. 
- // Chain genesis <- 1 - // DB: empty - newStart := th.PollAndSaveLogs(testutils.Context(t), 1) - assert.Equal(t, int64(2), newStart) + // Set up a log poller listening for log emitter logs. + err := th.LogPoller.RegisterFilter(logpoller.Filter{ + "Test Emitter 1 & 2", []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, + []common.Address{th.EmitterAddress1, th.EmitterAddress2}, 0, + }) + require.NoError(t, err) - // We expect to have saved block 1. - lpb, err := th.ORM.SelectBlockByNumber(1) - require.NoError(t, err) - assert.Equal(t, lpb.BlockHash, b.Hash()) - assert.Equal(t, lpb.BlockNumber, int64(b.NumberU64())) - assert.Equal(t, int64(1), int64(b.NumberU64())) - assert.Equal(t, uint64(10), b.Time()) + b, err := th.Client.BlockByNumber(testutils.Context(t), nil) + require.NoError(t, err) + require.Equal(t, uint64(1), b.NumberU64()) + require.Equal(t, uint64(10), b.Time()) - // No logs. - lgs, err := th.ORM.SelectLogsByBlockRange(1, 1) - require.NoError(t, err) - assert.Equal(t, 0, len(lgs)) - th.assertHaveCanonical(t, 1, 1) + // Test scenario: single block in chain, no logs. + // Chain genesis <- 1 + // DB: empty + newStart := th.PollAndSaveLogs(testutils.Context(t), 1) + assert.Equal(t, int64(2), newStart) - // Polling again should be a noop, since we are at the latest. - newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) - assert.Equal(t, int64(2), newStart) - latest, err := th.ORM.SelectLatestBlock() - require.NoError(t, err) - assert.Equal(t, int64(1), latest.BlockNumber) - th.assertHaveCanonical(t, 1, 1) + // We expect to have saved block 1. + lpb, err := th.ORM.SelectBlockByNumber(1) + require.NoError(t, err) + assert.Equal(t, lpb.BlockHash, b.Hash()) + assert.Equal(t, lpb.BlockNumber, int64(b.NumberU64())) + assert.Equal(t, int64(1), int64(b.NumberU64())) + assert.Equal(t, uint64(10), b.Time()) - // Test scenario: one log 2 block chain. - // Chain gen <- 1 <- 2 (L1) - // DB: 1 - _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(1)}) - require.NoError(t, err) - th.Client.Commit() + // No logs. + lgs, err := th.ORM.SelectLogsByBlockRange(1, 1) + require.NoError(t, err) + assert.Equal(t, 0, len(lgs)) + th.assertHaveCanonical(t, 1, 1) - // Polling should get us the L1 log. - newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) - assert.Equal(t, int64(3), newStart) - latest, err = th.ORM.SelectLatestBlock() - require.NoError(t, err) - assert.Equal(t, int64(2), latest.BlockNumber) - lgs, err = th.ORM.SelectLogsByBlockRange(1, 3) - require.NoError(t, err) - require.Equal(t, 1, len(lgs)) - assert.Equal(t, th.EmitterAddress1, lgs[0].Address) - assert.Equal(t, latest.BlockHash, lgs[0].BlockHash) - assert.Equal(t, latest.BlockTimestamp, lgs[0].BlockTimestamp) - assert.Equal(t, hexutil.Encode(lgs[0].Topics[0]), EmitterABI.Events["Log1"].ID.String()) - assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000001`), - lgs[0].Data) - - // Test scenario: single block reorg with log. 
- // Chain gen <- 1 <- 2 (L1_1) - // \ 2'(L1_2) <- 3 - // DB: 1, 2 - // - Detect a reorg, - // - Update the block 2's hash - // - Save L1' - // - L1_1 deleted - reorgedOutBlock, err := th.Client.BlockByNumber(testutils.Context(t), big.NewInt(2)) - require.NoError(t, err) - lca, err := th.Client.BlockByNumber(testutils.Context(t), big.NewInt(1)) - require.NoError(t, err) - require.NoError(t, th.Client.Fork(testutils.Context(t), lca.Hash())) - _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(2)}) - require.NoError(t, err) - // Create 2' - th.Client.Commit() - // Create 3 (we need a new block for us to do any polling and detect the reorg). - th.Client.Commit() + // Polling again should be a noop, since we are at the latest. + newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) + assert.Equal(t, int64(2), newStart) + latest, err := th.ORM.SelectLatestBlock() + require.NoError(t, err) + assert.Equal(t, int64(1), latest.BlockNumber) + th.assertHaveCanonical(t, 1, 1) - newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) - assert.Equal(t, int64(4), newStart) - latest, err = th.ORM.SelectLatestBlock() - require.NoError(t, err) - assert.Equal(t, int64(3), latest.BlockNumber) - lgs, err = th.ORM.SelectLogsByBlockRange(1, 3) - require.NoError(t, err) - require.Equal(t, 1, len(lgs)) - assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000002`), lgs[0].Data) - th.assertHaveCanonical(t, 1, 3) - - // Test scenario: reorg back to previous tip. - // Chain gen <- 1 <- 2 (L1_1) <- 3' (L1_3) <- 4 - // \ 2'(L1_2) <- 3 - require.NoError(t, th.Client.Fork(testutils.Context(t), reorgedOutBlock.Hash())) - _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(3)}) - require.NoError(t, err) - // Create 3' - th.Client.Commit() - // Create 4 - th.Client.Commit() - newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) - assert.Equal(t, int64(5), newStart) - latest, err = th.ORM.SelectLatestBlock() - require.NoError(t, err) - assert.Equal(t, int64(4), latest.BlockNumber) - lgs, err = th.ORM.SelectLogsByBlockRange(1, 3) - require.NoError(t, err) - // We expect ONLY L1_1 and L1_3 since L1_2 is reorg'd out. - assert.Equal(t, 2, len(lgs)) - assert.Equal(t, int64(2), lgs[0].BlockNumber) - assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000001`), lgs[0].Data) - assert.Equal(t, int64(3), lgs[1].BlockNumber) - assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000003`), lgs[1].Data) - th.assertHaveCanonical(t, 1, 1) - th.assertHaveCanonical(t, 3, 4) - th.assertDontHave(t, 2, 2) // 2 gets backfilled - - // Test scenario: multiple logs per block for many blocks (also after reorg). - // Chain gen <- 1 <- 2 (L1_1) <- 3' L1_3 <- 4 <- 5 (L1_4, L2_5) <- 6 (L1_6) - // \ 2'(L1_2) <- 3 - // DB: 1, 2', 3' - // - Should save 4, 5, 6 blocks - // - Should obtain logs L1_3, L2_5, L1_6 - _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(4)}) - require.NoError(t, err) - _, err = th.Emitter2.EmitLog1(th.Owner, []*big.Int{big.NewInt(5)}) - require.NoError(t, err) - // Create 4 - th.Client.Commit() - _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(6)}) - require.NoError(t, err) - // Create 5 - th.Client.Commit() + // Test scenario: one log 2 block chain. 
+ // Chain gen <- 1 <- 2 (L1) + // DB: 1 + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(1)}) + require.NoError(t, err) + th.Client.Commit() - newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) - assert.Equal(t, int64(7), newStart) - lgs, err = th.ORM.SelectLogsByBlockRange(4, 6) - require.NoError(t, err) - require.Equal(t, 3, len(lgs)) - assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000004`), lgs[0].Data) - assert.Equal(t, th.EmitterAddress1, lgs[0].Address) - assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000005`), lgs[1].Data) - assert.Equal(t, th.EmitterAddress2, lgs[1].Address) - assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000006`), lgs[2].Data) - assert.Equal(t, th.EmitterAddress1, lgs[2].Address) - th.assertHaveCanonical(t, 1, 1) - th.assertDontHave(t, 2, 2) // 2 gets backfilled - th.assertHaveCanonical(t, 3, 6) - - // Test scenario: node down for exactly finality + 2 blocks - // Note we only backfill up to finalized - 1 blocks, because we need to save the - // Chain gen <- 1 <- 2 (L1_1) <- 3' L1_3 <- 4 <- 5 (L1_4, L2_5) <- 6 (L1_6) <- 7 (L1_7) <- 8 (L1_8) <- 9 (L1_9) <- 10 (L1_10) - // \ 2'(L1_2) <- 3 - // DB: 1, 2, 3, 4, 5, 6 - // - We expect block 7 to backfilled (treated as finalized) - // - Then block 8-10 to be handled block by block (treated as unfinalized). - for i := 7; i < 11; i++ { - _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) - require.NoError(t, err) - th.Client.Commit() + // Polling should get us the L1 log. + newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) + assert.Equal(t, int64(3), newStart) + latest, err = th.ORM.SelectLatestBlock() + require.NoError(t, err) + assert.Equal(t, int64(2), latest.BlockNumber) + lgs, err = th.ORM.SelectLogsByBlockRange(1, 3) + require.NoError(t, err) + require.Equal(t, 1, len(lgs)) + assert.Equal(t, th.EmitterAddress1, lgs[0].Address) + assert.Equal(t, latest.BlockHash, lgs[0].BlockHash) + assert.Equal(t, latest.BlockTimestamp, lgs[0].BlockTimestamp) + assert.Equal(t, hexutil.Encode(lgs[0].Topics[0]), EmitterABI.Events["Log1"].ID.String()) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000001`), + lgs[0].Data) + + // Test scenario: single block reorg with log. + // Chain gen <- 1 <- 2 (L1_1) + // \ 2'(L1_2) <- 3 + // DB: 1, 2 + // - Detect a reorg, + // - Update the block 2's hash + // - Save L1' + // - L1_1 deleted + reorgedOutBlock, err := th.Client.BlockByNumber(testutils.Context(t), big.NewInt(2)) + require.NoError(t, err) + lca, err := th.Client.BlockByNumber(testutils.Context(t), big.NewInt(1)) + require.NoError(t, err) + require.NoError(t, th.Client.Fork(testutils.Context(t), lca.Hash())) + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(2)}) + require.NoError(t, err) + // Create 2' + th.Client.Commit() + // Create 3 (we need a new block for us to do any polling and detect the reorg). 
+ th.Client.Commit() + + newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) + assert.Equal(t, int64(4), newStart) + latest, err = th.ORM.SelectLatestBlock() + require.NoError(t, err) + assert.Equal(t, int64(3), latest.BlockNumber) + lgs, err = th.ORM.SelectLogsByBlockRange(1, 3) + require.NoError(t, err) + require.Equal(t, 1, len(lgs)) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000002`), lgs[0].Data) + th.assertHaveCanonical(t, 1, 3) + + // Test scenario: reorg back to previous tip. + // Chain gen <- 1 <- 2 (L1_1) <- 3' (L1_3) <- 4 + // \ 2'(L1_2) <- 3 + require.NoError(t, th.Client.Fork(testutils.Context(t), reorgedOutBlock.Hash())) + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(3)}) + require.NoError(t, err) + // Create 3' + th.Client.Commit() + // Create 4 + th.Client.Commit() + // Mark block 1 as finalized + markBlockAsFinalized(t, th, 1) + newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) + assert.Equal(t, int64(5), newStart) + latest, err = th.ORM.SelectLatestBlock() + require.NoError(t, err) + assert.Equal(t, int64(4), latest.BlockNumber) + lgs, err = th.ORM.SelectLogsByBlockRange(1, 3) + require.NoError(t, err) + // We expect ONLY L1_1 and L1_3 since L1_2 is reorg'd out. + assert.Equal(t, 2, len(lgs)) + assert.Equal(t, int64(2), lgs[0].BlockNumber) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000001`), lgs[0].Data) + assert.Equal(t, int64(3), lgs[1].BlockNumber) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000003`), lgs[1].Data) + th.assertHaveCanonical(t, 1, 1) + th.assertHaveCanonical(t, 3, 4) + th.assertDontHave(t, 2, 2) // 2 gets backfilled + + // Test scenario: multiple logs per block for many blocks (also after reorg). 
+ // Chain gen <- 1 <- 2 (L1_1) <- 3' L1_3 <- 4 <- 5 (L1_4, L2_5) <- 6 (L1_6)
+ // \ 2'(L1_2) <- 3
+ // DB: 1, 2', 3'
+ // - Should save 4, 5, 6 blocks
+ // - Should obtain logs L1_3, L2_5, L1_6
+ _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(4)})
+ require.NoError(t, err)
+ _, err = th.Emitter2.EmitLog1(th.Owner, []*big.Int{big.NewInt(5)})
+ require.NoError(t, err)
+ // Create 4
+ th.Client.Commit()
+ _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(6)})
+ require.NoError(t, err)
+ // Create 5
+ th.Client.Commit()
+ // Mark block 3 as finalized
+ markBlockAsFinalized(t, th, 3)
+
+ newStart = th.PollAndSaveLogs(testutils.Context(t), newStart)
+ assert.Equal(t, int64(7), newStart)
+ lgs, err = th.ORM.SelectLogsByBlockRange(4, 6)
+ require.NoError(t, err)
+ require.Equal(t, 3, len(lgs))
+ assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000004`), lgs[0].Data)
+ assert.Equal(t, th.EmitterAddress1, lgs[0].Address)
+ assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000005`), lgs[1].Data)
+ assert.Equal(t, th.EmitterAddress2, lgs[1].Address)
+ assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000006`), lgs[2].Data)
+ assert.Equal(t, th.EmitterAddress1, lgs[2].Address)
+ th.assertHaveCanonical(t, 1, 1)
+ th.assertDontHave(t, 2, 2) // 2 gets backfilled
+ th.assertHaveCanonical(t, 3, 6)
+
+ // Test scenario: node down for exactly finality + 2 blocks
+ // Note we only backfill up to finalized - 1 blocks, because we need to save the
+ // Chain gen <- 1 <- 2 (L1_1) <- 3' L1_3 <- 4 <- 5 (L1_4, L2_5) <- 6 (L1_6) <- 7 (L1_7) <- 8 (L1_8) <- 9 (L1_9) <- 10 (L1_10)
+ // \ 2'(L1_2) <- 3
+ // DB: 1, 2, 3, 4, 5, 6
+ // - We expect block 7 to be backfilled (treated as finalized)
+ // - Then blocks 8-10 to be handled block by block (treated as unfinalized).
+ for i := 7; i < 11; i++ {
+ _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))})
+ require.NoError(t, err)
+ th.Client.Commit()
+ }
+ // Mark block 7 as finalized
+ markBlockAsFinalized(t, th, 7)
+
+ newStart = th.PollAndSaveLogs(testutils.Context(t), newStart)
+ assert.Equal(t, int64(11), newStart)
+ lgs, err = th.ORM.SelectLogsByBlockRange(7, 9)
+ require.NoError(t, err)
+ require.Equal(t, 3, len(lgs))
+ assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000007`), lgs[0].Data)
+ assert.Equal(t, int64(7), lgs[0].BlockNumber)
+ assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000008`), lgs[1].Data)
+ assert.Equal(t, int64(8), lgs[1].BlockNumber)
+ assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000009`), lgs[2].Data)
+ assert.Equal(t, int64(9), lgs[2].BlockNumber)
+ th.assertDontHave(t, 7, 7) // Do not expect to save backfilled blocks.
+ th.assertHaveCanonical(t, 8, 10) + + // Test scenario large backfill (multiple batches) + // Chain gen <- 1 <- 2 (L1_1) <- 3' L1_3 <- 4 <- 5 (L1_4, L2_5) <- 6 (L1_6) <- 7 (L1_7) <- 8 (L1_8) <- 9 (L1_9) <- 10..16 + // \ 2'(L1_2) <- 3 + // DB: 1, 2, 3, 4, 5, 6, (backfilled 7), 8, 9, 10 + // - 11, 12, 13 backfilled in batch 1 + // - 14 backfilled in batch 2 + // - 15, 16, 17 to be treated as unfinalized + for i := 11; i < 18; i++ { + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err) + th.Client.Commit() + } + // Mark block 14 as finalized + markBlockAsFinalized(t, th, 14) + + newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) + assert.Equal(t, int64(18), newStart) + lgs, err = th.ORM.SelectLogsByBlockRange(11, 17) + require.NoError(t, err) + assert.Equal(t, 7, len(lgs)) + th.assertHaveCanonical(t, 15, 16) + th.assertDontHave(t, 11, 14) // Do not expect to save backfilled blocks. + + // Verify that a custom block timestamp will get written to db correctly also + b, err = th.Client.BlockByNumber(testutils.Context(t), nil) + require.NoError(t, err) + require.Equal(t, uint64(17), b.NumberU64()) + require.Equal(t, uint64(170), b.Time()) + require.NoError(t, th.Client.AdjustTime(1*time.Hour)) + th.Client.Commit() + + b, err = th.Client.BlockByNumber(testutils.Context(t), nil) + require.NoError(t, err) + require.Equal(t, uint64(180+time.Hour.Seconds()), b.Time()) + }) } - newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) - assert.Equal(t, int64(11), newStart) - lgs, err = th.ORM.SelectLogsByBlockRange(7, 9) - require.NoError(t, err) - require.Equal(t, 3, len(lgs)) - assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000007`), lgs[0].Data) - assert.Equal(t, int64(7), lgs[0].BlockNumber) - assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000008`), lgs[1].Data) - assert.Equal(t, int64(8), lgs[1].BlockNumber) - assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000009`), lgs[2].Data) - assert.Equal(t, int64(9), lgs[2].BlockNumber) - th.assertDontHave(t, 7, 7) // Do not expect to save backfilled blocks. - th.assertHaveCanonical(t, 8, 10) - - // Test scenario large backfill (multiple batches) - // Chain gen <- 1 <- 2 (L1_1) <- 3' L1_3 <- 4 <- 5 (L1_4, L2_5) <- 6 (L1_6) <- 7 (L1_7) <- 8 (L1_8) <- 9 (L1_9) <- 10..16 - // \ 2'(L1_2) <- 3 - // DB: 1, 2, 3, 4, 5, 6, (backfilled 7), 8, 9, 10 - // - 11, 12, 13 backfilled in batch 1 - // - 14 backfilled in batch 2 - // - 15, 16, 17 to be treated as unfinalized - for i := 11; i < 18; i++ { - _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) - require.NoError(t, err) - th.Client.Commit() +} + +func TestLogPoller_PollAndSaveLogsDeepReorg(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + finalityDepth int64 + finalityTag bool + }{ + { + name: "fixed finality depth without finality tag", + finalityDepth: 3, + finalityTag: false, + }, + { + name: "chain finality in use", + finalityDepth: 0, + finalityTag: true, + }, } - newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) - assert.Equal(t, int64(18), newStart) - lgs, err = th.ORM.SelectLogsByBlockRange(11, 17) - require.NoError(t, err) - assert.Equal(t, 7, len(lgs)) - th.assertHaveCanonical(t, 15, 16) - th.assertDontHave(t, 11, 14) // Do not expect to save backfilled blocks. 
- // Verify that a custom block timestamp will get written to db correctly also - b, err = th.Client.BlockByNumber(testutils.Context(t), nil) - require.NoError(t, err) - require.Equal(t, uint64(17), b.NumberU64()) - require.Equal(t, uint64(170), b.Time()) - require.NoError(t, th.Client.AdjustTime(1*time.Hour)) - th.Client.Commit() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + th := SetupTH(t, tt.finalityTag, tt.finalityDepth, 3, 2) - b, err = th.Client.BlockByNumber(testutils.Context(t), nil) - require.NoError(t, err) - require.Equal(t, uint64(180+time.Hour.Seconds()), b.Time()) + // Set up a log poller listening for log emitter logs. + err := th.LogPoller.RegisterFilter(logpoller.Filter{ + Name: "Test Emitter", + EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, + Addresses: []common.Address{th.EmitterAddress1}, + }) + require.NoError(t, err) + + // Test scenario: one log 2 block chain. + // Chain gen <- 1 <- 2 (L1_1) + // DB: 1 + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(1)}) + require.NoError(t, err) + th.Client.Commit() + markBlockAsFinalized(t, th, 1) + + // Polling should get us the L1 log. + newStart := th.PollAndSaveLogs(testutils.Context(t), 1) + assert.Equal(t, int64(3), newStart) + // Check that L1_1 has a proper data payload + lgs, err := th.ORM.SelectLogsByBlockRange(2, 2) + require.NoError(t, err) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000001`), lgs[0].Data) + + // Single block reorg and log poller not working for a while, mine blocks and progress with finalization + // Chain gen <- 1 <- 2 (L1_1) + // \ 2'(L1_2) <- 3 <- 4 <- 5 <- 6 (finalized on chain) <- 7 <- 8 <- 9 <- 10 + lca, err := th.Client.BlockByNumber(testutils.Context(t), big.NewInt(1)) + require.NoError(t, err) + require.NoError(t, th.Client.Fork(testutils.Context(t), lca.Hash())) + // Create 2' + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(2)}) + require.NoError(t, err) + th.Client.Commit() + // Create 3-10 + for i := 3; i < 10; i++ { + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err) + th.Client.Commit() + } + markBlockAsFinalized(t, th, 6) + + newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) + assert.Equal(t, int64(10), newStart) + + // Expect L1_2 to be properly updated + lgs, err = th.ORM.SelectLogsByBlockRange(2, 2) + require.NoError(t, err) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000002`), lgs[0].Data) + th.assertHaveCanonical(t, 1, 1) + th.assertDontHave(t, 2, 5) // These blocks are backfilled + th.assertHaveCanonical(t, 6, 10) + }) + } } func TestLogPoller_LoadFilters(t *testing.T) { t.Parallel() - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) filter1 := logpoller.Filter{"first Filter", []common.Hash{ EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, []common.Address{th.EmitterAddress1, th.EmitterAddress2}, 0} @@ -802,7 +1109,7 @@ func TestLogPoller_LoadFilters(t *testing.T) { func TestLogPoller_GetBlocks_Range(t *testing.T) { t.Parallel() - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) err := th.LogPoller.RegisterFilter(logpoller.Filter{"GetBlocks Test", []common.Hash{ EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, []common.Address{th.EmitterAddress1, th.EmitterAddress2}, 0}, @@ -912,7 +1219,7 @@ func TestLogPoller_GetBlocks_Range(t *testing.T) { func TestGetReplayFromBlock(t *testing.T) { t.Parallel() - th 
:= SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) // Commit a few blocks for i := 0; i < 10; i++ { th.Client.Commit() @@ -973,7 +1280,7 @@ func TestLogPoller_DBErrorHandling(t *testing.T) { ec.Commit() ec.Commit() - lp := logpoller.NewLogPoller(o, client.NewSimulatedBackendClient(t, ec, chainID2), lggr, 1*time.Hour, 2, 3, 2, 1000) + lp := logpoller.NewLogPoller(o, client.NewSimulatedBackendClient(t, ec, chainID2), lggr, 1*time.Hour, false, 2, 3, 2, 1000) err = lp.Replay(ctx, 5) // block number too high require.ErrorContains(t, err, "Invalid replay block number") @@ -1076,7 +1383,7 @@ func TestTooManyLogResults(t *testing.T) { chainID := testutils.NewRandomEVMChainID() db := pgtest.NewSqlxDB(t) o := logpoller.NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) - lp := logpoller.NewLogPoller(o, ec, lggr, 1*time.Hour, 2, 20, 10, 1000) + lp := logpoller.NewLogPoller(o, ec, lggr, 1*time.Hour, false, 2, 20, 10, 1000) expected := []int64{10, 5, 2, 1} clientErr := client.JsonError{ @@ -1153,62 +1460,206 @@ func TestTooManyLogResults(t *testing.T) { assert.Contains(t, crit[0].Message, "Too many log results in a single block") } -func Test_CreatedAfterQueriesWithBackfill(t *testing.T) { - emittedLogs := 60 - finalityDepth := 10 +func Test_PollAndQueryFinalizedBlocks(t *testing.T) { + t.Parallel() ctx := testutils.Context(t) - th := SetupTH(t, int64(finalityDepth), 3, 2) + firstBatchLen := 3 + secondBatchLen := 5 - header, err := th.Client.HeaderByNumber(ctx, nil) - require.NoError(t, err) + th := SetupTH(t, true, 2, 3, 2) - genesisBlockTime := time.UnixMilli(int64(header.Time)) + eventSig := EmitterABI.Events["Log1"].ID + err := th.LogPoller.RegisterFilter(logpoller.Filter{ + Name: "GetBlocks Test", + EventSigs: []common.Hash{eventSig}, + Addresses: []common.Address{th.EmitterAddress1}}, + ) + require.NoError(t, err) - // Emit some logs in blocks - for i := 0; i < emittedLogs; i++ { - _, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) - require.NoError(t, err) + // Generate block that will be finalized + for i := 0; i < firstBatchLen; i++ { + _, err1 := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) th.Client.Commit() } - // First PollAndSave, no filters are registered - currentBlock := th.PollAndSaveLogs(ctx, 1) - - err = th.LogPoller.RegisterFilter(logpoller.Filter{ - Name: "Test Emitter", - EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, - Addresses: []common.Address{th.EmitterAddress1}, - }) - require.NoError(t, err) + // Mark current head as finalized + h := th.Client.Blockchain().CurrentHeader() + th.Client.Blockchain().SetFinalized(h) - // Emit blocks to cover finality depth, because backup always backfill up to the one block before last finalized - for i := 0; i < finalityDepth+1; i++ { + // Generate next blocks, not marked as finalized + for i := 0; i < secondBatchLen; i++ { + _, err1 := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) th.Client.Commit() } - // LogPoller should backfill entire history - th.LogPoller.BackupPollAndSaveLogs(ctx, 100) - require.NoError(t, err) + currentBlock := th.PollAndSaveLogs(ctx, 1) + require.Equal(t, int(currentBlock), firstBatchLen+secondBatchLen+2) - // Make sure that all logs are backfilled - logs, err := th.LogPoller.Logs( - 0, - currentBlock, - EmitterABI.Events["Log1"].ID, + finalizedLogs, err := th.LogPoller.LogsDataWordGreaterThan( + eventSig, th.EmitterAddress1, - pg.WithParentCtx(testutils.Context(t)), + 0, + 
common.Hash{}, + logpoller.Finalized, ) require.NoError(t, err) - require.Len(t, logs, emittedLogs) + require.Len(t, finalizedLogs, firstBatchLen) - // We should get all the logs by the block_timestamp - logs, err = th.LogPoller.LogsCreatedAfter( - EmitterABI.Events["Log1"].ID, + numberOfConfirmations := 1 + logsByConfs, err := th.LogPoller.LogsDataWordGreaterThan( + eventSig, th.EmitterAddress1, - genesisBlockTime, 0, - pg.WithParentCtx(testutils.Context(t)), + common.Hash{}, + logpoller.Confirmations(numberOfConfirmations), ) require.NoError(t, err) - require.Len(t, logs, emittedLogs) + require.Len(t, logsByConfs, firstBatchLen+secondBatchLen-numberOfConfirmations) +} + +func Test_PollAndSavePersistsFinalityInBlocks(t *testing.T) { + ctx := testutils.Context(t) + numberOfBlocks := 10 + + tests := []struct { + name string + useFinalityTag bool + finalityDepth int64 + expectedFinalizedBlock int64 + }{ + { + name: "using fixed finality depth", + useFinalityTag: false, + finalityDepth: 2, + expectedFinalizedBlock: int64(numberOfBlocks - 2), + }, + { + name: "setting last finalized block number to 0 if finality is too deep", + useFinalityTag: false, + finalityDepth: 20, + expectedFinalizedBlock: 0, + }, + { + name: "using finality from chain", + useFinalityTag: true, + finalityDepth: 0, + expectedFinalizedBlock: 1, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + th := SetupTH(t, tt.useFinalityTag, tt.finalityDepth, 3, 2) + // Mark first block as finalized + h := th.Client.Blockchain().CurrentHeader() + th.Client.Blockchain().SetFinalized(h) + + // Create a couple of blocks + for i := 0; i < numberOfBlocks-1; i++ { + th.Client.Commit() + } + + th.PollAndSaveLogs(ctx, 1) + + latestBlock, err := th.ORM.SelectLatestBlock() + require.NoError(t, err) + require.Equal(t, int64(numberOfBlocks), latestBlock.BlockNumber) + require.Equal(t, tt.expectedFinalizedBlock, latestBlock.FinalizedBlockNumber) + }) + } +} + +func Test_CreatedAfterQueriesWithBackfill(t *testing.T) { + emittedLogs := 60 + ctx := testutils.Context(t) + + tests := []struct { + name string + finalityDepth int64 + finalityTag bool + }{ + { + name: "fixed finality depth without finality tag", + finalityDepth: 10, + finalityTag: false, + }, + { + name: "chain finality in use", + finalityDepth: 0, + finalityTag: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + th := SetupTH(t, tt.finalityTag, tt.finalityDepth, 3, 2) + + header, err := th.Client.HeaderByNumber(ctx, nil) + require.NoError(t, err) + + genesisBlockTime := time.UnixMilli(int64(header.Time)) + + // Emit some logs in blocks + for i := 0; i < emittedLogs; i++ { + _, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err) + th.Client.Commit() + } + + // First PollAndSave, no filters are registered + currentBlock := th.PollAndSaveLogs(ctx, 1) + + err = th.LogPoller.RegisterFilter(logpoller.Filter{ + Name: "Test Emitter", + EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, + Addresses: []common.Address{th.EmitterAddress1}, + }) + require.NoError(t, err) + + // Emit blocks to cover finality depth, because backup always backfill up to the one block before last finalized + for i := 0; i < int(tt.finalityDepth)+1; i++ { + bh := th.Client.Commit() + markBlockAsFinalizedByHash(t, th, bh) + } + + // LogPoller should backfill entire history + th.LogPoller.BackupPollAndSaveLogs(ctx, 100) + require.NoError(t, err) + + // Make sure that all logs are backfilled + logs, err := 
th.LogPoller.Logs( + 0, + currentBlock, + EmitterABI.Events["Log1"].ID, + th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t)), + ) + require.NoError(t, err) + require.Len(t, logs, emittedLogs) + + // We should get all the logs by the block_timestamp + logs, err = th.LogPoller.LogsCreatedAfter( + EmitterABI.Events["Log1"].ID, + th.EmitterAddress1, + genesisBlockTime, + 0, + pg.WithParentCtx(testutils.Context(t)), + ) + require.NoError(t, err) + require.Len(t, logs, emittedLogs) + }) + } +} + +func markBlockAsFinalized(t *testing.T, th TestHarness, blockNumber int64) { + b, err := th.Client.BlockByNumber(testutils.Context(t), big.NewInt(blockNumber)) + require.NoError(t, err) + th.Client.Blockchain().SetFinalized(b.Header()) +} + +func markBlockAsFinalizedByHash(t *testing.T, th TestHarness, blockHash common.Hash) { + b, err := th.Client.BlockByHash(testutils.Context(t), blockHash) + require.NoError(t, err) + th.Client.Blockchain().SetFinalized(b.Header()) } diff --git a/core/chains/evm/logpoller/mocks/log_poller.go b/core/chains/evm/logpoller/mocks/log_poller.go index 59526cd19ea..f4357341646 100644 --- a/core/chains/evm/logpoller/mocks/log_poller.go +++ b/core/chains/evm/logpoller/mocks/log_poller.go @@ -588,39 +588,6 @@ func (_m *LogPoller) LogsDataWordRange(eventSig common.Hash, address common.Addr return r0, r1 } -// LogsUntilBlockHashDataWordGreaterThan provides a mock function with given fields: eventSig, address, wordIndex, wordValueMin, untilBlockHash, qopts -func (_m *LogPoller) LogsUntilBlockHashDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, untilBlockHash common.Hash, qopts ...pg.QOpt) ([]logpoller.Log, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, eventSig, address, wordIndex, wordValueMin, untilBlockHash) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 []logpoller.Log - var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, common.Hash, ...pg.QOpt) ([]logpoller.Log, error)); ok { - return rf(eventSig, address, wordIndex, wordValueMin, untilBlockHash, qopts...) - } - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, common.Hash, ...pg.QOpt) []logpoller.Log); ok { - r0 = rf(eventSig, address, wordIndex, wordValueMin, untilBlockHash, qopts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]logpoller.Log) - } - } - - if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, common.Hash, common.Hash, ...pg.QOpt) error); ok { - r1 = rf(eventSig, address, wordIndex, wordValueMin, untilBlockHash, qopts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // LogsWithSigs provides a mock function with given fields: start, end, eventSigs, address, qopts func (_m *LogPoller) LogsWithSigs(start int64, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) { _va := make([]interface{}, len(qopts)) diff --git a/core/chains/evm/logpoller/models.go b/core/chains/evm/logpoller/models.go index 2848239e67f..9c55786777c 100644 --- a/core/chains/evm/logpoller/models.go +++ b/core/chains/evm/logpoller/models.go @@ -16,9 +16,10 @@ type LogPollerBlock struct { EvmChainId *utils.Big BlockHash common.Hash // Note geth uses int64 internally https://github.com/ethereum/go-ethereum/blob/f66f1a16b3c480d3a43ac7e8a09ab3e362e96ae4/eth/filters/api.go#L340 - BlockNumber int64 - BlockTimestamp time.Time - CreatedAt time.Time + BlockNumber int64 + BlockTimestamp time.Time + FinalizedBlockNumber int64 + CreatedAt time.Time } // Log represents an EVM log. diff --git a/core/chains/evm/logpoller/observability.go b/core/chains/evm/logpoller/observability.go index 5935c25637a..7f54fa9f09a 100644 --- a/core/chains/evm/logpoller/observability.go +++ b/core/chains/evm/logpoller/observability.go @@ -78,9 +78,9 @@ func (o *ObservedORM) InsertLogs(logs []Log, qopts ...pg.QOpt) error { }) } -func (o *ObservedORM) InsertBlock(h common.Hash, n int64, t time.Time, qopts ...pg.QOpt) error { +func (o *ObservedORM) InsertBlock(hash common.Hash, blockNumber int64, blockTimestamp time.Time, lastFinalizedBlock int64, qopts ...pg.QOpt) error { return withObservedExec(o, "InsertBlock", func() error { - return o.ORM.InsertBlock(h, n, t, qopts...) + return o.ORM.InsertBlock(hash, blockNumber, blockTimestamp, lastFinalizedBlock, qopts...) }) } @@ -222,12 +222,6 @@ func (o *ObservedORM) SelectLogsDataWordGreaterThan(address common.Address, even }) } -func (o *ObservedORM) SelectLogsUntilBlockHashDataWordGreaterThan(address common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, untilBlockHash common.Hash, qopts ...pg.QOpt) ([]Log, error) { - return withObservedQueryAndResults(o, "SelectLogsUntilBlockHashDataWordGreaterThan", func() ([]Log, error) { - return o.ORM.SelectLogsUntilBlockHashDataWordGreaterThan(address, eventSig, wordIndex, wordValueMin, untilBlockHash, qopts...) - }) -} - func (o *ObservedORM) SelectIndexedLogsTopicGreaterThan(address common.Address, eventSig common.Hash, topicIndex int, topicValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { return withObservedQueryAndResults(o, "SelectIndexedLogsTopicGreaterThan", func() ([]Log, error) { return o.ORM.SelectIndexedLogsTopicGreaterThan(address, eventSig, topicIndex, topicValueMin, confs, qopts...) 
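With SelectLogsUntilBlockHashDataWordGreaterThan removed from the ORM, the observed wrapper and the LogPoller mock, callers that previously pinned a query to a block hash are expected to switch to the Confirmations-based variants, passing the new Finalized sentinel when they only want logs at or below the last finalized block. The sketch below is not part of this patch; it only mirrors the call pattern exercised by Test_PollAndQueryFinalizedBlocks above, and the function and variable names (queryFinalizedLogs, lp, eventSig, emitterAddr) as well as the package name are illustrative, assuming the repo's usual import paths.

package example

import (
	"github.com/ethereum/go-ethereum/common"

	"github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
)

// queryFinalizedLogs fetches logs whose first data word is >= wordValueMin,
// once restricted to finalized blocks and once with a single confirmation.
func queryFinalizedLogs(lp logpoller.LogPoller, eventSig common.Hash, emitterAddr common.Address, wordValueMin common.Hash) ([]logpoller.Log, []logpoller.Log, error) {
	// Only logs at or below the latest finalized block recorded for this chain.
	finalized, err := lp.LogsDataWordGreaterThan(eventSig, emitterAddr, 0, wordValueMin, logpoller.Finalized)
	if err != nil {
		return nil, nil, err
	}
	// Numeric confirmations still work; Confirmations(1) excludes only the newest block.
	confirmed, err := lp.LogsDataWordGreaterThan(eventSig, emitterAddr, 0, wordValueMin, logpoller.Confirmations(1))
	if err != nil {
		return nil, nil, err
	}
	return finalized, confirmed, nil
}

The difference between the two calls is resolved entirely by nestedBlockNumberQuery in the orm.go hunk below: Finalized reads finalized_block_number from the most recent evm.log_poller_blocks row, while a numeric value subtracts the confirmation count from the latest block number (clamped to 0 via greatest()).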
diff --git a/core/chains/evm/logpoller/observability_test.go b/core/chains/evm/logpoller/observability_test.go index 1f0c0abfeaf..0d3eadf47d7 100644 --- a/core/chains/evm/logpoller/observability_test.go +++ b/core/chains/evm/logpoller/observability_test.go @@ -22,84 +22,81 @@ import ( func TestMultipleMetricsArePublished(t *testing.T) { ctx := testutils.Context(t) - lp := createObservedPollLogger(t, 100) - require.Equal(t, 0, testutil.CollectAndCount(lp.queryDuration)) - - _, _ = lp.SelectIndexedLogs(common.Address{}, common.Hash{}, 1, []common.Hash{}, 1, pg.WithParentCtx(ctx)) - _, _ = lp.SelectIndexedLogsByBlockRange(0, 1, common.Address{}, common.Hash{}, 1, []common.Hash{}, pg.WithParentCtx(ctx)) - _, _ = lp.SelectIndexedLogsTopicGreaterThan(common.Address{}, common.Hash{}, 1, common.Hash{}, 1, pg.WithParentCtx(ctx)) - _, _ = lp.SelectIndexedLogsTopicRange(common.Address{}, common.Hash{}, 1, common.Hash{}, common.Hash{}, 1, pg.WithParentCtx(ctx)) - _, _ = lp.SelectIndexedLogsWithSigsExcluding(common.Hash{}, common.Hash{}, 1, common.Address{}, 0, 1, 1, pg.WithParentCtx(ctx)) - _, _ = lp.SelectLogsDataWordRange(common.Address{}, common.Hash{}, 0, common.Hash{}, common.Hash{}, 1, pg.WithParentCtx(ctx)) - _, _ = lp.SelectLogsDataWordGreaterThan(common.Address{}, common.Hash{}, 0, common.Hash{}, 1, pg.WithParentCtx(ctx)) - _, _ = lp.SelectLogsCreatedAfter(common.Address{}, common.Hash{}, time.Now(), 0, pg.WithParentCtx(ctx)) - _, _ = lp.SelectLatestLogByEventSigWithConfs(common.Hash{}, common.Address{}, 0, pg.WithParentCtx(ctx)) - _, _ = lp.SelectLatestLogEventSigsAddrsWithConfs(0, []common.Address{{}}, []common.Hash{{}}, 1, pg.WithParentCtx(ctx)) - _, _ = lp.SelectIndexedLogsCreatedAfter(common.Address{}, common.Hash{}, 1, []common.Hash{}, time.Now(), 0, pg.WithParentCtx(ctx)) - _, _ = lp.SelectLogsUntilBlockHashDataWordGreaterThan(common.Address{}, common.Hash{}, 0, common.Hash{}, common.Hash{}, pg.WithParentCtx(ctx)) - _ = lp.InsertLogs([]Log{}, pg.WithParentCtx(ctx)) - _ = lp.InsertBlock(common.Hash{}, 0, time.Now(), pg.WithParentCtx(ctx)) - - require.Equal(t, 14, testutil.CollectAndCount(lp.queryDuration)) - require.Equal(t, 10, testutil.CollectAndCount(lp.datasetSize)) - resetMetrics(*lp) + orm := createObservedORM(t, 100) + t.Cleanup(func() { resetMetrics(*orm) }) + require.Equal(t, 0, testutil.CollectAndCount(orm.queryDuration)) + + _, _ = orm.SelectIndexedLogs(common.Address{}, common.Hash{}, 1, []common.Hash{}, 1, pg.WithParentCtx(ctx)) + _, _ = orm.SelectIndexedLogsByBlockRange(0, 1, common.Address{}, common.Hash{}, 1, []common.Hash{}, pg.WithParentCtx(ctx)) + _, _ = orm.SelectIndexedLogsTopicGreaterThan(common.Address{}, common.Hash{}, 1, common.Hash{}, 1, pg.WithParentCtx(ctx)) + _, _ = orm.SelectIndexedLogsTopicRange(common.Address{}, common.Hash{}, 1, common.Hash{}, common.Hash{}, 1, pg.WithParentCtx(ctx)) + _, _ = orm.SelectIndexedLogsWithSigsExcluding(common.Hash{}, common.Hash{}, 1, common.Address{}, 0, 1, 1, pg.WithParentCtx(ctx)) + _, _ = orm.SelectLogsDataWordRange(common.Address{}, common.Hash{}, 0, common.Hash{}, common.Hash{}, 1, pg.WithParentCtx(ctx)) + _, _ = orm.SelectLogsDataWordGreaterThan(common.Address{}, common.Hash{}, 0, common.Hash{}, 1, pg.WithParentCtx(ctx)) + _, _ = orm.SelectLogsCreatedAfter(common.Address{}, common.Hash{}, time.Now(), 0, pg.WithParentCtx(ctx)) + _, _ = orm.SelectLatestLogByEventSigWithConfs(common.Hash{}, common.Address{}, 0, pg.WithParentCtx(ctx)) + _, _ = orm.SelectLatestLogEventSigsAddrsWithConfs(0, []common.Address{{}}, 
[]common.Hash{{}}, 1, pg.WithParentCtx(ctx)) + _, _ = orm.SelectIndexedLogsCreatedAfter(common.Address{}, common.Hash{}, 1, []common.Hash{}, time.Now(), 0, pg.WithParentCtx(ctx)) + _ = orm.InsertLogs([]Log{}, pg.WithParentCtx(ctx)) + _ = orm.InsertBlock(common.Hash{}, 1, time.Now(), 0, pg.WithParentCtx(ctx)) + + require.Equal(t, 13, testutil.CollectAndCount(orm.queryDuration)) + require.Equal(t, 10, testutil.CollectAndCount(orm.datasetSize)) } func TestShouldPublishDurationInCaseOfError(t *testing.T) { ctx := testutils.Context(t) - lp := createObservedPollLogger(t, 200) - require.Equal(t, 0, testutil.CollectAndCount(lp.queryDuration)) + orm := createObservedORM(t, 200) + t.Cleanup(func() { resetMetrics(*orm) }) + require.Equal(t, 0, testutil.CollectAndCount(orm.queryDuration)) - _, err := lp.SelectLatestLogByEventSigWithConfs(common.Hash{}, common.Address{}, 0, pg.WithParentCtx(ctx)) + _, err := orm.SelectLatestLogByEventSigWithConfs(common.Hash{}, common.Address{}, 0, pg.WithParentCtx(ctx)) require.Error(t, err) - require.Equal(t, 1, testutil.CollectAndCount(lp.queryDuration)) - require.Equal(t, 1, counterFromHistogramByLabels(t, lp.queryDuration, "200", "SelectLatestLogByEventSigWithConfs")) - - resetMetrics(*lp) + require.Equal(t, 1, testutil.CollectAndCount(orm.queryDuration)) + require.Equal(t, 1, counterFromHistogramByLabels(t, orm.queryDuration, "200", "SelectLatestLogByEventSigWithConfs")) } func TestMetricsAreProperlyPopulatedWithLabels(t *testing.T) { - lp := createObservedPollLogger(t, 420) + orm := createObservedORM(t, 420) + t.Cleanup(func() { resetMetrics(*orm) }) expectedCount := 9 expectedSize := 2 for i := 0; i < expectedCount; i++ { - _, err := withObservedQueryAndResults(lp, "query", func() ([]string, error) { return []string{"value1", "value2"}, nil }) + _, err := withObservedQueryAndResults(orm, "query", func() ([]string, error) { return []string{"value1", "value2"}, nil }) require.NoError(t, err) } - require.Equal(t, expectedCount, counterFromHistogramByLabels(t, lp.queryDuration, "420", "query")) - require.Equal(t, expectedSize, counterFromGaugeByLabels(lp.datasetSize, "420", "query")) - - require.Equal(t, 0, counterFromHistogramByLabels(t, lp.queryDuration, "420", "other_query")) - require.Equal(t, 0, counterFromHistogramByLabels(t, lp.queryDuration, "5", "query")) + require.Equal(t, expectedCount, counterFromHistogramByLabels(t, orm.queryDuration, "420", "query")) + require.Equal(t, expectedSize, counterFromGaugeByLabels(orm.datasetSize, "420", "query")) - require.Equal(t, 0, counterFromGaugeByLabels(lp.datasetSize, "420", "other_query")) - require.Equal(t, 0, counterFromGaugeByLabels(lp.datasetSize, "5", "query")) + require.Equal(t, 0, counterFromHistogramByLabels(t, orm.queryDuration, "420", "other_query")) + require.Equal(t, 0, counterFromHistogramByLabels(t, orm.queryDuration, "5", "query")) - resetMetrics(*lp) + require.Equal(t, 0, counterFromGaugeByLabels(orm.datasetSize, "420", "other_query")) + require.Equal(t, 0, counterFromGaugeByLabels(orm.datasetSize, "5", "query")) } func TestNotPublishingDatasetSizeInCaseOfError(t *testing.T) { - lp := createObservedPollLogger(t, 420) + orm := createObservedORM(t, 420) - _, err := withObservedQueryAndResults(lp, "errorQuery", func() ([]string, error) { return nil, fmt.Errorf("error") }) + _, err := withObservedQueryAndResults(orm, "errorQuery", func() ([]string, error) { return nil, fmt.Errorf("error") }) require.Error(t, err) - require.Equal(t, 1, counterFromHistogramByLabels(t, lp.queryDuration, "420", 
"errorQuery")) - require.Equal(t, 0, counterFromGaugeByLabels(lp.datasetSize, "420", "errorQuery")) + require.Equal(t, 1, counterFromHistogramByLabels(t, orm.queryDuration, "420", "errorQuery")) + require.Equal(t, 0, counterFromGaugeByLabels(orm.datasetSize, "420", "errorQuery")) } func TestMetricsAreProperlyPopulatedForWrites(t *testing.T) { - lp := createObservedPollLogger(t, 420) - require.NoError(t, withObservedExec(lp, "execQuery", func() error { return nil })) - require.Error(t, withObservedExec(lp, "execQuery", func() error { return fmt.Errorf("error") })) + orm := createObservedORM(t, 420) + require.NoError(t, withObservedExec(orm, "execQuery", func() error { return nil })) + require.Error(t, withObservedExec(orm, "execQuery", func() error { return fmt.Errorf("error") })) - require.Equal(t, 2, counterFromHistogramByLabels(t, lp.queryDuration, "420", "execQuery")) + require.Equal(t, 2, counterFromHistogramByLabels(t, orm.queryDuration, "420", "execQuery")) } -func createObservedPollLogger(t *testing.T, chainId int64) *ObservedORM { +func createObservedORM(t *testing.T, chainId int64) *ObservedORM { lggr, _ := logger.TestLoggerObserved(t, zapcore.ErrorLevel) db := pgtest.NewSqlxDB(t) return NewObservedORM( diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index f8d0e618762..06f4acbb4f1 100644 --- a/core/chains/evm/logpoller/orm.go +++ b/core/chains/evm/logpoller/orm.go @@ -22,7 +22,7 @@ import ( type ORM interface { Q() pg.Q InsertLogs(logs []Log, qopts ...pg.QOpt) error - InsertBlock(blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, qopts ...pg.QOpt) error + InsertBlock(blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, lastFinalizedBlockNumber int64, qopts ...pg.QOpt) error InsertFilter(filter Filter, qopts ...pg.QOpt) error LoadFilters(qopts ...pg.QOpt) (map[string]Filter, error) @@ -53,7 +53,6 @@ type ORM interface { SelectIndexedLogsByTxHash(eventSig common.Hash, txHash common.Hash, qopts ...pg.QOpt) ([]Log, error) SelectLogsDataWordRange(address common.Address, eventSig common.Hash, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) SelectLogsDataWordGreaterThan(address common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) - SelectLogsUntilBlockHashDataWordGreaterThan(address common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, untilBlockHash common.Hash, qopts ...pg.QOpt) ([]Log, error) } type DbORM struct { @@ -76,19 +75,20 @@ func (o *DbORM) Q() pg.Q { } // InsertBlock is idempotent to support replays. -func (o *DbORM) InsertBlock(blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, qopts ...pg.QOpt) error { +func (o *DbORM) InsertBlock(blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, finalizedBlock int64, qopts ...pg.QOpt) error { args, err := newQueryArgs(o.chainID). withCustomHashArg("block_hash", blockHash). withCustomArg("block_number", blockNumber). withCustomArg("block_timestamp", blockTimestamp). + withCustomArg("finalized_block_number", finalizedBlock). 
toArgs() if err != nil { return err } return o.q.WithOpts(qopts...).ExecQNamed(` INSERT INTO evm.log_poller_blocks - (evm_chain_id, block_hash, block_number, block_timestamp, created_at) - VALUES (:evm_chain_id, :block_hash, :block_number, :block_timestamp, NOW()) + (evm_chain_id, block_hash, block_number, block_timestamp, finalized_block_number, created_at) + VALUES (:evm_chain_id, :block_hash, :block_number, :block_timestamp, :finalized_block_number, NOW()) ON CONFLICT DO NOTHING`, args) } @@ -183,7 +183,7 @@ func (o *DbORM) SelectLatestLogByEventSigWithConfs(eventSig common.Hash, address AND event_sig = :event_sig AND address = :address AND block_number <= %s - ORDER BY (block_number, log_index) DESC LIMIT 1`, nestedBlockNumberQuery()) + ORDER BY (block_number, log_index) DESC LIMIT 1`, nestedBlockNumberQuery(confs)) var l Log if err := o.q.WithOpts(qopts...).GetNamed(query, &l, args); err != nil { return nil, err @@ -330,7 +330,7 @@ func (o *DbORM) SelectLogsCreatedAfter(address common.Address, eventSig common.H AND event_sig = :event_sig AND block_timestamp > :block_timestamp_after AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery()) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log if err = o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { @@ -408,7 +408,7 @@ func (o *DbORM) SelectLatestLogEventSigsAddrsWithConfs(fromBlock int64, addresse AND block_number <= %s GROUP BY event_sig, address ) - ORDER BY block_number ASC`, nestedBlockNumberQuery()) + ORDER BY block_number ASC`, nestedBlockNumberQuery(confs)) var logs []Log if err := o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { return nil, errors.Wrap(err, "failed to execute query") @@ -433,7 +433,7 @@ func (o *DbORM) SelectLatestBlockByEventSigsAddrsWithConfs(fromBlock int64, even AND event_sig = ANY(:event_sig_array) AND address = ANY(:address_array) AND block_number > :start_block - AND block_number <= %s`, nestedBlockNumberQuery()) + AND block_number <= %s`, nestedBlockNumberQuery(confs)) var blockNumber int64 if err := o.q.WithOpts(qopts...).GetNamed(query, &blockNumber, args); err != nil { return 0, err @@ -458,7 +458,7 @@ func (o *DbORM) SelectLogsDataWordRange(address common.Address, eventSig common. 
AND substring(data from 32*:word_index+1 for 32) >= :word_value_min AND substring(data from 32*:word_index+1 for 32) <= :word_value_max AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery()) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log if err := o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { return nil, err @@ -482,7 +482,7 @@ func (o *DbORM) SelectLogsDataWordGreaterThan(address common.Address, eventSig c AND event_sig = :event_sig AND substring(data from 32*:word_index+1 for 32) >= :word_value_min AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery()) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log if err = o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { return nil, err @@ -506,7 +506,7 @@ func (o *DbORM) SelectIndexedLogsTopicGreaterThan(address common.Address, eventS AND event_sig = :event_sig AND topics[:topic_index] >= :topic_value_min AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery()) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log if err = o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { return nil, err @@ -532,7 +532,7 @@ func (o *DbORM) SelectIndexedLogsTopicRange(address common.Address, eventSig com AND topics[:topic_index] >= :topic_value_min AND topics[:topic_index] <= :topic_value_max AND block_number <= %s - ORDER BY (evm.logs.block_number, evm.logs.log_index)`, nestedBlockNumberQuery()) + ORDER BY (evm.logs.block_number, evm.logs.log_index)`, nestedBlockNumberQuery(confs)) var logs []Log if err := o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { return nil, err @@ -556,7 +556,7 @@ func (o *DbORM) SelectIndexedLogs(address common.Address, eventSig common.Hash, AND event_sig = :event_sig AND topics[:topic_index] = ANY(:topic_values) AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery()) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log if err := o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { return nil, err @@ -610,7 +610,7 @@ func (o *DbORM) SelectIndexedLogsCreatedAfter(address common.Address, eventSig c AND topics[:topic_index] = ANY(:topic_values) AND block_timestamp > :block_timestamp_after AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery()) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log if err = o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { @@ -655,7 +655,7 @@ func (o *DbORM) SelectIndexedLogsWithSigsExcluding(sigA, sigB common.Hash, topic return nil, err } - nestedQuery := nestedBlockNumberQuery() + nestedQuery := nestedBlockNumberQuery(confs) query := fmt.Sprintf(` SELECT * FROM evm.logs WHERE evm_chain_id = :evm_chain_id @@ -681,36 +681,20 @@ func (o *DbORM) SelectIndexedLogsWithSigsExcluding(sigA, sigB common.Hash, topic return logs, nil } -func (o *DbORM) SelectLogsUntilBlockHashDataWordGreaterThan(address common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, untilBlockHash common.Hash, qopts ...pg.QOpt) ([]Log, error) { - var logs []Log - q := o.q.WithOpts(qopts...) - err := q.Transaction(func(tx pg.Queryer) error { - // We want to mimic the behaviour of the ETH RPC which errors if blockhash not found. 
- var block LogPollerBlock - if err := tx.Get(&block, - `SELECT * FROM evm.log_poller_blocks - WHERE evm_chain_id = $1 AND block_hash = $2`, utils.NewBig(o.chainID), untilBlockHash); err != nil { - return err - } - return q.Select(&logs, - `SELECT * FROM evm.logs - WHERE evm_chain_id = $1 - AND address = $2 AND event_sig = $3 - AND substring(data from 32*$4+1 for 32) >= $5 - AND block_number <= $6 - ORDER BY (block_number, log_index)`, utils.NewBig(o.chainID), address, eventSig.Bytes(), wordIndex, wordValueMin.Bytes(), block.BlockNumber) - }) - if err != nil { - return nil, err +func nestedBlockNumberQuery(confs Confirmations) string { + if confs == Finalized { + return ` + (SELECT finalized_block_number + FROM evm.log_poller_blocks + WHERE evm_chain_id = :evm_chain_id + ORDER BY block_number DESC LIMIT 1) ` } - return logs, nil -} - -func nestedBlockNumberQuery() string { + // Intentionally wrap with greatest() function and don't return negative block numbers when :confs > :block_number + // It doesn't impact logic of the outer query, because block numbers are never less or equal to 0 (guarded by log_poller_blocks_block_number_check) return ` - (SELECT COALESCE(block_number, 0) + (SELECT greatest(block_number - :confs, 0) FROM evm.log_poller_blocks WHERE evm_chain_id = :evm_chain_id - ORDER BY block_number DESC LIMIT 1) - :confs` + ORDER BY block_number DESC LIMIT 1) ` } diff --git a/core/chains/evm/logpoller/orm_test.go b/core/chains/evm/logpoller/orm_test.go index 1f43586548b..380d8528d27 100644 --- a/core/chains/evm/logpoller/orm_test.go +++ b/core/chains/evm/logpoller/orm_test.go @@ -47,7 +47,7 @@ func GenLogWithTimestamp(chainID *big.Int, logIndex int64, blockNum int64, block func TestLogPoller_Batching(t *testing.T) { t.Parallel() - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) var logs []logpoller.Log // Inserts are limited to 65535 parameters. A log being 10 parameters this results in // a maximum of 6553 log inserts per tx. As inserting more than 6553 would result in @@ -63,7 +63,7 @@ func TestLogPoller_Batching(t *testing.T) { } func TestORM_GetBlocks_From_Range(t *testing.T) { - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) o1 := th.ORM // Insert many blocks and read them back together blocks := []block{ @@ -94,7 +94,7 @@ func TestORM_GetBlocks_From_Range(t *testing.T) { }, } for _, b := range blocks { - require.NoError(t, o1.InsertBlock(b.hash, b.number, time.Unix(b.timestamp, 0).UTC())) + require.NoError(t, o1.InsertBlock(b.hash, b.number, time.Unix(b.timestamp, 0).UTC(), 0)) } var blockNumbers []int64 @@ -118,7 +118,7 @@ func TestORM_GetBlocks_From_Range(t *testing.T) { } func TestORM_GetBlocks_From_Range_Recent_Blocks(t *testing.T) { - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) o1 := th.ORM // Insert many blocks and read them back together var recentBlocks []block @@ -126,7 +126,7 @@ func TestORM_GetBlocks_From_Range_Recent_Blocks(t *testing.T) { recentBlocks = append(recentBlocks, block{number: int64(i), hash: common.HexToHash(fmt.Sprintf("0x%d", i))}) } for _, b := range recentBlocks { - require.NoError(t, o1.InsertBlock(b.hash, b.number, time.Now())) + require.NoError(t, o1.InsertBlock(b.hash, b.number, time.Now(), 0)) } var blockNumbers []int64 @@ -150,11 +150,11 @@ func TestORM_GetBlocks_From_Range_Recent_Blocks(t *testing.T) { } func TestORM(t *testing.T) { - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) o1 := th.ORM o2 := th.ORM2 // Insert and read back a block. 
- require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 10, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 10, time.Now(), 0)) b, err := o1.SelectBlockByHash(common.HexToHash("0x1234")) require.NoError(t, err) assert.Equal(t, b.BlockNumber, int64(10)) @@ -162,8 +162,8 @@ func TestORM(t *testing.T) { assert.Equal(t, b.EvmChainId.String(), th.ChainID.String()) // Insert blocks from a different chain - require.NoError(t, o2.InsertBlock(common.HexToHash("0x1234"), 11, time.Now())) - require.NoError(t, o2.InsertBlock(common.HexToHash("0x1235"), 12, time.Now())) + require.NoError(t, o2.InsertBlock(common.HexToHash("0x1234"), 11, time.Now(), 0)) + require.NoError(t, o2.InsertBlock(common.HexToHash("0x1235"), 12, time.Now(), 0)) b2, err := o2.SelectBlockByHash(common.HexToHash("0x1234")) require.NoError(t, err) assert.Equal(t, b2.BlockNumber, int64(11)) @@ -310,7 +310,7 @@ func TestORM(t *testing.T) { require.Error(t, err) assert.True(t, errors.Is(err, sql.ErrNoRows)) // With block 10, only 0 confs should work - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 10, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 10, time.Now(), 0)) log, err := o1.SelectLatestLogByEventSigWithConfs(topic, common.HexToAddress("0x1234"), 0) require.NoError(t, err) assert.Equal(t, int64(10), log.BlockNumber) @@ -319,8 +319,8 @@ func TestORM(t *testing.T) { assert.True(t, errors.Is(err, sql.ErrNoRows)) // With block 12, anything <=2 should work require.NoError(t, o1.DeleteBlocksAfter(10)) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 11, time.Now())) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1235"), 12, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 11, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1235"), 12, time.Now(), 0)) _, err = o1.SelectLatestLogByEventSigWithConfs(topic, common.HexToAddress("0x1234"), 0) require.NoError(t, err) _, err = o1.SelectLatestLogByEventSigWithConfs(topic, common.HexToAddress("0x1234"), 1) @@ -332,9 +332,9 @@ func TestORM(t *testing.T) { assert.True(t, errors.Is(err, sql.ErrNoRows)) // Required for confirmations to work - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 13, time.Now())) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1235"), 14, time.Now())) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1236"), 15, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 13, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1235"), 14, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1236"), 15, time.Now(), 0)) // Latest log for topic for addr "0x1234" is @ block 11 lgs, err := o1.SelectLatestLogEventSigsAddrsWithConfs(0 /* startBlock */, []common.Address{common.HexToAddress("0x1234")}, []common.Hash{topic}, 0) @@ -368,8 +368,8 @@ func TestORM(t *testing.T) { require.NoError(t, err) require.Equal(t, 2, len(lgs)) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1237"), 16, time.Now())) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1238"), 17, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1237"), 16, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1238"), 17, time.Now(), 0)) filter0 := logpoller.Filter{ Name: "permanent retention filter", @@ -447,11 +447,11 @@ func insertLogsTopicValueRange(t *testing.T, chainID *big.Int, o *logpoller.DbOR } func TestORM_IndexedLogs(t 
*testing.T) { - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) o1 := th.ORM eventSig := common.HexToHash("0x1599") addr := common.HexToAddress("0x1234") - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1"), 1, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1"), 1, time.Now(), 0)) insertLogsTopicValueRange(t, th.ChainID, o1, addr, 1, eventSig, 1, 3) insertLogsTopicValueRange(t, th.ChainID, o1, addr, 2, eventSig, 4, 4) // unconfirmed @@ -497,24 +497,24 @@ func TestORM_IndexedLogs(t *testing.T) { assert.Equal(t, 3, len(lgs)) // Check confirmations work as expected. - require.NoError(t, o1.InsertBlock(common.HexToHash("0x2"), 2, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x2"), 2, time.Now(), 0)) lgs, err = o1.SelectIndexedLogsTopicRange(addr, eventSig, 1, logpoller.EvmWord(4), logpoller.EvmWord(4), 1) require.NoError(t, err) assert.Equal(t, 0, len(lgs)) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x3"), 3, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x3"), 3, time.Now(), 0)) lgs, err = o1.SelectIndexedLogsTopicRange(addr, eventSig, 1, logpoller.EvmWord(4), logpoller.EvmWord(4), 1) require.NoError(t, err) assert.Equal(t, 1, len(lgs)) } func TestORM_SelectIndexedLogsByTxHash(t *testing.T) { - th := SetupTH(t, 0, 3, 2) + th := SetupTH(t, false, 0, 3, 2) o1 := th.ORM eventSig := common.HexToHash("0x1599") txHash := common.HexToHash("0x1888") addr := common.HexToAddress("0x1234") - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1"), 1, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1"), 1, time.Now(), 0)) logs := []logpoller.Log{ { EvmChainId: utils.NewBig(th.ChainID), @@ -574,11 +574,11 @@ func TestORM_SelectIndexedLogsByTxHash(t *testing.T) { } func TestORM_DataWords(t *testing.T) { - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) o1 := th.ORM eventSig := common.HexToHash("0x1599") addr := common.HexToAddress("0x1234") - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1"), 1, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1"), 1, time.Now(), 0)) require.NoError(t, o1.InsertLogs([]logpoller.Log{ { EvmChainId: utils.NewBig(th.ChainID), @@ -624,7 +624,7 @@ func TestORM_DataWords(t *testing.T) { require.NoError(t, err) assert.Equal(t, 0, len(lgs)) // Confirm it, then can query. 
- require.NoError(t, o1.InsertBlock(common.HexToHash("0x2"), 2, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x2"), 2, time.Now(), 0)) lgs, err = o1.SelectLogsDataWordRange(addr, eventSig, 1, logpoller.EvmWord(3), logpoller.EvmWord(3), 0) require.NoError(t, err) assert.Equal(t, 1, len(lgs)) @@ -634,25 +634,10 @@ func TestORM_DataWords(t *testing.T) { lgs, err = o1.SelectLogsDataWordGreaterThan(addr, eventSig, 0, logpoller.EvmWord(1), 0) require.NoError(t, err) assert.Equal(t, 2, len(lgs)) - - // Unknown hash should an error - lgs, err = o1.SelectLogsUntilBlockHashDataWordGreaterThan(addr, eventSig, 0, logpoller.EvmWord(1), common.HexToHash("0x3")) - require.Error(t, err) - assert.Equal(t, 0, len(lgs)) - - // 1 block should include first log - lgs, err = o1.SelectLogsUntilBlockHashDataWordGreaterThan(addr, eventSig, 0, logpoller.EvmWord(1), common.HexToHash("0x1")) - require.NoError(t, err) - assert.Equal(t, 1, len(lgs)) - - // 2 block should include both - lgs, err = o1.SelectLogsUntilBlockHashDataWordGreaterThan(addr, eventSig, 0, logpoller.EvmWord(1), common.HexToHash("0x2")) - require.NoError(t, err) - assert.Equal(t, 2, len(lgs)) } func TestORM_SelectLogsWithSigsByBlockRangeFilter(t *testing.T) { - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) o1 := th.ORM // Insert logs on different topics, should be able to read them @@ -746,10 +731,10 @@ func TestORM_SelectLogsWithSigsByBlockRangeFilter(t *testing.T) { } func TestORM_DeleteBlocksBefore(t *testing.T) { - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) o1 := th.ORM - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 1, time.Now())) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1235"), 2, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 1, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1235"), 2, time.Now(), 0)) require.NoError(t, o1.DeleteBlocksBefore(1)) // 1 should be gone. 
_, err := o1.SelectBlockByNumber(1) @@ -758,8 +743,8 @@ func TestORM_DeleteBlocksBefore(t *testing.T) { require.NoError(t, err) assert.Equal(t, int64(2), b.BlockNumber) // Clear multiple - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1236"), 3, time.Now())) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1237"), 4, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1236"), 3, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1237"), 4, time.Now(), 0)) require.NoError(t, o1.DeleteBlocksBefore(3)) _, err = o1.SelectBlockByNumber(2) require.Equal(t, err, sql.ErrNoRows) @@ -769,7 +754,7 @@ func TestORM_DeleteBlocksBefore(t *testing.T) { func TestLogPoller_Logs(t *testing.T) { t.Parallel() - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) event1 := EmitterABI.Events["Log1"].ID event2 := EmitterABI.Events["Log2"].ID address1 := common.HexToAddress("0x2ab9a2Dc53736b361b72d900CdF9F78F9406fbbb") @@ -817,7 +802,7 @@ func TestLogPoller_Logs(t *testing.T) { } func BenchmarkLogs(b *testing.B) { - th := SetupTH(b, 2, 3, 2) + th := SetupTH(b, false, 2, 3, 2) o := th.ORM var lgs []logpoller.Log addr := common.HexToAddress("0x1234") @@ -843,7 +828,7 @@ func BenchmarkLogs(b *testing.B) { } func TestSelectLogsWithSigsExcluding(t *testing.T) { - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) orm := th.ORM addressA := common.HexToAddress("0x11111") addressB := common.HexToAddress("0x22222") @@ -886,7 +871,7 @@ func TestSelectLogsWithSigsExcluding(t *testing.T) { Data: []byte("requestID-B1"), }, })) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x1"), 1, time.Now())) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x1"), 1, time.Now(), 0)) //Get any requestSigA from addressA that do not have a equivalent responseSigA logs, err := orm.SelectIndexedLogsWithSigsExcluding(requestSigA, responseSigA, 1, addressA, 0, 3, 0) @@ -915,7 +900,7 @@ func TestSelectLogsWithSigsExcluding(t *testing.T) { Data: []byte("responseID-A1"), }, })) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x2"), 2, time.Now())) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x2"), 2, time.Now(), 0)) //Should return nothing as requestID-A1 has been fulfilled logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigA, responseSigA, 1, addressA, 0, 3, 0) @@ -966,7 +951,7 @@ func TestSelectLogsWithSigsExcluding(t *testing.T) { Data: []byte("requestID-C3"), }, })) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x3"), 3, time.Now())) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x3"), 3, time.Now(), 0)) //Get all unfulfilled requests from addressC, match on topic index 3 logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 3, addressC, 0, 4, 0) @@ -1026,13 +1011,13 @@ func TestSelectLogsWithSigsExcluding(t *testing.T) { require.NoError(t, err) require.Len(t, logs, 0) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x4"), 4, time.Now())) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x5"), 5, time.Now())) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x6"), 6, time.Now())) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x7"), 7, time.Now())) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x8"), 8, time.Now())) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x9"), 9, time.Now())) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x10"), 10, time.Now())) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x4"), 4, time.Now(), 0)) + 
require.NoError(t, orm.InsertBlock(common.HexToHash("0x5"), 5, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x6"), 6, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x7"), 7, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x8"), 8, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x9"), 9, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x10"), 10, time.Now(), 0)) //Fulfill requestID-C3 require.NoError(t, orm.InsertLogs([]logpoller.Log{ @@ -1062,9 +1047,9 @@ func TestSelectLogsWithSigsExcluding(t *testing.T) { require.Equal(t, logs[0].Data, []byte("requestID-C1")) //Insert 3 more blocks so that the requestID-C1 has enough confirmations - require.NoError(t, orm.InsertBlock(common.HexToHash("0x11"), 11, time.Now())) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x12"), 12, time.Now())) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x13"), 13, time.Now())) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x11"), 11, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x12"), 12, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x13"), 13, time.Now(), 0)) logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 3, addressC, 0, 10, 0) require.NoError(t, err) @@ -1089,7 +1074,7 @@ func TestSelectLogsWithSigsExcluding(t *testing.T) { } func TestSelectLatestBlockNumberEventSigsAddrsWithConfs(t *testing.T) { - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) event1 := EmitterABI.Events["Log1"].ID event2 := EmitterABI.Events["Log2"].ID address1 := utils.RandomAddress() @@ -1101,7 +1086,7 @@ func TestSelectLatestBlockNumberEventSigsAddrsWithConfs(t *testing.T) { GenLog(th.ChainID, 2, 2, utils.RandomAddress().String(), event2[:], address2), GenLog(th.ChainID, 2, 3, utils.RandomAddress().String(), event2[:], address2), })) - require.NoError(t, th.ORM.InsertBlock(utils.RandomAddress().Hash(), 3, time.Now())) + require.NoError(t, th.ORM.InsertBlock(utils.RandomAddress().Hash(), 3, time.Now(), 1)) tests := []struct { name string @@ -1135,6 +1120,14 @@ func TestSelectLatestBlockNumberEventSigsAddrsWithConfs(t *testing.T) { fromBlock: 0, expectedBlockNumber: 1, }, + { + name: "only finalized log is picked", + events: []common.Hash{event1, event2}, + addrs: []common.Address{address1, address2}, + confs: logpoller.Finalized, + fromBlock: 0, + expectedBlockNumber: 1, + }, { name: "picks max block from two events", events: []common.Hash{event1, event2}, @@ -1178,23 +1171,23 @@ func TestSelectLatestBlockNumberEventSigsAddrsWithConfs(t *testing.T) { } func TestSelectLogsCreatedAfter(t *testing.T) { - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) event := EmitterABI.Events["Log1"].ID address := utils.RandomAddress() - past := time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC) - now := time.Date(2020, 1, 1, 12, 12, 12, 0, time.UTC) - future := time.Date(2030, 1, 1, 12, 12, 12, 0, time.UTC) + block1ts := time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC) + block2ts := time.Date(2020, 1, 1, 12, 12, 12, 0, time.UTC) + block3ts := time.Date(2030, 1, 1, 12, 12, 12, 0, time.UTC) require.NoError(t, th.ORM.InsertLogs([]logpoller.Log{ - GenLogWithTimestamp(th.ChainID, 1, 1, utils.RandomAddress().String(), event[:], address, past), - GenLogWithTimestamp(th.ChainID, 1, 2, utils.RandomAddress().String(), event[:], address, now), - GenLogWithTimestamp(th.ChainID, 2, 2, utils.RandomAddress().String(), 
event[:], address, now), - GenLogWithTimestamp(th.ChainID, 1, 3, utils.RandomAddress().String(), event[:], address, future), + GenLogWithTimestamp(th.ChainID, 1, 1, utils.RandomAddress().String(), event[:], address, block1ts), + GenLogWithTimestamp(th.ChainID, 1, 2, utils.RandomAddress().String(), event[:], address, block2ts), + GenLogWithTimestamp(th.ChainID, 2, 2, utils.RandomAddress().String(), event[:], address, block2ts), + GenLogWithTimestamp(th.ChainID, 1, 3, utils.RandomAddress().String(), event[:], address, block3ts), })) - require.NoError(t, th.ORM.InsertBlock(utils.RandomAddress().Hash(), 1, past)) - require.NoError(t, th.ORM.InsertBlock(utils.RandomAddress().Hash(), 2, now)) - require.NoError(t, th.ORM.InsertBlock(utils.RandomAddress().Hash(), 3, future)) + require.NoError(t, th.ORM.InsertBlock(utils.RandomAddress().Hash(), 1, block1ts, 0)) + require.NoError(t, th.ORM.InsertBlock(utils.RandomAddress().Hash(), 2, block2ts, 1)) + require.NoError(t, th.ORM.InsertBlock(utils.RandomAddress().Hash(), 3, block3ts, 2)) type expectedLog struct { block int64 @@ -1210,7 +1203,7 @@ func TestSelectLogsCreatedAfter(t *testing.T) { { name: "picks logs after block 1", confs: 0, - after: past, + after: block1ts, expectedLogs: []expectedLog{ {block: 2, log: 1}, {block: 2, log: 2}, @@ -1220,7 +1213,7 @@ func TestSelectLogsCreatedAfter(t *testing.T) { { name: "skips blocks with not enough confirmations", confs: 1, - after: past, + after: block1ts, expectedLogs: []expectedLog{ {block: 2, log: 1}, {block: 2, log: 2}, @@ -1229,7 +1222,7 @@ func TestSelectLogsCreatedAfter(t *testing.T) { { name: "limits number of blocks by block_timestamp", confs: 0, - after: now, + after: block2ts, expectedLogs: []expectedLog{ {block: 3, log: 1}, }, @@ -1237,37 +1230,74 @@ func TestSelectLogsCreatedAfter(t *testing.T) { { name: "returns empty dataset for future timestamp", confs: 0, - after: future, + after: block3ts, expectedLogs: []expectedLog{}, }, { name: "returns empty dataset when too many confirmations are required", confs: 3, - after: past, + after: block1ts, expectedLogs: []expectedLog{}, }, + { + name: "returns only finalized log", + confs: logpoller.Finalized, + after: block1ts, + expectedLogs: []expectedLog{ + {block: 2, log: 1}, + {block: 2, log: 2}, + }, + }, } for _, tt := range tests { t.Run("SelectLogsCreatedAfter"+tt.name, func(t *testing.T) { logs, err := th.ORM.SelectLogsCreatedAfter(address, event, tt.after, tt.confs) require.NoError(t, err) - assert.Len(t, logs, len(tt.expectedLogs)) + require.Len(t, logs, len(tt.expectedLogs)) for i, log := range logs { - assert.Equal(t, tt.expectedLogs[i].block, log.BlockNumber) - assert.Equal(t, tt.expectedLogs[i].log, log.LogIndex) + require.Equal(t, tt.expectedLogs[i].block, log.BlockNumber) + require.Equal(t, tt.expectedLogs[i].log, log.LogIndex) } }) t.Run("SelectIndexedLogsCreatedAfter"+tt.name, func(t *testing.T) { logs, err := th.ORM.SelectIndexedLogsCreatedAfter(address, event, 1, []common.Hash{event}, tt.after, tt.confs) require.NoError(t, err) - assert.Len(t, logs, len(tt.expectedLogs)) + require.Len(t, logs, len(tt.expectedLogs)) for i, log := range logs { - assert.Equal(t, tt.expectedLogs[i].block, log.BlockNumber) - assert.Equal(t, tt.expectedLogs[i].log, log.LogIndex) + require.Equal(t, tt.expectedLogs[i].block, log.BlockNumber) + require.Equal(t, tt.expectedLogs[i].log, log.LogIndex) } }) } } + +func TestNestedLogPollerBlocksQuery(t *testing.T) { + th := SetupTH(t, false, 2, 3, 2) + event := EmitterABI.Events["Log1"].ID + address := 
utils.RandomAddress() + + require.NoError(t, th.ORM.InsertLogs([]logpoller.Log{ + GenLog(th.ChainID, 1, 8, utils.RandomAddress().String(), event[:], address), + })) + + // Empty logs when block are not persisted + logs, err := th.ORM.SelectIndexedLogs(address, event, 1, []common.Hash{event}, logpoller.Unconfirmed) + require.NoError(t, err) + require.Len(t, logs, 0) + + // Persist block + require.NoError(t, th.ORM.InsertBlock(utils.RandomAddress().Hash(), 10, time.Now(), 0)) + + // Check if query actually works well with provided dataset + logs, err = th.ORM.SelectIndexedLogs(address, event, 1, []common.Hash{event}, logpoller.Unconfirmed) + require.NoError(t, err) + require.Len(t, logs, 1) + + // Empty logs when number of confirmations is too deep + logs, err = th.ORM.SelectIndexedLogs(address, event, 1, []common.Hash{event}, logpoller.Confirmations(4)) + require.NoError(t, err) + require.Len(t, logs, 0) +} diff --git a/core/chains/evm/logpoller/query.go b/core/chains/evm/logpoller/query.go index 2a50dc282e4..7443a860a85 100644 --- a/core/chains/evm/logpoller/query.go +++ b/core/chains/evm/logpoller/query.go @@ -51,33 +51,27 @@ func (q *queryArgs) withEventSig(eventSig common.Hash) *queryArgs { } func (q *queryArgs) withEventSigArray(eventSigs []common.Hash) *queryArgs { - q.args["event_sig_array"] = concatBytes(eventSigs) - return q + return q.withCustomArg("event_sig_array", concatBytes(eventSigs)) } func (q *queryArgs) withAddress(address common.Address) *queryArgs { - q.args["address"] = address - return q + return q.withCustomArg("address", address) } func (q *queryArgs) withAddressArray(addresses []common.Address) *queryArgs { - q.args["address_array"] = concatBytes(addresses) - return q + return q.withCustomArg("address_array", concatBytes(addresses)) } func (q *queryArgs) withStartBlock(startBlock int64) *queryArgs { - q.args["start_block"] = startBlock - return q + return q.withCustomArg("start_block", startBlock) } func (q *queryArgs) withEndBlock(endBlock int64) *queryArgs { - q.args["end_block"] = endBlock - return q + return q.withCustomArg("end_block", endBlock) } func (q *queryArgs) withWordIndex(wordIndex int) *queryArgs { - q.args["word_index"] = wordIndex - return q + return q.withCustomArg("word_index", wordIndex) } func (q *queryArgs) withWordValueMin(wordValueMin common.Hash) *queryArgs { @@ -89,8 +83,7 @@ func (q *queryArgs) withWordValueMax(wordValueMax common.Hash) *queryArgs { } func (q *queryArgs) withConfs(confs Confirmations) *queryArgs { - q.args["confs"] = confs - return q + return q.withCustomArg("confs", confs) } func (q *queryArgs) withTopicIndex(index int) *queryArgs { @@ -99,8 +92,7 @@ func (q *queryArgs) withTopicIndex(index int) *queryArgs { q.err = append(q.err, fmt.Errorf("invalid index for topic: %d", index)) } // Add 1 since postgresql arrays are 1-indexed. 
- q.args["topic_index"] = index + 1 - return q + return q.withCustomArg("topic_index", index+1) } func (q *queryArgs) withTopicValueMin(valueMin common.Hash) *queryArgs { @@ -112,13 +104,11 @@ func (q *queryArgs) withTopicValueMax(valueMax common.Hash) *queryArgs { } func (q *queryArgs) withTopicValues(values []common.Hash) *queryArgs { - q.args["topic_values"] = concatBytes(values) - return q + return q.withCustomArg("topic_values", concatBytes(values)) } func (q *queryArgs) withBlockTimestampAfter(after time.Time) *queryArgs { - q.args["block_timestamp_after"] = after - return q + return q.withCustomArg("block_timestamp_after", after) } func (q *queryArgs) withTxHash(hash common.Hash) *queryArgs { @@ -126,8 +116,7 @@ func (q *queryArgs) withTxHash(hash common.Hash) *queryArgs { } func (q *queryArgs) withCustomHashArg(name string, arg common.Hash) *queryArgs { - q.args[name] = arg.Bytes() - return q + return q.withCustomArg(name, arg.Bytes()) } func (q *queryArgs) withCustomArg(name string, arg any) *queryArgs { diff --git a/core/chains/evm/txmgr/txmgr_test.go b/core/chains/evm/txmgr/txmgr_test.go index 7932167ff48..de8c6ff4ef8 100644 --- a/core/chains/evm/txmgr/txmgr_test.go +++ b/core/chains/evm/txmgr/txmgr_test.go @@ -44,7 +44,7 @@ import ( func makeTestEvmTxm( t *testing.T, db *sqlx.DB, ethClient evmclient.Client, estimator gas.EvmFeeEstimator, ccfg txmgr.ChainConfig, fcfg txmgr.FeeConfig, txConfig evmconfig.Transactions, dbConfig txmgr.DatabaseConfig, listenerConfig txmgr.ListenerConfig, keyStore keystore.Eth, eventBroadcaster pg.EventBroadcaster) (txmgr.TxManager, error) { lggr := logger.TestLogger(t) - lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), ethClient, lggr, 100*time.Millisecond, 2, 3, 2, 1000) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), ethClient, lggr, 100*time.Millisecond, false, 2, 3, 2, 1000) // logic for building components (from evm/evm_txm.go) ------- lggr.Infow("Initializing EVM transaction manager", diff --git a/core/services/ocr2/plugins/ocr2keeper/evm21/logprovider/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/evm21/logprovider/integration_test.go index 30a7bafceb9..506dcb9ea33 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evm21/logprovider/integration_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evm21/logprovider/integration_test.go @@ -662,7 +662,7 @@ func setupDependencies(t *testing.T, db *sqlx.DB, backend *backends.SimulatedBac pollerLggr := logger.TestLogger(t) pollerLggr.SetLogLevel(zapcore.WarnLevel) lorm := logpoller.NewORM(big.NewInt(1337), db, pollerLggr, pgtest.NewQConfig(false)) - lp := logpoller.NewLogPoller(lorm, ethClient, pollerLggr, 100*time.Millisecond, 1, 2, 2, 1000) + lp := logpoller.NewLogPoller(lorm, ethClient, pollerLggr, 100*time.Millisecond, false, 1, 2, 2, 1000) return lp, ethClient } diff --git a/core/services/pg/q.go b/core/services/pg/q.go index 098334bf1e7..3210af1c1d6 100644 --- a/core/services/pg/q.go +++ b/core/services/pg/q.go @@ -256,6 +256,7 @@ func (q Q) Get(dest interface{}, query string, args ...interface{}) error { return ql.withLogError(q.Queryer.GetContext(ctx, dest, query, args...)) } + func (q Q) GetNamed(sql string, dest interface{}, arg interface{}) error { query, args, err := q.BindNamed(sql, arg) if err != nil { diff --git a/core/services/relay/evm/config_poller_test.go b/core/services/relay/evm/config_poller_test.go index 73c16a19596..0a433c3bc54 100644 --- 
a/core/services/relay/evm/config_poller_test.go +++ b/core/services/relay/evm/config_poller_test.go @@ -87,7 +87,7 @@ func TestConfigPoller(t *testing.T) { ethClient = evmclient.NewSimulatedBackendClient(t, b, testutils.SimulatedChainID) ctx := testutils.Context(t) lorm := logpoller.NewORM(testutils.SimulatedChainID, db, lggr, cfg) - lp = logpoller.NewLogPoller(lorm, ethClient, lggr, 100*time.Millisecond, 1, 2, 2, 1000) + lp = logpoller.NewLogPoller(lorm, ethClient, lggr, 100*time.Millisecond, false, 1, 2, 2, 1000) require.NoError(t, lp.Start(ctx)) t.Cleanup(func() { lp.Close() }) } diff --git a/core/services/relay/evm/functions/config_poller_test.go b/core/services/relay/evm/functions/config_poller_test.go index d6573ef3544..085f0c6e317 100644 --- a/core/services/relay/evm/functions/config_poller_test.go +++ b/core/services/relay/evm/functions/config_poller_test.go @@ -80,7 +80,7 @@ func runTest(t *testing.T, pluginType functions.FunctionsPluginType, expectedDig lggr := logger.TestLogger(t) ctx := testutils.Context(t) lorm := logpoller.NewORM(big.NewInt(1337), db, lggr, cfg) - lp := logpoller.NewLogPoller(lorm, ethClient, lggr, 100*time.Millisecond, 1, 2, 2, 1000) + lp := logpoller.NewLogPoller(lorm, ethClient, lggr, 100*time.Millisecond, false, 1, 2, 2, 1000) defer lp.Close() require.NoError(t, lp.Start(ctx)) configPoller, err := functions.NewFunctionsConfigPoller(pluginType, lp, lggr) diff --git a/core/services/relay/evm/mercury/helpers_test.go b/core/services/relay/evm/mercury/helpers_test.go index 4e3587b5de6..3a58a25a557 100644 --- a/core/services/relay/evm/mercury/helpers_test.go +++ b/core/services/relay/evm/mercury/helpers_test.go @@ -169,7 +169,7 @@ func SetupTH(t *testing.T, feedID common.Hash) TestHarness { lggr := logger.TestLogger(t) ctx := testutils.Context(t) lorm := logpoller.NewORM(big.NewInt(1337), db, lggr, cfg) - lp := logpoller.NewLogPoller(lorm, ethClient, lggr, 100*time.Millisecond, 1, 2, 2, 1000) + lp := logpoller.NewLogPoller(lorm, ethClient, lggr, 100*time.Millisecond, false, 1, 2, 2, 1000) eventBroadcaster := pgmocks.NewEventBroadcaster(t) subscription := pgmocks.NewSubscription(t) require.NoError(t, lp.Start(ctx)) diff --git a/core/store/migrate/migrate_test.go b/core/store/migrate/migrate_test.go index d6135ce4529..7a1e38fb030 100644 --- a/core/store/migrate/migrate_test.go +++ b/core/store/migrate/migrate_test.go @@ -15,12 +15,16 @@ import ( "github.com/smartcontractkit/chainlink-relay/pkg/types" evmcfg "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/config/env" "github.com/smartcontractkit/chainlink/v2/core/internal/cltest/heavyweight" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" configtest "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest/v2" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" "github.com/smartcontractkit/chainlink/v2/core/services/job" + "github.com/smartcontractkit/chainlink/v2/core/services/pg" "github.com/smartcontractkit/chainlink/v2/core/services/pipeline" "github.com/smartcontractkit/chainlink/v2/core/services/relay" "github.com/smartcontractkit/chainlink/v2/core/store/migrate" @@ -437,3 +441,141 @@ func TestSetMigrationENVVars(t *testing.T) { require.Equal(t, actualChainID, chainID.String()) }) } + 
+func TestDatabaseBackFillWithMigration202(t *testing.T) { + _, db := heavyweight.FullTestDBEmptyV2(t, migrationDir, nil) + + err := goose.UpTo(db.DB, migrationDir, 201) + require.NoError(t, err) + + simulatedOrm := logpoller.NewORM(testutils.SimulatedChainID, db, logger.TestLogger(t), pgtest.NewQConfig(true)) + require.NoError(t, simulatedOrm.InsertBlock(testutils.Random32Byte(), 10, time.Now(), 0), err) + require.NoError(t, simulatedOrm.InsertBlock(testutils.Random32Byte(), 51, time.Now(), 0), err) + require.NoError(t, simulatedOrm.InsertBlock(testutils.Random32Byte(), 90, time.Now(), 0), err) + require.NoError(t, simulatedOrm.InsertBlock(testutils.Random32Byte(), 120, time.Now(), 23), err) + + baseOrm := logpoller.NewORM(big.NewInt(int64(84531)), db, logger.TestLogger(t), pgtest.NewQConfig(true)) + require.NoError(t, baseOrm.InsertBlock(testutils.Random32Byte(), 400, time.Now(), 0), err) + + klaytnOrm := logpoller.NewORM(big.NewInt(int64(1001)), db, logger.TestLogger(t), pgtest.NewQConfig(true)) + require.NoError(t, klaytnOrm.InsertBlock(testutils.Random32Byte(), 100, time.Now(), 0), err) + + err = goose.UpTo(db.DB, migrationDir, 202) + require.NoError(t, err) + + tests := []struct { + name string + blockNumber int64 + expectedFinalizedBlock int64 + orm *logpoller.DbORM + }{ + { + name: "last finalized block not changed if finality is too deep", + blockNumber: 10, + expectedFinalizedBlock: 0, + orm: simulatedOrm, + }, + { + name: "last finalized block is updated for first block", + blockNumber: 51, + expectedFinalizedBlock: 1, + orm: simulatedOrm, + }, + { + name: "last finalized block is updated", + blockNumber: 90, + expectedFinalizedBlock: 40, + orm: simulatedOrm, + }, + { + name: "last finalized block is not changed when finality is set", + blockNumber: 120, + expectedFinalizedBlock: 23, + orm: simulatedOrm, + }, + { + name: "use non default finality depth for chain 84531", + blockNumber: 400, + expectedFinalizedBlock: 200, + orm: baseOrm, + }, + { + name: "use default finality depth for chain 1001", + blockNumber: 100, + expectedFinalizedBlock: 99, + orm: klaytnOrm, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + block, err := tt.orm.SelectBlockByNumber(tt.blockNumber) + require.NoError(t, err) + require.Equal(t, tt.expectedFinalizedBlock, block.FinalizedBlockNumber) + }) + } +} + +func BenchmarkBackfillingRecordsWithMigration202(b *testing.B) { + previousMigration := int64(201) + backfillMigration := int64(202) + chainCount := 2 + // By default, log poller keeps up to 100_000 blocks in the database, this is the pessimistic case + maxLogsSize := 100_000 + // Disable Goose logging for benchmarking + goose.SetLogger(goose.NopLogger()) + _, db := heavyweight.FullTestDBEmptyV2(b, migrationDir, nil) + + err := goose.UpTo(db.DB, migrationDir, previousMigration) + require.NoError(b, err) + + q := pg.NewQ(db, logger.NullLogger, pgtest.NewQConfig(true)) + for j := 0; j < chainCount; j++ { + // Insert 100_000 block to database, can't do all at once, so batching by 10k + var blocks []logpoller.LogPollerBlock + for i := 0; i < maxLogsSize; i++ { + blocks = append(blocks, logpoller.LogPollerBlock{ + EvmChainId: utils.NewBigI(int64(j + 1)), + BlockHash: testutils.Random32Byte(), + BlockNumber: int64(i + 1000), + FinalizedBlockNumber: 0, + }) + } + batchInsertSize := 10_000 + for i := 0; i < maxLogsSize; i += batchInsertSize { + start, end := i, i+batchInsertSize + if end > maxLogsSize { + end = maxLogsSize + } + + err = q.ExecQNamed(` + INSERT INTO 
evm.log_poller_blocks
+				(evm_chain_id, block_hash, block_number, finalized_block_number, block_timestamp, created_at)
+			VALUES
+				(:evm_chain_id, :block_hash, :block_number, :finalized_block_number, NOW(), NOW())
+			ON CONFLICT DO NOTHING`, blocks[start:end])
+			require.NoError(b, err)
+		}
+	}
+
+	b.ResetTimer()
+
+	// 1. Measure time of migration 202
+	// 2. Goose down to 201
+	// 3. Reset finalized_block_number to 0
+	// Repeat 1-3
+	for i := 0; i < b.N; i++ {
+		b.StartTimer()
+		err = goose.UpTo(db.DB, migrationDir, backfillMigration)
+		require.NoError(b, err)
+		b.StopTimer()
+
+		// Cleanup
+		err = goose.DownTo(db.DB, migrationDir, previousMigration)
+		require.NoError(b, err)
+
+		err = q.ExecQ(`
+			UPDATE evm.log_poller_blocks
+			SET finalized_block_number = 0`)
+		require.NoError(b, err)
+	}
+}
diff --git a/core/store/migrate/migrations/0201_add_finalized_block_number.sql b/core/store/migrate/migrations/0201_add_finalized_block_number.sql
new file mode 100644
index 00000000000..db15ebbed6e
--- /dev/null
+++ b/core/store/migrate/migrations/0201_add_finalized_block_number.sql
@@ -0,0 +1,11 @@
+-- +goose Up
+ALTER TABLE evm.log_poller_blocks
+    ADD COLUMN finalized_block_number
+        bigint not null
+        default 0
+        check (finalized_block_number >= 0);
+
+
+-- +goose Down
+ALTER TABLE evm.log_poller_blocks
+    DROP COLUMN finalized_block_number;
diff --git a/core/store/migrate/migrations/0202_default_values_for_last_finalized_block.sql.sql b/core/store/migrate/migrations/0202_default_values_for_last_finalized_block.sql.sql
new file mode 100644
index 00000000000..0f93cd27482
--- /dev/null
+++ b/core/store/migrate/migrations/0202_default_values_for_last_finalized_block.sql.sql
@@ -0,0 +1,33 @@
+-- +goose Up
+
+WITH variables AS (
+    SELECT
+        evm_chain_id,
+        CASE
+            WHEN evm_chain_id = 43113 THEN 1 -- Avax Fuji
+            WHEN evm_chain_id = 43114 THEN 1 -- Avax Mainnet
+            WHEN evm_chain_id = 84531 THEN 200 -- Base Goerli
+            WHEN evm_chain_id = 8453 THEN 200 -- Base Mainnet
+            WHEN evm_chain_id = 42220 THEN 1 -- Celo Mainnet
+            WHEN evm_chain_id = 44787 THEN 1 -- Celo Testnet
+            WHEN evm_chain_id = 8217 THEN 1 -- Klaytn Mainnet
+            WHEN evm_chain_id = 1001 THEN 1 -- Klaytn Testnet
+            WHEN evm_chain_id = 1088 THEN 1 -- Metis Mainnet
+            WHEN evm_chain_id = 588 THEN 1 -- Metis Rinkeby
+            WHEN evm_chain_id = 420 THEN 200 -- Optimism Goerli
+            WHEN evm_chain_id = 10 THEN 200 -- Optimism Mainnet
+            WHEN evm_chain_id = 137 THEN 500 -- Polygon Mainnet
+            WHEN evm_chain_id = 80001 THEN 500 -- Polygon Mumbai
+            WHEN evm_chain_id = 534352 THEN 1 -- Scroll Mainnet
+            WHEN evm_chain_id = 534351 THEN 1 -- Scroll Sepolia
+            ELSE 50 -- all other chains
+        END AS finality_depth
+    FROM evm.log_poller_blocks
+    GROUP BY evm_chain_id
+)
+
+UPDATE evm.log_poller_blocks AS lpb
+SET finalized_block_number = greatest(lpb.block_number - v.finality_depth, 0)
+FROM variables v
+WHERE lpb.evm_chain_id = v.evm_chain_id
+    AND lpb.finalized_block_number = 0;
\ No newline at end of file
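
Note (not part of the patch): the 0202 backfill rule above can be cross-checked against the expectations in TestDatabaseBackFillWithMigration202 with a small standalone sketch. The chain IDs and depths below mirror the CASE statement in the migration; the function and variable names are illustrative only, and it assumes testutils.SimulatedChainID is 1337.

package main

import "fmt"

// defaultFinalizedBlock mirrors migration 0202: rows whose finalized_block_number
// is still 0 get greatest(block_number - finality_depth, 0), where finality_depth
// comes from the per-chain CASE statement (ELSE 50 for all other chains).
func defaultFinalizedBlock(chainID, blockNumber, existing int64) int64 {
	if existing != 0 {
		return existing // the migration leaves already-populated rows untouched
	}
	depths := map[int64]int64{
		43113: 1, 43114: 1, // Avax
		84531: 200, 8453: 200, // Base
		42220: 1, 44787: 1, // Celo
		8217: 1, 1001: 1, // Klaytn
		1088: 1, 588: 1, // Metis
		420: 200, 10: 200, // Optimism
		137: 500, 80001: 500, // Polygon
		534352: 1, 534351: 1, // Scroll
	}
	depth, ok := depths[chainID]
	if !ok {
		depth = 50 // all other chains
	}
	if blockNumber < depth {
		return 0
	}
	return blockNumber - depth
}

func main() {
	// Values match the cases asserted in TestDatabaseBackFillWithMigration202.
	fmt.Println(defaultFinalizedBlock(1337, 10, 0))   // 0   finality depth deeper than the chain
	fmt.Println(defaultFinalizedBlock(1337, 51, 0))   // 1
	fmt.Println(defaultFinalizedBlock(1337, 90, 0))   // 40
	fmt.Println(defaultFinalizedBlock(1337, 120, 23)) // 23  already set, unchanged
	fmt.Println(defaultFinalizedBlock(84531, 400, 0)) // 200 Base Goerli depth
	fmt.Println(defaultFinalizedBlock(1001, 100, 0))  // 99  Klaytn testnet depth
}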