From 30cf88bc5759abc7f51e6fde4fcf9771303cc87a Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 25 Jan 2024 12:41:09 -0500 Subject: [PATCH 01/65] Initial commit --- .../evm/headtracker/head_broadcaster_test.go | 2 +- .../chains/evm/headtracker/head_saver_test.go | 5 +- .../evm/headtracker/head_tracker_test.go | 78 +++++++++---------- core/chains/evm/headtracker/orm.go | 73 +++++++++++++---- core/chains/evm/headtracker/orm_test.go | 40 +++++----- core/chains/legacyevm/chain.go | 2 +- core/internal/cltest/factories.go | 2 +- 7 files changed, 121 insertions(+), 81 deletions(-) diff --git a/core/chains/evm/headtracker/head_broadcaster_test.go b/core/chains/evm/headtracker/head_broadcaster_test.go index dcbb9bd0396..b54c7124f67 100644 --- a/core/chains/evm/headtracker/head_broadcaster_test.go +++ b/core/chains/evm/headtracker/head_broadcaster_test.go @@ -70,7 +70,7 @@ func TestHeadBroadcaster_Subscribe(t *testing.T) { checker1 := &cltest.MockHeadTrackable{} checker2 := &cltest.MockHeadTrackable{} - orm := headtracker.NewORM(db, logger, cfg.Database(), *ethClient.ConfiguredChainID()) + orm := headtracker.NewORM(*ethClient.ConfiguredChainID(), db) // TODO:, logger, cfg.Database(), ) hs := headtracker.NewHeadSaver(logger, orm, evmCfg.EVM(), evmCfg.EVM().HeadTracker()) mailMon := mailboxtest.NewMonitor(t) servicetest.Run(t, mailMon) diff --git a/core/chains/evm/headtracker/head_saver_test.go b/core/chains/evm/headtracker/head_saver_test.go index f541330bc98..6df9d62ea4a 100644 --- a/core/chains/evm/headtracker/head_saver_test.go +++ b/core/chains/evm/headtracker/head_saver_test.go @@ -11,7 +11,6 @@ import ( httypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker/types" "github.com/smartcontractkit/chainlink/v2/core/internal/cltest" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" - "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest" 
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" ) @@ -44,9 +43,9 @@ func (c *config) BlockEmissionIdleWarningThreshold() time.Duration { func configureSaver(t *testing.T) (httypes.HeadSaver, headtracker.ORM) { db := pgtest.NewSqlxDB(t) lggr := logger.Test(t) - cfg := configtest.NewGeneralConfig(t, nil) + //cfg := configtest.NewGeneralConfig(t, nil) htCfg := &config{finalityDepth: uint32(1)} - orm := headtracker.NewORM(db, lggr, cfg.Database(), cltest.FixtureChainID) + orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, lggr, cfg.Database(), ) saver := headtracker.NewHeadSaver(lggr, orm, htCfg, &headTrackerConfig{historyDepth: 6}) return saver, orm } diff --git a/core/chains/evm/headtracker/head_tracker_test.go b/core/chains/evm/headtracker/head_tracker_test.go index 22e931d6d0f..93cfdb0bfd4 100644 --- a/core/chains/evm/headtracker/head_tracker_test.go +++ b/core/chains/evm/headtracker/head_tracker_test.go @@ -52,12 +52,12 @@ func TestHeadTracker_New(t *testing.T) { t.Parallel() db := pgtest.NewSqlxDB(t) - logger := logger.Test(t) - config := configtest.NewGeneralConfig(t, nil) + //logger := logger.Test(t) + //config := configtest.NewGeneralConfig(t, nil) ethClient := evmtest.NewEthClientMockWithDefaultChain(t) ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(cltest.Head(0), nil) - orm := headtracker.NewORM(db, logger, config.Database(), cltest.FixtureChainID) + orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, config.Database(), ) assert.Nil(t, orm.IdempotentInsertHead(testutils.Context(t), cltest.Head(1))) last := cltest.Head(16) assert.Nil(t, orm.IdempotentInsertHead(testutils.Context(t), last)) @@ -76,11 +76,11 @@ func TestHeadTracker_Save_InsertsAndTrimsTable(t *testing.T) { t.Parallel() db := pgtest.NewSqlxDB(t) - logger := logger.Test(t) + //logger := logger.Test(t) config := cltest.NewTestChainScopedConfig(t) ethClient := evmtest.NewEthClientMockWithDefaultChain(t) - orm := 
headtracker.NewORM(db, logger, config.Database(), cltest.FixtureChainID) + orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, config.Database(), ) for idx := 0; idx < 200; idx++ { assert.Nil(t, orm.IdempotentInsertHead(testutils.Context(t), cltest.Head(idx))) @@ -121,9 +121,9 @@ func TestHeadTracker_Get(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { db := pgtest.NewSqlxDB(t) - logger := logger.Test(t) + //logger := logger.Test(t) config := cltest.NewTestChainScopedConfig(t) - orm := headtracker.NewORM(db, logger, config.Database(), cltest.FixtureChainID) + orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, config.Database(), ) ethClient := evmtest.NewEthClientMockWithDefaultChain(t) chStarted := make(chan struct{}) @@ -168,9 +168,9 @@ func TestHeadTracker_Start_NewHeads(t *testing.T) { t.Parallel() db := pgtest.NewSqlxDB(t) - logger := logger.Test(t) + //logger := logger.Test(t) config := cltest.NewTestChainScopedConfig(t) - orm := headtracker.NewORM(db, logger, config.Database(), cltest.FixtureChainID) + orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, config.Database(), ) ethClient := evmtest.NewEthClientMockWithDefaultChain(t) chStarted := make(chan struct{}) @@ -193,9 +193,9 @@ func TestHeadTracker_Start_CancelContext(t *testing.T) { t.Parallel() db := pgtest.NewSqlxDB(t) - logger := logger.Test(t) + //logger := logger.Test(t) config := cltest.NewTestChainScopedConfig(t) - orm := headtracker.NewORM(db, logger, config.Database(), cltest.FixtureChainID) + orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, config.Database(), ) ethClient := evmtest.NewEthClientMockWithDefaultChain(t) chStarted := make(chan struct{}) ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Run(func(args mock.Arguments) { @@ -233,9 +233,9 @@ func TestHeadTracker_CallsHeadTrackableCallbacks(t *testing.T) { g := gomega.NewWithT(t) db := pgtest.NewSqlxDB(t) - logger 
:= logger.Test(t) + //logger := logger.Test(t) config := cltest.NewTestChainScopedConfig(t) - orm := headtracker.NewORM(db, logger, config.Database(), cltest.FixtureChainID) + orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, config.Database(), ) ethClient := evmtest.NewEthClientMockWithDefaultChain(t) @@ -272,9 +272,9 @@ func TestHeadTracker_ReconnectOnError(t *testing.T) { g := gomega.NewWithT(t) db := pgtest.NewSqlxDB(t) - logger := logger.Test(t) + //logger := logger.Test(t) config := cltest.NewTestChainScopedConfig(t) - orm := headtracker.NewORM(db, logger, config.Database(), cltest.FixtureChainID) + orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, config.Database(), ) ethClient := evmtest.NewEthClientMockWithDefaultChain(t) mockEth := &evmtest.MockEth{EthClient: ethClient} @@ -308,9 +308,9 @@ func TestHeadTracker_ResubscribeOnSubscriptionError(t *testing.T) { g := gomega.NewWithT(t) db := pgtest.NewSqlxDB(t) - logger := logger.Test(t) + //logger := logger.Test(t) config := cltest.NewTestChainScopedConfig(t) - orm := headtracker.NewORM(db, logger, config.Database(), cltest.FixtureChainID) + orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, config.Database(), ) ethClient := evmtest.NewEthClientMockWithDefaultChain(t) @@ -355,7 +355,7 @@ func TestHeadTracker_Start_LoadsLatestChain(t *testing.T) { t.Parallel() db := pgtest.NewSqlxDB(t) - logger := logger.Test(t) + //logger := logger.Test(t) config := cltest.NewTestChainScopedConfig(t) ethClient := evmtest.NewEthClientMockWithDefaultChain(t) @@ -389,7 +389,7 @@ func TestHeadTracker_Start_LoadsLatestChain(t *testing.T) { func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, ) - orm := headtracker.NewORM(db, logger, config.Database(), cltest.FixtureChainID) + orm := headtracker.NewORM(cltest.FixtureChainID, db) //TODO;, logger, config.Database(), ) trackable := &cltest.MockHeadTrackable{} ht := createHeadTrackerWithChecker(t, 
ethClient, config.EVM(), config.EVM().HeadTracker(), orm, trackable) @@ -420,7 +420,7 @@ func TestHeadTracker_SwitchesToLongestChainWithHeadSamplingEnabled(t *testing.T) t.Parallel() db := pgtest.NewSqlxDB(t) - logger := logger.Test(t) + //logger := logger.Test(t) config := configtest.NewGeneralConfig(t, func(c *chainlink.Config, s *chainlink.Secrets) { c.EVM[0].FinalityDepth = ptr[uint32](50) @@ -432,7 +432,7 @@ func TestHeadTracker_SwitchesToLongestChainWithHeadSamplingEnabled(t *testing.T) ethClient := evmtest.NewEthClientMockWithDefaultChain(t) checker := commonmocks.NewHeadTrackable[*evmtypes.Head, gethCommon.Hash](t) - orm := headtracker.NewORM(db, logger, config.Database(), *evmtest.MustGetDefaultChainID(t, config.EVMConfigs())) + orm := headtracker.NewORM(*evmtest.MustGetDefaultChainID(t, config.EVMConfigs()), db) // TODO:, logger, config.Database(), ) csCfg := evmtest.NewChainScopedConfig(t, config) ht := createHeadTrackerWithChecker(t, ethClient, csCfg.EVM(), csCfg.EVM().HeadTracker(), orm, checker) @@ -548,7 +548,7 @@ func TestHeadTracker_SwitchesToLongestChainWithHeadSamplingDisabled(t *testing.T t.Parallel() db := pgtest.NewSqlxDB(t) - logger := logger.Test(t) + //logger := logger.Test(t) config := configtest.NewGeneralConfig(t, func(c *chainlink.Config, s *chainlink.Secrets) { c.EVM[0].FinalityDepth = ptr[uint32](50) @@ -560,7 +560,7 @@ func TestHeadTracker_SwitchesToLongestChainWithHeadSamplingDisabled(t *testing.T ethClient := evmtest.NewEthClientMockWithDefaultChain(t) checker := commonmocks.NewHeadTrackable[*evmtypes.Head, gethCommon.Hash](t) - orm := headtracker.NewORM(db, logger, config.Database(), cltest.FixtureChainID) + orm := headtracker.NewORM(cltest.FixtureChainID, db) //TODO: , logger, config.Database(), ) evmcfg := evmtest.NewChainScopedConfig(t, config) ht := createHeadTrackerWithChecker(t, ethClient, evmcfg.EVM(), evmcfg.EVM().HeadTracker(), orm, checker) @@ -776,8 +776,8 @@ func TestHeadTracker_Backfill(t *testing.T) { t.Run("does 
nothing if all the heads are in database", func(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) - logger := logger.Test(t) - orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + //logger := logger.Test(t) + orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, cfg.Database(), ) for i := range heads { require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) } @@ -793,8 +793,8 @@ func TestHeadTracker_Backfill(t *testing.T) { t.Run("fetches a missing head", func(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) - logger := logger.Test(t) - orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + //logger := logger.Test(t) + orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO, logger, cfg.Database(), ) for i := range heads { require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) } @@ -829,8 +829,8 @@ func TestHeadTracker_Backfill(t *testing.T) { t.Run("fetches only heads that are missing", func(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) - logger := logger.Test(t) - orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + //logger := logger.Test(t) + orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, cfg.Database(), ) for i := range heads { require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) } @@ -862,8 +862,8 @@ func TestHeadTracker_Backfill(t *testing.T) { t.Run("does not backfill if chain length is already greater than or equal to depth", func(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) - logger := logger.Test(t) - orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + //logger := logger.Test(t) + orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO: , logger, cfg.Database(), ) for i := range heads { 
require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) } @@ -883,8 +883,8 @@ func TestHeadTracker_Backfill(t *testing.T) { t.Run("only backfills to height 0 if chain length would otherwise cause it to try and fetch a negative head", func(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) - logger := logger.Test(t) - orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + //logger := logger.Test(t) + orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, cfg.Database(),) ethClient := evmtest.NewEthClientMock(t) ethClient.On("ConfiguredChainID", mock.Anything).Return(evmtest.MustGetDefaultChainID(t, cfg.EVMConfigs()), nil) @@ -908,8 +908,8 @@ func TestHeadTracker_Backfill(t *testing.T) { t.Run("abandons backfill and returns error if the eth node returns not found", func(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) - logger := logger.Test(t) - orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + //logger := logger.Test(t) + orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, cfg.Database(), ) for i := range heads { require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) } @@ -939,8 +939,8 @@ func TestHeadTracker_Backfill(t *testing.T) { t.Run("abandons backfill and returns error if the context time budget is exceeded", func(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) - logger := logger.Test(t) - orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + //logger := logger.Test(t) + orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, cfg.Database(), ) for i := range heads { require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) } @@ -968,8 +968,8 @@ func TestHeadTracker_Backfill(t *testing.T) { t.Run("abandons backfill and returns error when fetching a block by hash 
fails, indicating a reorg", func(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) - logger := logger.Test(t) - orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + //logger := logger.Test(t) + orm := headtracker.NewORM(cltest.FixtureChainID, db) //TODO:, logger, cfg.Database(), ) ethClient := evmtest.NewEthClientMock(t) ethClient.On("ConfiguredChainID", mock.Anything).Return(evmtest.MustGetDefaultChainID(t, cfg.EVMConfigs()), nil) ethClient.On("HeadByHash", mock.Anything, h14.Hash).Return(&h14, nil).Once() diff --git a/core/chains/evm/headtracker/orm.go b/core/chains/evm/headtracker/orm.go index 859f6764b63..e98954e67e9 100644 --- a/core/chains/evm/headtracker/orm.go +++ b/core/chains/evm/headtracker/orm.go @@ -3,17 +3,13 @@ package headtracker import ( "context" "database/sql" - "math/big" - "github.com/ethereum/go-ethereum/common" "github.com/pkg/errors" + "math/big" - "github.com/jmoiron/sqlx" - - "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) type ORM interface { @@ -30,29 +26,48 @@ type ORM interface { HeadByHash(ctx context.Context, hash common.Hash) (head *evmtypes.Head, err error) } +var _ ORM = &orm{} + type orm struct { - q pg.Q + //q pg.Q chainID ubig.Big + db sqlutil.Queryer } +// NewORM creates an ORM scoped to chainID. 
+func NewORM(chainID big.Int, db sqlutil.Queryer) ORM { + return &orm{ + chainID: ubig.Big(chainID), + db: db, + } +} + +/* func NewORM(db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig, chainID big.Int) ORM { return &orm{pg.NewQ(db, logger.Named(lggr, "HeadTrackerORM"), cfg), ubig.Big(chainID)} } +*/ func (orm *orm) IdempotentInsertHead(ctx context.Context, head *evmtypes.Head) error { // listener guarantees head.EVMChainID to be equal to orm.chainID - q := orm.q.WithOpts(pg.WithParentCtx(ctx)) + //q := orm.q.WithOpts(pg.WithParentCtx(ctx)) + query := ` INSERT INTO evm.heads (hash, number, parent_hash, created_at, timestamp, l1_block_number, evm_chain_id, base_fee_per_gas) VALUES ( :hash, :number, :parent_hash, :created_at, :timestamp, :l1_block_number, :evm_chain_id, :base_fee_per_gas) ON CONFLICT (evm_chain_id, hash) DO NOTHING` - err := q.ExecQNamed(query, head) + + //err := q.ExecQNamed(query, head) + + _, err := orm.db.ExecContext(ctx, query, head) + return errors.Wrap(err, "IdempotentInsertHead failed to insert head") } func (orm *orm) TrimOldHeads(ctx context.Context, n uint) (err error) { - q := orm.q.WithOpts(pg.WithParentCtx(ctx)) - return q.ExecQ(` + //q := orm.q.WithOpts(pg.WithParentCtx(ctx)) + + _, err = orm.db.ExecContext(ctx, ` DELETE FROM evm.heads WHERE evm_chain_id = $1 AND number < ( SELECT min(number) FROM ( @@ -63,12 +78,32 @@ func (orm *orm) TrimOldHeads(ctx context.Context, n uint) (err error) { LIMIT $2 ) numbers )`, orm.chainID, n) + + return err + + /* + return q.ExecQ(` + DELETE FROM evm.heads + WHERE evm_chain_id = $1 AND number < ( + SELECT min(number) FROM ( + SELECT number + FROM evm.heads + WHERE evm_chain_id = $1 + ORDER BY number DESC + LIMIT $2 + ) numbers + )`, orm.chainID, n) + */ } func (orm *orm) LatestHead(ctx context.Context) (head *evmtypes.Head, err error) { head = new(evmtypes.Head) - q := orm.q.WithOpts(pg.WithParentCtx(ctx)) - err = q.Get(head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC, 
created_at DESC, id DESC LIMIT 1`, orm.chainID) + //q := orm.q.WithOpts(pg.WithParentCtx(ctx)) + //err = q.Get(head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC, created_at DESC, id DESC LIMIT 1`, orm.chainID) + + if err := orm.db.SelectContext(ctx, head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC, created_at DESC, id DESC LIMIT 1`, orm.chainID); err != nil { + return nil, err + } if errors.Is(err, sql.ErrNoRows) { return nil, nil } @@ -77,16 +112,20 @@ func (orm *orm) LatestHead(ctx context.Context) (head *evmtypes.Head, err error) } func (orm *orm) LatestHeads(ctx context.Context, limit uint) (heads []*evmtypes.Head, err error) { - q := orm.q.WithOpts(pg.WithParentCtx(ctx)) - err = q.Select(&heads, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC, created_at DESC, id DESC LIMIT $2`, orm.chainID, limit) + //q := orm.q.WithOpts(pg.WithParentCtx(ctx)) + //err = q.Select(&heads, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC, created_at DESC, id DESC LIMIT $2`, orm.chainID, limit) + + err = orm.db.SelectContext(ctx, &heads, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC, created_at DESC, id DESC LIMIT $2`, orm.chainID, limit) err = errors.Wrap(err, "LatestHeads failed") return } func (orm *orm) HeadByHash(ctx context.Context, hash common.Hash) (head *evmtypes.Head, err error) { - q := orm.q.WithOpts(pg.WithParentCtx(ctx)) + //q := orm.q.WithOpts(pg.WithParentCtx(ctx)) head = new(evmtypes.Head) - err = q.Get(head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 AND hash = $2`, orm.chainID, hash) + //err = q.Get(head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 AND hash = $2`, orm.chainID, hash) + err = orm.db.SelectContext(ctx, head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 AND hash = $2`, orm.chainID, hash) + if errors.Is(err, sql.ErrNoRows) { return nil, nil } diff --git a/core/chains/evm/headtracker/orm_test.go 
b/core/chains/evm/headtracker/orm_test.go index c9a2146daf2..b2f68f70794 100644 --- a/core/chains/evm/headtracker/orm_test.go +++ b/core/chains/evm/headtracker/orm_test.go @@ -3,15 +3,12 @@ package headtracker_test import ( "testing" - "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" - "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest" - "github.com/ethereum/go-ethereum/common" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker" "github.com/smartcontractkit/chainlink/v2/core/internal/cltest" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" @@ -21,9 +18,10 @@ func TestORM_IdempotentInsertHead(t *testing.T) { t.Parallel() db := pgtest.NewSqlxDB(t) - logger := logger.Test(t) - cfg := configtest.NewGeneralConfig(t, nil) - orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + //logger := logger.Test(t) + //cfg := configtest.NewGeneralConfig(t, nil) + orm := headtracker.NewORM(cltest.FixtureChainID, db) + //orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) // Returns nil when inserting first head head := cltest.Head(0) @@ -47,9 +45,10 @@ func TestORM_TrimOldHeads(t *testing.T) { t.Parallel() db := pgtest.NewSqlxDB(t) - logger := logger.Test(t) - cfg := configtest.NewGeneralConfig(t, nil) - orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + //logger := logger.Test(t) + //cfg := configtest.NewGeneralConfig(t, nil) + //orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + orm := headtracker.NewORM(cltest.FixtureChainID, db) for i := 0; i < 10; i++ { head := cltest.Head(i) @@ -72,9 +71,10 @@ func TestORM_HeadByHash(t *testing.T) { t.Parallel() db := pgtest.NewSqlxDB(t) - logger := 
logger.Test(t) - cfg := configtest.NewGeneralConfig(t, nil) - orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + //logger := logger.Test(t) + //cfg := configtest.NewGeneralConfig(t, nil) + //orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + orm := headtracker.NewORM(cltest.FixtureChainID, db) var hash common.Hash for i := 0; i < 10; i++ { @@ -95,9 +95,10 @@ func TestORM_HeadByHash_NotFound(t *testing.T) { t.Parallel() db := pgtest.NewSqlxDB(t) - logger := logger.Test(t) - cfg := configtest.NewGeneralConfig(t, nil) - orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + //logger := logger.Test(t) + //cfg := configtest.NewGeneralConfig(t, nil) + //orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + orm := headtracker.NewORM(cltest.FixtureChainID, db) hash := cltest.Head(123).Hash head, err := orm.HeadByHash(testutils.Context(t), hash) @@ -110,9 +111,10 @@ func TestORM_LatestHeads_NoRows(t *testing.T) { t.Parallel() db := pgtest.NewSqlxDB(t) - logger := logger.Test(t) - cfg := configtest.NewGeneralConfig(t, nil) - orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + //logger := logger.Test(t) + //cfg := configtest.NewGeneralConfig(t, nil) + //orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + orm := headtracker.NewORM(cltest.FixtureChainID, db) heads, err := orm.LatestHeads(testutils.Context(t), 100) diff --git a/core/chains/legacyevm/chain.go b/core/chains/legacyevm/chain.go index 92936299cdb..acf195d20d3 100644 --- a/core/chains/legacyevm/chain.go +++ b/core/chains/legacyevm/chain.go @@ -230,7 +230,7 @@ func newChain(ctx context.Context, cfg *evmconfig.ChainScoped, nodes []*toml.Nod if !cfg.EVMRPCEnabled() { headTracker = headtracker.NullTracker } else if opts.GenHeadTracker == nil { - orm := headtracker.NewORM(db, l, cfg.Database(), *chainID) + orm := headtracker.NewORM(*chainID, db) // TODO: remove, l, 
cfg.Database(), ) headSaver = headtracker.NewHeadSaver(l, orm, cfg.EVM(), cfg.EVM().HeadTracker()) headTracker = headtracker.NewHeadTracker(l, client, cfg.EVM(), cfg.EVM().HeadTracker(), headBroadcaster, headSaver, opts.MailMon) } else { diff --git a/core/internal/cltest/factories.go b/core/internal/cltest/factories.go index 804dbe2d088..7ebc031687e 100644 --- a/core/internal/cltest/factories.go +++ b/core/internal/cltest/factories.go @@ -314,7 +314,7 @@ func MustGenerateRandomKeyState(_ testing.TB) ethkey.State { func MustInsertHead(t *testing.T, db *sqlx.DB, cfg pg.QConfig, number int64) evmtypes.Head { h := evmtypes.NewHead(big.NewInt(number), evmutils.NewHash(), evmutils.NewHash(), 0, ubig.New(&FixtureChainID)) - horm := headtracker.NewORM(db, logger.TestLogger(t), cfg, FixtureChainID) + horm := headtracker.NewORM(FixtureChainID, db) // TODO: Remove, logger.TestLogger(t), cfg, ) err := horm.IdempotentInsertHead(testutils.Context(t), &h) require.NoError(t, err) From 216fea04faeae40f21005f60ee1801a155a355f6 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 25 Jan 2024 13:07:09 -0500 Subject: [PATCH 02/65] Refactor headtracker orm --- .../evm/headtracker/head_broadcaster_test.go | 2 +- .../chains/evm/headtracker/head_saver_test.go | 2 +- .../evm/headtracker/head_tracker_test.go | 35 +++++++------- core/chains/evm/headtracker/orm.go | 47 +++---------------- core/chains/evm/headtracker/orm_test.go | 18 +------ core/chains/legacyevm/chain.go | 2 +- core/internal/cltest/factories.go | 2 +- 7 files changed, 28 insertions(+), 80 deletions(-) diff --git a/core/chains/evm/headtracker/head_broadcaster_test.go b/core/chains/evm/headtracker/head_broadcaster_test.go index b54c7124f67..d8277582f61 100644 --- a/core/chains/evm/headtracker/head_broadcaster_test.go +++ b/core/chains/evm/headtracker/head_broadcaster_test.go @@ -70,7 +70,7 @@ func TestHeadBroadcaster_Subscribe(t *testing.T) { checker1 := &cltest.MockHeadTrackable{} checker2 := &cltest.MockHeadTrackable{} - 
orm := headtracker.NewORM(*ethClient.ConfiguredChainID(), db) // TODO:, logger, cfg.Database(), ) + orm := headtracker.NewORM(*ethClient.ConfiguredChainID(), db) hs := headtracker.NewHeadSaver(logger, orm, evmCfg.EVM(), evmCfg.EVM().HeadTracker()) mailMon := mailboxtest.NewMonitor(t) servicetest.Run(t, mailMon) diff --git a/core/chains/evm/headtracker/head_saver_test.go b/core/chains/evm/headtracker/head_saver_test.go index 6df9d62ea4a..f503aab89cb 100644 --- a/core/chains/evm/headtracker/head_saver_test.go +++ b/core/chains/evm/headtracker/head_saver_test.go @@ -45,7 +45,7 @@ func configureSaver(t *testing.T) (httypes.HeadSaver, headtracker.ORM) { lggr := logger.Test(t) //cfg := configtest.NewGeneralConfig(t, nil) htCfg := &config{finalityDepth: uint32(1)} - orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, lggr, cfg.Database(), ) + orm := headtracker.NewORM(cltest.FixtureChainID, db) saver := headtracker.NewHeadSaver(lggr, orm, htCfg, &headTrackerConfig{historyDepth: 6}) return saver, orm } diff --git a/core/chains/evm/headtracker/head_tracker_test.go b/core/chains/evm/headtracker/head_tracker_test.go index 93cfdb0bfd4..e2f64cba785 100644 --- a/core/chains/evm/headtracker/head_tracker_test.go +++ b/core/chains/evm/headtracker/head_tracker_test.go @@ -57,7 +57,7 @@ func TestHeadTracker_New(t *testing.T) { ethClient := evmtest.NewEthClientMockWithDefaultChain(t) ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(cltest.Head(0), nil) - orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, config.Database(), ) + orm := headtracker.NewORM(cltest.FixtureChainID, db) assert.Nil(t, orm.IdempotentInsertHead(testutils.Context(t), cltest.Head(1))) last := cltest.Head(16) assert.Nil(t, orm.IdempotentInsertHead(testutils.Context(t), last)) @@ -80,7 +80,7 @@ func TestHeadTracker_Save_InsertsAndTrimsTable(t *testing.T) { config := cltest.NewTestChainScopedConfig(t) ethClient := evmtest.NewEthClientMockWithDefaultChain(t) - orm := 
headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, config.Database(), ) + orm := headtracker.NewORM(cltest.FixtureChainID, db) for idx := 0; idx < 200; idx++ { assert.Nil(t, orm.IdempotentInsertHead(testutils.Context(t), cltest.Head(idx))) @@ -123,7 +123,7 @@ func TestHeadTracker_Get(t *testing.T) { db := pgtest.NewSqlxDB(t) //logger := logger.Test(t) config := cltest.NewTestChainScopedConfig(t) - orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, config.Database(), ) + orm := headtracker.NewORM(cltest.FixtureChainID, db) ethClient := evmtest.NewEthClientMockWithDefaultChain(t) chStarted := make(chan struct{}) @@ -170,7 +170,7 @@ func TestHeadTracker_Start_NewHeads(t *testing.T) { db := pgtest.NewSqlxDB(t) //logger := logger.Test(t) config := cltest.NewTestChainScopedConfig(t) - orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, config.Database(), ) + orm := headtracker.NewORM(cltest.FixtureChainID, db) ethClient := evmtest.NewEthClientMockWithDefaultChain(t) chStarted := make(chan struct{}) @@ -195,7 +195,7 @@ func TestHeadTracker_Start_CancelContext(t *testing.T) { db := pgtest.NewSqlxDB(t) //logger := logger.Test(t) config := cltest.NewTestChainScopedConfig(t) - orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, config.Database(), ) + orm := headtracker.NewORM(cltest.FixtureChainID, db) ethClient := evmtest.NewEthClientMockWithDefaultChain(t) chStarted := make(chan struct{}) ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Run(func(args mock.Arguments) { @@ -235,7 +235,7 @@ func TestHeadTracker_CallsHeadTrackableCallbacks(t *testing.T) { db := pgtest.NewSqlxDB(t) //logger := logger.Test(t) config := cltest.NewTestChainScopedConfig(t) - orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, config.Database(), ) + orm := headtracker.NewORM(cltest.FixtureChainID, db) ethClient := evmtest.NewEthClientMockWithDefaultChain(t) @@ -272,9 +272,8 @@ func 
TestHeadTracker_ReconnectOnError(t *testing.T) { g := gomega.NewWithT(t) db := pgtest.NewSqlxDB(t) - //logger := logger.Test(t) config := cltest.NewTestChainScopedConfig(t) - orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, config.Database(), ) + orm := headtracker.NewORM(cltest.FixtureChainID, db) ethClient := evmtest.NewEthClientMockWithDefaultChain(t) mockEth := &evmtest.MockEth{EthClient: ethClient} @@ -310,7 +309,7 @@ func TestHeadTracker_ResubscribeOnSubscriptionError(t *testing.T) { db := pgtest.NewSqlxDB(t) //logger := logger.Test(t) config := cltest.NewTestChainScopedConfig(t) - orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, config.Database(), ) + orm := headtracker.NewORM(cltest.FixtureChainID, db) ethClient := evmtest.NewEthClientMockWithDefaultChain(t) @@ -432,7 +431,7 @@ func TestHeadTracker_SwitchesToLongestChainWithHeadSamplingEnabled(t *testing.T) ethClient := evmtest.NewEthClientMockWithDefaultChain(t) checker := commonmocks.NewHeadTrackable[*evmtypes.Head, gethCommon.Hash](t) - orm := headtracker.NewORM(*evmtest.MustGetDefaultChainID(t, config.EVMConfigs()), db) // TODO:, logger, config.Database(), ) + orm := headtracker.NewORM(*evmtest.MustGetDefaultChainID(t, config.EVMConfigs()), db) csCfg := evmtest.NewChainScopedConfig(t, config) ht := createHeadTrackerWithChecker(t, ethClient, csCfg.EVM(), csCfg.EVM().HeadTracker(), orm, checker) @@ -560,7 +559,7 @@ func TestHeadTracker_SwitchesToLongestChainWithHeadSamplingDisabled(t *testing.T ethClient := evmtest.NewEthClientMockWithDefaultChain(t) checker := commonmocks.NewHeadTrackable[*evmtypes.Head, gethCommon.Hash](t) - orm := headtracker.NewORM(cltest.FixtureChainID, db) //TODO: , logger, config.Database(), ) + orm := headtracker.NewORM(cltest.FixtureChainID, db) evmcfg := evmtest.NewChainScopedConfig(t, config) ht := createHeadTrackerWithChecker(t, ethClient, evmcfg.EVM(), evmcfg.EVM().HeadTracker(), orm, checker) @@ -777,7 +776,7 @@ func 
TestHeadTracker_Backfill(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) //logger := logger.Test(t) - orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, cfg.Database(), ) + orm := headtracker.NewORM(cltest.FixtureChainID, db) for i := range heads { require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) } @@ -830,7 +829,7 @@ func TestHeadTracker_Backfill(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) //logger := logger.Test(t) - orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, cfg.Database(), ) + orm := headtracker.NewORM(cltest.FixtureChainID, db) for i := range heads { require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) } @@ -863,7 +862,7 @@ func TestHeadTracker_Backfill(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) //logger := logger.Test(t) - orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO: , logger, cfg.Database(), ) + orm := headtracker.NewORM(cltest.FixtureChainID, db) for i := range heads { require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) } @@ -884,7 +883,7 @@ func TestHeadTracker_Backfill(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) //logger := logger.Test(t) - orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, cfg.Database(),) + orm := headtracker.NewORM(cltest.FixtureChainID, db) ethClient := evmtest.NewEthClientMock(t) ethClient.On("ConfiguredChainID", mock.Anything).Return(evmtest.MustGetDefaultChainID(t, cfg.EVMConfigs()), nil) @@ -909,7 +908,7 @@ func TestHeadTracker_Backfill(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) //logger := logger.Test(t) - orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, cfg.Database(), ) + orm := headtracker.NewORM(cltest.FixtureChainID, db) for i := range heads 
{ require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) } @@ -940,7 +939,7 @@ func TestHeadTracker_Backfill(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) //logger := logger.Test(t) - orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO:, logger, cfg.Database(), ) + orm := headtracker.NewORM(cltest.FixtureChainID, db) for i := range heads { require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) } @@ -969,7 +968,7 @@ func TestHeadTracker_Backfill(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) //logger := logger.Test(t) - orm := headtracker.NewORM(cltest.FixtureChainID, db) //TODO:, logger, cfg.Database(), ) + orm := headtracker.NewORM(cltest.FixtureChainID, db) ethClient := evmtest.NewEthClientMock(t) ethClient.On("ConfiguredChainID", mock.Anything).Return(evmtest.MustGetDefaultChainID(t, cfg.EVMConfigs()), nil) ethClient.On("HeadByHash", mock.Anything, h14.Hash).Return(&h14, nil).Once() diff --git a/core/chains/evm/headtracker/orm.go b/core/chains/evm/headtracker/orm.go index e98954e67e9..5c73919e5ac 100644 --- a/core/chains/evm/headtracker/orm.go +++ b/core/chains/evm/headtracker/orm.go @@ -3,9 +3,10 @@ package headtracker import ( "context" "database/sql" + "math/big" + "github.com/ethereum/go-ethereum/common" "github.com/pkg/errors" - "math/big" "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" @@ -29,7 +30,6 @@ type ORM interface { var _ ORM = &orm{} type orm struct { - //q pg.Q chainID ubig.Big db sqlutil.Queryer } @@ -42,31 +42,19 @@ func NewORM(chainID big.Int, db sqlutil.Queryer) ORM { } } -/* -func NewORM(db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig, chainID big.Int) ORM { - return &orm{pg.NewQ(db, logger.Named(lggr, "HeadTrackerORM"), cfg), ubig.Big(chainID)} -} -*/ - func (orm *orm) IdempotentInsertHead(ctx context.Context, head 
*evmtypes.Head) error { // listener guarantees head.EVMChainID to be equal to orm.chainID - //q := orm.q.WithOpts(pg.WithParentCtx(ctx)) - query := ` INSERT INTO evm.heads (hash, number, parent_hash, created_at, timestamp, l1_block_number, evm_chain_id, base_fee_per_gas) VALUES ( - :hash, :number, :parent_hash, :created_at, :timestamp, :l1_block_number, :evm_chain_id, :base_fee_per_gas) + $1, $2, $3, $4, $5, $6, $7, $8) ON CONFLICT (evm_chain_id, hash) DO NOTHING` - //err := q.ExecQNamed(query, head) - - _, err := orm.db.ExecContext(ctx, query, head) + _, err := orm.db.ExecContext(ctx, query, head.Hash, head.Number, head.ParentHash, head.CreatedAt, head.Timestamp, head.L1BlockNumber, orm.chainID, head.BaseFeePerGas) return errors.Wrap(err, "IdempotentInsertHead failed to insert head") } func (orm *orm) TrimOldHeads(ctx context.Context, n uint) (err error) { - //q := orm.q.WithOpts(pg.WithParentCtx(ctx)) - _, err = orm.db.ExecContext(ctx, ` DELETE FROM evm.heads WHERE evm_chain_id = $1 AND number < ( @@ -80,28 +68,11 @@ func (orm *orm) TrimOldHeads(ctx context.Context, n uint) (err error) { )`, orm.chainID, n) return err - - /* - return q.ExecQ(` - DELETE FROM evm.heads - WHERE evm_chain_id = $1 AND number < ( - SELECT min(number) FROM ( - SELECT number - FROM evm.heads - WHERE evm_chain_id = $1 - ORDER BY number DESC - LIMIT $2 - ) numbers - )`, orm.chainID, n) - */ } func (orm *orm) LatestHead(ctx context.Context) (head *evmtypes.Head, err error) { head = new(evmtypes.Head) - //q := orm.q.WithOpts(pg.WithParentCtx(ctx)) - //err = q.Get(head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC, created_at DESC, id DESC LIMIT 1`, orm.chainID) - - if err := orm.db.SelectContext(ctx, head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC, created_at DESC, id DESC LIMIT 1`, orm.chainID); err != nil { + if err = orm.db.GetContext(ctx, head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC, created_at DESC, id 
DESC LIMIT 1`, orm.chainID); err != nil { return nil, err } if errors.Is(err, sql.ErrNoRows) { @@ -112,20 +83,14 @@ func (orm *orm) LatestHead(ctx context.Context) (head *evmtypes.Head, err error) } func (orm *orm) LatestHeads(ctx context.Context, limit uint) (heads []*evmtypes.Head, err error) { - //q := orm.q.WithOpts(pg.WithParentCtx(ctx)) - //err = q.Select(&heads, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC, created_at DESC, id DESC LIMIT $2`, orm.chainID, limit) - err = orm.db.SelectContext(ctx, &heads, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC, created_at DESC, id DESC LIMIT $2`, orm.chainID, limit) err = errors.Wrap(err, "LatestHeads failed") return } func (orm *orm) HeadByHash(ctx context.Context, hash common.Hash) (head *evmtypes.Head, err error) { - //q := orm.q.WithOpts(pg.WithParentCtx(ctx)) head = new(evmtypes.Head) - //err = q.Get(head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 AND hash = $2`, orm.chainID, hash) - err = orm.db.SelectContext(ctx, head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 AND hash = $2`, orm.chainID, hash) - + err = orm.db.GetContext(ctx, head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 AND hash = $2`, orm.chainID, hash) if errors.Is(err, sql.ErrNoRows) { return nil, nil } diff --git a/core/chains/evm/headtracker/orm_test.go b/core/chains/evm/headtracker/orm_test.go index b2f68f70794..19acf7a3d8f 100644 --- a/core/chains/evm/headtracker/orm_test.go +++ b/core/chains/evm/headtracker/orm_test.go @@ -4,13 +4,12 @@ import ( "testing" "github.com/ethereum/go-ethereum/common" - "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker" "github.com/smartcontractkit/chainlink/v2/core/internal/cltest" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" 
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" ) @@ -18,10 +17,7 @@ func TestORM_IdempotentInsertHead(t *testing.T) { t.Parallel() db := pgtest.NewSqlxDB(t) - //logger := logger.Test(t) - //cfg := configtest.NewGeneralConfig(t, nil) orm := headtracker.NewORM(cltest.FixtureChainID, db) - //orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) // Returns nil when inserting first head head := cltest.Head(0) @@ -45,9 +41,6 @@ func TestORM_TrimOldHeads(t *testing.T) { t.Parallel() db := pgtest.NewSqlxDB(t) - //logger := logger.Test(t) - //cfg := configtest.NewGeneralConfig(t, nil) - //orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) orm := headtracker.NewORM(cltest.FixtureChainID, db) for i := 0; i < 10; i++ { @@ -71,9 +64,6 @@ func TestORM_HeadByHash(t *testing.T) { t.Parallel() db := pgtest.NewSqlxDB(t) - //logger := logger.Test(t) - //cfg := configtest.NewGeneralConfig(t, nil) - //orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) orm := headtracker.NewORM(cltest.FixtureChainID, db) var hash common.Hash @@ -95,9 +85,6 @@ func TestORM_HeadByHash_NotFound(t *testing.T) { t.Parallel() db := pgtest.NewSqlxDB(t) - //logger := logger.Test(t) - //cfg := configtest.NewGeneralConfig(t, nil) - //orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) orm := headtracker.NewORM(cltest.FixtureChainID, db) hash := cltest.Head(123).Hash @@ -111,9 +98,6 @@ func TestORM_LatestHeads_NoRows(t *testing.T) { t.Parallel() db := pgtest.NewSqlxDB(t) - //logger := logger.Test(t) - //cfg := configtest.NewGeneralConfig(t, nil) - //orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) orm := headtracker.NewORM(cltest.FixtureChainID, db) heads, err := orm.LatestHeads(testutils.Context(t), 100) diff --git a/core/chains/legacyevm/chain.go b/core/chains/legacyevm/chain.go index acf195d20d3..3a91f7083a3 100644 --- a/core/chains/legacyevm/chain.go +++ 
b/core/chains/legacyevm/chain.go @@ -230,7 +230,7 @@ func newChain(ctx context.Context, cfg *evmconfig.ChainScoped, nodes []*toml.Nod if !cfg.EVMRPCEnabled() { headTracker = headtracker.NullTracker } else if opts.GenHeadTracker == nil { - orm := headtracker.NewORM(*chainID, db) // TODO: remove, l, cfg.Database(), ) + orm := headtracker.NewORM(*chainID, db) headSaver = headtracker.NewHeadSaver(l, orm, cfg.EVM(), cfg.EVM().HeadTracker()) headTracker = headtracker.NewHeadTracker(l, client, cfg.EVM(), cfg.EVM().HeadTracker(), headBroadcaster, headSaver, opts.MailMon) } else { diff --git a/core/internal/cltest/factories.go b/core/internal/cltest/factories.go index 7ebc031687e..1b933c79b94 100644 --- a/core/internal/cltest/factories.go +++ b/core/internal/cltest/factories.go @@ -314,7 +314,7 @@ func MustGenerateRandomKeyState(_ testing.TB) ethkey.State { func MustInsertHead(t *testing.T, db *sqlx.DB, cfg pg.QConfig, number int64) evmtypes.Head { h := evmtypes.NewHead(big.NewInt(number), evmutils.NewHash(), evmutils.NewHash(), 0, ubig.New(&FixtureChainID)) - horm := headtracker.NewORM(FixtureChainID, db) // TODO: Remove, logger.TestLogger(t), cfg, ) + horm := headtracker.NewORM(FixtureChainID, db) err := horm.IdempotentInsertHead(testutils.Context(t), &h) require.NoError(t, err) From 438fd43a2b7623376e267cb99a227dd3b1705832 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 25 Jan 2024 13:10:22 -0500 Subject: [PATCH 03/65] Remove unused loggers --- core/chains/evm/headtracker/head_tracker_test.go | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/core/chains/evm/headtracker/head_tracker_test.go b/core/chains/evm/headtracker/head_tracker_test.go index e2f64cba785..01473cd8d21 100644 --- a/core/chains/evm/headtracker/head_tracker_test.go +++ b/core/chains/evm/headtracker/head_tracker_test.go @@ -52,8 +52,6 @@ func TestHeadTracker_New(t *testing.T) { t.Parallel() db := pgtest.NewSqlxDB(t) - //logger := logger.Test(t) - //config := 
configtest.NewGeneralConfig(t, nil) ethClient := evmtest.NewEthClientMockWithDefaultChain(t) ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(cltest.Head(0), nil) @@ -76,7 +74,6 @@ func TestHeadTracker_Save_InsertsAndTrimsTable(t *testing.T) { t.Parallel() db := pgtest.NewSqlxDB(t) - //logger := logger.Test(t) config := cltest.NewTestChainScopedConfig(t) ethClient := evmtest.NewEthClientMockWithDefaultChain(t) @@ -121,7 +118,6 @@ func TestHeadTracker_Get(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { db := pgtest.NewSqlxDB(t) - //logger := logger.Test(t) config := cltest.NewTestChainScopedConfig(t) orm := headtracker.NewORM(cltest.FixtureChainID, db) @@ -168,7 +164,6 @@ func TestHeadTracker_Start_NewHeads(t *testing.T) { t.Parallel() db := pgtest.NewSqlxDB(t) - //logger := logger.Test(t) config := cltest.NewTestChainScopedConfig(t) orm := headtracker.NewORM(cltest.FixtureChainID, db) @@ -193,7 +188,6 @@ func TestHeadTracker_Start_CancelContext(t *testing.T) { t.Parallel() db := pgtest.NewSqlxDB(t) - //logger := logger.Test(t) config := cltest.NewTestChainScopedConfig(t) orm := headtracker.NewORM(cltest.FixtureChainID, db) ethClient := evmtest.NewEthClientMockWithDefaultChain(t) @@ -233,7 +227,6 @@ func TestHeadTracker_CallsHeadTrackableCallbacks(t *testing.T) { g := gomega.NewWithT(t) db := pgtest.NewSqlxDB(t) - //logger := logger.Test(t) config := cltest.NewTestChainScopedConfig(t) orm := headtracker.NewORM(cltest.FixtureChainID, db) @@ -861,7 +854,6 @@ func TestHeadTracker_Backfill(t *testing.T) { t.Run("does not backfill if chain length is already greater than or equal to depth", func(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) - //logger := logger.Test(t) orm := headtracker.NewORM(cltest.FixtureChainID, db) for i := range heads { require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) @@ -882,7 +874,6 @@ func TestHeadTracker_Backfill(t 
*testing.T) { t.Run("only backfills to height 0 if chain length would otherwise cause it to try and fetch a negative head", func(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) - //logger := logger.Test(t) orm := headtracker.NewORM(cltest.FixtureChainID, db) ethClient := evmtest.NewEthClientMock(t) @@ -907,7 +898,6 @@ func TestHeadTracker_Backfill(t *testing.T) { t.Run("abandons backfill and returns error if the eth node returns not found", func(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) - //logger := logger.Test(t) orm := headtracker.NewORM(cltest.FixtureChainID, db) for i := range heads { require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) @@ -938,7 +928,6 @@ func TestHeadTracker_Backfill(t *testing.T) { t.Run("abandons backfill and returns error if the context time budget is exceeded", func(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) - //logger := logger.Test(t) orm := headtracker.NewORM(cltest.FixtureChainID, db) for i := range heads { require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) From f774cb9e1e84101fcdc89d54caba31cd9da53af3 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 25 Jan 2024 13:12:18 -0500 Subject: [PATCH 04/65] Remove comments --- core/chains/evm/headtracker/head_saver_test.go | 1 - core/chains/evm/headtracker/head_tracker_test.go | 12 ++---------- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/core/chains/evm/headtracker/head_saver_test.go b/core/chains/evm/headtracker/head_saver_test.go index f503aab89cb..27caca42b6c 100644 --- a/core/chains/evm/headtracker/head_saver_test.go +++ b/core/chains/evm/headtracker/head_saver_test.go @@ -43,7 +43,6 @@ func (c *config) BlockEmissionIdleWarningThreshold() time.Duration { func configureSaver(t *testing.T) (httypes.HeadSaver, headtracker.ORM) { db := pgtest.NewSqlxDB(t) lggr := logger.Test(t) - //cfg := 
configtest.NewGeneralConfig(t, nil) htCfg := &config{finalityDepth: uint32(1)} orm := headtracker.NewORM(cltest.FixtureChainID, db) saver := headtracker.NewHeadSaver(lggr, orm, htCfg, &headTrackerConfig{historyDepth: 6}) diff --git a/core/chains/evm/headtracker/head_tracker_test.go b/core/chains/evm/headtracker/head_tracker_test.go index 01473cd8d21..abe5df5d495 100644 --- a/core/chains/evm/headtracker/head_tracker_test.go +++ b/core/chains/evm/headtracker/head_tracker_test.go @@ -300,7 +300,6 @@ func TestHeadTracker_ResubscribeOnSubscriptionError(t *testing.T) { g := gomega.NewWithT(t) db := pgtest.NewSqlxDB(t) - //logger := logger.Test(t) config := cltest.NewTestChainScopedConfig(t) orm := headtracker.NewORM(cltest.FixtureChainID, db) @@ -347,7 +346,6 @@ func TestHeadTracker_Start_LoadsLatestChain(t *testing.T) { t.Parallel() db := pgtest.NewSqlxDB(t) - //logger := logger.Test(t) config := cltest.NewTestChainScopedConfig(t) ethClient := evmtest.NewEthClientMockWithDefaultChain(t) @@ -381,7 +379,7 @@ func TestHeadTracker_Start_LoadsLatestChain(t *testing.T) { func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, ) - orm := headtracker.NewORM(cltest.FixtureChainID, db) //TODO;, logger, config.Database(), ) + orm := headtracker.NewORM(cltest.FixtureChainID, db) trackable := &cltest.MockHeadTrackable{} ht := createHeadTrackerWithChecker(t, ethClient, config.EVM(), config.EVM().HeadTracker(), orm, trackable) @@ -412,7 +410,6 @@ func TestHeadTracker_SwitchesToLongestChainWithHeadSamplingEnabled(t *testing.T) t.Parallel() db := pgtest.NewSqlxDB(t) - //logger := logger.Test(t) config := configtest.NewGeneralConfig(t, func(c *chainlink.Config, s *chainlink.Secrets) { c.EVM[0].FinalityDepth = ptr[uint32](50) @@ -540,7 +537,6 @@ func TestHeadTracker_SwitchesToLongestChainWithHeadSamplingDisabled(t *testing.T t.Parallel() db := pgtest.NewSqlxDB(t) - //logger := logger.Test(t) config := configtest.NewGeneralConfig(t, func(c *chainlink.Config, s 
*chainlink.Secrets) { c.EVM[0].FinalityDepth = ptr[uint32](50) @@ -768,7 +764,6 @@ func TestHeadTracker_Backfill(t *testing.T) { t.Run("does nothing if all the heads are in database", func(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) - //logger := logger.Test(t) orm := headtracker.NewORM(cltest.FixtureChainID, db) for i := range heads { require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) @@ -785,8 +780,7 @@ func TestHeadTracker_Backfill(t *testing.T) { t.Run("fetches a missing head", func(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) - //logger := logger.Test(t) - orm := headtracker.NewORM(cltest.FixtureChainID, db) // TODO, logger, cfg.Database(), ) + orm := headtracker.NewORM(cltest.FixtureChainID, db) for i := range heads { require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) } @@ -821,7 +815,6 @@ func TestHeadTracker_Backfill(t *testing.T) { t.Run("fetches only heads that are missing", func(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) - //logger := logger.Test(t) orm := headtracker.NewORM(cltest.FixtureChainID, db) for i := range heads { require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) @@ -956,7 +949,6 @@ func TestHeadTracker_Backfill(t *testing.T) { t.Run("abandons backfill and returns error when fetching a block by hash fails, indicating a reorg", func(t *testing.T) { db := pgtest.NewSqlxDB(t) cfg := configtest.NewGeneralConfig(t, nil) - //logger := logger.Test(t) orm := headtracker.NewORM(cltest.FixtureChainID, db) ethClient := evmtest.NewEthClientMock(t) ethClient.On("ConfiguredChainID", mock.Anything).Return(evmtest.MustGetDefaultChainID(t, cfg.EVMConfigs()), nil) From a0c67a362f8dd788a6b6b3722438f23bf2d12d3b Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 25 Jan 2024 14:06:03 -0500 Subject: [PATCH 05/65] Add timeout --- 
core/chains/evm/headtracker/orm.go | 14 ++++++++++++++ core/services/vrf/delegate_test.go | 2 +- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/core/chains/evm/headtracker/orm.go b/core/chains/evm/headtracker/orm.go index 5c73919e5ac..dcd949b3425 100644 --- a/core/chains/evm/headtracker/orm.go +++ b/core/chains/evm/headtracker/orm.go @@ -4,6 +4,7 @@ import ( "context" "database/sql" "math/big" + "time" "github.com/ethereum/go-ethereum/common" "github.com/pkg/errors" @@ -29,6 +30,9 @@ type ORM interface { var _ ORM = &orm{} +// TODO: Set a reasonable timeout +const defaultTimeout = 100 * time.Millisecond + type orm struct { chainID ubig.Big db sqlutil.Queryer @@ -49,12 +53,16 @@ func (orm *orm) IdempotentInsertHead(ctx context.Context, head *evmtypes.Head) e $1, $2, $3, $4, $5, $6, $7, $8) ON CONFLICT (evm_chain_id, hash) DO NOTHING` + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() _, err := orm.db.ExecContext(ctx, query, head.Hash, head.Number, head.ParentHash, head.CreatedAt, head.Timestamp, head.L1BlockNumber, orm.chainID, head.BaseFeePerGas) return errors.Wrap(err, "IdempotentInsertHead failed to insert head") } func (orm *orm) TrimOldHeads(ctx context.Context, n uint) (err error) { + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() _, err = orm.db.ExecContext(ctx, ` DELETE FROM evm.heads WHERE evm_chain_id = $1 AND number < ( @@ -72,6 +80,8 @@ func (orm *orm) TrimOldHeads(ctx context.Context, n uint) (err error) { func (orm *orm) LatestHead(ctx context.Context) (head *evmtypes.Head, err error) { head = new(evmtypes.Head) + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() if err = orm.db.GetContext(ctx, head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC, created_at DESC, id DESC LIMIT 1`, orm.chainID); err != nil { return nil, err } @@ -83,6 +93,8 @@ func (orm *orm) LatestHead(ctx context.Context) (head *evmtypes.Head, err error) } func (orm *orm) 
LatestHeads(ctx context.Context, limit uint) (heads []*evmtypes.Head, err error) { + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() err = orm.db.SelectContext(ctx, &heads, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC, created_at DESC, id DESC LIMIT $2`, orm.chainID, limit) err = errors.Wrap(err, "LatestHeads failed") return @@ -90,6 +102,8 @@ func (orm *orm) LatestHeads(ctx context.Context, limit uint) (heads []*evmtypes. func (orm *orm) HeadByHash(ctx context.Context, hash common.Hash) (head *evmtypes.Head, err error) { head = new(evmtypes.Head) + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() err = orm.db.GetContext(ctx, head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 AND hash = $2`, orm.chainID, hash) if errors.Is(err, sql.ErrNoRows) { return nil, nil diff --git a/core/services/vrf/delegate_test.go b/core/services/vrf/delegate_test.go index 8ad88d7b73b..66a3dd71ab3 100644 --- a/core/services/vrf/delegate_test.go +++ b/core/services/vrf/delegate_test.go @@ -83,7 +83,7 @@ func buildVrfUni(t *testing.T, db *sqlx.DB, cfg chainlink.GeneralConfig) vrfUniv ks := keystore.NewInMemory(db, utils.FastScryptParams, lggr, cfg.Database()) _, dbConfig, evmConfig := txmgr.MakeTestConfigs(t) txm, err := txmgr.NewTxm(db, evmConfig, evmConfig.GasEstimator(), evmConfig.Transactions(), dbConfig, dbConfig.Listener(), ec, logger.TestLogger(t), nil, ks.Eth(), nil) - orm := headtracker.NewORM(db, lggr, cfg.Database(), *testutils.FixtureChainID) + orm := headtracker.NewORM(*testutils.FixtureChainID, db) require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), cltest.Head(51))) jrm := job.NewORM(db, prm, btORM, ks, lggr, cfg.Database()) t.Cleanup(func() { assert.NoError(t, jrm.Close()) }) From 3f9ee21b8a003c5e52ce3cd01561aec9fd6bb282 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Fri, 9 Feb 2024 17:06:39 -0500 Subject: [PATCH 06/65] Refactor log_poller ORM --- 
.../evm/forwarders/forwarder_manager.go | 1 - core/chains/evm/logpoller/disabled.go | 42 +- core/chains/evm/logpoller/helper_test.go | 11 +- core/chains/evm/logpoller/log_poller.go | 149 ++++--- .../evm/logpoller/log_poller_internal_test.go | 18 +- core/chains/evm/logpoller/log_poller_test.go | 137 +++---- core/chains/evm/logpoller/observability.go | 115 +++--- .../evm/logpoller/observability_test.go | 42 +- core/chains/evm/logpoller/orm.go | 368 +++++++++++------- core/chains/evm/logpoller/orm_test.go | 354 +++++++++-------- core/chains/legacyevm/chain.go | 2 +- core/services/blockhashstore/coordinators.go | 25 +- core/services/blockhashstore/delegate.go | 2 +- core/services/ocr2/delegate.go | 2 +- .../evmregistry/v20/log_provider.go | 9 +- .../ocr2keeper/evmregistry/v20/registry.go | 4 +- .../evmregistry/v21/block_subscriber.go | 3 +- .../evmregistry/v21/logprovider/block_time.go | 3 +- .../evmregistry/v21/logprovider/provider.go | 7 +- .../v21/logprovider/provider_life_cycle.go | 3 +- .../evmregistry/v21/logprovider/recoverer.go | 13 +- .../ocr2keeper/evmregistry/v21/registry.go | 8 +- .../v21/transmit/event_provider.go | 4 +- .../evmregistry/v21/upkeepstate/scanner.go | 3 +- .../ocr2vrf/coordinator/coordinator.go | 13 +- core/services/relay/evm/config_poller.go | 7 +- .../relay/evm/contract_transmitter.go | 3 +- .../relay/evm/functions/config_poller.go | 7 +- .../evm/functions/contract_transmitter.go | 3 +- .../relay/evm/functions/logpoller_wrapper.go | 2 +- .../relay/evm/mercury/config_poller.go | 7 +- .../vrf/v2/listener_v2_log_listener.go | 9 +- .../universal/log_poller/helpers.go | 16 +- 33 files changed, 712 insertions(+), 680 deletions(-) diff --git a/core/chains/evm/forwarders/forwarder_manager.go b/core/chains/evm/forwarders/forwarder_manager.go index cabedf79aee..53f8ed86356 100644 --- a/core/chains/evm/forwarders/forwarder_manager.go +++ b/core/chains/evm/forwarders/forwarder_manager.go @@ -255,7 +255,6 @@ func (f *FwdMgr) runLoop() { 
[]common.Hash{authChangedTopic}, addrs, evmlogpoller.Confirmations(f.cfg.FinalityDepth()), - pg.WithParentCtx(f.ctx), ) if err != nil { f.logger.Errorw("Failed to retrieve latest log round", "err", err) diff --git a/core/chains/evm/logpoller/disabled.go b/core/chains/evm/logpoller/disabled.go index 05d591042f4..15d86bf23d2 100644 --- a/core/chains/evm/logpoller/disabled.go +++ b/core/chains/evm/logpoller/disabled.go @@ -6,8 +6,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/pkg/errors" - - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) var ( @@ -33,80 +31,80 @@ func (disabled) Replay(ctx context.Context, fromBlock int64) error { return ErrD func (disabled) ReplayAsync(fromBlock int64) {} -func (disabled) RegisterFilter(filter Filter, qopts ...pg.QOpt) error { return ErrDisabled } +func (disabled) RegisterFilter(filter Filter) error { return ErrDisabled } -func (disabled) UnregisterFilter(name string, qopts ...pg.QOpt) error { return ErrDisabled } +func (disabled) UnregisterFilter(name string) error { return ErrDisabled } func (disabled) HasFilter(name string) bool { return false } -func (disabled) LatestBlock(qopts ...pg.QOpt) (LogPollerBlock, error) { +func (disabled) LatestBlock(ctx context.Context) (LogPollerBlock, error) { return LogPollerBlock{}, ErrDisabled } -func (disabled) GetBlocksRange(ctx context.Context, numbers []uint64, qopts ...pg.QOpt) ([]LogPollerBlock, error) { +func (disabled) GetBlocksRange(ctx context.Context, numbers []uint64) ([]LogPollerBlock, error) { return nil, ErrDisabled } -func (disabled) Logs(start, end int64, eventSig common.Hash, address common.Address, qopts ...pg.QOpt) ([]Log, error) { +func (disabled) Logs(start, end int64, eventSig common.Hash, address common.Address) ([]Log, error) { return nil, ErrDisabled } -func (disabled) LogsWithSigs(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]Log, error) { +func (disabled) LogsWithSigs(start, end int64, 
eventSigs []common.Hash, address common.Address) ([]Log, error) { return nil, ErrDisabled } -func (disabled) LatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs Confirmations, qopts ...pg.QOpt) (*Log, error) { +func (disabled) LatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs Confirmations) (*Log, error) { return nil, ErrDisabled } -func (disabled) LatestLogEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (disabled) LatestLogEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) ([]Log, error) { return nil, ErrDisabled } -func (disabled) IndexedLogs(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (disabled) IndexedLogs(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs Confirmations) ([]Log, error) { return nil, ErrDisabled } -func (disabled) IndexedLogsByBlockRange(start, end int64, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, qopts ...pg.QOpt) ([]Log, error) { +func (disabled) IndexedLogsByBlockRange(start, end int64, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash) ([]Log, error) { return nil, ErrDisabled } -func (d disabled) IndexedLogsByTxHash(eventSig common.Hash, address common.Address, txHash common.Hash, qopts ...pg.QOpt) ([]Log, error) { +func (d disabled) IndexedLogsByTxHash(eventSig common.Hash, address common.Address, txHash common.Hash) ([]Log, error) { return nil, ErrDisabled } -func (disabled) IndexedLogsTopicGreaterThan(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (disabled) 
IndexedLogsTopicGreaterThan(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, confs Confirmations) ([]Log, error) { return nil, ErrDisabled } -func (disabled) IndexedLogsTopicRange(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, topicValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (disabled) IndexedLogsTopicRange(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, topicValueMax common.Hash, confs Confirmations) ([]Log, error) { return nil, ErrDisabled } -func (disabled) LogsDataWordRange(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (disabled) LogsDataWordRange(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations) ([]Log, error) { return nil, ErrDisabled } -func (disabled) LogsDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (disabled) LogsDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, confs Confirmations) ([]Log, error) { return nil, ErrDisabled } -func (d disabled) IndexedLogsWithSigsExcluding(address common.Address, eventSigA, eventSigB common.Hash, topicIndex int, fromBlock, toBlock int64, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (d disabled) IndexedLogsWithSigsExcluding(address common.Address, eventSigA, eventSigB common.Hash, topicIndex int, fromBlock, toBlock int64, confs Confirmations) ([]Log, error) { return nil, ErrDisabled } -func (d disabled) LogsCreatedAfter(eventSig common.Hash, address common.Address, time time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (d disabled) LogsCreatedAfter(eventSig 
common.Hash, address common.Address, time time.Time, confs Confirmations) ([]Log, error) { return nil, ErrDisabled } -func (d disabled) IndexedLogsCreatedAfter(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (d disabled) IndexedLogsCreatedAfter(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations) ([]Log, error) { return nil, ErrDisabled } -func (d disabled) LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations, qopts ...pg.QOpt) (int64, error) { +func (d disabled) LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) (int64, error) { return 0, ErrDisabled } -func (d disabled) LogsDataWordBetween(eventSig common.Hash, address common.Address, wordIndexMin, wordIndexMax int, wordValue common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (d disabled) LogsDataWordBetween(eventSig common.Hash, address common.Address, wordIndexMin, wordIndexMax int, wordValue common.Hash, confs Confirmations) ([]Log, error) { return nil, ErrDisabled } diff --git a/core/chains/evm/logpoller/helper_test.go b/core/chains/evm/logpoller/helper_test.go index 9e48690a249..cd248cf5e70 100644 --- a/core/chains/evm/logpoller/helper_test.go +++ b/core/chains/evm/logpoller/helper_test.go @@ -25,7 +25,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_emitter" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) var ( @@ -51,8 +50,8 @@ func SetupTH(t testing.TB, useFinalityTag bool, finalityDepth, backfillBatchSize chainID2 := testutils.NewRandomEVMChainID() 
db := pgtest.NewSqlxDB(t) - o := logpoller.NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) - o2 := logpoller.NewORM(chainID2, db, lggr, pgtest.NewQConfig(true)) + o := logpoller.NewORM(chainID, db, lggr) + o2 := logpoller.NewORM(chainID2, db, lggr) owner := testutils.MustNewSimTransactor(t) ethDB := rawdb.NewMemoryDatabase() ec := backends.NewSimulatedBackendWithDatabase(ethDB, map[common.Address]core.GenesisAccount{ @@ -91,20 +90,20 @@ func SetupTH(t testing.TB, useFinalityTag bool, finalityDepth, backfillBatchSize func (th *TestHarness) PollAndSaveLogs(ctx context.Context, currentBlockNumber int64) int64 { th.LogPoller.PollAndSaveLogs(ctx, currentBlockNumber) - latest, _ := th.LogPoller.LatestBlock(pg.WithParentCtx(ctx)) + latest, _ := th.LogPoller.LatestBlock(ctx) return latest.BlockNumber + 1 } func (th *TestHarness) assertDontHave(t *testing.T, start, end int) { for i := start; i < end; i++ { - _, err := th.ORM.SelectBlockByNumber(int64(i)) + _, err := th.ORM.SelectBlockByNumber(testutils.Context(t), int64(i)) assert.True(t, errors.Is(err, sql.ErrNoRows)) } } func (th *TestHarness) assertHaveCanonical(t *testing.T, start, end int) { for i := start; i < end; i++ { - blk, err := th.ORM.SelectBlockByNumber(int64(i)) + blk, err := th.ORM.SelectBlockByNumber(testutils.Context(t), int64(i)) require.NoError(t, err, "block %v", i) chainBlk, err := th.Client.BlockByNumber(testutils.Context(t), big.NewInt(int64(i))) require.NoError(t, err) diff --git a/core/chains/evm/logpoller/log_poller.go b/core/chains/evm/logpoller/log_poller.go index 7006c1762ef..2c1c674fc29 100644 --- a/core/chains/evm/logpoller/log_poller.go +++ b/core/chains/evm/logpoller/log_poller.go @@ -28,7 +28,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) 
//go:generate mockery --quiet --name LogPoller --output ./mocks/ --case=underscore --structname LogPoller --filename log_poller.go @@ -36,31 +35,31 @@ type LogPoller interface { services.Service Replay(ctx context.Context, fromBlock int64) error ReplayAsync(fromBlock int64) - RegisterFilter(filter Filter, qopts ...pg.QOpt) error - UnregisterFilter(name string, qopts ...pg.QOpt) error + RegisterFilter(filter Filter) error + UnregisterFilter(name string) error HasFilter(name string) bool - LatestBlock(qopts ...pg.QOpt) (LogPollerBlock, error) - GetBlocksRange(ctx context.Context, numbers []uint64, qopts ...pg.QOpt) ([]LogPollerBlock, error) + LatestBlock(ctx context.Context) (LogPollerBlock, error) + GetBlocksRange(ctx context.Context, numbers []uint64) ([]LogPollerBlock, error) // General querying - Logs(start, end int64, eventSig common.Hash, address common.Address, qopts ...pg.QOpt) ([]Log, error) - LogsWithSigs(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]Log, error) - LogsCreatedAfter(eventSig common.Hash, address common.Address, time time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) - LatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs Confirmations, qopts ...pg.QOpt) (*Log, error) - LatestLogEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) - LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations, qopts ...pg.QOpt) (int64, error) + Logs(start, end int64, eventSig common.Hash, address common.Address) ([]Log, error) + LogsWithSigs(start, end int64, eventSigs []common.Hash, address common.Address) ([]Log, error) + LogsCreatedAfter(eventSig common.Hash, address common.Address, time time.Time, confs Confirmations) ([]Log, error) + LatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs 
Confirmations) (*Log, error) + LatestLogEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) ([]Log, error) + LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) (int64, error) // Content based querying - IndexedLogs(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) - IndexedLogsByBlockRange(start, end int64, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, qopts ...pg.QOpt) ([]Log, error) - IndexedLogsCreatedAfter(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) - IndexedLogsByTxHash(eventSig common.Hash, address common.Address, txHash common.Hash, qopts ...pg.QOpt) ([]Log, error) - IndexedLogsTopicGreaterThan(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) - IndexedLogsTopicRange(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, topicValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) - IndexedLogsWithSigsExcluding(address common.Address, eventSigA, eventSigB common.Hash, topicIndex int, fromBlock, toBlock int64, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) - LogsDataWordRange(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) - LogsDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) - LogsDataWordBetween(eventSig common.Hash, address common.Address, wordIndexMin, wordIndexMax int, wordValue common.Hash, confs 
Confirmations, qopts ...pg.QOpt) ([]Log, error) + IndexedLogs(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs Confirmations) ([]Log, error) + IndexedLogsByBlockRange(start, end int64, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash) ([]Log, error) + IndexedLogsCreatedAfter(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations) ([]Log, error) + IndexedLogsByTxHash(eventSig common.Hash, address common.Address, txHash common.Hash) ([]Log, error) + IndexedLogsTopicGreaterThan(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, confs Confirmations) ([]Log, error) + IndexedLogsTopicRange(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, topicValueMax common.Hash, confs Confirmations) ([]Log, error) + IndexedLogsWithSigsExcluding(address common.Address, eventSigA, eventSigB common.Hash, topicIndex int, fromBlock, toBlock int64, confs Confirmations) ([]Log, error) + LogsDataWordRange(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations) ([]Log, error) + LogsDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, confs Confirmations) ([]Log, error) + LogsDataWordBetween(eventSig common.Hash, address common.Address, wordIndexMin, wordIndexMax int, wordValue common.Hash, confs Confirmations) ([]Log, error) } type Confirmations int @@ -218,7 +217,7 @@ func (filter *Filter) Contains(other *Filter) bool { // which means that anonymous events are not supported and log.Topics >= 1 always (log.Topics[0] is the event signature). // The filter may be unregistered later by Filter.Name // Warnings/debug information is keyed by filter name. 
-func (lp *logPoller) RegisterFilter(filter Filter, qopts ...pg.QOpt) error { +func (lp *logPoller) RegisterFilter(filter Filter) error { if len(filter.Addresses) == 0 { return errors.Errorf("at least one address must be specified") } @@ -249,7 +248,7 @@ func (lp *logPoller) RegisterFilter(filter Filter, qopts ...pg.QOpt) error { lp.lggr.Warnw("Updating existing filter with more events or addresses", "name", filter.Name, "filter", filter) } - if err := lp.orm.InsertFilter(filter, qopts...); err != nil { + if err := lp.orm.InsertFilter(lp.ctx, filter); err != nil { return errors.Wrap(err, "error inserting filter") } lp.filters[filter.Name] = filter @@ -260,7 +259,7 @@ func (lp *logPoller) RegisterFilter(filter Filter, qopts ...pg.QOpt) error { // UnregisterFilter will remove the filter with the given name. // If the name does not exist, it will log an error but not return an error. // Warnings/debug information is keyed by filter name. -func (lp *logPoller) UnregisterFilter(name string, qopts ...pg.QOpt) error { +func (lp *logPoller) UnregisterFilter(name string) error { lp.filterMu.Lock() defer lp.filterMu.Unlock() @@ -270,7 +269,7 @@ func (lp *logPoller) UnregisterFilter(name string, qopts ...pg.QOpt) error { return nil } - if err := lp.orm.DeleteFilter(name, qopts...); err != nil { + if err := lp.orm.DeleteFilter(lp.ctx, name); err != nil { return errors.Wrap(err, "error deleting filter") } delete(lp.filters, name) @@ -414,7 +413,7 @@ func (lp *logPoller) HealthReport() map[string]error { } func (lp *logPoller) GetReplayFromBlock(ctx context.Context, requested int64) (int64, error) { - lastProcessed, err := lp.orm.SelectLatestBlock(pg.WithParentCtx(ctx)) + lastProcessed, err := lp.orm.SelectLatestBlock(lp.ctx) if err != nil { if !errors.Is(err, sql.ErrNoRows) { // Real DB error @@ -441,7 +440,7 @@ func (lp *logPoller) run() { loadFilters := func() error { lp.filterMu.Lock() defer lp.filterMu.Unlock() - filters, err := lp.orm.LoadFilters(pg.WithParentCtx(lp.ctx)) 
+ filters, err := lp.orm.LoadFilters(lp.ctx) if err != nil { return errors.Wrapf(err, "Failed to load initial filters from db, retrying") @@ -496,7 +495,7 @@ func (lp *logPoller) run() { // Always start from the latest block in the db. var start int64 - lastProcessed, err := lp.orm.SelectLatestBlock(pg.WithParentCtx(lp.ctx)) + lastProcessed, err := lp.orm.SelectLatestBlock(lp.ctx) if err != nil { if !errors.Is(err, sql.ErrNoRows) { // Assume transient db reading issue, retry forever. @@ -547,7 +546,7 @@ func (lp *logPoller) run() { } case <-logPruneTick: logPruneTick = time.After(utils.WithJitter(lp.pollPeriod * 2401)) // = 7^5 avoids common factors with 1000 - if err := lp.orm.DeleteExpiredLogs(pg.WithParentCtx(lp.ctx)); err != nil { + if err := lp.orm.DeleteExpiredLogs(lp.ctx); err != nil { lp.lggr.Error(err) } } @@ -556,7 +555,7 @@ func (lp *logPoller) run() { func (lp *logPoller) BackupPollAndSaveLogs(ctx context.Context, backupPollerBlockDelay int64) { if lp.backupPollerNextBlock == 0 { - lastProcessed, err := lp.orm.SelectLatestBlock(pg.WithParentCtx(ctx)) + lastProcessed, err := lp.orm.SelectLatestBlock(ctx) if err != nil { if errors.Is(err, sql.ErrNoRows) { lp.lggr.Warnw("Backup log poller ran before first successful log poller run, skipping") @@ -683,7 +682,7 @@ func (lp *logPoller) backfill(ctx context.Context, start, end int64) error { } lp.lggr.Debugw("Backfill found logs", "from", from, "to", to, "logs", len(gethLogs), "blocks", blocks) - err = lp.orm.InsertLogsWithBlock(convertLogs(gethLogs, blocks, lp.lggr, lp.ec.ConfiguredChainID()), blocks[len(blocks)-1], pg.WithParentCtx(ctx)) + err = lp.orm.InsertLogsWithBlock(ctx, convertLogs(gethLogs, blocks, lp.lggr, lp.ec.ConfiguredChainID()), blocks[len(blocks)-1]) if err != nil { lp.lggr.Warnw("Unable to insert logs, retrying", "err", err, "from", from, "to", to) return err @@ -721,7 +720,7 @@ func (lp *logPoller) getCurrentBlockMaybeHandleReorg(ctx context.Context, curren } // Does this currentBlock point 
to the same parent that we have saved? // If not, there was a reorg, so we need to rewind. - expectedParent, err1 := lp.orm.SelectBlockByNumber(currentBlockNumber-1, pg.WithParentCtx(ctx)) + expectedParent, err1 := lp.orm.SelectBlockByNumber(ctx, currentBlockNumber-1) if err1 != nil && !errors.Is(err1, sql.ErrNoRows) { // If err is not a 'no rows' error, assume transient db issue and retry lp.lggr.Warnw("Unable to read latestBlockNumber currentBlock saved", "err", err1, "currentBlockNumber", currentBlockNumber) @@ -752,7 +751,7 @@ func (lp *logPoller) getCurrentBlockMaybeHandleReorg(ctx context.Context, curren // the canonical set per read. Typically, if an application took action on a log // it would be saved elsewhere e.g. evm.txes, so it seems better to just support the fast reads. // Its also nicely analogous to reading from the chain itself. - err2 = lp.orm.DeleteLogsAndBlocksAfter(blockAfterLCA.Number, pg.WithParentCtx(ctx)) + err2 = lp.orm.DeleteLogsAndBlocksAfter(ctx, blockAfterLCA.Number) if err2 != nil { // If we error on db commit, we can't know if the tx went through or not. // We return an error here which will cause us to restart polling from lastBlockSaved + 1 @@ -839,6 +838,7 @@ func (lp *logPoller) PollAndSaveLogs(ctx context.Context, currentBlockNumber int lp.lggr.Debugw("Unfinalized log query", "logs", len(logs), "currentBlockNumber", currentBlockNumber, "blockHash", currentBlock.Hash, "timestamp", currentBlock.Timestamp.Unix()) block := NewLogPollerBlock(h, currentBlockNumber, currentBlock.Timestamp, latestFinalizedBlockNumber) err = lp.orm.InsertLogsWithBlock( + ctx, convertLogs(logs, []LogPollerBlock{block}, lp.lggr, lp.ec.ConfiguredChainID()), block, ) @@ -906,7 +906,7 @@ func (lp *logPoller) findBlockAfterLCA(ctx context.Context, current *evmtypes.He // If the parent block number becomes < the first finalized block our reorg is too deep. // This can happen only if finalityTag is not enabled and fixed finalityDepth is provided via config. 
for parent.Number >= latestFinalizedBlockNumber { - ourParentBlockHash, err := lp.orm.SelectBlockByNumber(parent.Number, pg.WithParentCtx(ctx)) + ourParentBlockHash, err := lp.orm.SelectBlockByNumber(ctx, parent.Number) if err != nil { return nil, err } @@ -929,7 +929,7 @@ func (lp *logPoller) findBlockAfterLCA(ctx context.Context, current *evmtypes.He // PruneOldBlocks removes blocks that are > lp.keepFinalizedBlocksDepth behind the latest finalized block. func (lp *logPoller) PruneOldBlocks(ctx context.Context) error { - latestBlock, err := lp.orm.SelectLatestBlock(pg.WithParentCtx(ctx)) + latestBlock, err := lp.orm.SelectLatestBlock(ctx) if err != nil { return err } @@ -943,65 +943,65 @@ func (lp *logPoller) PruneOldBlocks(ctx context.Context) error { } // 1-2-3-4-5(finalized)-6-7(latest), keepFinalizedBlocksDepth=3 // Remove <= 2 - return lp.orm.DeleteBlocksBefore(latestBlock.FinalizedBlockNumber-lp.keepFinalizedBlocksDepth, pg.WithParentCtx(ctx)) + return lp.orm.DeleteBlocksBefore(ctx, latestBlock.FinalizedBlockNumber-lp.keepFinalizedBlocksDepth) } // Logs returns logs matching topics and address (exactly) in the given block range, // which are canonical at time of query. -func (lp *logPoller) Logs(start, end int64, eventSig common.Hash, address common.Address, qopts ...pg.QOpt) ([]Log, error) { - return lp.orm.SelectLogs(start, end, address, eventSig, qopts...) +func (lp *logPoller) Logs(start, end int64, eventSig common.Hash, address common.Address) ([]Log, error) { + return lp.orm.SelectLogs(lp.ctx, start, end, address, eventSig) } -func (lp *logPoller) LogsWithSigs(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]Log, error) { - return lp.orm.SelectLogsWithSigs(start, end, address, eventSigs, qopts...) 
+func (lp *logPoller) LogsWithSigs(start, end int64, eventSigs []common.Hash, address common.Address) ([]Log, error) { + return lp.orm.SelectLogsWithSigs(lp.ctx, start, end, address, eventSigs) } -func (lp *logPoller) LogsCreatedAfter(eventSig common.Hash, address common.Address, after time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { - return lp.orm.SelectLogsCreatedAfter(address, eventSig, after, confs, qopts...) +func (lp *logPoller) LogsCreatedAfter(eventSig common.Hash, address common.Address, after time.Time, confs Confirmations) ([]Log, error) { + return lp.orm.SelectLogsCreatedAfter(lp.ctx, address, eventSig, after, confs) } // IndexedLogs finds all the logs that have a topic value in topicValues at index topicIndex. -func (lp *logPoller) IndexedLogs(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { - return lp.orm.SelectIndexedLogs(address, eventSig, topicIndex, topicValues, confs, qopts...) +func (lp *logPoller) IndexedLogs(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs Confirmations) ([]Log, error) { + return lp.orm.SelectIndexedLogs(lp.ctx, address, eventSig, topicIndex, topicValues, confs) } // IndexedLogsByBlockRange finds all the logs that have a topic value in topicValues at index topicIndex within the block range -func (lp *logPoller) IndexedLogsByBlockRange(start, end int64, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, qopts ...pg.QOpt) ([]Log, error) { - return lp.orm.SelectIndexedLogsByBlockRange(start, end, address, eventSig, topicIndex, topicValues, qopts...) 
+func (lp *logPoller) IndexedLogsByBlockRange(start, end int64, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash) ([]Log, error) { + return lp.orm.SelectIndexedLogsByBlockRange(lp.ctx, start, end, address, eventSig, topicIndex, topicValues) } -func (lp *logPoller) IndexedLogsCreatedAfter(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { - return lp.orm.SelectIndexedLogsCreatedAfter(address, eventSig, topicIndex, topicValues, after, confs, qopts...) +func (lp *logPoller) IndexedLogsCreatedAfter(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations) ([]Log, error) { + return lp.orm.SelectIndexedLogsCreatedAfter(lp.ctx, address, eventSig, topicIndex, topicValues, after, confs) } -func (lp *logPoller) IndexedLogsByTxHash(eventSig common.Hash, address common.Address, txHash common.Hash, qopts ...pg.QOpt) ([]Log, error) { - return lp.orm.SelectIndexedLogsByTxHash(address, eventSig, txHash, qopts...) +func (lp *logPoller) IndexedLogsByTxHash(eventSig common.Hash, address common.Address, txHash common.Hash) ([]Log, error) { + return lp.orm.SelectIndexedLogsByTxHash(lp.ctx, address, eventSig, txHash) } // LogsDataWordGreaterThan note index is 0 based. -func (lp *logPoller) LogsDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { - return lp.orm.SelectLogsDataWordGreaterThan(address, eventSig, wordIndex, wordValueMin, confs, qopts...) 
+func (lp *logPoller) LogsDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, confs Confirmations) ([]Log, error) { + return lp.orm.SelectLogsDataWordGreaterThan(lp.ctx, address, eventSig, wordIndex, wordValueMin, confs) } // LogsDataWordRange note index is 0 based. -func (lp *logPoller) LogsDataWordRange(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { - return lp.orm.SelectLogsDataWordRange(address, eventSig, wordIndex, wordValueMin, wordValueMax, confs, qopts...) +func (lp *logPoller) LogsDataWordRange(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations) ([]Log, error) { + return lp.orm.SelectLogsDataWordRange(lp.ctx, address, eventSig, wordIndex, wordValueMin, wordValueMax, confs) } // IndexedLogsTopicGreaterThan finds all the logs that have a topic value greater than topicValueMin at index topicIndex. // Only works for integer topics. -func (lp *logPoller) IndexedLogsTopicGreaterThan(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { - return lp.orm.SelectIndexedLogsTopicGreaterThan(address, eventSig, topicIndex, topicValueMin, confs, qopts...) 
+func (lp *logPoller) IndexedLogsTopicGreaterThan(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, confs Confirmations) ([]Log, error) { + return lp.orm.SelectIndexedLogsTopicGreaterThan(lp.ctx, address, eventSig, topicIndex, topicValueMin, confs) } -func (lp *logPoller) IndexedLogsTopicRange(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, topicValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { - return lp.orm.SelectIndexedLogsTopicRange(address, eventSig, topicIndex, topicValueMin, topicValueMax, confs, qopts...) +func (lp *logPoller) IndexedLogsTopicRange(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, topicValueMax common.Hash, confs Confirmations) ([]Log, error) { + return lp.orm.SelectIndexedLogsTopicRange(lp.ctx, address, eventSig, topicIndex, topicValueMin, topicValueMax, confs) } // LatestBlock returns the latest block the log poller is on. It tracks blocks to be able // to detect reorgs. -func (lp *logPoller) LatestBlock(qopts ...pg.QOpt) (LogPollerBlock, error) { - b, err := lp.orm.SelectLatestBlock(qopts...) +func (lp *logPoller) LatestBlock(ctx context.Context) (LogPollerBlock, error) { + b, err := lp.orm.SelectLatestBlock(ctx) if err != nil { return LogPollerBlock{}, err } @@ -1009,21 +1009,21 @@ func (lp *logPoller) LatestBlock(qopts ...pg.QOpt) (LogPollerBlock, error) { return *b, nil } -func (lp *logPoller) BlockByNumber(n int64, qopts ...pg.QOpt) (*LogPollerBlock, error) { - return lp.orm.SelectBlockByNumber(n, qopts...) +func (lp *logPoller) BlockByNumber(n int64) (*LogPollerBlock, error) { + return lp.orm.SelectBlockByNumber(lp.ctx, n) } // LatestLogByEventSigWithConfs finds the latest log that has confs number of blocks on top of the log. 
-func (lp *logPoller) LatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs Confirmations, qopts ...pg.QOpt) (*Log, error) { - return lp.orm.SelectLatestLogByEventSigWithConfs(eventSig, address, confs, qopts...) +func (lp *logPoller) LatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs Confirmations) (*Log, error) { + return lp.orm.SelectLatestLogByEventSigWithConfs(lp.ctx, eventSig, address, confs) } -func (lp *logPoller) LatestLogEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { - return lp.orm.SelectLatestLogEventSigsAddrsWithConfs(fromBlock, addresses, eventSigs, confs, qopts...) +func (lp *logPoller) LatestLogEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) ([]Log, error) { + return lp.orm.SelectLatestLogEventSigsAddrsWithConfs(lp.ctx, fromBlock, addresses, eventSigs, confs) } -func (lp *logPoller) LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations, qopts ...pg.QOpt) (int64, error) { - return lp.orm.SelectLatestBlockByEventSigsAddrsWithConfs(fromBlock, eventSigs, addresses, confs, qopts...) +func (lp *logPoller) LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) (int64, error) { + return lp.orm.SelectLatestBlockByEventSigsAddrsWithConfs(lp.ctx, fromBlock, eventSigs, addresses, confs) } // LogsDataWordBetween retrieves a slice of Log records that match specific criteria. @@ -1035,13 +1035,13 @@ func (lp *logPoller) LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, event // // This function is particularly useful for filtering logs by data word values and their positions within the event data. // It returns an empty slice if no logs match the provided criteria. 
-func (lp *logPoller) LogsDataWordBetween(eventSig common.Hash, address common.Address, wordIndexMin, wordIndexMax int, wordValue common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { - return lp.orm.SelectLogsDataWordBetween(address, eventSig, wordIndexMin, wordIndexMax, wordValue, confs, qopts...) +func (lp *logPoller) LogsDataWordBetween(eventSig common.Hash, address common.Address, wordIndexMin, wordIndexMax int, wordValue common.Hash, confs Confirmations) ([]Log, error) { + return lp.orm.SelectLogsDataWordBetween(lp.ctx, address, eventSig, wordIndexMin, wordIndexMax, wordValue, confs) } // GetBlocksRange tries to get the specified block numbers from the log pollers // blocks table. It falls back to the RPC for any unfulfilled requested blocks. -func (lp *logPoller) GetBlocksRange(ctx context.Context, numbers []uint64, qopts ...pg.QOpt) ([]LogPollerBlock, error) { +func (lp *logPoller) GetBlocksRange(ctx context.Context, numbers []uint64) ([]LogPollerBlock, error) { var blocks []LogPollerBlock // Do nothing if no blocks are requested. @@ -1057,10 +1057,9 @@ func (lp *logPoller) GetBlocksRange(ctx context.Context, numbers []uint64, qopts // Retrieve all blocks within this range from the log poller. blocksFound := make(map[uint64]LogPollerBlock) - qopts = append(qopts, pg.WithParentCtx(ctx)) minRequestedBlock := int64(mathutil.Min(numbers[0], numbers[1:]...)) maxRequestedBlock := int64(mathutil.Max(numbers[0], numbers[1:]...)) - lpBlocks, err := lp.orm.GetBlocksRange(minRequestedBlock, maxRequestedBlock, qopts...) + lpBlocks, err := lp.orm.GetBlocksRange(lp.ctx, minRequestedBlock, maxRequestedBlock) if err != nil { lp.lggr.Warnw("Error while retrieving blocks from log pollers blocks table. 
Falling back to RPC...", "requestedBlocks", numbers, "err", err) } else { @@ -1185,8 +1184,8 @@ func (lp *logPoller) batchFetchBlocks(ctx context.Context, blocksRequested []str // // For example, query to retrieve unfulfilled requests by querying request log events without matching fulfillment log events. // The order of events is not significant. Both logs must be inside the block range and have the minimum number of confirmations -func (lp *logPoller) IndexedLogsWithSigsExcluding(address common.Address, eventSigA, eventSigB common.Hash, topicIndex int, fromBlock, toBlock int64, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { - return lp.orm.SelectIndexedLogsWithSigsExcluding(eventSigA, eventSigB, topicIndex, address, fromBlock, toBlock, confs, qopts...) +func (lp *logPoller) IndexedLogsWithSigsExcluding(address common.Address, eventSigA, eventSigB common.Hash, topicIndex int, fromBlock, toBlock int64, confs Confirmations) ([]Log, error) { + return lp.orm.SelectIndexedLogsWithSigsExcluding(lp.ctx, eventSigA, eventSigB, topicIndex, address, fromBlock, toBlock, confs) } func EvmWord(i uint64) common.Hash { diff --git a/core/chains/evm/logpoller/log_poller_internal_test.go b/core/chains/evm/logpoller/log_poller_internal_test.go index 863ab0fddea..df6c41a9702 100644 --- a/core/chains/evm/logpoller/log_poller_internal_test.go +++ b/core/chains/evm/logpoller/log_poller_internal_test.go @@ -30,7 +30,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_emitter" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) var ( @@ -39,7 +38,8 @@ var ( // Validate that filters stored in log_filters_table match the filters stored in memory func validateFiltersTable(t *testing.T, lp *logPoller, orm *DbORM) { - filters, err := orm.LoadFilters() + ctx := testutils.Context(t) + filters, err := 
orm.LoadFilters(ctx) require.NoError(t, err) require.Equal(t, len(filters), len(lp.filters)) for name, dbFilter := range filters { @@ -61,7 +61,7 @@ func TestLogPoller_RegisterFilter(t *testing.T) { chainID := testutils.NewRandomEVMChainID() db := pgtest.NewSqlxDB(t) - orm := NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) + orm := NewORM(chainID, db, lggr) // Set up a test chain with a log emitting contract deployed. lp := NewLogPoller(orm, nil, lggr, time.Hour, false, 1, 1, 2, 1000) @@ -126,7 +126,7 @@ func TestLogPoller_RegisterFilter(t *testing.T) { err = lp.UnregisterFilter("Emitter Log 1") require.NoError(t, err) assert.Len(t, lp.filters, 0) - filters, err := lp.orm.LoadFilters() + filters, err := lp.orm.LoadFilters(lp.ctx) require.NoError(t, err) assert.Len(t, filters, 0) @@ -197,7 +197,7 @@ func TestLogPoller_BackupPollerStartup(t *testing.T) { lggr, observedLogs := logger.TestObserved(t, zapcore.WarnLevel) chainID := testutils.FixtureChainID db := pgtest.NewSqlxDB(t) - orm := NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) + orm := NewORM(chainID, db, lggr) head := evmtypes.Head{Number: 3} events := []common.Hash{EmitterABI.Events["Log1"].ID} @@ -225,7 +225,7 @@ func TestLogPoller_BackupPollerStartup(t *testing.T) { lp.PollAndSaveLogs(ctx, 3) - lastProcessed, err := lp.orm.SelectLatestBlock(pg.WithParentCtx(ctx)) + lastProcessed, err := lp.orm.SelectLatestBlock(ctx) require.NoError(t, err) require.Equal(t, int64(3), lastProcessed.BlockNumber) @@ -240,7 +240,7 @@ func TestLogPoller_Replay(t *testing.T) { lggr, observedLogs := logger.TestObserved(t, zapcore.ErrorLevel) chainID := testutils.FixtureChainID db := pgtest.NewSqlxDB(t) - orm := NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) + orm := NewORM(chainID, db, lggr) head := evmtypes.Head{Number: 4} events := []common.Hash{EmitterABI.Events["Log1"].ID} @@ -262,7 +262,7 @@ func TestLogPoller_Replay(t *testing.T) { // process 1 log in block 3 lp.PollAndSaveLogs(testutils.Context(t), 4) - latest, 
err := lp.LatestBlock() + latest, err := lp.LatestBlock(lp.ctx) require.NoError(t, err) require.Equal(t, int64(4), latest.BlockNumber) @@ -438,7 +438,7 @@ func Test_latestBlockAndFinalityDepth(t *testing.T) { lggr := logger.Test(t) chainID := testutils.FixtureChainID db := pgtest.NewSqlxDB(t) - orm := NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) + orm := NewORM(chainID, db, lggr) t.Run("pick latest block from chain and use finality from config with finality disabled", func(t *testing.T) { head := evmtypes.Head{Number: 4} diff --git a/core/chains/evm/logpoller/log_poller_test.go b/core/chains/evm/logpoller/log_poller_test.go index 2508e676e6c..aa7c966f41a 100644 --- a/core/chains/evm/logpoller/log_poller_test.go +++ b/core/chains/evm/logpoller/log_poller_test.go @@ -38,7 +38,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/evmtest" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) func logRuntime(t testing.TB, start time.Time) { @@ -50,6 +49,7 @@ func populateDatabase(t testing.TB, o *logpoller.DbORM, chainID *big.Int) (commo address1 := common.HexToAddress("0x2ab9a2Dc53736b361b72d900CdF9F78F9406fbbb") address2 := common.HexToAddress("0x6E225058950f237371261C985Db6bDe26df2200E") startDate := time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC) + ctx := context.Background() for j := 1; j < 100; j++ { var logs []logpoller.Log @@ -77,8 +77,8 @@ func populateDatabase(t testing.TB, o *logpoller.DbORM, chainID *big.Int) (commo }) } - require.NoError(t, o.InsertLogs(logs)) - require.NoError(t, o.InsertBlock(utils.RandomHash(), int64((j+1)*1000-1), startDate.Add(time.Duration(j*1000)*time.Hour), 0)) + require.NoError(t, o.InsertLogs(ctx, logs)) + require.NoError(t, o.InsertBlock(ctx, utils.RandomHash(), int64((j+1)*1000-1), startDate.Add(time.Duration(j*1000)*time.Hour), 0)) } return event1, 
address1, address2 @@ -86,8 +86,9 @@ func populateDatabase(t testing.TB, o *logpoller.DbORM, chainID *big.Int) (commo func BenchmarkSelectLogsCreatedAfter(b *testing.B) { chainId := big.NewInt(137) + ctx := context.Background() _, db := heavyweight.FullTestDBV2(b, nil) - o := logpoller.NewORM(chainId, db, logger.Test(b), pgtest.NewQConfig(false)) + o := logpoller.NewORM(chainId, db, logger.Test(b)) event, address, _ := populateDatabase(b, o, chainId) // Setting searchDate to pick around 5k logs @@ -96,7 +97,7 @@ func BenchmarkSelectLogsCreatedAfter(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - logs, err := o.SelectLogsCreatedAfter(address, event, searchDate, 500) + logs, err := o.SelectLogsCreatedAfter(ctx, address, event, searchDate, 500) require.NotZero(b, len(logs)) require.NoError(b, err) } @@ -105,27 +106,28 @@ func BenchmarkSelectLogsCreatedAfter(b *testing.B) { func TestPopulateLoadedDB(t *testing.T) { t.Skip("Only for local load testing and query analysis") _, db := heavyweight.FullTestDBV2(t, nil) + ctx := context.Background() chainID := big.NewInt(137) - o := logpoller.NewORM(big.NewInt(137), db, logger.Test(t), pgtest.NewQConfig(true)) + o := logpoller.NewORM(big.NewInt(137), db, logger.Test(t)) event1, address1, address2 := populateDatabase(t, o, chainID) func() { defer logRuntime(t, time.Now()) - _, err1 := o.SelectLogs(750000, 800000, address1, event1) + _, err1 := o.SelectLogs(ctx, 750000, 800000, address1, event1) require.NoError(t, err1) }() func() { defer logRuntime(t, time.Now()) - _, err1 := o.SelectLatestLogEventSigsAddrsWithConfs(0, []common.Address{address1}, []common.Hash{event1}, 0) + _, err1 := o.SelectLatestLogEventSigsAddrsWithConfs(ctx, 0, []common.Address{address1}, []common.Hash{event1}, 0) require.NoError(t, err1) }() // Confirm all the logs. 
- require.NoError(t, o.InsertBlock(common.HexToHash("0x10"), 1000000, time.Now(), 0)) + require.NoError(t, o.InsertBlock(ctx, common.HexToHash("0x10"), 1000000, time.Now(), 0)) func() { defer logRuntime(t, time.Now()) - lgs, err1 := o.SelectLogsDataWordRange(address1, event1, 0, logpoller.EvmWord(500000), logpoller.EvmWord(500020), 0) + lgs, err1 := o.SelectLogsDataWordRange(ctx, address1, event1, 0, logpoller.EvmWord(500000), logpoller.EvmWord(500020), 0) require.NoError(t, err1) // 10 since every other log is for address1 assert.Equal(t, 10, len(lgs)) @@ -133,14 +135,14 @@ func TestPopulateLoadedDB(t *testing.T) { func() { defer logRuntime(t, time.Now()) - lgs, err1 := o.SelectIndexedLogs(address2, event1, 1, []common.Hash{logpoller.EvmWord(500000), logpoller.EvmWord(500020)}, 0) + lgs, err1 := o.SelectIndexedLogs(ctx, address2, event1, 1, []common.Hash{logpoller.EvmWord(500000), logpoller.EvmWord(500020)}, 0) require.NoError(t, err1) assert.Equal(t, 2, len(lgs)) }() func() { defer logRuntime(t, time.Now()) - lgs, err1 := o.SelectIndexedLogsTopicRange(address1, event1, 1, logpoller.EvmWord(500000), logpoller.EvmWord(500020), 0) + lgs, err1 := o.SelectIndexedLogsTopicRange(ctx, address1, event1, 1, logpoller.EvmWord(500000), logpoller.EvmWord(500020), 0) require.NoError(t, err1) assert.Equal(t, 10, len(lgs)) }() @@ -149,6 +151,7 @@ func TestPopulateLoadedDB(t *testing.T) { func TestLogPoller_Integration(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) th.Client.Commit() // Block 2. 
Ensure we have finality number of blocks + ctx := context.Background() require.NoError(t, th.LogPoller.RegisterFilter(logpoller.Filter{"Integration test", []common.Hash{EmitterABI.Events["Log1"].ID}, []common.Address{th.EmitterAddress1}, 0})) require.Len(t, th.LogPoller.Filter(nil, nil, nil).Addresses, 1) @@ -174,8 +177,7 @@ func TestLogPoller_Integration(t *testing.T) { require.NoError(t, th.LogPoller.Replay(testutils.Context(t), 4)) // We should immediately have at least logs 4-7 - logs, err := th.LogPoller.Logs(4, 7, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, - pg.WithParentCtx(testutils.Context(t))) + logs, err := th.LogPoller.Logs(4, 7, EmitterABI.Events["Log1"].ID, th.EmitterAddress1) require.NoError(t, err) require.Equal(t, 4, len(logs)) @@ -205,8 +207,7 @@ func TestLogPoller_Integration(t *testing.T) { require.NoError(t, th.LogPoller.Replay(testutils.Context(t), 4)) // We should immediately see 4 logs2 logs. - logs, err = th.LogPoller.Logs(2, 7, EmitterABI.Events["Log2"].ID, th.EmitterAddress1, - pg.WithParentCtx(testutils.Context(t))) + logs, err = th.LogPoller.Logs(2, 7, EmitterABI.Events["Log2"].ID, th.EmitterAddress1) require.NoError(t, err) assert.Equal(t, 4, len(logs)) @@ -262,7 +263,7 @@ func Test_BackupLogPoller(t *testing.T) { err := th.LogPoller.RegisterFilter(filter1) require.NoError(t, err) - filters, err := th.ORM.LoadFilters(pg.WithParentCtx(testutils.Context(t))) + filters, err := th.ORM.LoadFilters(ctx) require.NoError(t, err) require.Equal(t, 1, len(filters)) require.Equal(t, filter1, filters["filter1"]) @@ -333,8 +334,7 @@ func Test_BackupLogPoller(t *testing.T) { require.Equal(t, 32, len(fLogs)) // logs shouldn't show up yet - logs, err := th.LogPoller.Logs(34, 34, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, - pg.WithParentCtx(testutils.Context(t))) + logs, err := th.LogPoller.Logs(34, 34, EmitterABI.Events["Log1"].ID, th.EmitterAddress1) require.NoError(t, err) assert.Equal(t, 0, len(logs)) @@ -343,17 +343,16 @@ func 
Test_BackupLogPoller(t *testing.T) { markBlockAsFinalized(t, th, 34) // Run ordinary poller + backup poller at least once - currentBlock, _ := th.LogPoller.LatestBlock(pg.WithParentCtx(testutils.Context(t))) + currentBlock, _ := th.LogPoller.LatestBlock(ctx) th.LogPoller.PollAndSaveLogs(ctx, currentBlock.BlockNumber+1) th.LogPoller.BackupPollAndSaveLogs(ctx, 100) - currentBlock, _ = th.LogPoller.LatestBlock(pg.WithParentCtx(testutils.Context(t))) + currentBlock, _ = th.LogPoller.LatestBlock(ctx) require.Equal(t, int64(37), currentBlock.BlockNumber+1) // logs still shouldn't show up, because we don't want to backfill the last finalized log // to help with reorg detection - logs, err = th.LogPoller.Logs(34, 34, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, - pg.WithParentCtx(testutils.Context(t))) + logs, err = th.LogPoller.Logs(34, 34, EmitterABI.Events["Log1"].ID, th.EmitterAddress1) require.NoError(t, err) assert.Equal(t, 0, len(logs)) th.Client.Commit() @@ -362,21 +361,18 @@ func Test_BackupLogPoller(t *testing.T) { // Run ordinary poller + backup poller at least once more th.LogPoller.PollAndSaveLogs(ctx, currentBlockNumber+1) th.LogPoller.BackupPollAndSaveLogs(ctx, 100) - currentBlock, _ = th.LogPoller.LatestBlock(pg.WithParentCtx(testutils.Context(t))) + currentBlock, _ = th.LogPoller.LatestBlock(ctx) require.Equal(t, int64(38), currentBlock.BlockNumber+1) // all 3 logs in block 34 should show up now, thanks to backup logger - logs, err = th.LogPoller.Logs(30, 37, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, - pg.WithParentCtx(testutils.Context(t))) + logs, err = th.LogPoller.Logs(30, 37, EmitterABI.Events["Log1"].ID, th.EmitterAddress1) require.NoError(t, err) assert.Equal(t, 5, len(logs)) - logs, err = th.LogPoller.Logs(34, 34, EmitterABI.Events["Log2"].ID, th.EmitterAddress1, - pg.WithParentCtx(testutils.Context(t))) + logs, err = th.LogPoller.Logs(34, 34, EmitterABI.Events["Log2"].ID, th.EmitterAddress1) require.NoError(t, err) assert.Equal(t, 1, 
len(logs)) - logs, err = th.LogPoller.Logs(32, 36, EmitterABI.Events["Log1"].ID, th.EmitterAddress2, - pg.WithParentCtx(testutils.Context(t))) + logs, err = th.LogPoller.Logs(32, 36, EmitterABI.Events["Log1"].ID, th.EmitterAddress2) require.NoError(t, err) assert.Equal(t, 1, len(logs)) }) @@ -427,7 +423,6 @@ func TestLogPoller_BackupPollAndSaveLogsWithPollerNotWorking(t *testing.T) { currentBlock, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, - pg.WithParentCtx(testutils.Context(t)), ) require.NoError(t, err) require.Len(t, logs, emittedLogs-10) @@ -443,7 +438,6 @@ func TestLogPoller_BackupPollAndSaveLogsWithPollerNotWorking(t *testing.T) { currentBlock+1, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, - pg.WithParentCtx(testutils.Context(t)), ) require.NoError(t, err) require.Len(t, logs, emittedLogs) @@ -473,7 +467,7 @@ func TestLogPoller_BackupPollAndSaveLogsWithDeepBlockDelay(t *testing.T) { th.PollAndSaveLogs(ctx, 1) // Check that latest block has the same properties as the head - latestBlock, err := th.LogPoller.LatestBlock() + latestBlock, err := th.LogPoller.LatestBlock(ctx) require.NoError(t, err) assert.Equal(t, latestBlock.BlockNumber, header.Number.Int64()) assert.Equal(t, latestBlock.FinalizedBlockNumber, header.Number.Int64()) @@ -497,7 +491,6 @@ func TestLogPoller_BackupPollAndSaveLogsWithDeepBlockDelay(t *testing.T) { header.Number.Int64()+1, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, - pg.WithParentCtx(testutils.Context(t)), ) require.NoError(t, err) require.Len(t, logs, emittedLogs) @@ -556,7 +549,6 @@ func TestLogPoller_BackupPollAndSaveLogsSkippingLogsThatAreTooOld(t *testing.T) secondBatchBlock, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, - pg.WithParentCtx(testutils.Context(t)), ) require.NoError(t, err) require.Len(t, logs, logsBatch+1) @@ -626,13 +618,11 @@ func TestLogPoller_BlockTimestamps(t *testing.T) { require.NoError(t, err) require.Len(t, gethLogs, 2) - lb, _ := 
th.LogPoller.LatestBlock(pg.WithParentCtx(testutils.Context(t))) + lb, _ := th.LogPoller.LatestBlock(ctx) th.PollAndSaveLogs(ctx, lb.BlockNumber+1) - lg1, err := th.LogPoller.Logs(0, 20, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, - pg.WithParentCtx(ctx)) + lg1, err := th.LogPoller.Logs(0, 20, EmitterABI.Events["Log1"].ID, th.EmitterAddress1) require.NoError(t, err) - lg2, err := th.LogPoller.Logs(0, 20, EmitterABI.Events["Log2"].ID, th.EmitterAddress2, - pg.WithParentCtx(ctx)) + lg2, err := th.LogPoller.Logs(0, 20, EmitterABI.Events["Log2"].ID, th.EmitterAddress2) require.NoError(t, err) // Logs should have correct timestamps @@ -662,7 +652,7 @@ func TestLogPoller_SynchronizedWithGeth(t *testing.T) { t.Log("Starting test", mineOrReorg) chainID := testutils.NewRandomEVMChainID() // Set up a test chain with a log emitting contract deployed. - orm := logpoller.NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) + orm := logpoller.NewORM(chainID, db, lggr) // Note this property test is run concurrently and the sim is not threadsafe. 
ec := backends.NewSimulatedBackend(map[common.Address]core.GenesisAccount{ owner.From: { @@ -677,7 +667,7 @@ func TestLogPoller_SynchronizedWithGeth(t *testing.T) { } currentBlockNumber := int64(1) lp.PollAndSaveLogs(testutils.Context(t), currentBlockNumber) - currentBlock, err := lp.LatestBlock(pg.WithParentCtx(testutils.Context(t))) + currentBlock, err := lp.LatestBlock(testutils.Context(t)) require.NoError(t, err) matchesGeth := func() bool { // Check every block is identical @@ -728,7 +718,7 @@ func TestLogPoller_SynchronizedWithGeth(t *testing.T) { t.Logf("New latest (%v, %x), latest parent %x)\n", latest.NumberU64(), latest.Hash(), latest.ParentHash()) } lp.PollAndSaveLogs(testutils.Context(t), currentBlock.BlockNumber) - currentBlock, err = lp.LatestBlock(pg.WithParentCtx(testutils.Context(t))) + currentBlock, err = lp.LatestBlock(testutils.Context(t)) require.NoError(t, err) } return matchesGeth() @@ -779,7 +769,7 @@ func TestLogPoller_PollAndSaveLogs(t *testing.T) { assert.Equal(t, int64(2), newStart) // We expect to have saved block 1. - lpb, err := th.ORM.SelectBlockByNumber(1) + lpb, err := th.ORM.SelectBlockByNumber(testutils.Context(t), 1) require.NoError(t, err) assert.Equal(t, lpb.BlockHash, b.Hash()) assert.Equal(t, lpb.BlockNumber, int64(b.NumberU64())) @@ -787,7 +777,7 @@ func TestLogPoller_PollAndSaveLogs(t *testing.T) { assert.Equal(t, uint64(10), b.Time()) // No logs. - lgs, err := th.ORM.SelectLogsByBlockRange(1, 1) + lgs, err := th.ORM.SelectLogsByBlockRange(testutils.Context(t), 1, 1) require.NoError(t, err) assert.Equal(t, 0, len(lgs)) th.assertHaveCanonical(t, 1, 1) @@ -795,7 +785,7 @@ func TestLogPoller_PollAndSaveLogs(t *testing.T) { // Polling again should be a noop, since we are at the latest. 
newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) assert.Equal(t, int64(2), newStart) - latest, err := th.ORM.SelectLatestBlock() + latest, err := th.ORM.SelectLatestBlock(testutils.Context(t)) require.NoError(t, err) assert.Equal(t, int64(1), latest.BlockNumber) th.assertHaveCanonical(t, 1, 1) @@ -810,10 +800,10 @@ func TestLogPoller_PollAndSaveLogs(t *testing.T) { // Polling should get us the L1 log. newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) assert.Equal(t, int64(3), newStart) - latest, err = th.ORM.SelectLatestBlock() + latest, err = th.ORM.SelectLatestBlock(testutils.Context(t)) require.NoError(t, err) assert.Equal(t, int64(2), latest.BlockNumber) - lgs, err = th.ORM.SelectLogsByBlockRange(1, 3) + lgs, err = th.ORM.SelectLogsByBlockRange(testutils.Context(t), 1, 3) require.NoError(t, err) require.Equal(t, 1, len(lgs)) assert.Equal(t, th.EmitterAddress1, lgs[0].Address) @@ -845,10 +835,10 @@ func TestLogPoller_PollAndSaveLogs(t *testing.T) { newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) assert.Equal(t, int64(4), newStart) - latest, err = th.ORM.SelectLatestBlock() + latest, err = th.ORM.SelectLatestBlock(testutils.Context(t)) require.NoError(t, err) assert.Equal(t, int64(3), latest.BlockNumber) - lgs, err = th.ORM.SelectLogsByBlockRange(1, 3) + lgs, err = th.ORM.SelectLogsByBlockRange(testutils.Context(t), 1, 3) require.NoError(t, err) require.Equal(t, 1, len(lgs)) assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000002`), lgs[0].Data) @@ -868,10 +858,10 @@ func TestLogPoller_PollAndSaveLogs(t *testing.T) { markBlockAsFinalized(t, th, 1) newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) assert.Equal(t, int64(5), newStart) - latest, err = th.ORM.SelectLatestBlock() + latest, err = th.ORM.SelectLatestBlock(testutils.Context(t)) require.NoError(t, err) assert.Equal(t, int64(4), latest.BlockNumber) - lgs, err = th.ORM.SelectLogsByBlockRange(1, 3) + lgs, err = 
th.ORM.SelectLogsByBlockRange(testutils.Context(t), 1, 3) require.NoError(t, err) // We expect ONLY L1_1 and L1_3 since L1_2 is reorg'd out. assert.Equal(t, 2, len(lgs)) @@ -904,7 +894,7 @@ func TestLogPoller_PollAndSaveLogs(t *testing.T) { newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) assert.Equal(t, int64(7), newStart) - lgs, err = th.ORM.SelectLogsByBlockRange(4, 6) + lgs, err = th.ORM.SelectLogsByBlockRange(testutils.Context(t), 4, 6) require.NoError(t, err) require.Equal(t, 3, len(lgs)) assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000004`), lgs[0].Data) @@ -934,7 +924,7 @@ func TestLogPoller_PollAndSaveLogs(t *testing.T) { newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) assert.Equal(t, int64(11), newStart) - lgs, err = th.ORM.SelectLogsByBlockRange(7, 9) + lgs, err = th.ORM.SelectLogsByBlockRange(testutils.Context(t), 7, 9) require.NoError(t, err) require.Equal(t, 3, len(lgs)) assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000007`), lgs[0].Data) @@ -963,7 +953,7 @@ func TestLogPoller_PollAndSaveLogs(t *testing.T) { newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) assert.Equal(t, int64(18), newStart) - lgs, err = th.ORM.SelectLogsByBlockRange(11, 17) + lgs, err = th.ORM.SelectLogsByBlockRange(testutils.Context(t), 11, 17) require.NoError(t, err) assert.Equal(t, 7, len(lgs)) th.assertHaveCanonical(t, 14, 16) // Should have last finalized block plus unfinalized blocks @@ -1028,7 +1018,7 @@ func TestLogPoller_PollAndSaveLogsDeepReorg(t *testing.T) { newStart := th.PollAndSaveLogs(testutils.Context(t), 1) assert.Equal(t, int64(3), newStart) // Check that L1_1 has a proper data payload - lgs, err := th.ORM.SelectLogsByBlockRange(2, 2) + lgs, err := th.ORM.SelectLogsByBlockRange(testutils.Context(t), 2, 2) require.NoError(t, err) assert.Equal(t, 
hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000001`), lgs[0].Data) @@ -1054,7 +1044,7 @@ func TestLogPoller_PollAndSaveLogsDeepReorg(t *testing.T) { assert.Equal(t, int64(10), newStart) // Expect L1_2 to be properly updated - lgs, err = th.ORM.SelectLogsByBlockRange(2, 2) + lgs, err = th.ORM.SelectLogsByBlockRange(testutils.Context(t), 2, 2) require.NoError(t, err) assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000002`), lgs[0].Data) th.assertHaveCanonical(t, 1, 1) @@ -1087,7 +1077,7 @@ func TestLogPoller_LoadFilters(t *testing.T) { err = th.LogPoller.RegisterFilter(filter3) require.NoError(t, err) - filters, err := th.ORM.LoadFilters() + filters, err := th.ORM.LoadFilters(testutils.Context(t)) require.NoError(t, err) require.NotNil(t, filters) require.Len(t, filters, 3) @@ -1149,7 +1139,7 @@ func TestLogPoller_GetBlocks_Range(t *testing.T) { th.Client.Commit() // Assert block 2 is not yet in DB - _, err = th.ORM.SelectBlockByNumber(2) + _, err = th.ORM.SelectBlockByNumber(testutils.Context(t), 2) require.Error(t, err) // getBlocksRange is able to retrieve block 2 by calling RPC @@ -1164,7 +1154,7 @@ func TestLogPoller_GetBlocks_Range(t *testing.T) { th.Client.Commit() // Assert block 3 is not yet in DB - _, err = th.ORM.SelectBlockByNumber(3) + _, err = th.ORM.SelectBlockByNumber(testutils.Context(t), 3) require.Error(t, err) // getBlocksRange is able to retrieve blocks 1 and 3, without retrieving block 2 @@ -1177,10 +1167,10 @@ func TestLogPoller_GetBlocks_Range(t *testing.T) { // after calling PollAndSaveLogs, block 2 & 3 are persisted in DB th.LogPoller.PollAndSaveLogs(testutils.Context(t), 1) - block, err := th.ORM.SelectBlockByNumber(2) + block, err := th.ORM.SelectBlockByNumber(testutils.Context(t), 2) require.NoError(t, err) assert.Equal(t, 2, int(block.BlockNumber)) - block, err = th.ORM.SelectBlockByNumber(3) + block, err = th.ORM.SelectBlockByNumber(testutils.Context(t), 
3) require.NoError(t, err) assert.Equal(t, 3, int(block.BlockNumber)) @@ -1219,9 +1209,8 @@ func TestLogPoller_GetBlocks_Range(t *testing.T) { // test still works when qopts is cancelled // but context object is not ctx, cancel = context.WithCancel(testutils.Context(t)) - qopts := pg.WithParentCtx(ctx) cancel() - _, err = th.LogPoller.GetBlocksRange(testutils.Context(t), blockNums, qopts) + _, err = th.LogPoller.GetBlocksRange(testutils.Context(t), blockNums) require.NoError(t, err) } @@ -1251,7 +1240,7 @@ func TestGetReplayFromBlock(t *testing.T) { requested = int64(15) fromBlock, err = th.LogPoller.GetReplayFromBlock(testutils.Context(t), requested) require.NoError(t, err) - latest, err := th.LogPoller.LatestBlock(pg.WithParentCtx(testutils.Context(t))) + latest, err := th.LogPoller.LatestBlock(testutils.Context(t)) require.NoError(t, err) assert.Equal(t, latest.BlockNumber, fromBlock) @@ -1269,7 +1258,7 @@ func TestLogPoller_DBErrorHandling(t *testing.T) { chainID1 := testutils.NewRandomEVMChainID() chainID2 := testutils.NewRandomEVMChainID() db := pgtest.NewSqlxDB(t) - o := logpoller.NewORM(chainID1, db, lggr, pgtest.NewQConfig(true)) + o := logpoller.NewORM(chainID1, db, lggr) owner := testutils.MustNewSimTransactor(t) ethDB := rawdb.NewMemoryDatabase() @@ -1335,7 +1324,7 @@ func TestTooManyLogResults(t *testing.T) { lggr, obs := logger.TestObserved(t, zapcore.DebugLevel) chainID := testutils.NewRandomEVMChainID() db := pgtest.NewSqlxDB(t) - o := logpoller.NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) + o := logpoller.NewORM(chainID, db, lggr) lp := logpoller.NewLogPoller(o, ec, lggr, 1*time.Hour, false, 2, 20, 10, 1000) expected := []int64{10, 5, 2, 1} @@ -1368,7 +1357,7 @@ func TestTooManyLogResults(t *testing.T) { err := lp.RegisterFilter(logpoller.Filter{"Integration test", []common.Hash{EmitterABI.Events["Log1"].ID}, []common.Address{addr}, 0}) require.NoError(t, err) lp.PollAndSaveLogs(ctx, 5) - block, err2 := o.SelectLatestBlock() + block, err2 := 
o.SelectLatestBlock(testutils.Context(t)) require.NoError(t, err2) assert.Equal(t, int64(298), block.BlockNumber) @@ -1399,7 +1388,7 @@ func TestTooManyLogResults(t *testing.T) { }) lp.PollAndSaveLogs(ctx, 298) - block, err2 = o.SelectLatestBlock() + block, err2 = o.SelectLatestBlock(testutils.Context(t)) require.NoError(t, err2) assert.Equal(t, int64(298), block.BlockNumber) warns := obs.FilterMessageSnippet("halving block range").FilterLevelExact(zapcore.WarnLevel).All() @@ -1505,7 +1494,7 @@ func Test_PollAndSavePersistsFinalityInBlocks(t *testing.T) { t.Run(tt.name, func(t *testing.T) { th := SetupTH(t, tt.useFinalityTag, tt.finalityDepth, 3, 2, 1000) // Should return error before the first poll and save - _, err := th.LogPoller.LatestBlock() + _, err := th.LogPoller.LatestBlock(testutils.Context(t)) require.Error(t, err) // Mark first block as finalized @@ -1519,7 +1508,7 @@ func Test_PollAndSavePersistsFinalityInBlocks(t *testing.T) { th.PollAndSaveLogs(ctx, 1) - latestBlock, err := th.LogPoller.LatestBlock() + latestBlock, err := th.LogPoller.LatestBlock(testutils.Context(t)) require.NoError(t, err) require.Equal(t, int64(numberOfBlocks), latestBlock.BlockNumber) require.Equal(t, tt.expectedFinalizedBlock, latestBlock.FinalizedBlockNumber) @@ -1590,7 +1579,6 @@ func Test_CreatedAfterQueriesWithBackfill(t *testing.T) { currentBlock, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, - pg.WithParentCtx(testutils.Context(t)), ) require.NoError(t, err) require.Len(t, logs, emittedLogs) @@ -1601,7 +1589,6 @@ func Test_CreatedAfterQueriesWithBackfill(t *testing.T) { th.EmitterAddress1, genesisBlockTime, 0, - pg.WithParentCtx(testutils.Context(t)), ) require.NoError(t, err) require.Len(t, logs, emittedLogs) @@ -1644,7 +1631,7 @@ func Test_PruneOldBlocks(t *testing.T) { th := SetupTH(t, true, 0, 3, 2, tt.keepFinalizedBlocksDepth) for i := 1; i <= tt.blockToCreate; i++ { - err := th.ORM.InsertBlock(utils.RandomBytes32(), int64(i+10), time.Now(), int64(i)) + err := 
th.ORM.InsertBlock(testutils.Context(t), utils.RandomBytes32(), int64(i+10), time.Now(), int64(i)) require.NoError(t, err) } @@ -1654,7 +1641,7 @@ func Test_PruneOldBlocks(t *testing.T) { } require.NoError(t, th.LogPoller.PruneOldBlocks(ctx)) - blocks, err := th.ORM.GetBlocksRange(0, math.MaxInt64, pg.WithParentCtx(ctx)) + blocks, err := th.ORM.GetBlocksRange(testutils.Context(t), 0, math.MaxInt64) require.NoError(t, err) assert.Len(t, blocks, tt.blocksLeft) }) diff --git a/core/chains/evm/logpoller/observability.go b/core/chains/evm/logpoller/observability.go index a7a0d3c03d5..956d1b7ee00 100644 --- a/core/chains/evm/logpoller/observability.go +++ b/core/chains/evm/logpoller/observability.go @@ -1,6 +1,7 @@ package logpoller import ( + "context" "math/big" "time" @@ -10,8 +11,6 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/smartcontractkit/chainlink-common/pkg/logger" - - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) type queryType string @@ -77,9 +76,9 @@ type ObservedORM struct { // NewObservedORM creates an observed version of log poller's ORM created by NewORM // Please see ObservedLogPoller for more details on how latencies are measured -func NewObservedORM(chainID *big.Int, db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig) *ObservedORM { +func NewObservedORM(chainID *big.Int, db *sqlx.DB, lggr logger.Logger) *ObservedORM { return &ObservedORM{ - ORM: NewORM(chainID, db, lggr, cfg), + ORM: NewORM(chainID, db, lggr), queryDuration: lpQueryDuration, datasetSize: lpQueryDataSets, logsInserted: lpLogsInserted, @@ -88,169 +87,169 @@ func NewObservedORM(chainID *big.Int, db *sqlx.DB, lggr logger.Logger, cfg pg.QC } } -func (o *ObservedORM) InsertLogs(logs []Log, qopts ...pg.QOpt) error { +func (o *ObservedORM) InsertLogs(ctx context.Context, logs []Log) error { err := withObservedExec(o, "InsertLogs", create, func() error { - return o.ORM.InsertLogs(logs, qopts...) 
+ return o.ORM.InsertLogs(ctx, logs) }) trackInsertedLogsAndBlock(o, logs, nil, err) return err } -func (o *ObservedORM) InsertLogsWithBlock(logs []Log, block LogPollerBlock, qopts ...pg.QOpt) error { +func (o *ObservedORM) InsertLogsWithBlock(ctx context.Context, logs []Log, block LogPollerBlock) error { err := withObservedExec(o, "InsertLogsWithBlock", create, func() error { - return o.ORM.InsertLogsWithBlock(logs, block, qopts...) + return o.ORM.InsertLogsWithBlock(ctx, logs, block) }) trackInsertedLogsAndBlock(o, logs, &block, err) return err } -func (o *ObservedORM) InsertFilter(filter Filter, qopts ...pg.QOpt) error { +func (o *ObservedORM) InsertFilter(ctx context.Context, filter Filter) error { return withObservedExec(o, "InsertFilter", create, func() error { - return o.ORM.InsertFilter(filter, qopts...) + return o.ORM.InsertFilter(ctx, filter) }) } -func (o *ObservedORM) LoadFilters(qopts ...pg.QOpt) (map[string]Filter, error) { +func (o *ObservedORM) LoadFilters(ctx context.Context) (map[string]Filter, error) { return withObservedQuery(o, "LoadFilters", func() (map[string]Filter, error) { - return o.ORM.LoadFilters(qopts...) + return o.ORM.LoadFilters(ctx) }) } -func (o *ObservedORM) DeleteFilter(name string, qopts ...pg.QOpt) error { +func (o *ObservedORM) DeleteFilter(ctx context.Context, name string) error { return withObservedExec(o, "DeleteFilter", del, func() error { - return o.ORM.DeleteFilter(name, qopts...) + return o.ORM.DeleteFilter(ctx, name) }) } -func (o *ObservedORM) DeleteBlocksBefore(end int64, qopts ...pg.QOpt) error { +func (o *ObservedORM) DeleteBlocksBefore(ctx context.Context, end int64) error { return withObservedExec(o, "DeleteBlocksBefore", del, func() error { - return o.ORM.DeleteBlocksBefore(end, qopts...) 
+ return o.ORM.DeleteBlocksBefore(ctx, end) }) } -func (o *ObservedORM) DeleteLogsAndBlocksAfter(start int64, qopts ...pg.QOpt) error { +func (o *ObservedORM) DeleteLogsAndBlocksAfter(ctx context.Context, start int64) error { return withObservedExec(o, "DeleteLogsAndBlocksAfter", del, func() error { - return o.ORM.DeleteLogsAndBlocksAfter(start, qopts...) + return o.ORM.DeleteLogsAndBlocksAfter(ctx, start) }) } -func (o *ObservedORM) DeleteExpiredLogs(qopts ...pg.QOpt) error { +func (o *ObservedORM) DeleteExpiredLogs(ctx context.Context) error { return withObservedExec(o, "DeleteExpiredLogs", del, func() error { - return o.ORM.DeleteExpiredLogs(qopts...) + return o.ORM.DeleteExpiredLogs(ctx) }) } -func (o *ObservedORM) SelectBlockByNumber(n int64, qopts ...pg.QOpt) (*LogPollerBlock, error) { +func (o *ObservedORM) SelectBlockByNumber(ctx context.Context, n int64) (*LogPollerBlock, error) { return withObservedQuery(o, "SelectBlockByNumber", func() (*LogPollerBlock, error) { - return o.ORM.SelectBlockByNumber(n, qopts...) + return o.ORM.SelectBlockByNumber(ctx, n) }) } -func (o *ObservedORM) SelectLatestBlock(qopts ...pg.QOpt) (*LogPollerBlock, error) { +func (o *ObservedORM) SelectLatestBlock(ctx context.Context) (*LogPollerBlock, error) { return withObservedQuery(o, "SelectLatestBlock", func() (*LogPollerBlock, error) { - return o.ORM.SelectLatestBlock(qopts...) + return o.ORM.SelectLatestBlock(ctx) }) } -func (o *ObservedORM) SelectLatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs Confirmations, qopts ...pg.QOpt) (*Log, error) { +func (o *ObservedORM) SelectLatestLogByEventSigWithConfs(ctx context.Context, eventSig common.Hash, address common.Address, confs Confirmations) (*Log, error) { return withObservedQuery(o, "SelectLatestLogByEventSigWithConfs", func() (*Log, error) { - return o.ORM.SelectLatestLogByEventSigWithConfs(eventSig, address, confs, qopts...) 
+ return o.ORM.SelectLatestLogByEventSigWithConfs(ctx, eventSig, address, confs) }) } -func (o *ObservedORM) SelectLogsWithSigs(start, end int64, address common.Address, eventSigs []common.Hash, qopts ...pg.QOpt) ([]Log, error) { +func (o *ObservedORM) SelectLogsWithSigs(ctx context.Context, start, end int64, address common.Address, eventSigs []common.Hash) ([]Log, error) { return withObservedQueryAndResults(o, "SelectLogsWithSigs", func() ([]Log, error) { - return o.ORM.SelectLogsWithSigs(start, end, address, eventSigs, qopts...) + return o.ORM.SelectLogsWithSigs(ctx, start, end, address, eventSigs) }) } -func (o *ObservedORM) SelectLogsCreatedAfter(address common.Address, eventSig common.Hash, after time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (o *ObservedORM) SelectLogsCreatedAfter(ctx context.Context, address common.Address, eventSig common.Hash, after time.Time, confs Confirmations) ([]Log, error) { return withObservedQueryAndResults(o, "SelectLogsCreatedAfter", func() ([]Log, error) { - return o.ORM.SelectLogsCreatedAfter(address, eventSig, after, confs, qopts...) + return o.ORM.SelectLogsCreatedAfter(ctx, address, eventSig, after, confs) }) } -func (o *ObservedORM) SelectIndexedLogs(address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (o *ObservedORM) SelectIndexedLogs(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, confs Confirmations) ([]Log, error) { return withObservedQueryAndResults(o, "SelectIndexedLogs", func() ([]Log, error) { - return o.ORM.SelectIndexedLogs(address, eventSig, topicIndex, topicValues, confs, qopts...) 
+ return o.ORM.SelectIndexedLogs(ctx, address, eventSig, topicIndex, topicValues, confs) }) } -func (o *ObservedORM) SelectIndexedLogsByBlockRange(start, end int64, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, qopts ...pg.QOpt) ([]Log, error) { +func (o *ObservedORM) SelectIndexedLogsByBlockRange(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash) ([]Log, error) { return withObservedQueryAndResults(o, "SelectIndexedLogsByBlockRange", func() ([]Log, error) { - return o.ORM.SelectIndexedLogsByBlockRange(start, end, address, eventSig, topicIndex, topicValues, qopts...) + return o.ORM.SelectIndexedLogsByBlockRange(ctx, start, end, address, eventSig, topicIndex, topicValues) }) } -func (o *ObservedORM) SelectIndexedLogsCreatedAfter(address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (o *ObservedORM) SelectIndexedLogsCreatedAfter(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations) ([]Log, error) { return withObservedQueryAndResults(o, "SelectIndexedLogsCreatedAfter", func() ([]Log, error) { - return o.ORM.SelectIndexedLogsCreatedAfter(address, eventSig, topicIndex, topicValues, after, confs, qopts...) 
+ return o.ORM.SelectIndexedLogsCreatedAfter(ctx, address, eventSig, topicIndex, topicValues, after, confs) }) } -func (o *ObservedORM) SelectIndexedLogsWithSigsExcluding(sigA, sigB common.Hash, topicIndex int, address common.Address, startBlock, endBlock int64, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (o *ObservedORM) SelectIndexedLogsWithSigsExcluding(ctx context.Context, sigA, sigB common.Hash, topicIndex int, address common.Address, startBlock, endBlock int64, confs Confirmations) ([]Log, error) { return withObservedQueryAndResults(o, "SelectIndexedLogsWithSigsExcluding", func() ([]Log, error) { - return o.ORM.SelectIndexedLogsWithSigsExcluding(sigA, sigB, topicIndex, address, startBlock, endBlock, confs, qopts...) + return o.ORM.SelectIndexedLogsWithSigsExcluding(ctx, sigA, sigB, topicIndex, address, startBlock, endBlock, confs) }) } -func (o *ObservedORM) SelectLogs(start, end int64, address common.Address, eventSig common.Hash, qopts ...pg.QOpt) ([]Log, error) { +func (o *ObservedORM) SelectLogs(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash) ([]Log, error) { return withObservedQueryAndResults(o, "SelectLogs", func() ([]Log, error) { - return o.ORM.SelectLogs(start, end, address, eventSig, qopts...) + return o.ORM.SelectLogs(ctx, start, end, address, eventSig) }) } -func (o *ObservedORM) SelectIndexedLogsByTxHash(address common.Address, eventSig common.Hash, txHash common.Hash, qopts ...pg.QOpt) ([]Log, error) { +func (o *ObservedORM) SelectIndexedLogsByTxHash(ctx context.Context, address common.Address, eventSig common.Hash, txHash common.Hash) ([]Log, error) { return withObservedQueryAndResults(o, "SelectIndexedLogsByTxHash", func() ([]Log, error) { - return o.ORM.SelectIndexedLogsByTxHash(address, eventSig, txHash, qopts...) 
+ return o.ORM.SelectIndexedLogsByTxHash(ctx, address, eventSig, txHash) }) } -func (o *ObservedORM) GetBlocksRange(start int64, end int64, qopts ...pg.QOpt) ([]LogPollerBlock, error) { +func (o *ObservedORM) GetBlocksRange(ctx context.Context, start int64, end int64) ([]LogPollerBlock, error) { return withObservedQueryAndResults(o, "GetBlocksRange", func() ([]LogPollerBlock, error) { - return o.ORM.GetBlocksRange(start, end, qopts...) + return o.ORM.GetBlocksRange(ctx, start, end) }) } -func (o *ObservedORM) SelectLatestLogEventSigsAddrsWithConfs(fromBlock int64, addresses []common.Address, eventSigs []common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (o *ObservedORM) SelectLatestLogEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, addresses []common.Address, eventSigs []common.Hash, confs Confirmations) ([]Log, error) { return withObservedQueryAndResults(o, "SelectLatestLogEventSigsAddrsWithConfs", func() ([]Log, error) { - return o.ORM.SelectLatestLogEventSigsAddrsWithConfs(fromBlock, addresses, eventSigs, confs, qopts...) + return o.ORM.SelectLatestLogEventSigsAddrsWithConfs(ctx, fromBlock, addresses, eventSigs, confs) }) } -func (o *ObservedORM) SelectLatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations, qopts ...pg.QOpt) (int64, error) { +func (o *ObservedORM) SelectLatestBlockByEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) (int64, error) { return withObservedQuery(o, "SelectLatestBlockByEventSigsAddrsWithConfs", func() (int64, error) { - return o.ORM.SelectLatestBlockByEventSigsAddrsWithConfs(fromBlock, eventSigs, addresses, confs, qopts...) 
+ return o.ORM.SelectLatestBlockByEventSigsAddrsWithConfs(ctx, fromBlock, eventSigs, addresses, confs) }) } -func (o *ObservedORM) SelectLogsDataWordRange(address common.Address, eventSig common.Hash, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (o *ObservedORM) SelectLogsDataWordRange(ctx context.Context, address common.Address, eventSig common.Hash, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations) ([]Log, error) { return withObservedQueryAndResults(o, "SelectLogsDataWordRange", func() ([]Log, error) { - return o.ORM.SelectLogsDataWordRange(address, eventSig, wordIndex, wordValueMin, wordValueMax, confs, qopts...) + return o.ORM.SelectLogsDataWordRange(ctx, address, eventSig, wordIndex, wordValueMin, wordValueMax, confs) }) } -func (o *ObservedORM) SelectLogsDataWordGreaterThan(address common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (o *ObservedORM) SelectLogsDataWordGreaterThan(ctx context.Context, address common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, confs Confirmations) ([]Log, error) { return withObservedQueryAndResults(o, "SelectLogsDataWordGreaterThan", func() ([]Log, error) { - return o.ORM.SelectLogsDataWordGreaterThan(address, eventSig, wordIndex, wordValueMin, confs, qopts...) 
+ return o.ORM.SelectLogsDataWordGreaterThan(ctx, address, eventSig, wordIndex, wordValueMin, confs) }) } -func (o *ObservedORM) SelectLogsDataWordBetween(address common.Address, eventSig common.Hash, wordIndexMin int, wordIndexMax int, wordValue common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (o *ObservedORM) SelectLogsDataWordBetween(ctx context.Context, address common.Address, eventSig common.Hash, wordIndexMin int, wordIndexMax int, wordValue common.Hash, confs Confirmations) ([]Log, error) { return withObservedQueryAndResults(o, "SelectLogsDataWordBetween", func() ([]Log, error) { - return o.ORM.SelectLogsDataWordBetween(address, eventSig, wordIndexMin, wordIndexMax, wordValue, confs, qopts...) + return o.ORM.SelectLogsDataWordBetween(ctx, address, eventSig, wordIndexMin, wordIndexMax, wordValue, confs) }) } -func (o *ObservedORM) SelectIndexedLogsTopicGreaterThan(address common.Address, eventSig common.Hash, topicIndex int, topicValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (o *ObservedORM) SelectIndexedLogsTopicGreaterThan(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValueMin common.Hash, confs Confirmations) ([]Log, error) { return withObservedQueryAndResults(o, "SelectIndexedLogsTopicGreaterThan", func() ([]Log, error) { - return o.ORM.SelectIndexedLogsTopicGreaterThan(address, eventSig, topicIndex, topicValueMin, confs, qopts...) 
+ return o.ORM.SelectIndexedLogsTopicGreaterThan(ctx, address, eventSig, topicIndex, topicValueMin, confs) }) } -func (o *ObservedORM) SelectIndexedLogsTopicRange(address common.Address, eventSig common.Hash, topicIndex int, topicValueMin, topicValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (o *ObservedORM) SelectIndexedLogsTopicRange(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValueMin, topicValueMax common.Hash, confs Confirmations) ([]Log, error) { return withObservedQueryAndResults(o, "SelectIndexedLogsTopicRange", func() ([]Log, error) { - return o.ORM.SelectIndexedLogsTopicRange(address, eventSig, topicIndex, topicValueMin, topicValueMax, confs, qopts...) + return o.ORM.SelectIndexedLogsTopicRange(ctx, address, eventSig, topicIndex, topicValueMin, topicValueMax, confs) }) } diff --git a/core/chains/evm/logpoller/observability_test.go b/core/chains/evm/logpoller/observability_test.go index eb81273af2c..78c27b4b8f7 100644 --- a/core/chains/evm/logpoller/observability_test.go +++ b/core/chains/evm/logpoller/observability_test.go @@ -20,7 +20,6 @@ import ( ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) func TestMultipleMetricsArePublished(t *testing.T) { @@ -29,19 +28,19 @@ func TestMultipleMetricsArePublished(t *testing.T) { t.Cleanup(func() { resetMetrics(*orm) }) require.Equal(t, 0, testutil.CollectAndCount(orm.queryDuration)) - _, _ = orm.SelectIndexedLogs(common.Address{}, common.Hash{}, 1, []common.Hash{}, 1, pg.WithParentCtx(ctx)) - _, _ = orm.SelectIndexedLogsByBlockRange(0, 1, common.Address{}, common.Hash{}, 1, []common.Hash{}, pg.WithParentCtx(ctx)) - _, _ = orm.SelectIndexedLogsTopicGreaterThan(common.Address{}, common.Hash{}, 1, common.Hash{}, 
1, pg.WithParentCtx(ctx)) - _, _ = orm.SelectIndexedLogsTopicRange(common.Address{}, common.Hash{}, 1, common.Hash{}, common.Hash{}, 1, pg.WithParentCtx(ctx)) - _, _ = orm.SelectIndexedLogsWithSigsExcluding(common.Hash{}, common.Hash{}, 1, common.Address{}, 0, 1, 1, pg.WithParentCtx(ctx)) - _, _ = orm.SelectLogsDataWordRange(common.Address{}, common.Hash{}, 0, common.Hash{}, common.Hash{}, 1, pg.WithParentCtx(ctx)) - _, _ = orm.SelectLogsDataWordGreaterThan(common.Address{}, common.Hash{}, 0, common.Hash{}, 1, pg.WithParentCtx(ctx)) - _, _ = orm.SelectLogsCreatedAfter(common.Address{}, common.Hash{}, time.Now(), 0, pg.WithParentCtx(ctx)) - _, _ = orm.SelectLatestLogByEventSigWithConfs(common.Hash{}, common.Address{}, 0, pg.WithParentCtx(ctx)) - _, _ = orm.SelectLatestLogEventSigsAddrsWithConfs(0, []common.Address{{}}, []common.Hash{{}}, 1, pg.WithParentCtx(ctx)) - _, _ = orm.SelectIndexedLogsCreatedAfter(common.Address{}, common.Hash{}, 1, []common.Hash{}, time.Now(), 0, pg.WithParentCtx(ctx)) - _ = orm.InsertLogs([]Log{}, pg.WithParentCtx(ctx)) - _ = orm.InsertLogsWithBlock([]Log{}, NewLogPollerBlock(common.Hash{}, 1, time.Now(), 0), pg.WithParentCtx(ctx)) + _, _ = orm.SelectIndexedLogs(ctx, common.Address{}, common.Hash{}, 1, []common.Hash{}, 1) + _, _ = orm.SelectIndexedLogsByBlockRange(ctx, 0, 1, common.Address{}, common.Hash{}, 1, []common.Hash{}) + _, _ = orm.SelectIndexedLogsTopicGreaterThan(ctx, common.Address{}, common.Hash{}, 1, common.Hash{}, 1) + _, _ = orm.SelectIndexedLogsTopicRange(ctx, common.Address{}, common.Hash{}, 1, common.Hash{}, common.Hash{}, 1) + _, _ = orm.SelectIndexedLogsWithSigsExcluding(ctx, common.Hash{}, common.Hash{}, 1, common.Address{}, 0, 1, 1) + _, _ = orm.SelectLogsDataWordRange(ctx, common.Address{}, common.Hash{}, 0, common.Hash{}, common.Hash{}, 1) + _, _ = orm.SelectLogsDataWordGreaterThan(ctx, common.Address{}, common.Hash{}, 0, common.Hash{}, 1) + _, _ = orm.SelectLogsCreatedAfter(ctx, common.Address{}, common.Hash{}, 
time.Now(), 0) + _, _ = orm.SelectLatestLogByEventSigWithConfs(ctx, common.Hash{}, common.Address{}, 0) + _, _ = orm.SelectLatestLogEventSigsAddrsWithConfs(ctx, 0, []common.Address{{}}, []common.Hash{{}}, 1) + _, _ = orm.SelectIndexedLogsCreatedAfter(ctx, common.Address{}, common.Hash{}, 1, []common.Hash{}, time.Now(), 0) + _ = orm.InsertLogs(ctx, []Log{}) + _ = orm.InsertLogsWithBlock(ctx, []Log{}, NewLogPollerBlock(common.Hash{}, 1, time.Now(), 0)) require.Equal(t, 13, testutil.CollectAndCount(orm.queryDuration)) require.Equal(t, 10, testutil.CollectAndCount(orm.datasetSize)) @@ -53,7 +52,7 @@ func TestShouldPublishDurationInCaseOfError(t *testing.T) { t.Cleanup(func() { resetMetrics(*orm) }) require.Equal(t, 0, testutil.CollectAndCount(orm.queryDuration)) - _, err := orm.SelectLatestLogByEventSigWithConfs(common.Hash{}, common.Address{}, 0, pg.WithParentCtx(ctx)) + _, err := orm.SelectLatestLogByEventSigWithConfs(ctx, common.Hash{}, common.Address{}, 0) require.Error(t, err) require.Equal(t, 1, testutil.CollectAndCount(orm.queryDuration)) @@ -100,25 +99,26 @@ func TestMetricsAreProperlyPopulatedForWrites(t *testing.T) { } func TestCountersAreProperlyPopulatedForWrites(t *testing.T) { + ctx := testutils.Context(t) orm := createObservedORM(t, 420) logs := generateRandomLogs(420, 20) // First insert 10 logs - require.NoError(t, orm.InsertLogs(logs[:10])) + require.NoError(t, orm.InsertLogs(ctx, logs[:10])) assert.Equal(t, float64(10), testutil.ToFloat64(orm.logsInserted.WithLabelValues("420"))) // Insert 5 more logs with block - require.NoError(t, orm.InsertLogsWithBlock(logs[10:15], NewLogPollerBlock(utils.RandomBytes32(), 10, time.Now(), 5))) + require.NoError(t, orm.InsertLogsWithBlock(ctx, logs[10:15], NewLogPollerBlock(utils.RandomBytes32(), 10, time.Now(), 5))) assert.Equal(t, float64(15), testutil.ToFloat64(orm.logsInserted.WithLabelValues("420"))) assert.Equal(t, float64(1), testutil.ToFloat64(orm.blocksInserted.WithLabelValues("420"))) // Insert 5 more 
logs with block - require.NoError(t, orm.InsertLogsWithBlock(logs[15:], NewLogPollerBlock(utils.RandomBytes32(), 15, time.Now(), 5))) + require.NoError(t, orm.InsertLogsWithBlock(ctx, logs[15:], NewLogPollerBlock(utils.RandomBytes32(), 15, time.Now(), 5))) assert.Equal(t, float64(20), testutil.ToFloat64(orm.logsInserted.WithLabelValues("420"))) assert.Equal(t, float64(2), testutil.ToFloat64(orm.blocksInserted.WithLabelValues("420"))) // Don't update counters in case of an error - require.Error(t, orm.InsertLogsWithBlock(logs, NewLogPollerBlock(utils.RandomBytes32(), 0, time.Now(), 0))) + require.Error(t, orm.InsertLogsWithBlock(ctx, logs, NewLogPollerBlock(utils.RandomBytes32(), 0, time.Now(), 0))) assert.Equal(t, float64(20), testutil.ToFloat64(orm.logsInserted.WithLabelValues("420"))) assert.Equal(t, float64(2), testutil.ToFloat64(orm.blocksInserted.WithLabelValues("420"))) } @@ -146,9 +146,7 @@ func generateRandomLogs(chainId, count int) []Log { func createObservedORM(t *testing.T, chainId int64) *ObservedORM { lggr, _ := logger.TestObserved(t, zapcore.ErrorLevel) db := pgtest.NewSqlxDB(t) - return NewObservedORM( - big.NewInt(chainId), db, lggr, pgtest.NewQConfig(true), - ) + return NewObservedORM(big.NewInt(chainId), db, lggr) } func resetMetrics(lp ObservedORM) { diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index 663c56d10ed..ee75b75240d 100644 --- a/core/chains/evm/logpoller/orm.go +++ b/core/chains/evm/logpoller/orm.go @@ -12,88 +12,84 @@ import ( "github.com/pkg/errors" "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) +// TODO: Set a reasonable timeout +const defaultTimeout = 10 * time.Second + // ORM represents the persistent data access layer used by the log poller. 
At this moment, it's a bit leaky abstraction, because // it exposes some of the database implementation details (e.g. pg.Q). Ideally it should be agnostic and could be applied to any persistence layer. // What is more, LogPoller should not be aware of the underlying database implementation and delegate all the queries to the ORM. type ORM interface { - InsertLogs(logs []Log, qopts ...pg.QOpt) error - InsertLogsWithBlock(logs []Log, block LogPollerBlock, qopts ...pg.QOpt) error - InsertFilter(filter Filter, qopts ...pg.QOpt) error - - LoadFilters(qopts ...pg.QOpt) (map[string]Filter, error) - DeleteFilter(name string, qopts ...pg.QOpt) error - - DeleteBlocksBefore(end int64, qopts ...pg.QOpt) error - DeleteLogsAndBlocksAfter(start int64, qopts ...pg.QOpt) error - DeleteExpiredLogs(qopts ...pg.QOpt) error - - GetBlocksRange(start int64, end int64, qopts ...pg.QOpt) ([]LogPollerBlock, error) - SelectBlockByNumber(blockNumber int64, qopts ...pg.QOpt) (*LogPollerBlock, error) - SelectLatestBlock(qopts ...pg.QOpt) (*LogPollerBlock, error) - - SelectLogs(start, end int64, address common.Address, eventSig common.Hash, qopts ...pg.QOpt) ([]Log, error) - SelectLogsWithSigs(start, end int64, address common.Address, eventSigs []common.Hash, qopts ...pg.QOpt) ([]Log, error) - SelectLogsCreatedAfter(address common.Address, eventSig common.Hash, after time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) - SelectLatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs Confirmations, qopts ...pg.QOpt) (*Log, error) - SelectLatestLogEventSigsAddrsWithConfs(fromBlock int64, addresses []common.Address, eventSigs []common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) - SelectLatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations, qopts ...pg.QOpt) (int64, error) - - SelectIndexedLogs(address common.Address, eventSig common.Hash, topicIndex int, topicValues 
[]common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) - SelectIndexedLogsByBlockRange(start, end int64, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, qopts ...pg.QOpt) ([]Log, error) - SelectIndexedLogsCreatedAfter(address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) - SelectIndexedLogsTopicGreaterThan(address common.Address, eventSig common.Hash, topicIndex int, topicValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) - SelectIndexedLogsTopicRange(address common.Address, eventSig common.Hash, topicIndex int, topicValueMin, topicValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) - SelectIndexedLogsWithSigsExcluding(sigA, sigB common.Hash, topicIndex int, address common.Address, startBlock, endBlock int64, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) - SelectIndexedLogsByTxHash(address common.Address, eventSig common.Hash, txHash common.Hash, qopts ...pg.QOpt) ([]Log, error) - SelectLogsDataWordRange(address common.Address, eventSig common.Hash, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) - SelectLogsDataWordGreaterThan(address common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) - SelectLogsDataWordBetween(address common.Address, eventSig common.Hash, wordIndexMin int, wordIndexMax int, wordValue common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) + InsertLogs(ctx context.Context, logs []Log) error + InsertLogsWithBlock(ctx context.Context, logs []Log, block LogPollerBlock) error + InsertFilter(ctx context.Context, filter Filter) error + + LoadFilters(ctx context.Context) (map[string]Filter, error) + DeleteFilter(ctx context.Context, name string) error + + DeleteBlocksBefore(ctx 
context.Context, end int64) error + DeleteLogsAndBlocksAfter(ctx context.Context, start int64) error + DeleteExpiredLogs(ctx context.Context) error + + GetBlocksRange(ctx context.Context, start int64, end int64) ([]LogPollerBlock, error) + SelectBlockByNumber(ctx context.Context, blockNumber int64) (*LogPollerBlock, error) + SelectLatestBlock(ctx context.Context) (*LogPollerBlock, error) + + SelectLogs(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash) ([]Log, error) + SelectLogsWithSigs(ctx context.Context, start, end int64, address common.Address, eventSigs []common.Hash) ([]Log, error) + SelectLogsCreatedAfter(ctx context.Context, address common.Address, eventSig common.Hash, after time.Time, confs Confirmations) ([]Log, error) + SelectLatestLogByEventSigWithConfs(ctx context.Context, eventSig common.Hash, address common.Address, confs Confirmations) (*Log, error) + SelectLatestLogEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, addresses []common.Address, eventSigs []common.Hash, confs Confirmations) ([]Log, error) + SelectLatestBlockByEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) (int64, error) + + SelectIndexedLogs(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, confs Confirmations) ([]Log, error) + SelectIndexedLogsByBlockRange(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash) ([]Log, error) + SelectIndexedLogsCreatedAfter(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations) ([]Log, error) + SelectIndexedLogsTopicGreaterThan(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValueMin common.Hash, confs Confirmations) ([]Log, error) + 
SelectIndexedLogsTopicRange(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValueMin, topicValueMax common.Hash, confs Confirmations) ([]Log, error) + SelectIndexedLogsWithSigsExcluding(ctx context.Context, sigA, sigB common.Hash, topicIndex int, address common.Address, startBlock, endBlock int64, confs Confirmations) ([]Log, error) + SelectIndexedLogsByTxHash(ctx context.Context, address common.Address, eventSig common.Hash, txHash common.Hash) ([]Log, error) + SelectLogsDataWordRange(ctx context.Context, address common.Address, eventSig common.Hash, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations) ([]Log, error) + SelectLogsDataWordGreaterThan(ctx context.Context, address common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, confs Confirmations) ([]Log, error) + SelectLogsDataWordBetween(ctx context.Context, address common.Address, eventSig common.Hash, wordIndexMin int, wordIndexMax int, wordValue common.Hash, confs Confirmations) ([]Log, error) } type DbORM struct { chainID *big.Int - q pg.Q + db sqlutil.Queryer lggr logger.Logger } +var _ ORM = &DbORM{} + // NewORM creates a DbORM scoped to chainID. -func NewORM(chainID *big.Int, db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig) *DbORM { - namedLogger := logger.Named(lggr, "Configs") - q := pg.NewQ(db, namedLogger, cfg) +func NewORM(chainID *big.Int, db *sqlx.DB, lggr logger.Logger) *DbORM { return &DbORM{ chainID: chainID, - q: q, + db: db, lggr: lggr, } } // InsertBlock is idempotent to support replays. -func (o *DbORM) InsertBlock(blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, finalizedBlock int64, qopts ...pg.QOpt) error { - args, err := newQueryArgs(o.chainID). - withCustomHashArg("block_hash", blockHash). - withCustomArg("block_number", blockNumber). - withCustomArg("block_timestamp", blockTimestamp). - withCustomArg("finalized_block_number", finalizedBlock). 
- toArgs() - if err != nil { - return err - } - return o.q.WithOpts(qopts...).ExecQNamed(` - INSERT INTO evm.log_poller_blocks - (evm_chain_id, block_hash, block_number, block_timestamp, finalized_block_number, created_at) - VALUES (:evm_chain_id, :block_hash, :block_number, :block_timestamp, :finalized_block_number, NOW()) - ON CONFLICT DO NOTHING`, args) +func (o *DbORM) InsertBlock(ctx context.Context, blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, finalizedBlock int64) error { + query := `INSERT INTO evm.log_poller_blocks (evm_chain_id, block_hash, block_number, block_timestamp, finalized_block_number, created_at) + VALUES ($1, $2, $3, $4, $5, NOW()) + ON CONFLICT DO NOTHING` + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + _, err := o.db.ExecContext(ctx, query, o.chainID.String(), blockHash, blockNumber, blockTimestamp, finalizedBlock) + return err } // InsertFilter is idempotent. // // Each address/event pair must have a unique job id, so it may be removed when the job is deleted. // If a second job tries to overwrite the same pair, this should fail. -func (o *DbORM) InsertFilter(filter Filter, qopts ...pg.QOpt) (err error) { +func (o *DbORM) InsertFilter(ctx context.Context, filter Filter) (err error) { args, err := newQueryArgs(o.chainID). withCustomArg("name", filter.Name). withCustomArg("retention", filter.Retention). 
@@ -103,9 +99,10 @@ func (o *DbORM) InsertFilter(filter Filter, qopts ...pg.QOpt) (err error) { if err != nil { return err } + // '::' has to be escaped in the query string // https://github.com/jmoiron/sqlx/issues/91, https://github.com/jmoiron/sqlx/issues/428 - return o.q.WithOpts(qopts...).ExecQNamed(` + query := ` INSERT INTO evm.log_poller_filters (name, evm_chain_id, retention, created_at, address, event) SELECT * FROM @@ -113,25 +110,47 @@ func (o *DbORM) InsertFilter(filter Filter, qopts ...pg.QOpt) (err error) { (SELECT unnest(:address_array ::::BYTEA[]) addr) a, (SELECT unnest(:event_sig_array ::::BYTEA[]) ev) e ON CONFLICT (name, evm_chain_id, address, event) - DO UPDATE SET retention=:retention ::::BIGINT`, args) + DO UPDATE SET retention=:retention ::::BIGINT` + + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + _, err = o.db.ExecContext(ctx, query, args) + return err } // DeleteFilter removes all events,address pairs associated with the Filter -func (o *DbORM) DeleteFilter(name string, qopts ...pg.QOpt) error { - q := o.q.WithOpts(qopts...) - return q.ExecQ(`DELETE FROM evm.log_poller_filters WHERE name = $1 AND evm_chain_id = $2`, name, ubig.New(o.chainID)) +func (o *DbORM) DeleteFilter(ctx context.Context, name string) error { + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + _, err := o.db.ExecContext(ctx, + `DELETE FROM evm.log_poller_filters WHERE name = $1 AND evm_chain_id = $2`, + name, ubig.New(o.chainID)) + return err + } -// LoadFiltersForChain returns all filters for this chain -func (o *DbORM) LoadFilters(qopts ...pg.QOpt) (map[string]Filter, error) { - q := o.q.WithOpts(qopts...) 
+// LoadFilters returns all filters for this chain +func (o *DbORM) LoadFilters(ctx context.Context) (map[string]Filter, error) { rows := make([]Filter, 0) - err := q.Select(&rows, `SELECT name, + + query := `SELECT name, ARRAY_AGG(DISTINCT address)::BYTEA[] AS addresses, ARRAY_AGG(DISTINCT event)::BYTEA[] AS event_sigs, MAX(retention) AS retention FROM evm.log_poller_filters WHERE evm_chain_id = $1 - GROUP BY name`, ubig.New(o.chainID)) + GROUP BY name` + + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + err := o.db.SelectContext(ctx, &rows, query, ubig.New(o.chainID)) + /* + err := q.Select(&rows, `SELECT name, + ARRAY_AGG(DISTINCT address)::BYTEA[] AS addresses, + ARRAY_AGG(DISTINCT event)::BYTEA[] AS event_sigs, + MAX(retention) AS retention + FROM evm.log_poller_filters WHERE evm_chain_id = $1 + GROUP BY name`, ubig.New(o.chainID)) + */ filters := make(map[string]Filter) for _, filter := range rows { filters[filter.Name] = filter @@ -140,34 +159,37 @@ func (o *DbORM) LoadFilters(qopts ...pg.QOpt) (map[string]Filter, error) { return filters, err } -func (o *DbORM) SelectBlockByHash(hash common.Hash, qopts ...pg.QOpt) (*LogPollerBlock, error) { - q := o.q.WithOpts(qopts...) +func (o *DbORM) SelectBlockByHash(ctx context.Context, hash common.Hash) (*LogPollerBlock, error) { var b LogPollerBlock - if err := q.Get(&b, `SELECT * FROM evm.log_poller_blocks WHERE block_hash = $1 AND evm_chain_id = $2`, hash, ubig.New(o.chainID)); err != nil { + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + if err := o.db.GetContext(ctx, &b, `SELECT * FROM evm.log_poller_blocks WHERE block_hash = $1 AND evm_chain_id = $2`, hash, ubig.New(o.chainID)); err != nil { return nil, err } return &b, nil } -func (o *DbORM) SelectBlockByNumber(n int64, qopts ...pg.QOpt) (*LogPollerBlock, error) { - q := o.q.WithOpts(qopts...) 
+func (o *DbORM) SelectBlockByNumber(ctx context.Context, n int64) (*LogPollerBlock, error) { var b LogPollerBlock - if err := q.Get(&b, `SELECT * FROM evm.log_poller_blocks WHERE block_number = $1 AND evm_chain_id = $2`, n, ubig.New(o.chainID)); err != nil { + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + if err := o.db.GetContext(ctx, &b, `SELECT * FROM evm.log_poller_blocks WHERE block_number = $1 AND evm_chain_id = $2`, n, ubig.New(o.chainID)); err != nil { return nil, err } return &b, nil } -func (o *DbORM) SelectLatestBlock(qopts ...pg.QOpt) (*LogPollerBlock, error) { - q := o.q.WithOpts(qopts...) +func (o *DbORM) SelectLatestBlock(ctx context.Context) (*LogPollerBlock, error) { var b LogPollerBlock - if err := q.Get(&b, `SELECT * FROM evm.log_poller_blocks WHERE evm_chain_id = $1 ORDER BY block_number DESC LIMIT 1`, ubig.New(o.chainID)); err != nil { + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + if err := o.db.GetContext(ctx, &b, `SELECT * FROM evm.log_poller_blocks WHERE evm_chain_id = $1 ORDER BY block_number DESC LIMIT 1`, o.chainID.String()); err != nil { return nil, err } return &b, nil } -func (o *DbORM) SelectLatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs Confirmations, qopts ...pg.QOpt) (*Log, error) { +func (o *DbORM) SelectLatestLogByEventSigWithConfs(ctx context.Context, eventSig common.Hash, address common.Address, confs Confirmations) (*Log, error) { args, err := newQueryArgsForEvent(o.chainID, address, eventSig). withConfs(confs). 
toArgs() @@ -182,23 +204,31 @@ func (o *DbORM) SelectLatestLogByEventSigWithConfs(eventSig common.Hash, address AND block_number <= %s ORDER BY (block_number, log_index) DESC LIMIT 1`, nestedBlockNumberQuery(confs)) var l Log - if err := o.q.WithOpts(qopts...).GetNamed(query, &l, args); err != nil { + + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + if err := o.db.GetContext(ctx, &l, query, args); err != nil { return nil, err } return &l, nil } // DeleteBlocksBefore delete all blocks before and including end. -func (o *DbORM) DeleteBlocksBefore(end int64, qopts ...pg.QOpt) error { - q := o.q.WithOpts(qopts...) - _, err := q.Exec(`DELETE FROM evm.log_poller_blocks WHERE block_number <= $1 AND evm_chain_id = $2`, end, ubig.New(o.chainID)) +func (o *DbORM) DeleteBlocksBefore(ctx context.Context, end int64) error { + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + _, err := o.db.ExecContext(ctx, `DELETE FROM evm.log_poller_blocks WHERE block_number <= $1 AND evm_chain_id = $2`, end, ubig.New(o.chainID)) return err } -func (o *DbORM) DeleteLogsAndBlocksAfter(start int64, qopts ...pg.QOpt) error { +func (o *DbORM) DeleteLogsAndBlocksAfter(ctx context.Context, start int64) error { // These deletes are bounded by reorg depth, so they are // fast and should not slow down the log readers. - return o.q.WithOpts(qopts...).Transaction(func(tx pg.Queryer) error { + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + + // TODO: Is Transact working?? Why are tests failing + performInsert := func(tx *sqlx.Tx) error { args, err := newQueryArgs(o.chainID). withStartBlock(start). 
toArgs() @@ -219,7 +249,10 @@ func (o *DbORM) DeleteLogsAndBlocksAfter(start int64, qopts ...pg.QOpt) error { return err } return nil - }) + } + return sqlutil.Transact[*sqlx.Tx](ctx, func(q sqlutil.Queryer) *sqlx.Tx { + return q.(*sqlx.Tx) + }, o.db, nil, performInsert) } type Exp struct { @@ -230,11 +263,12 @@ type Exp struct { ShouldDelete bool } -func (o *DbORM) DeleteExpiredLogs(qopts ...pg.QOpt) error { - qopts = append(qopts, pg.WithLongQueryTimeout()) - q := o.q.WithOpts(qopts...) +func (o *DbORM) DeleteExpiredLogs(ctx context.Context) error { + // TODO: LongQueryTimeout? + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() - return q.ExecQ(`WITH r AS + _, err := o.db.ExecContext(ctx, `WITH r AS ( SELECT address, event, MAX(retention) AS retention FROM evm.log_poller_filters WHERE evm_chain_id=$1 GROUP BY evm_chain_id,address, event HAVING NOT 0 = ANY(ARRAY_AGG(retention)) @@ -242,39 +276,50 @@ func (o *DbORM) DeleteExpiredLogs(qopts ...pg.QOpt) error { WHERE l.evm_chain_id = $1 AND l.address=r.address AND l.event_sig=r.event AND l.created_at <= STATEMENT_TIMESTAMP() - (r.retention / 10^9 * interval '1 second')`, // retention is in nanoseconds (time.Duration aka BIGINT) ubig.New(o.chainID)) + return err } // InsertLogs is idempotent to support replays. 
-func (o *DbORM) InsertLogs(logs []Log, qopts ...pg.QOpt) error { +func (o *DbORM) InsertLogs(ctx context.Context, logs []Log) error { if err := o.validateLogs(logs); err != nil { return err } - return o.q.WithOpts(qopts...).Transaction(func(tx pg.Queryer) error { - return o.insertLogsWithinTx(logs, tx) - }) + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + performInsert := func(tx *sqlx.Tx) error { + return o.insertLogsWithinTx(ctx, logs, tx) + } + return sqlutil.Transact[*sqlx.Tx](ctx, func(q sqlutil.Queryer) *sqlx.Tx { + return q.(*sqlx.Tx) + }, o.db, nil, performInsert) } -func (o *DbORM) InsertLogsWithBlock(logs []Log, block LogPollerBlock, qopts ...pg.QOpt) error { +func (o *DbORM) InsertLogsWithBlock(ctx context.Context, logs []Log, block LogPollerBlock) error { // Optimization, don't open TX when there is only a block to be persisted if len(logs) == 0 { - return o.InsertBlock(block.BlockHash, block.BlockNumber, block.BlockTimestamp, block.FinalizedBlockNumber, qopts...) 
+ return o.InsertBlock(ctx, block.BlockHash, block.BlockNumber, block.BlockTimestamp, block.FinalizedBlockNumber) } if err := o.validateLogs(logs); err != nil { return err } + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() // Block and logs goes with the same TX to ensure atomicity - return o.q.WithOpts(qopts...).Transaction(func(tx pg.Queryer) error { - if err := o.InsertBlock(block.BlockHash, block.BlockNumber, block.BlockTimestamp, block.FinalizedBlockNumber, pg.WithQueryer(tx)); err != nil { + performInsert := func(tx *sqlx.Tx) error { + if err := o.InsertBlock(ctx, block.BlockHash, block.BlockNumber, block.BlockTimestamp, block.FinalizedBlockNumber); err != nil { return err } - return o.insertLogsWithinTx(logs, tx) - }) + return o.insertLogsWithinTx(ctx, logs, tx) + } + return sqlutil.Transact[*sqlx.Tx](ctx, func(q sqlutil.Queryer) *sqlx.Tx { + return q.(*sqlx.Tx) + }, o.db, nil, performInsert) } -func (o *DbORM) insertLogsWithinTx(logs []Log, tx pg.Queryer) error { +func (o *DbORM) insertLogsWithinTx(ctx context.Context, logs []Log, tx *sqlx.Tx) error { batchInsertSize := 4000 for i := 0; i < len(logs); i += batchInsertSize { start, end := i, i+batchInsertSize @@ -282,7 +327,7 @@ func (o *DbORM) insertLogsWithinTx(logs []Log, tx pg.Queryer) error { end = len(logs) } - _, err := tx.NamedExec(` + _, err := tx.NamedExecContext(ctx, ` INSERT INTO evm.logs (evm_chain_id, log_index, block_hash, block_number, block_timestamp, address, event_sig, topics, tx_hash, data, created_at) VALUES @@ -313,7 +358,7 @@ func (o *DbORM) validateLogs(logs []Log) error { return nil } -func (o *DbORM) SelectLogsByBlockRange(start, end int64) ([]Log, error) { +func (o *DbORM) SelectLogsByBlockRange(ctx context.Context, start, end int64) ([]Log, error) { args, err := newQueryArgs(o.chainID). withStartBlock(start). withEndBlock(end). 
@@ -323,7 +368,9 @@ func (o *DbORM) SelectLogsByBlockRange(start, end int64) ([]Log, error) { } var logs []Log - err = o.q.SelectNamed(&logs, ` + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + err = o.db.SelectContext(ctx, &logs, ` SELECT * FROM evm.logs WHERE evm_chain_id = :evm_chain_id AND block_number >= :start_block @@ -335,8 +382,8 @@ func (o *DbORM) SelectLogsByBlockRange(start, end int64) ([]Log, error) { return logs, nil } -// SelectLogsByBlockRangeFilter finds the logs in a given block range. -func (o *DbORM) SelectLogs(start, end int64, address common.Address, eventSig common.Hash, qopts ...pg.QOpt) ([]Log, error) { +// SelectLogs finds the logs in a given block range. +func (o *DbORM) SelectLogs(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash) ([]Log, error) { args, err := newQueryArgsForEvent(o.chainID, address, eventSig). withStartBlock(start). withEndBlock(end). @@ -344,8 +391,11 @@ func (o *DbORM) SelectLogs(start, end int64, address common.Address, eventSig co if err != nil { return nil, err } + var logs []Log - err = o.q.WithOpts(qopts...).SelectNamed(&logs, ` + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + err = o.db.SelectContext(ctx, &logs, ` SELECT * FROM evm.logs WHERE evm_chain_id = :evm_chain_id AND address = :address @@ -360,7 +410,7 @@ func (o *DbORM) SelectLogs(start, end int64, address common.Address, eventSig co } // SelectLogsCreatedAfter finds logs created after some timestamp. -func (o *DbORM) SelectLogsCreatedAfter(address common.Address, eventSig common.Hash, after time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (o *DbORM) SelectLogsCreatedAfter(ctx context.Context, address common.Address, eventSig common.Hash, after time.Time, confs Confirmations) ([]Log, error) { args, err := newQueryArgsForEvent(o.chainID, address, eventSig). withBlockTimestampAfter(after). withConfs(confs). 
@@ -379,15 +429,17 @@ func (o *DbORM) SelectLogsCreatedAfter(address common.Address, eventSig common.H ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log - if err = o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + if err = o.db.SelectContext(ctx, &logs, query, args); err != nil { return nil, err } return logs, nil } -// SelectLogsWithSigsByBlockRangeFilter finds the logs in the given block range with the given event signatures +// SelectLogsWithSigs finds the logs in the given block range with the given event signatures // emitted from the given address. -func (o *DbORM) SelectLogsWithSigs(start, end int64, address common.Address, eventSigs []common.Hash, qopts ...pg.QOpt) (logs []Log, err error) { +func (o *DbORM) SelectLogsWithSigs(ctx context.Context, start, end int64, address common.Address, eventSigs []common.Hash) (logs []Log, err error) { args, err := newQueryArgs(o.chainID). withAddress(address). withEventSigArray(eventSigs). @@ -398,8 +450,9 @@ func (o *DbORM) SelectLogsWithSigs(start, end int64, address common.Address, eve return nil, err } - q := o.q.WithOpts(qopts...) - err = q.SelectNamed(&logs, ` + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + err = o.db.SelectContext(ctx, &logs, ` SELECT * FROM evm.logs WHERE evm_chain_id = :evm_chain_id AND address = :address @@ -412,21 +465,16 @@ func (o *DbORM) SelectLogsWithSigs(start, end int64, address common.Address, eve return logs, err } -func (o *DbORM) GetBlocksRange(start int64, end int64, qopts ...pg.QOpt) ([]LogPollerBlock, error) { - args, err := newQueryArgs(o.chainID). - withStartBlock(start). - withEndBlock(end). 
- toArgs() - if err != nil { - return nil, err - } +func (o *DbORM) GetBlocksRange(ctx context.Context, start int64, end int64) ([]LogPollerBlock, error) { var blocks []LogPollerBlock - err = o.q.WithOpts(qopts...).SelectNamed(&blocks, ` + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + err := o.db.SelectContext(ctx, &blocks, ` SELECT * FROM evm.log_poller_blocks - WHERE block_number >= :start_block - AND block_number <= :end_block - AND evm_chain_id = :evm_chain_id - ORDER BY block_number ASC`, args) + WHERE block_number >= $1 + AND block_number <= $2 + AND evm_chain_id = $3 + ORDER BY block_number ASC`, start, end, o.chainID.String()) if err != nil { return nil, err } @@ -434,7 +482,7 @@ func (o *DbORM) GetBlocksRange(start int64, end int64, qopts ...pg.QOpt) ([]LogP } // SelectLatestLogEventSigsAddrsWithConfs finds the latest log by (address, event) combination that matches a list of Addresses and list of events -func (o *DbORM) SelectLatestLogEventSigsAddrsWithConfs(fromBlock int64, addresses []common.Address, eventSigs []common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (o *DbORM) SelectLatestLogEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, addresses []common.Address, eventSigs []common.Hash, confs Confirmations) ([]Log, error) { args, err := newQueryArgs(o.chainID). withAddressArray(addresses). withEventSigArray(eventSigs). 
@@ -444,6 +492,7 @@ func (o *DbORM) SelectLatestLogEventSigsAddrsWithConfs(fromBlock int64, addresse if err != nil { return nil, err } + query := fmt.Sprintf(` SELECT * FROM evm.logs WHERE (block_number, address, event_sig) IN ( SELECT MAX(block_number), address, event_sig FROM evm.logs @@ -455,15 +504,18 @@ func (o *DbORM) SelectLatestLogEventSigsAddrsWithConfs(fromBlock int64, addresse GROUP BY event_sig, address ) ORDER BY block_number ASC`, nestedBlockNumberQuery(confs)) + var logs []Log - if err := o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + if err := o.db.SelectContext(ctx, &logs, query, args); err != nil { return nil, errors.Wrap(err, "failed to execute query") } return logs, nil } -// SelectLatestBlockNumberEventSigsAddrsWithConfs finds the latest block number that matches a list of Addresses and list of events. It returns 0 if there is no matching block -func (o *DbORM) SelectLatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations, qopts ...pg.QOpt) (int64, error) { +// SelectLatestBlockByEventSigsAddrsWithConfs finds the latest block number that matches a list of Addresses and list of events. It returns 0 if there is no matching block +func (o *DbORM) SelectLatestBlockByEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) (int64, error) { args, err := newQueryArgs(o.chainID). withEventSigArray(eventSigs). withAddressArray(addresses). 
@@ -481,13 +533,15 @@ func (o *DbORM) SelectLatestBlockByEventSigsAddrsWithConfs(fromBlock int64, even AND block_number > :start_block AND block_number <= %s`, nestedBlockNumberQuery(confs)) var blockNumber int64 - if err := o.q.WithOpts(qopts...).GetNamed(query, &blockNumber, args); err != nil { + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + if err := o.db.GetContext(ctx, &blockNumber, query, args); err != nil { return 0, err } return blockNumber, nil } -func (o *DbORM) SelectLogsDataWordRange(address common.Address, eventSig common.Hash, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (o *DbORM) SelectLogsDataWordRange(ctx context.Context, address common.Address, eventSig common.Hash, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations) ([]Log, error) { args, err := newQueryArgsForEvent(o.chainID, address, eventSig). withWordIndex(wordIndex). withWordValueMin(wordValueMin). @@ -506,13 +560,15 @@ func (o *DbORM) SelectLogsDataWordRange(address common.Address, eventSig common. AND block_number <= %s ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log - if err := o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + if err := o.db.SelectContext(ctx, &logs, query, args); err != nil { return nil, err } return logs, nil } -func (o *DbORM) SelectLogsDataWordGreaterThan(address common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (o *DbORM) SelectLogsDataWordGreaterThan(ctx context.Context, address common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, confs Confirmations) ([]Log, error) { args, err := newQueryArgsForEvent(o.chainID, address, eventSig). withWordIndex(wordIndex). withWordValueMin(wordValueMin). 
@@ -530,13 +586,15 @@ func (o *DbORM) SelectLogsDataWordGreaterThan(address common.Address, eventSig c AND block_number <= %s ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log - if err = o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + if err = o.db.SelectContext(ctx, &logs, query, args); err != nil { return nil, err } return logs, nil } -func (o *DbORM) SelectLogsDataWordBetween(address common.Address, eventSig common.Hash, wordIndexMin int, wordIndexMax int, wordValue common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (o *DbORM) SelectLogsDataWordBetween(ctx context.Context, address common.Address, eventSig common.Hash, wordIndexMin int, wordIndexMax int, wordValue common.Hash, confs Confirmations) ([]Log, error) { args, err := newQueryArgsForEvent(o.chainID, address, eventSig). withWordIndexMin(wordIndexMin). withWordIndexMax(wordIndexMax). @@ -556,13 +614,15 @@ func (o *DbORM) SelectLogsDataWordBetween(address common.Address, eventSig commo AND block_number <= %s ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log - if err = o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + if err = o.db.SelectContext(ctx, &logs, query, args); err != nil { return nil, err } return logs, nil } -func (o *DbORM) SelectIndexedLogsTopicGreaterThan(address common.Address, eventSig common.Hash, topicIndex int, topicValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (o *DbORM) SelectIndexedLogsTopicGreaterThan(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValueMin common.Hash, confs Confirmations) ([]Log, error) { args, err := newQueryArgsForEvent(o.chainID, address, eventSig). withTopicIndex(topicIndex). 
withTopicValueMin(topicValueMin). @@ -580,13 +640,15 @@ func (o *DbORM) SelectIndexedLogsTopicGreaterThan(address common.Address, eventS AND block_number <= %s ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log - if err = o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + if err = o.db.SelectContext(ctx, &logs, query, args); err != nil { return nil, err } return logs, nil } -func (o *DbORM) SelectIndexedLogsTopicRange(address common.Address, eventSig common.Hash, topicIndex int, topicValueMin, topicValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (o *DbORM) SelectIndexedLogsTopicRange(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValueMin, topicValueMax common.Hash, confs Confirmations) ([]Log, error) { args, err := newQueryArgsForEvent(o.chainID, address, eventSig). withTopicIndex(topicIndex). withTopicValueMin(topicValueMin). @@ -606,13 +668,15 @@ func (o *DbORM) SelectIndexedLogsTopicRange(address common.Address, eventSig com AND block_number <= %s ORDER BY (evm.logs.block_number, evm.logs.log_index)`, nestedBlockNumberQuery(confs)) var logs []Log - if err := o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + if err := o.db.SelectContext(ctx, &logs, query, args); err != nil { return nil, err } return logs, nil } -func (o *DbORM) SelectIndexedLogs(address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (o *DbORM) SelectIndexedLogs(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, confs Confirmations) ([]Log, error) { args, err := newQueryArgsForEvent(o.chainID, address, eventSig). withTopicIndex(topicIndex). 
withTopicValues(topicValues). @@ -630,14 +694,16 @@ func (o *DbORM) SelectIndexedLogs(address common.Address, eventSig common.Hash, AND block_number <= %s ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log - if err := o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + if err := o.db.SelectContext(ctx, &logs, query, args); err != nil { return nil, err } return logs, nil } -// SelectIndexedLogsByBlockRangeFilter finds the indexed logs in a given block range. -func (o *DbORM) SelectIndexedLogsByBlockRange(start, end int64, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, qopts ...pg.QOpt) ([]Log, error) { +// SelectIndexedLogsByBlockRange finds the indexed logs in a given block range. +func (o *DbORM) SelectIndexedLogsByBlockRange(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash) ([]Log, error) { args, err := newQueryArgsForEvent(o.chainID, address, eventSig). withTopicIndex(topicIndex). withTopicValues(topicValues). 
@@ -648,7 +714,9 @@ func (o *DbORM) SelectIndexedLogsByBlockRange(start, end int64, address common.A return nil, err } var logs []Log - err = o.q.WithOpts(qopts...).SelectNamed(&logs, ` + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + err = o.db.SelectContext(ctx, &logs, ` SELECT * FROM evm.logs WHERE evm_chain_id = :evm_chain_id AND address = :address @@ -663,7 +731,7 @@ func (o *DbORM) SelectIndexedLogsByBlockRange(start, end int64, address common.A return logs, nil } -func (o *DbORM) SelectIndexedLogsCreatedAfter(address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (o *DbORM) SelectIndexedLogsCreatedAfter(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations) ([]Log, error) { args, err := newQueryArgsForEvent(o.chainID, address, eventSig). withBlockTimestampAfter(after). withConfs(confs). @@ -685,13 +753,15 @@ func (o *DbORM) SelectIndexedLogsCreatedAfter(address common.Address, eventSig c ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log - if err = o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + if err = o.db.SelectContext(ctx, &logs, query, args); err != nil { return nil, err } return logs, nil } -func (o *DbORM) SelectIndexedLogsByTxHash(address common.Address, eventSig common.Hash, txHash common.Hash, qopts ...pg.QOpt) ([]Log, error) { +func (o *DbORM) SelectIndexedLogsByTxHash(ctx context.Context, address common.Address, eventSig common.Hash, txHash common.Hash) ([]Log, error) { args, err := newQueryArgs(o.chainID). withTxHash(txHash). withAddress(address). 
@@ -701,7 +771,9 @@ func (o *DbORM) SelectIndexedLogsByTxHash(address common.Address, eventSig commo return nil, err } var logs []Log - err = o.q.WithOpts(qopts...).SelectNamed(&logs, ` + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + err = o.db.SelectContext(ctx, &logs, ` SELECT * FROM evm.logs WHERE evm_chain_id = :evm_chain_id AND address = :address @@ -715,7 +787,7 @@ func (o *DbORM) SelectIndexedLogsByTxHash(address common.Address, eventSig commo } // SelectIndexedLogsWithSigsExcluding query's for logs that have signature A and exclude logs that have a corresponding signature B, matching is done based on the topic index both logs should be inside the block range and have the minimum number of confirmations -func (o *DbORM) SelectIndexedLogsWithSigsExcluding(sigA, sigB common.Hash, topicIndex int, address common.Address, startBlock, endBlock int64, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { +func (o *DbORM) SelectIndexedLogsWithSigsExcluding(ctx context.Context, sigA, sigB common.Hash, topicIndex int, address common.Address, startBlock, endBlock int64, confs Confirmations) ([]Log, error) { args, err := newQueryArgs(o.chainID). withAddress(address). withTopicIndex(topicIndex). 
@@ -749,7 +821,9 @@ func (o *DbORM) SelectIndexedLogsWithSigsExcluding(sigA, sigB common.Hash, topic AND b.block_number <= %s ORDER BY block_number,log_index ASC`, nestedQuery, nestedQuery) var logs []Log - if err := o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + if err := o.db.SelectContext(ctx, &logs, query, args); err != nil { return nil, err } return logs, nil diff --git a/core/chains/evm/logpoller/orm_test.go b/core/chains/evm/logpoller/orm_test.go index 0af62ebd547..400551211de 100644 --- a/core/chains/evm/logpoller/orm_test.go +++ b/core/chains/evm/logpoller/orm_test.go @@ -2,6 +2,7 @@ package logpoller_test import ( "bytes" + "context" "database/sql" "fmt" "math" @@ -22,8 +23,6 @@ import ( ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" "github.com/smartcontractkit/chainlink/v2/core/internal/cltest/heavyweight" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" - "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) type block struct { @@ -68,6 +67,7 @@ func GenLogWithData(chainID *big.Int, address common.Address, eventSig common.Ha func TestLogPoller_Batching(t *testing.T) { t.Parallel() + ctx := context.Background() th := SetupTH(t, false, 2, 3, 2, 1000) var logs []logpoller.Log // Inserts are limited to 65535 parameters. 
A log being 10 parameters this results in @@ -76,8 +76,8 @@ func TestLogPoller_Batching(t *testing.T) { for i := 0; i < 15000; i++ { logs = append(logs, GenLog(th.ChainID, int64(i+1), 1, "0x3", EmitterABI.Events["Log1"].ID.Bytes(), th.EmitterAddress1)) } - require.NoError(t, th.ORM.InsertLogs(logs)) - lgs, err := th.ORM.SelectLogsByBlockRange(1, 1) + require.NoError(t, th.ORM.InsertLogs(ctx, logs)) + lgs, err := th.ORM.SelectLogsByBlockRange(ctx, 1, 1) require.NoError(t, err) // Make sure all logs are inserted require.Equal(t, len(logs), len(lgs)) @@ -86,6 +86,7 @@ func TestLogPoller_Batching(t *testing.T) { func TestORM_GetBlocks_From_Range(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) o1 := th.ORM + ctx := context.Background() // Insert many blocks and read them back together blocks := []block{ { @@ -115,7 +116,7 @@ func TestORM_GetBlocks_From_Range(t *testing.T) { }, } for _, b := range blocks { - require.NoError(t, o1.InsertBlock(b.hash, b.number, time.Unix(b.timestamp, 0).UTC(), 0)) + require.NoError(t, o1.InsertBlock(ctx, b.hash, b.number, time.Unix(b.timestamp, 0).UTC(), 0)) } var blockNumbers []int64 @@ -123,17 +124,17 @@ func TestORM_GetBlocks_From_Range(t *testing.T) { blockNumbers = append(blockNumbers, b.number) } - lpBlocks, err := o1.GetBlocksRange(blockNumbers[0], blockNumbers[len(blockNumbers)-1]) + lpBlocks, err := o1.GetBlocksRange(ctx, blockNumbers[0], blockNumbers[len(blockNumbers)-1]) require.NoError(t, err) assert.Len(t, lpBlocks, len(blocks)) // Ignores non-existent block - lpBlocks2, err := o1.GetBlocksRange(blockNumbers[0], 15) + lpBlocks2, err := o1.GetBlocksRange(ctx, blockNumbers[0], 15) require.NoError(t, err) assert.Len(t, lpBlocks2, len(blocks)) // Only non-existent blocks - lpBlocks3, err := o1.GetBlocksRange(15, 15) + lpBlocks3, err := o1.GetBlocksRange(ctx, 15, 15) require.NoError(t, err) assert.Len(t, lpBlocks3, 0) } @@ -141,13 +142,14 @@ func TestORM_GetBlocks_From_Range(t *testing.T) { func 
TestORM_GetBlocks_From_Range_Recent_Blocks(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) o1 := th.ORM + ctx := context.Background() // Insert many blocks and read them back together var recentBlocks []block for i := 1; i <= 256; i++ { recentBlocks = append(recentBlocks, block{number: int64(i), hash: common.HexToHash(fmt.Sprintf("0x%d", i))}) } for _, b := range recentBlocks { - require.NoError(t, o1.InsertBlock(b.hash, b.number, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(ctx, b.hash, b.number, time.Now(), 0)) } var blockNumbers []int64 @@ -155,17 +157,17 @@ func TestORM_GetBlocks_From_Range_Recent_Blocks(t *testing.T) { blockNumbers = append(blockNumbers, b.number) } - lpBlocks, err := o1.GetBlocksRange(blockNumbers[0], blockNumbers[len(blockNumbers)-1]) + lpBlocks, err := o1.GetBlocksRange(ctx, blockNumbers[0], blockNumbers[len(blockNumbers)-1]) require.NoError(t, err) assert.Len(t, lpBlocks, len(recentBlocks)) // Ignores non-existent block - lpBlocks2, err := o1.GetBlocksRange(blockNumbers[0], 257) + lpBlocks2, err := o1.GetBlocksRange(ctx, blockNumbers[0], 257) require.NoError(t, err) assert.Len(t, lpBlocks2, len(recentBlocks)) // Only non-existent blocks - lpBlocks3, err := o1.GetBlocksRange(257, 257) + lpBlocks3, err := o1.GetBlocksRange(ctx, 257, 257) require.NoError(t, err) assert.Len(t, lpBlocks3, 0) } @@ -174,51 +176,52 @@ func TestORM(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) o1 := th.ORM o2 := th.ORM2 + ctx := context.Background() // Insert and read back a block. 
- require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 10, time.Now(), 0)) - b, err := o1.SelectBlockByHash(common.HexToHash("0x1234")) + require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1234"), 10, time.Now(), 0)) + b, err := o1.SelectBlockByHash(ctx, common.HexToHash("0x1234")) require.NoError(t, err) assert.Equal(t, b.BlockNumber, int64(10)) assert.Equal(t, b.BlockHash.Bytes(), common.HexToHash("0x1234").Bytes()) assert.Equal(t, b.EvmChainId.String(), th.ChainID.String()) // Insert blocks from a different chain - require.NoError(t, o2.InsertBlock(common.HexToHash("0x1234"), 11, time.Now(), 0)) - require.NoError(t, o2.InsertBlock(common.HexToHash("0x1235"), 12, time.Now(), 0)) - b2, err := o2.SelectBlockByHash(common.HexToHash("0x1234")) + require.NoError(t, o2.InsertBlock(ctx, common.HexToHash("0x1234"), 11, time.Now(), 0)) + require.NoError(t, o2.InsertBlock(ctx, common.HexToHash("0x1235"), 12, time.Now(), 0)) + b2, err := o2.SelectBlockByHash(ctx, common.HexToHash("0x1234")) require.NoError(t, err) assert.Equal(t, b2.BlockNumber, int64(11)) assert.Equal(t, b2.BlockHash.Bytes(), common.HexToHash("0x1234").Bytes()) assert.Equal(t, b2.EvmChainId.String(), th.ChainID2.String()) - latest, err := o1.SelectLatestBlock() + latest, err := o1.SelectLatestBlock(ctx) require.NoError(t, err) assert.Equal(t, int64(10), latest.BlockNumber) - latest, err = o2.SelectLatestBlock() + latest, err = o2.SelectLatestBlock(ctx) require.NoError(t, err) assert.Equal(t, int64(12), latest.BlockNumber) // Delete a block (only 10 on chain). - require.NoError(t, o1.DeleteLogsAndBlocksAfter(10)) - _, err = o1.SelectBlockByHash(common.HexToHash("0x1234")) + require.NoError(t, o1.DeleteLogsAndBlocksAfter(ctx, 10)) + _, err = o1.SelectBlockByHash(ctx, common.HexToHash("0x1234")) require.Error(t, err) assert.True(t, errors.Is(err, sql.ErrNoRows)) // Delete blocks from another chain. 
- require.NoError(t, o2.DeleteLogsAndBlocksAfter(11)) - _, err = o2.SelectBlockByHash(common.HexToHash("0x1234")) + require.NoError(t, o2.DeleteLogsAndBlocksAfter(ctx, 11)) + _, err = o2.SelectBlockByHash(ctx, common.HexToHash("0x1234")) require.Error(t, err) assert.True(t, errors.Is(err, sql.ErrNoRows)) // Delete blocks after should also delete block 12. - _, err = o2.SelectBlockByHash(common.HexToHash("0x1235")) + _, err = o2.SelectBlockByHash(ctx, common.HexToHash("0x1235")) require.Error(t, err) assert.True(t, errors.Is(err, sql.ErrNoRows)) // Should be able to insert and read back a log. topic := common.HexToHash("0x1599") topic2 := common.HexToHash("0x1600") - require.NoError(t, o1.InsertLogs([]logpoller.Log{ + require.NoError(t, o1.InsertLogs(ctx, []logpoller.Log{ { EvmChainId: ubig.New(th.ChainID), LogIndex: 1, @@ -310,86 +313,86 @@ func TestORM(t *testing.T) { })) t.Log(latest.BlockNumber) - logs, err := o1.SelectLogsByBlockRange(1, 17) + logs, err := o1.SelectLogsByBlockRange(ctx, 1, 17) require.NoError(t, err) require.Len(t, logs, 8) - logs, err = o1.SelectLogsByBlockRange(10, 10) + logs, err = o1.SelectLogsByBlockRange(ctx, 10, 10) require.NoError(t, err) require.Equal(t, 1, len(logs)) assert.Equal(t, []byte("hello"), logs[0].Data) - logs, err = o1.SelectLogs(1, 1, common.HexToAddress("0x1234"), topic) + logs, err = o1.SelectLogs(ctx, 1, 1, common.HexToAddress("0x1234"), topic) require.NoError(t, err) assert.Equal(t, 0, len(logs)) - logs, err = o1.SelectLogs(10, 10, common.HexToAddress("0x1234"), topic) + logs, err = o1.SelectLogs(ctx, 10, 10, common.HexToAddress("0x1234"), topic) require.NoError(t, err) require.Equal(t, 1, len(logs)) // With no blocks, should be an error - _, err = o1.SelectLatestLogByEventSigWithConfs(topic, common.HexToAddress("0x1234"), 0) + _, err = o1.SelectLatestLogByEventSigWithConfs(ctx, topic, common.HexToAddress("0x1234"), 0) require.Error(t, err) assert.True(t, errors.Is(err, sql.ErrNoRows)) // With block 10, only 0 confs 
should work - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 10, time.Now(), 0)) - log, err := o1.SelectLatestLogByEventSigWithConfs(topic, common.HexToAddress("0x1234"), 0) + require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1234"), 10, time.Now(), 0)) + log, err := o1.SelectLatestLogByEventSigWithConfs(ctx, topic, common.HexToAddress("0x1234"), 0) require.NoError(t, err) assert.Equal(t, int64(10), log.BlockNumber) - _, err = o1.SelectLatestLogByEventSigWithConfs(topic, common.HexToAddress("0x1234"), 1) + _, err = o1.SelectLatestLogByEventSigWithConfs(ctx, topic, common.HexToAddress("0x1234"), 1) require.Error(t, err) assert.True(t, errors.Is(err, sql.ErrNoRows)) // With block 12, anything <=2 should work - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 11, time.Now(), 0)) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1235"), 12, time.Now(), 0)) - _, err = o1.SelectLatestLogByEventSigWithConfs(topic, common.HexToAddress("0x1234"), 0) + require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1234"), 11, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1235"), 12, time.Now(), 0)) + _, err = o1.SelectLatestLogByEventSigWithConfs(ctx, topic, common.HexToAddress("0x1234"), 0) require.NoError(t, err) - _, err = o1.SelectLatestLogByEventSigWithConfs(topic, common.HexToAddress("0x1234"), 1) + _, err = o1.SelectLatestLogByEventSigWithConfs(ctx, topic, common.HexToAddress("0x1234"), 1) require.NoError(t, err) - _, err = o1.SelectLatestLogByEventSigWithConfs(topic, common.HexToAddress("0x1234"), 2) + _, err = o1.SelectLatestLogByEventSigWithConfs(ctx, topic, common.HexToAddress("0x1234"), 2) require.NoError(t, err) - _, err = o1.SelectLatestLogByEventSigWithConfs(topic, common.HexToAddress("0x1234"), 3) + _, err = o1.SelectLatestLogByEventSigWithConfs(ctx, topic, common.HexToAddress("0x1234"), 3) require.Error(t, err) assert.True(t, errors.Is(err, sql.ErrNoRows)) // Required for confirmations to 
work - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 13, time.Now(), 0)) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1235"), 14, time.Now(), 0)) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1236"), 15, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1234"), 13, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1235"), 14, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1236"), 15, time.Now(), 0)) // Latest log for topic for addr "0x1234" is @ block 11 - lgs, err := o1.SelectLatestLogEventSigsAddrsWithConfs(0 /* startBlock */, []common.Address{common.HexToAddress("0x1234")}, []common.Hash{topic}, 0) + lgs, err := o1.SelectLatestLogEventSigsAddrsWithConfs(ctx, 0 /* startBlock */, []common.Address{common.HexToAddress("0x1234")}, []common.Hash{topic}, 0) require.NoError(t, err) require.Equal(t, 1, len(lgs)) require.Equal(t, int64(11), lgs[0].BlockNumber) // should return two entries one for each address with the latest update - lgs, err = o1.SelectLatestLogEventSigsAddrsWithConfs(0 /* startBlock */, []common.Address{common.HexToAddress("0x1234"), common.HexToAddress("0x1235")}, []common.Hash{topic}, 0) + lgs, err = o1.SelectLatestLogEventSigsAddrsWithConfs(ctx, 0 /* startBlock */, []common.Address{common.HexToAddress("0x1234"), common.HexToAddress("0x1235")}, []common.Hash{topic}, 0) require.NoError(t, err) require.Equal(t, 2, len(lgs)) // should return two entries one for each topic for addr 0x1234 - lgs, err = o1.SelectLatestLogEventSigsAddrsWithConfs(0 /* startBlock */, []common.Address{common.HexToAddress("0x1234")}, []common.Hash{topic, topic2}, 0) + lgs, err = o1.SelectLatestLogEventSigsAddrsWithConfs(ctx, 0 /* startBlock */, []common.Address{common.HexToAddress("0x1234")}, []common.Hash{topic, topic2}, 0) require.NoError(t, err) require.Equal(t, 2, len(lgs)) // should return 4 entries one for each (address,topic) combination - lgs, err = 
o1.SelectLatestLogEventSigsAddrsWithConfs(0 /* startBlock */, []common.Address{common.HexToAddress("0x1234"), common.HexToAddress("0x1235")}, []common.Hash{topic, topic2}, 0) + lgs, err = o1.SelectLatestLogEventSigsAddrsWithConfs(ctx, 0 /* startBlock */, []common.Address{common.HexToAddress("0x1234"), common.HexToAddress("0x1235")}, []common.Hash{topic, topic2}, 0) require.NoError(t, err) require.Equal(t, 4, len(lgs)) // should return 3 entries of logs with atleast 1 confirmation - lgs, err = o1.SelectLatestLogEventSigsAddrsWithConfs(0 /* startBlock */, []common.Address{common.HexToAddress("0x1234"), common.HexToAddress("0x1235")}, []common.Hash{topic, topic2}, 1) + lgs, err = o1.SelectLatestLogEventSigsAddrsWithConfs(ctx, 0 /* startBlock */, []common.Address{common.HexToAddress("0x1234"), common.HexToAddress("0x1235")}, []common.Hash{topic, topic2}, 1) require.NoError(t, err) require.Equal(t, 3, len(lgs)) // should return 2 entries of logs with atleast 2 confirmation - lgs, err = o1.SelectLatestLogEventSigsAddrsWithConfs(0 /* startBlock */, []common.Address{common.HexToAddress("0x1234"), common.HexToAddress("0x1235")}, []common.Hash{topic, topic2}, 2) + lgs, err = o1.SelectLatestLogEventSigsAddrsWithConfs(ctx, 0 /* startBlock */, []common.Address{common.HexToAddress("0x1234"), common.HexToAddress("0x1235")}, []common.Hash{topic, topic2}, 2) require.NoError(t, err) require.Equal(t, 2, len(lgs)) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1237"), 16, time.Now(), 0)) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1238"), 17, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1237"), 16, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1238"), 17, time.Now(), 0)) filter0 := logpoller.Filter{ Name: "permanent retention filter", @@ -411,29 +414,29 @@ func TestORM(t *testing.T) { } // Test inserting filters and reading them back - require.NoError(t, o1.InsertFilter(filter0)) - require.NoError(t, 
o1.InsertFilter(filter12)) - require.NoError(t, o1.InsertFilter(filter2)) + require.NoError(t, o1.InsertFilter(ctx, filter0)) + require.NoError(t, o1.InsertFilter(ctx, filter12)) + require.NoError(t, o1.InsertFilter(ctx, filter2)) - filters, err := o1.LoadFilters() + filters, err := o1.LoadFilters(ctx) require.NoError(t, err) require.Len(t, filters, 3) assert.Equal(t, filter0, filters["permanent retention filter"]) assert.Equal(t, filter12, filters["short retention filter"]) assert.Equal(t, filter2, filters["long retention filter"]) - latest, err = o1.SelectLatestBlock() + latest, err = o1.SelectLatestBlock(ctx) require.NoError(t, err) require.Equal(t, int64(17), latest.BlockNumber) - logs, err = o1.SelectLogsByBlockRange(1, latest.BlockNumber) + logs, err = o1.SelectLogsByBlockRange(ctx, 1, latest.BlockNumber) require.NoError(t, err) require.Len(t, logs, 8) // Delete expired logs time.Sleep(2 * time.Millisecond) // just in case we haven't reached the end of the 1ms retention period - err = o1.DeleteExpiredLogs(pg.WithParentCtx(testutils.Context(t))) + err = o1.DeleteExpiredLogs(ctx) require.NoError(t, err) - logs, err = o1.SelectLogsByBlockRange(1, latest.BlockNumber) + logs, err = o1.SelectLogsByBlockRange(ctx, 1, latest.BlockNumber) require.NoError(t, err) // The only log which should be deleted is the one which matches filter1 (ret=1ms) but not filter12 (ret=1 hour) // Importantly, it shouldn't delete any logs matching only filter0 (ret=0 meaning permanent retention). Anything @@ -441,9 +444,9 @@ func TestORM(t *testing.T) { assert.Len(t, logs, 7) // Delete logs after should delete all logs. 
- err = o1.DeleteLogsAndBlocksAfter(1) + err = o1.DeleteLogsAndBlocksAfter(ctx, 1) require.NoError(t, err) - logs, err = o1.SelectLogsByBlockRange(1, latest.BlockNumber) + logs, err = o1.SelectLogsByBlockRange(ctx, 1, latest.BlockNumber) require.NoError(t, err) require.Zero(t, len(logs)) } @@ -463,66 +466,67 @@ func insertLogsTopicValueRange(t *testing.T, chainID *big.Int, o *logpoller.DbOR Data: []byte("hello"), }) } - require.NoError(t, o.InsertLogs(lgs)) + require.NoError(t, o.InsertLogs(context.Background(), lgs)) } func TestORM_IndexedLogs(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) o1 := th.ORM + ctx := context.Background() eventSig := common.HexToHash("0x1599") addr := common.HexToAddress("0x1234") - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1"), 1, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1"), 1, time.Now(), 0)) insertLogsTopicValueRange(t, th.ChainID, o1, addr, 1, eventSig, 1, 3) insertLogsTopicValueRange(t, th.ChainID, o1, addr, 2, eventSig, 4, 4) // unconfirmed - lgs, err := o1.SelectIndexedLogs(addr, eventSig, 1, []common.Hash{logpoller.EvmWord(1)}, 0) + lgs, err := o1.SelectIndexedLogs(ctx, addr, eventSig, 1, []common.Hash{logpoller.EvmWord(1)}, 0) require.NoError(t, err) require.Equal(t, 1, len(lgs)) assert.Equal(t, logpoller.EvmWord(1).Bytes(), lgs[0].GetTopics()[1].Bytes()) - lgs, err = o1.SelectIndexedLogs(addr, eventSig, 1, []common.Hash{logpoller.EvmWord(1), logpoller.EvmWord(2)}, 0) + lgs, err = o1.SelectIndexedLogs(ctx, addr, eventSig, 1, []common.Hash{logpoller.EvmWord(1), logpoller.EvmWord(2)}, 0) require.NoError(t, err) assert.Equal(t, 2, len(lgs)) - lgs, err = o1.SelectIndexedLogsByBlockRange(1, 1, addr, eventSig, 1, []common.Hash{logpoller.EvmWord(1)}) + lgs, err = o1.SelectIndexedLogsByBlockRange(ctx, 1, 1, addr, eventSig, 1, []common.Hash{logpoller.EvmWord(1)}) require.NoError(t, err) assert.Equal(t, 1, len(lgs)) - lgs, err = o1.SelectIndexedLogsByBlockRange(1, 2, addr, eventSig, 
1, []common.Hash{logpoller.EvmWord(2)}) + lgs, err = o1.SelectIndexedLogsByBlockRange(ctx, 1, 2, addr, eventSig, 1, []common.Hash{logpoller.EvmWord(2)}) require.NoError(t, err) assert.Equal(t, 1, len(lgs)) - lgs, err = o1.SelectIndexedLogsByBlockRange(1, 2, addr, eventSig, 1, []common.Hash{logpoller.EvmWord(1)}) + lgs, err = o1.SelectIndexedLogsByBlockRange(ctx, 1, 2, addr, eventSig, 1, []common.Hash{logpoller.EvmWord(1)}) require.NoError(t, err) assert.Equal(t, 1, len(lgs)) - _, err = o1.SelectIndexedLogsByBlockRange(1, 2, addr, eventSig, 0, []common.Hash{logpoller.EvmWord(1)}) + _, err = o1.SelectIndexedLogsByBlockRange(ctx, 1, 2, addr, eventSig, 0, []common.Hash{logpoller.EvmWord(1)}) require.Error(t, err) assert.Contains(t, err.Error(), "invalid index for topic: 0") - _, err = o1.SelectIndexedLogsByBlockRange(1, 2, addr, eventSig, 4, []common.Hash{logpoller.EvmWord(1)}) + _, err = o1.SelectIndexedLogsByBlockRange(ctx, 1, 2, addr, eventSig, 4, []common.Hash{logpoller.EvmWord(1)}) require.Error(t, err) assert.Contains(t, err.Error(), "invalid index for topic: 4") - lgs, err = o1.SelectIndexedLogsTopicGreaterThan(addr, eventSig, 1, logpoller.EvmWord(2), 0) + lgs, err = o1.SelectIndexedLogsTopicGreaterThan(ctx, addr, eventSig, 1, logpoller.EvmWord(2), 0) require.NoError(t, err) assert.Equal(t, 2, len(lgs)) - lgs, err = o1.SelectIndexedLogsTopicRange(addr, eventSig, 1, logpoller.EvmWord(3), logpoller.EvmWord(3), 0) + lgs, err = o1.SelectIndexedLogsTopicRange(ctx, addr, eventSig, 1, logpoller.EvmWord(3), logpoller.EvmWord(3), 0) require.NoError(t, err) assert.Equal(t, 1, len(lgs)) assert.Equal(t, logpoller.EvmWord(3).Bytes(), lgs[0].GetTopics()[1].Bytes()) - lgs, err = o1.SelectIndexedLogsTopicRange(addr, eventSig, 1, logpoller.EvmWord(1), logpoller.EvmWord(3), 0) + lgs, err = o1.SelectIndexedLogsTopicRange(ctx, addr, eventSig, 1, logpoller.EvmWord(1), logpoller.EvmWord(3), 0) require.NoError(t, err) assert.Equal(t, 3, len(lgs)) // Check confirmations work as 
expected. - require.NoError(t, o1.InsertBlock(common.HexToHash("0x2"), 2, time.Now(), 0)) - lgs, err = o1.SelectIndexedLogsTopicRange(addr, eventSig, 1, logpoller.EvmWord(4), logpoller.EvmWord(4), 1) + require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x2"), 2, time.Now(), 0)) + lgs, err = o1.SelectIndexedLogsTopicRange(ctx, addr, eventSig, 1, logpoller.EvmWord(4), logpoller.EvmWord(4), 1) require.NoError(t, err) assert.Equal(t, 0, len(lgs)) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x3"), 3, time.Now(), 0)) - lgs, err = o1.SelectIndexedLogsTopicRange(addr, eventSig, 1, logpoller.EvmWord(4), logpoller.EvmWord(4), 1) + require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x3"), 3, time.Now(), 0)) + lgs, err = o1.SelectIndexedLogsTopicRange(ctx, addr, eventSig, 1, logpoller.EvmWord(4), logpoller.EvmWord(4), 1) require.NoError(t, err) assert.Equal(t, 1, len(lgs)) } @@ -530,11 +534,12 @@ func TestORM_IndexedLogs(t *testing.T) { func TestORM_SelectIndexedLogsByTxHash(t *testing.T) { th := SetupTH(t, false, 0, 3, 2, 1000) o1 := th.ORM + ctx := context.Background() eventSig := common.HexToHash("0x1599") txHash := common.HexToHash("0x1888") addr := common.HexToAddress("0x1234") - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1"), 1, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1"), 1, time.Now(), 0)) logs := []logpoller.Log{ { EvmChainId: ubig.New(th.ChainID), @@ -583,9 +588,9 @@ func TestORM_SelectIndexedLogsByTxHash(t *testing.T) { Data: append(logpoller.EvmWord(2).Bytes(), logpoller.EvmWord(3).Bytes()...), }, } - require.NoError(t, o1.InsertLogs(logs)) + require.NoError(t, o1.InsertLogs(ctx, logs)) - retrievedLogs, err := o1.SelectIndexedLogsByTxHash(addr, eventSig, txHash) + retrievedLogs, err := o1.SelectIndexedLogsByTxHash(ctx, addr, eventSig, txHash) require.NoError(t, err) require.Equal(t, 2, len(retrievedLogs)) @@ -596,10 +601,11 @@ func TestORM_SelectIndexedLogsByTxHash(t *testing.T) { func 
TestORM_DataWords(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) o1 := th.ORM + ctx := context.Background() eventSig := common.HexToHash("0x1599") addr := common.HexToAddress("0x1234") - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1"), 1, time.Now(), 0)) - require.NoError(t, o1.InsertLogs([]logpoller.Log{ + require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1"), 1, time.Now(), 0)) + require.NoError(t, o1.InsertLogs(ctx, []logpoller.Log{ { EvmChainId: ubig.New(th.ChainID), LogIndex: int64(0), @@ -625,33 +631,33 @@ func TestORM_DataWords(t *testing.T) { }, })) // Outside range should fail. - lgs, err := o1.SelectLogsDataWordRange(addr, eventSig, 0, logpoller.EvmWord(2), logpoller.EvmWord(2), 0) + lgs, err := o1.SelectLogsDataWordRange(ctx, addr, eventSig, 0, logpoller.EvmWord(2), logpoller.EvmWord(2), 0) require.NoError(t, err) assert.Equal(t, 0, len(lgs)) // Range including log should succeed - lgs, err = o1.SelectLogsDataWordRange(addr, eventSig, 0, logpoller.EvmWord(1), logpoller.EvmWord(2), 0) + lgs, err = o1.SelectLogsDataWordRange(ctx, addr, eventSig, 0, logpoller.EvmWord(1), logpoller.EvmWord(2), 0) require.NoError(t, err) assert.Equal(t, 1, len(lgs)) // Range only covering log should succeed - lgs, err = o1.SelectLogsDataWordRange(addr, eventSig, 0, logpoller.EvmWord(1), logpoller.EvmWord(1), 0) + lgs, err = o1.SelectLogsDataWordRange(ctx, addr, eventSig, 0, logpoller.EvmWord(1), logpoller.EvmWord(1), 0) require.NoError(t, err) assert.Equal(t, 1, len(lgs)) // Cannot query for unconfirmed second log. - lgs, err = o1.SelectLogsDataWordRange(addr, eventSig, 1, logpoller.EvmWord(3), logpoller.EvmWord(3), 0) + lgs, err = o1.SelectLogsDataWordRange(ctx, addr, eventSig, 1, logpoller.EvmWord(3), logpoller.EvmWord(3), 0) require.NoError(t, err) assert.Equal(t, 0, len(lgs)) // Confirm it, then can query. 
- require.NoError(t, o1.InsertBlock(common.HexToHash("0x2"), 2, time.Now(), 0)) - lgs, err = o1.SelectLogsDataWordRange(addr, eventSig, 1, logpoller.EvmWord(3), logpoller.EvmWord(3), 0) + require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x2"), 2, time.Now(), 0)) + lgs, err = o1.SelectLogsDataWordRange(ctx, addr, eventSig, 1, logpoller.EvmWord(3), logpoller.EvmWord(3), 0) require.NoError(t, err) assert.Equal(t, 1, len(lgs)) assert.Equal(t, lgs[0].Data, append(logpoller.EvmWord(2).Bytes(), logpoller.EvmWord(3).Bytes()...)) // Check greater than 1 yields both logs. - lgs, err = o1.SelectLogsDataWordGreaterThan(addr, eventSig, 0, logpoller.EvmWord(1), 0) + lgs, err = o1.SelectLogsDataWordGreaterThan(ctx, addr, eventSig, 0, logpoller.EvmWord(1), 0) require.NoError(t, err) assert.Equal(t, 2, len(lgs)) } @@ -659,6 +665,7 @@ func TestORM_DataWords(t *testing.T) { func TestORM_SelectLogsWithSigsByBlockRangeFilter(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) o1 := th.ORM + ctx := context.Background() // Insert logs on different topics, should be able to read them // back using SelectLogsWithSigs and specifying @@ -734,10 +741,10 @@ func TestORM_SelectLogsWithSigsByBlockRangeFilter(t *testing.T) { Data: []byte("hello6"), }, } - require.NoError(t, o1.InsertLogs(inputLogs)) + require.NoError(t, o1.InsertLogs(ctx, inputLogs)) startBlock, endBlock := int64(10), int64(15) - logs, err := o1.SelectLogsWithSigs(startBlock, endBlock, sourceAddr, []common.Hash{ + logs, err := o1.SelectLogsWithSigs(ctx, startBlock, endBlock, sourceAddr, []common.Hash{ topic, topic2, }) @@ -753,35 +760,37 @@ func TestORM_SelectLogsWithSigsByBlockRangeFilter(t *testing.T) { func TestORM_DeleteBlocksBefore(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) o1 := th.ORM - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 1, time.Now(), 0)) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1235"), 2, time.Now(), 0)) - require.NoError(t, o1.DeleteBlocksBefore(1)) + ctx 
:= context.Background() + require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1234"), 1, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1235"), 2, time.Now(), 0)) + require.NoError(t, o1.DeleteBlocksBefore(ctx, 1)) // 1 should be gone. - _, err := o1.SelectBlockByNumber(1) + _, err := o1.SelectBlockByNumber(ctx, 1) require.Equal(t, err, sql.ErrNoRows) - b, err := o1.SelectBlockByNumber(2) + b, err := o1.SelectBlockByNumber(ctx, 2) require.NoError(t, err) assert.Equal(t, int64(2), b.BlockNumber) // Clear multiple - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1236"), 3, time.Now(), 0)) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1237"), 4, time.Now(), 0)) - require.NoError(t, o1.DeleteBlocksBefore(3)) - _, err = o1.SelectBlockByNumber(2) + require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1236"), 3, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1237"), 4, time.Now(), 0)) + require.NoError(t, o1.DeleteBlocksBefore(ctx, 3)) + _, err = o1.SelectBlockByNumber(ctx, 2) require.Equal(t, err, sql.ErrNoRows) - _, err = o1.SelectBlockByNumber(3) + _, err = o1.SelectBlockByNumber(ctx, 3) require.Equal(t, err, sql.ErrNoRows) } func TestLogPoller_Logs(t *testing.T) { t.Parallel() th := SetupTH(t, false, 2, 3, 2, 1000) + ctx := context.Background() event1 := EmitterABI.Events["Log1"].ID event2 := EmitterABI.Events["Log2"].ID address1 := common.HexToAddress("0x2ab9a2Dc53736b361b72d900CdF9F78F9406fbbb") address2 := common.HexToAddress("0x6E225058950f237371261C985Db6bDe26df2200E") // Block 1-3 - require.NoError(t, th.ORM.InsertLogs([]logpoller.Log{ + require.NoError(t, th.ORM.InsertLogs(ctx, []logpoller.Log{ GenLog(th.ChainID, 1, 1, "0x3", event1[:], address1), GenLog(th.ChainID, 2, 1, "0x3", event2[:], address2), GenLog(th.ChainID, 1, 2, "0x4", event1[:], address2), @@ -791,7 +800,7 @@ func TestLogPoller_Logs(t *testing.T) { })) // Select for all Addresses - lgs, err := 
th.ORM.SelectLogsByBlockRange(1, 3) + lgs, err := th.ORM.SelectLogsByBlockRange(ctx, 1, 3) require.NoError(t, err) require.Equal(t, 6, len(lgs)) assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000003", lgs[0].BlockHash.String()) @@ -802,7 +811,7 @@ func TestLogPoller_Logs(t *testing.T) { assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000005", lgs[5].BlockHash.String()) // Filter by Address and topic - lgs, err = th.ORM.SelectLogs(1, 3, address1, event1) + lgs, err = th.ORM.SelectLogs(ctx, 1, 3, address1, event1) require.NoError(t, err) require.Equal(t, 2, len(lgs)) assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000003", lgs[0].BlockHash.String()) @@ -812,7 +821,7 @@ func TestLogPoller_Logs(t *testing.T) { assert.Equal(t, address1, lgs[1].Address) // Filter by block - lgs, err = th.ORM.SelectLogs(2, 2, address2, event1) + lgs, err = th.ORM.SelectLogs(ctx, 2, 2, address2, event1) require.NoError(t, err) require.Equal(t, 1, len(lgs)) assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000004", lgs[0].BlockHash.String()) @@ -824,6 +833,7 @@ func TestLogPoller_Logs(t *testing.T) { func BenchmarkLogs(b *testing.B) { th := SetupTH(b, false, 2, 3, 2, 1000) o := th.ORM + ctx := context.Background() var lgs []logpoller.Log addr := common.HexToAddress("0x1234") for i := 0; i < 10_000; i++ { @@ -839,10 +849,10 @@ func BenchmarkLogs(b *testing.B) { Data: common.HexToHash(fmt.Sprintf("0x%d", i)).Bytes(), }) } - require.NoError(b, o.InsertLogs(lgs)) + require.NoError(b, o.InsertLogs(ctx, lgs)) b.ResetTimer() for n := 0; n < b.N; n++ { - _, err := o.SelectLogsDataWordRange(addr, EmitterABI.Events["Log1"].ID, 0, logpoller.EvmWord(8000), logpoller.EvmWord(8002), 0) + _, err := o.SelectLogsDataWordRange(ctx, addr, EmitterABI.Events["Log1"].ID, 0, logpoller.EvmWord(8000), logpoller.EvmWord(8002), 0) require.NoError(b, err) } } @@ -850,6 +860,7 @@ func BenchmarkLogs(b 
*testing.B) { func TestSelectLogsWithSigsExcluding(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) orm := th.ORM + ctx := context.Background() addressA := common.HexToAddress("0x11111") addressB := common.HexToAddress("0x22222") addressC := common.HexToAddress("0x33333") @@ -865,7 +876,7 @@ func TestSelectLogsWithSigsExcluding(t *testing.T) { topicD := common.HexToHash("0x000d") //Insert two logs that mimics an oracle request from 2 different addresses (matching will be on topic index 1) - require.NoError(t, orm.InsertLogs([]logpoller.Log{ + require.NoError(t, orm.InsertLogs(ctx, []logpoller.Log{ { EvmChainId: (*ubig.Big)(th.ChainID), LogIndex: 1, @@ -891,22 +902,22 @@ func TestSelectLogsWithSigsExcluding(t *testing.T) { Data: []byte("requestID-B1"), }, })) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x1"), 1, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(ctx, common.HexToHash("0x1"), 1, time.Now(), 0)) //Get any requestSigA from addressA that do not have a equivalent responseSigA - logs, err := orm.SelectIndexedLogsWithSigsExcluding(requestSigA, responseSigA, 1, addressA, 0, 3, 0) + logs, err := orm.SelectIndexedLogsWithSigsExcluding(ctx, requestSigA, responseSigA, 1, addressA, 0, 3, 0) require.NoError(t, err) require.Len(t, logs, 1) require.Equal(t, logs[0].Data, []byte("requestID-A1")) //Get any requestSigB from addressB that do not have a equivalent responseSigB - logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 1, addressB, 0, 3, 0) + logs, err = orm.SelectIndexedLogsWithSigsExcluding(ctx, requestSigB, responseSigB, 1, addressB, 0, 3, 0) require.NoError(t, err) require.Len(t, logs, 1) require.Equal(t, logs[0].Data, []byte("requestID-B1")) //Insert a log that mimics response for requestID-A1 - require.NoError(t, orm.InsertLogs([]logpoller.Log{ + require.NoError(t, orm.InsertLogs(ctx, []logpoller.Log{ { EvmChainId: (*ubig.Big)(th.ChainID), LogIndex: 3, @@ -920,21 +931,21 @@ func 
TestSelectLogsWithSigsExcluding(t *testing.T) { Data: []byte("responseID-A1"), }, })) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x2"), 2, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(ctx, common.HexToHash("0x2"), 2, time.Now(), 0)) //Should return nothing as requestID-A1 has been fulfilled - logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigA, responseSigA, 1, addressA, 0, 3, 0) + logs, err = orm.SelectIndexedLogsWithSigsExcluding(ctx, requestSigA, responseSigA, 1, addressA, 0, 3, 0) require.NoError(t, err) require.Len(t, logs, 0) //requestID-B1 should still be unfulfilled - logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 1, addressB, 0, 3, 0) + logs, err = orm.SelectIndexedLogsWithSigsExcluding(ctx, requestSigB, responseSigB, 1, addressB, 0, 3, 0) require.NoError(t, err) require.Len(t, logs, 1) require.Equal(t, logs[0].Data, []byte("requestID-B1")) //Insert 3 request from addressC (matching will be on topic index 3) - require.NoError(t, orm.InsertLogs([]logpoller.Log{ + require.NoError(t, orm.InsertLogs(ctx, []logpoller.Log{ { EvmChainId: (*ubig.Big)(th.ChainID), LogIndex: 5, @@ -971,10 +982,10 @@ func TestSelectLogsWithSigsExcluding(t *testing.T) { Data: []byte("requestID-C3"), }, })) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x3"), 3, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(ctx, common.HexToHash("0x3"), 3, time.Now(), 0)) //Get all unfulfilled requests from addressC, match on topic index 3 - logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 3, addressC, 0, 4, 0) + logs, err = orm.SelectIndexedLogsWithSigsExcluding(ctx, requestSigB, responseSigB, 3, addressC, 0, 4, 0) require.NoError(t, err) require.Len(t, logs, 3) require.Equal(t, logs[0].Data, []byte("requestID-C1")) @@ -982,7 +993,7 @@ func TestSelectLogsWithSigsExcluding(t *testing.T) { require.Equal(t, logs[2].Data, []byte("requestID-C3")) //Fulfill requestID-C2 - require.NoError(t, 
orm.InsertLogs([]logpoller.Log{ + require.NoError(t, orm.InsertLogs(ctx, []logpoller.Log{ { EvmChainId: (*ubig.Big)(th.ChainID), LogIndex: 8, @@ -998,14 +1009,14 @@ func TestSelectLogsWithSigsExcluding(t *testing.T) { })) //Verify that requestID-C2 is now fulfilled (not returned) - logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 3, addressC, 0, 4, 0) + logs, err = orm.SelectIndexedLogsWithSigsExcluding(ctx, requestSigB, responseSigB, 3, addressC, 0, 4, 0) require.NoError(t, err) require.Len(t, logs, 2) require.Equal(t, logs[0].Data, []byte("requestID-C1")) require.Equal(t, logs[1].Data, []byte("requestID-C3")) //Fulfill requestID-C3 - require.NoError(t, orm.InsertLogs([]logpoller.Log{ + require.NoError(t, orm.InsertLogs(ctx, []logpoller.Log{ { EvmChainId: (*ubig.Big)(th.ChainID), LogIndex: 9, @@ -1021,26 +1032,26 @@ func TestSelectLogsWithSigsExcluding(t *testing.T) { })) //Verify that requestID-C3 is now fulfilled (not returned) - logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 3, addressC, 0, 4, 0) + logs, err = orm.SelectIndexedLogsWithSigsExcluding(ctx, requestSigB, responseSigB, 3, addressC, 0, 4, 0) require.NoError(t, err) require.Len(t, logs, 1) require.Equal(t, logs[0].Data, []byte("requestID-C1")) //Should return no logs as the number of confirmations is not satisfied - logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 3, addressC, 0, 4, 3) + logs, err = orm.SelectIndexedLogsWithSigsExcluding(ctx, requestSigB, responseSigB, 3, addressC, 0, 4, 3) require.NoError(t, err) require.Len(t, logs, 0) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x4"), 4, time.Now(), 0)) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x5"), 5, time.Now(), 0)) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x6"), 6, time.Now(), 0)) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x7"), 7, time.Now(), 0)) - require.NoError(t, 
orm.InsertBlock(common.HexToHash("0x8"), 8, time.Now(), 0)) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x9"), 9, time.Now(), 0)) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x10"), 10, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(ctx, common.HexToHash("0x4"), 4, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(ctx, common.HexToHash("0x5"), 5, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(ctx, common.HexToHash("0x6"), 6, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(ctx, common.HexToHash("0x7"), 7, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(ctx, common.HexToHash("0x8"), 8, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(ctx, common.HexToHash("0x9"), 9, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(ctx, common.HexToHash("0x10"), 10, time.Now(), 0)) //Fulfill requestID-C3 - require.NoError(t, orm.InsertLogs([]logpoller.Log{ + require.NoError(t, orm.InsertLogs(ctx, []logpoller.Log{ { EvmChainId: (*ubig.Big)(th.ChainID), LogIndex: 10, @@ -1056,57 +1067,58 @@ func TestSelectLogsWithSigsExcluding(t *testing.T) { })) //All logs for addressC should be fulfilled, query should return 0 logs - logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 3, addressC, 0, 10, 0) + logs, err = orm.SelectIndexedLogsWithSigsExcluding(ctx, requestSigB, responseSigB, 3, addressC, 0, 10, 0) require.NoError(t, err) require.Len(t, logs, 0) //Should return 1 log as it does not satisfy the required number of confirmations - logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 3, addressC, 0, 10, 3) + logs, err = orm.SelectIndexedLogsWithSigsExcluding(ctx, requestSigB, responseSigB, 3, addressC, 0, 10, 3) require.NoError(t, err) require.Len(t, logs, 1) require.Equal(t, logs[0].Data, []byte("requestID-C1")) //Insert 3 more blocks so that the requestID-C1 has enough confirmations - require.NoError(t, orm.InsertBlock(common.HexToHash("0x11"), 11, time.Now(), 0)) - require.NoError(t, 
orm.InsertBlock(common.HexToHash("0x12"), 12, time.Now(), 0)) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x13"), 13, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(ctx, common.HexToHash("0x11"), 11, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(ctx, common.HexToHash("0x12"), 12, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(ctx, common.HexToHash("0x13"), 13, time.Now(), 0)) - logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 3, addressC, 0, 10, 0) + logs, err = orm.SelectIndexedLogsWithSigsExcluding(ctx, requestSigB, responseSigB, 3, addressC, 0, 10, 0) require.NoError(t, err) require.Len(t, logs, 0) //AddressB should still have an unfulfilled log (requestID-B1) - logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 1, addressB, 0, 3, 0) + logs, err = orm.SelectIndexedLogsWithSigsExcluding(ctx, requestSigB, responseSigB, 1, addressB, 0, 3, 0) require.NoError(t, err) require.Len(t, logs, 1) require.Equal(t, logs[0].Data, []byte("requestID-B1")) //Should return requestID-A1 as the fulfillment event is out of the block range - logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigA, responseSigA, 1, addressA, 0, 1, 10) + logs, err = orm.SelectIndexedLogsWithSigsExcluding(ctx, requestSigA, responseSigA, 1, addressA, 0, 1, 10) require.NoError(t, err) require.Len(t, logs, 1) require.Equal(t, logs[0].Data, []byte("requestID-A1")) //Should return nothing as requestID-B1 is before the block range - logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 1, addressB, 2, 13, 0) + logs, err = orm.SelectIndexedLogsWithSigsExcluding(ctx, requestSigB, responseSigB, 1, addressB, 2, 13, 0) require.NoError(t, err) require.Len(t, logs, 0) } func TestSelectLatestBlockNumberEventSigsAddrsWithConfs(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) + ctx := context.Background() event1 := EmitterABI.Events["Log1"].ID event2 := EmitterABI.Events["Log2"].ID address1 := 
utils.RandomAddress() address2 := utils.RandomAddress() - require.NoError(t, th.ORM.InsertLogs([]logpoller.Log{ + require.NoError(t, th.ORM.InsertLogs(ctx, []logpoller.Log{ GenLog(th.ChainID, 1, 1, utils.RandomAddress().String(), event1[:], address1), GenLog(th.ChainID, 2, 1, utils.RandomAddress().String(), event2[:], address2), GenLog(th.ChainID, 2, 2, utils.RandomAddress().String(), event2[:], address2), GenLog(th.ChainID, 2, 3, utils.RandomAddress().String(), event2[:], address2), })) - require.NoError(t, th.ORM.InsertBlock(utils.RandomHash(), 3, time.Now(), 1)) + require.NoError(t, th.ORM.InsertBlock(ctx, utils.RandomHash(), 3, time.Now(), 1)) tests := []struct { name string @@ -1183,7 +1195,7 @@ func TestSelectLatestBlockNumberEventSigsAddrsWithConfs(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - blockNumber, err := th.ORM.SelectLatestBlockByEventSigsAddrsWithConfs(tt.fromBlock, tt.events, tt.addrs, tt.confs) + blockNumber, err := th.ORM.SelectLatestBlockByEventSigsAddrsWithConfs(ctx, tt.fromBlock, tt.events, tt.addrs, tt.confs) require.NoError(t, err) assert.Equal(t, tt.expectedBlockNumber, blockNumber) }) @@ -1192,6 +1204,7 @@ func TestSelectLatestBlockNumberEventSigsAddrsWithConfs(t *testing.T) { func TestSelectLogsCreatedAfter(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) + ctx := context.Background() event := EmitterABI.Events["Log1"].ID address := utils.RandomAddress() @@ -1199,15 +1212,15 @@ func TestSelectLogsCreatedAfter(t *testing.T) { block2ts := time.Date(2020, 1, 1, 12, 12, 12, 0, time.UTC) block3ts := time.Date(2030, 1, 1, 12, 12, 12, 0, time.UTC) - require.NoError(t, th.ORM.InsertLogs([]logpoller.Log{ + require.NoError(t, th.ORM.InsertLogs(ctx, []logpoller.Log{ GenLogWithTimestamp(th.ChainID, 1, 1, utils.RandomAddress().String(), event[:], address, block1ts), GenLogWithTimestamp(th.ChainID, 1, 2, utils.RandomAddress().String(), event[:], address, block2ts), GenLogWithTimestamp(th.ChainID, 2, 2, 
utils.RandomAddress().String(), event[:], address, block2ts), GenLogWithTimestamp(th.ChainID, 1, 3, utils.RandomAddress().String(), event[:], address, block3ts), })) - require.NoError(t, th.ORM.InsertBlock(utils.RandomHash(), 1, block1ts, 0)) - require.NoError(t, th.ORM.InsertBlock(utils.RandomHash(), 2, block2ts, 1)) - require.NoError(t, th.ORM.InsertBlock(utils.RandomHash(), 3, block3ts, 2)) + require.NoError(t, th.ORM.InsertBlock(ctx, utils.RandomHash(), 1, block1ts, 0)) + require.NoError(t, th.ORM.InsertBlock(ctx, utils.RandomHash(), 2, block2ts, 1)) + require.NoError(t, th.ORM.InsertBlock(ctx, utils.RandomHash(), 3, block3ts, 2)) type expectedLog struct { block int64 @@ -1271,7 +1284,7 @@ func TestSelectLogsCreatedAfter(t *testing.T) { } for _, tt := range tests { t.Run("SelectLogsCreatedAfter"+tt.name, func(t *testing.T) { - logs, err := th.ORM.SelectLogsCreatedAfter(address, event, tt.after, tt.confs) + logs, err := th.ORM.SelectLogsCreatedAfter(ctx, address, event, tt.after, tt.confs) require.NoError(t, err) require.Len(t, logs, len(tt.expectedLogs)) @@ -1282,7 +1295,7 @@ func TestSelectLogsCreatedAfter(t *testing.T) { }) t.Run("SelectIndexedLogsCreatedAfter"+tt.name, func(t *testing.T) { - logs, err := th.ORM.SelectIndexedLogsCreatedAfter(address, event, 1, []common.Hash{event}, tt.after, tt.confs) + logs, err := th.ORM.SelectIndexedLogsCreatedAfter(ctx, address, event, 1, []common.Hash{event}, tt.after, tt.confs) require.NoError(t, err) require.Len(t, logs, len(tt.expectedLogs)) @@ -1296,28 +1309,29 @@ func TestSelectLogsCreatedAfter(t *testing.T) { func TestNestedLogPollerBlocksQuery(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) + ctx := context.Background() event := EmitterABI.Events["Log1"].ID address := utils.RandomAddress() - require.NoError(t, th.ORM.InsertLogs([]logpoller.Log{ + require.NoError(t, th.ORM.InsertLogs(ctx, []logpoller.Log{ GenLog(th.ChainID, 1, 8, utils.RandomAddress().String(), event[:], address), })) // Empty logs when 
block are not persisted - logs, err := th.ORM.SelectIndexedLogs(address, event, 1, []common.Hash{event}, logpoller.Unconfirmed) + logs, err := th.ORM.SelectIndexedLogs(ctx, address, event, 1, []common.Hash{event}, logpoller.Unconfirmed) require.NoError(t, err) require.Len(t, logs, 0) // Persist block - require.NoError(t, th.ORM.InsertBlock(utils.RandomHash(), 10, time.Now(), 0)) + require.NoError(t, th.ORM.InsertBlock(ctx, utils.RandomHash(), 10, time.Now(), 0)) // Check if query actually works well with provided dataset - logs, err = th.ORM.SelectIndexedLogs(address, event, 1, []common.Hash{event}, logpoller.Unconfirmed) + logs, err = th.ORM.SelectIndexedLogs(ctx, address, event, 1, []common.Hash{event}, logpoller.Unconfirmed) require.NoError(t, err) require.Len(t, logs, 1) // Empty logs when number of confirmations is too deep - logs, err = th.ORM.SelectIndexedLogs(address, event, 1, []common.Hash{event}, logpoller.Confirmations(4)) + logs, err = th.ORM.SelectIndexedLogs(ctx, address, event, 1, []common.Hash{event}, logpoller.Confirmations(4)) require.NoError(t, err) require.Len(t, logs, 0) } @@ -1326,12 +1340,13 @@ func TestInsertLogsWithBlock(t *testing.T) { chainID := testutils.NewRandomEVMChainID() event := utils.RandomBytes32() address := utils.RandomAddress() + ctx := context.Background() // We need full db here, because we want to test transaction rollbacks. 
// Using pgtest.NewSqlxDB(t) will run all tests in TXs which is not desired for this type of test // (inner tx rollback will rollback outer tx, blocking rest of execution) _, db := heavyweight.FullTestDBV2(t, nil) - o := logpoller.NewORM(chainID, db, logger.Test(t), pgtest.NewQConfig(true)) + o := logpoller.NewORM(chainID, db, logger.Test(t)) correctLog := GenLog(chainID, 1, 1, utils.RandomAddress().String(), event[:], address) invalidLog := GenLog(chainID, -10, -10, utils.RandomAddress().String(), event[:], address) @@ -1373,11 +1388,11 @@ func TestInsertLogsWithBlock(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { // clean all logs and blocks between test cases - defer func() { _ = o.DeleteLogsAndBlocksAfter(0) }() - insertError := o.InsertLogsWithBlock(tt.logs, tt.block) + defer func() { _ = o.DeleteLogsAndBlocksAfter(ctx, 0) }() + insertError := o.InsertLogsWithBlock(ctx, tt.logs, tt.block) - logs, logsErr := o.SelectLogs(0, math.MaxInt, address, event) - block, blockErr := o.SelectLatestBlock() + logs, logsErr := o.SelectLogs(ctx, 0, math.MaxInt, address, event) + block, blockErr := o.SelectLatestBlock(ctx) if tt.shouldRollback { assert.Error(t, insertError) @@ -1404,10 +1419,11 @@ func TestInsertLogsInTx(t *testing.T) { event := utils.RandomBytes32() address := utils.RandomAddress() maxLogsSize := 9000 + ctx := context.Background() // We need full db here, because we want to test transaction rollbacks. 
_, db := heavyweight.FullTestDBV2(t, nil) - o := logpoller.NewORM(chainID, db, logger.Test(t), pgtest.NewQConfig(true)) + o := logpoller.NewORM(chainID, db, logger.Test(t)) logs := make([]logpoller.Log, maxLogsSize, maxLogsSize+1) for i := 0; i < maxLogsSize; i++ { @@ -1435,10 +1451,10 @@ func TestInsertLogsInTx(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { // clean all logs and blocks between test cases - defer func() { _ = o.DeleteLogsAndBlocksAfter(0) }() + defer func() { _ = o.DeleteLogsAndBlocksAfter(ctx, 0) }() - insertErr := o.InsertLogs(tt.logs) - logsFromDb, err := o.SelectLogs(0, math.MaxInt, address, event) + insertErr := o.InsertLogs(ctx, tt.logs) + logsFromDb, err := o.SelectLogs(ctx, 0, math.MaxInt, address, event) assert.NoError(t, err) if tt.shouldRollback { @@ -1453,6 +1469,7 @@ func TestInsertLogsInTx(t *testing.T) { } func TestSelectLogsDataWordBetween(t *testing.T) { + ctx := context.Background() address := utils.RandomAddress() eventSig := utils.RandomBytes32() th := SetupTH(t, false, 2, 3, 2, 1000) @@ -1465,7 +1482,7 @@ func TestSelectLogsDataWordBetween(t *testing.T) { secondLogData = append(secondLogData, logpoller.EvmWord(5).Bytes()...) secondLogData = append(secondLogData, logpoller.EvmWord(20).Bytes()...) 
- err := th.ORM.InsertLogsWithBlock( + err := th.ORM.InsertLogsWithBlock(ctx, []logpoller.Log{ GenLogWithData(th.ChainID, address, eventSig, 1, 1, firstLogData), GenLogWithData(th.ChainID, address, eventSig, 2, 2, secondLogData), @@ -1503,7 +1520,7 @@ func TestSelectLogsDataWordBetween(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - logs, err1 := th.ORM.SelectLogsDataWordBetween(address, eventSig, 0, 1, logpoller.EvmWord(tt.wordValue), logpoller.Unconfirmed) + logs, err1 := th.ORM.SelectLogsDataWordBetween(ctx, address, eventSig, 0, 1, logpoller.EvmWord(tt.wordValue), logpoller.Unconfirmed) assert.NoError(t, err1) assert.Len(t, logs, len(tt.expectedLogs)) @@ -1517,7 +1534,8 @@ func TestSelectLogsDataWordBetween(t *testing.T) { func Benchmark_LogsDataWordBetween(b *testing.B) { chainId := big.NewInt(137) _, db := heavyweight.FullTestDBV2(b, nil) - o := logpoller.NewORM(chainId, db, logger.Test(b), pgtest.NewQConfig(false)) + o := logpoller.NewORM(chainId, db, logger.Test(b)) + ctx := context.Background() numberOfReports := 100_000 numberOfMessagesPerReport := 256 @@ -1547,13 +1565,13 @@ func Benchmark_LogsDataWordBetween(b *testing.B) { CreatedAt: time.Now(), }) } - require.NoError(b, o.InsertBlock(utils.RandomHash(), int64(numberOfReports*numberOfMessagesPerReport), time.Now(), int64(numberOfReports*numberOfMessagesPerReport))) - require.NoError(b, o.InsertLogs(dbLogs)) + require.NoError(b, o.InsertBlock(ctx, utils.RandomHash(), int64(numberOfReports*numberOfMessagesPerReport), time.Now(), int64(numberOfReports*numberOfMessagesPerReport))) + require.NoError(b, o.InsertLogs(ctx, dbLogs)) b.ResetTimer() for i := 0; i < b.N; i++ { - logs, err := o.SelectLogsDataWordBetween( + logs, err := o.SelectLogsDataWordBetween(ctx, commitStoreAddress, commitReportAccepted, 2, diff --git a/core/chains/legacyevm/chain.go b/core/chains/legacyevm/chain.go index 3a91f7083a3..f806ae3084f 100644 --- a/core/chains/legacyevm/chain.go +++ 
b/core/chains/legacyevm/chain.go @@ -243,7 +243,7 @@ func newChain(ctx context.Context, cfg *evmconfig.ChainScoped, nodes []*toml.Nod logPoller = opts.GenLogPoller(chainID) } else { logPoller = logpoller.NewLogPoller( - logpoller.NewObservedORM(chainID, db, l, cfg.Database()), + logpoller.NewObservedORM(chainID, db, l), client, l, cfg.EVM().LogPollInterval(), diff --git a/core/services/blockhashstore/coordinators.go b/core/services/blockhashstore/coordinators.go index 4cb58bab6fd..64e0f0550f5 100644 --- a/core/services/blockhashstore/coordinators.go +++ b/core/services/blockhashstore/coordinators.go @@ -12,7 +12,6 @@ import ( v1 "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/solidity_vrf_coordinator_interface" v2 "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrf_coordinator_v2" v2plus "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrf_coordinator_v2plus_interface" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) var ( @@ -97,8 +96,7 @@ func (v *V1Coordinator) Requests( []common.Hash{ v1.VRFCoordinatorRandomnessRequest{}.Topic(), }, - v.c.Address(), - pg.WithParentCtx(ctx)) + v.c.Address()) if err != nil { return nil, errors.Wrap(err, "filter v1 requests") } @@ -121,7 +119,7 @@ func (v *V1Coordinator) Requests( // Fulfillments satisfies the Coordinator interface. 
func (v *V1Coordinator) Fulfillments(ctx context.Context, fromBlock uint64) ([]Event, error) { - toBlock, err := v.lp.LatestBlock(pg.WithParentCtx(ctx)) + toBlock, err := v.lp.LatestBlock(ctx) if err != nil { return nil, errors.Wrap(err, "fetching latest block") } @@ -132,8 +130,7 @@ func (v *V1Coordinator) Fulfillments(ctx context.Context, fromBlock uint64) ([]E []common.Hash{ v1.VRFCoordinatorRandomnessRequestFulfilled{}.Topic(), }, - v.c.Address(), - pg.WithParentCtx(ctx)) + v.c.Address()) if err != nil { return nil, errors.Wrap(err, "filter v1 fulfillments") } @@ -188,8 +185,7 @@ func (v *V2Coordinator) Requests( []common.Hash{ v2.VRFCoordinatorV2RandomWordsRequested{}.Topic(), }, - v.c.Address(), - pg.WithParentCtx(ctx)) + v.c.Address()) if err != nil { return nil, errors.Wrap(err, "filter v2 requests") } @@ -212,7 +208,7 @@ func (v *V2Coordinator) Requests( // Fulfillments satisfies the Coordinator interface. func (v *V2Coordinator) Fulfillments(ctx context.Context, fromBlock uint64) ([]Event, error) { - toBlock, err := v.lp.LatestBlock(pg.WithParentCtx(ctx)) + toBlock, err := v.lp.LatestBlock(ctx) if err != nil { return nil, errors.Wrap(err, "fetching latest block") } @@ -223,8 +219,7 @@ func (v *V2Coordinator) Fulfillments(ctx context.Context, fromBlock uint64) ([]E []common.Hash{ v2.VRFCoordinatorV2RandomWordsFulfilled{}.Topic(), }, - v.c.Address(), - pg.WithParentCtx(ctx)) + v.c.Address()) if err != nil { return nil, errors.Wrap(err, "filter v2 fulfillments") } @@ -279,8 +274,7 @@ func (v *V2PlusCoordinator) Requests( []common.Hash{ v2plus.IVRFCoordinatorV2PlusInternalRandomWordsRequested{}.Topic(), }, - v.c.Address(), - pg.WithParentCtx(ctx)) + v.c.Address()) if err != nil { return nil, errors.Wrap(err, "filter v2 requests") } @@ -303,7 +297,7 @@ func (v *V2PlusCoordinator) Requests( // Fulfillments satisfies the Coordinator interface. 
func (v *V2PlusCoordinator) Fulfillments(ctx context.Context, fromBlock uint64) ([]Event, error) { - toBlock, err := v.lp.LatestBlock(pg.WithParentCtx(ctx)) + toBlock, err := v.lp.LatestBlock(ctx) if err != nil { return nil, errors.Wrap(err, "fetching latest block") } @@ -314,8 +308,7 @@ func (v *V2PlusCoordinator) Fulfillments(ctx context.Context, fromBlock uint64) []common.Hash{ v2plus.IVRFCoordinatorV2PlusInternalRandomWordsFulfilled{}.Topic(), }, - v.c.Address(), - pg.WithParentCtx(ctx)) + v.c.Address()) if err != nil { return nil, errors.Wrap(err, "filter v2 fulfillments") } diff --git a/core/services/blockhashstore/delegate.go b/core/services/blockhashstore/delegate.go index d6c27acd0b5..e1fa76f639d 100644 --- a/core/services/blockhashstore/delegate.go +++ b/core/services/blockhashstore/delegate.go @@ -169,7 +169,7 @@ func (d *Delegate) ServicesForSpec(jb job.Job) ([]job.ServiceCtx, error) { int(jb.BlockhashStoreSpec.LookbackBlocks), jb.BlockhashStoreSpec.HeartbeatPeriod, func(ctx context.Context) (uint64, error) { - head, err := lp.LatestBlock(pg.WithParentCtx(ctx)) + head, err := lp.LatestBlock(ctx) if err != nil { return 0, errors.Wrap(err, "getting chain head") } diff --git a/core/services/ocr2/delegate.go b/core/services/ocr2/delegate.go index c838316b1cc..8ea0af442f0 100644 --- a/core/services/ocr2/delegate.go +++ b/core/services/ocr2/delegate.go @@ -331,7 +331,7 @@ func (d *Delegate) cleanupEVM(jb job.Job, q pg.Queryer, relayID relay.ID) error for _, filter := range filters { d.lggr.Debugf("Unregistering %s filter", filter) - err = lp.UnregisterFilter(filter, pg.WithQueryer(q)) + err = lp.UnregisterFilter(filter) if err != nil { return errors.Wrapf(err, "Failed to unregister filter %s", filter) } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/log_provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/log_provider.go index 45884d2f726..79bfd86e9d2 100644 --- 
a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/log_provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/log_provider.go @@ -21,7 +21,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" registry "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_wrapper2_0" "github.com/smartcontractkit/chainlink/v2/core/logger" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) type TransmitUnpacker interface { @@ -144,7 +143,7 @@ func (c *LogProvider) HealthReport() map[string]error { } func (c *LogProvider) PerformLogs(ctx context.Context) ([]ocr2keepers.PerformLog, error) { - end, err := c.logPoller.LatestBlock(pg.WithParentCtx(ctx)) + end, err := c.logPoller.LatestBlock(ctx) if err != nil { return nil, fmt.Errorf("%w: failed to get latest block from log poller", err) } @@ -158,7 +157,6 @@ func (c *LogProvider) PerformLogs(ctx context.Context) ([]ocr2keepers.PerformLog registry.KeeperRegistryUpkeepPerformed{}.Topic(), }, c.registryAddress, - pg.WithParentCtx(ctx), ) if err != nil { return nil, fmt.Errorf("%w: failed to collect logs from log poller", err) @@ -185,7 +183,7 @@ func (c *LogProvider) PerformLogs(ctx context.Context) ([]ocr2keepers.PerformLog } func (c *LogProvider) StaleReportLogs(ctx context.Context) ([]ocr2keepers.StaleReportLog, error) { - end, err := c.logPoller.LatestBlock(pg.WithParentCtx(ctx)) + end, err := c.logPoller.LatestBlock(ctx) if err != nil { return nil, fmt.Errorf("%w: failed to get latest block from log poller", err) } @@ -201,7 +199,6 @@ func (c *LogProvider) StaleReportLogs(ctx context.Context) ([]ocr2keepers.StaleR registry.KeeperRegistryReorgedUpkeepReport{}.Topic(), }, c.registryAddress, - pg.WithParentCtx(ctx), ) if err != nil { return nil, fmt.Errorf("%w: failed to collect logs from log poller", err) @@ -219,7 +216,6 @@ func (c *LogProvider) StaleReportLogs(ctx context.Context) ([]ocr2keepers.StaleR 
registry.KeeperRegistryStaleUpkeepReport{}.Topic(), }, c.registryAddress, - pg.WithParentCtx(ctx), ) if err != nil { return nil, fmt.Errorf("%w: failed to collect logs from log poller", err) @@ -237,7 +233,6 @@ func (c *LogProvider) StaleReportLogs(ctx context.Context) ([]ocr2keepers.StaleR registry.KeeperRegistryInsufficientFundsUpkeepReport{}.Topic(), }, c.registryAddress, - pg.WithParentCtx(ctx), ) if err != nil { return nil, fmt.Errorf("%w: failed to collect logs from log poller", err) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go index a6a2f40f855..0b60fba6b95 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go @@ -27,7 +27,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_wrapper2_0" "github.com/smartcontractkit/chainlink/v2/core/logger" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) const ( @@ -351,7 +350,7 @@ func (r *EvmRegistry) pollLogs() error { var end logpoller.LogPollerBlock var err error - if end, err = r.poller.LatestBlock(pg.WithParentCtx(r.ctx)); err != nil { + if end, err = r.poller.LatestBlock(r.ctx); err != nil { return fmt.Errorf("%w: %s", ErrHeadNotAvailable, err) } @@ -373,7 +372,6 @@ func (r *EvmRegistry) pollLogs() error { end.BlockNumber, upkeepStateEvents, r.addr, - pg.WithParentCtx(r.ctx), ); err != nil { return fmt.Errorf("%w: %s", ErrLogReadFailure, err) } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/block_subscriber.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/block_subscriber.go index 9ae17c08ee3..3a7d329ac02 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/block_subscriber.go +++ 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/block_subscriber.go @@ -17,7 +17,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" "github.com/smartcontractkit/chainlink/v2/core/logger" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" "github.com/smartcontractkit/chainlink/v2/core/utils" ) @@ -80,7 +79,7 @@ func NewBlockSubscriber(hb httypes.HeadBroadcaster, lp logpoller.LogPoller, fina } func (bs *BlockSubscriber) getBlockRange(ctx context.Context) ([]uint64, error) { - h, err := bs.lp.LatestBlock(pg.WithParentCtx(ctx)) + h, err := bs.lp.LatestBlock(ctx) if err != nil { return nil, err } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/block_time.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/block_time.go index 814ed29d900..9dd442f2e8d 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/block_time.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/block_time.go @@ -7,7 +7,6 @@ import ( "time" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) var ( @@ -30,7 +29,7 @@ func (r *blockTimeResolver) BlockTime(ctx context.Context, blockSampleSize int64 blockSampleSize = defaultSampleSize } - latest, err := r.poller.LatestBlock(pg.WithParentCtx(ctx)) + latest, err := r.poller.LatestBlock(ctx) if err != nil { return 0, fmt.Errorf("failed to get latest block from poller: %w", err) } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index d1360faaf6d..e8e8c3716ed 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -24,7 
+24,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/automation_utils_2_1" "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" "github.com/smartcontractkit/chainlink/v2/core/utils" ) @@ -158,7 +157,7 @@ func (p *logEventProvider) HealthReport() map[string]error { } func (p *logEventProvider) GetLatestPayloads(ctx context.Context) ([]ocr2keepers.UpkeepPayload, error) { - latest, err := p.poller.LatestBlock(pg.WithParentCtx(ctx)) + latest, err := p.poller.LatestBlock(ctx) if err != nil { return nil, fmt.Errorf("%w: %s", ErrHeadNotAvailable, err) } @@ -196,7 +195,7 @@ func (p *logEventProvider) ReadLogs(pctx context.Context, ids ...*big.Int) error ctx, cancel := context.WithTimeout(pctx, readLogsTimeout) defer cancel() - latest, err := p.poller.LatestBlock(pg.WithParentCtx(ctx)) + latest, err := p.poller.LatestBlock(pctx) if err != nil { return fmt.Errorf("%w: %s", ErrHeadNotAvailable, err) } @@ -378,7 +377,7 @@ func (p *logEventProvider) readLogs(ctx context.Context, latest int64, filters [ start = configUpdateBlock } // query logs based on contract address, event sig, and blocks - logs, err := p.poller.LogsWithSigs(start, latest, []common.Hash{filter.topics[0]}, common.BytesToAddress(filter.addr), pg.WithParentCtx(ctx)) + logs, err := p.poller.LogsWithSigs(start, latest, []common.Hash{filter.topics[0]}, common.BytesToAddress(filter.addr)) if err != nil { // cancel limit reservation as we failed to get logs resv.Cancel() diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go index 69a4872351d..a35200734eb 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go +++ 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go @@ -12,7 +12,6 @@ import ( "golang.org/x/time/rate" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) var ( @@ -105,7 +104,7 @@ func (p *logEventProvider) RegisterFilter(ctx context.Context, opts FilterOption // register registers the upkeep filter with the log poller and adds it to the filter store. func (p *logEventProvider) register(ctx context.Context, lpFilter logpoller.Filter, ufilter upkeepFilter) error { - latest, err := p.poller.LatestBlock(pg.WithParentCtx(ctx)) + latest, err := p.poller.LatestBlock(ctx) if err != nil { return fmt.Errorf("failed to get latest block while registering filter: %w", err) } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go index b28ece9843f..a39f52ffd58 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go @@ -27,7 +27,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" "github.com/smartcontractkit/chainlink/v2/core/utils" ) @@ -206,7 +205,7 @@ func (r *logRecoverer) getLogTriggerCheckData(ctx context.Context, proposal ocr2 if !r.filterStore.Has(proposal.UpkeepID.BigInt()) { return nil, fmt.Errorf("filter not found for upkeep %v", proposal.UpkeepID) } - latest, err := r.poller.LatestBlock(pg.WithParentCtx(ctx)) + latest, err := r.poller.LatestBlock(ctx) if err != nil { return nil, err } @@ -260,7 +259,7 @@ func (r *logRecoverer) getLogTriggerCheckData(ctx 
context.Context, proposal ocr2 return nil, fmt.Errorf("log block %d is before the filter configUpdateBlock %d for upkeepID %s", logBlock, filter.configUpdateBlock, proposal.UpkeepID.String()) } - logs, err := r.poller.LogsWithSigs(logBlock-1, logBlock+1, filter.topics, common.BytesToAddress(filter.addr), pg.WithParentCtx(ctx)) + logs, err := r.poller.LogsWithSigs(logBlock-1, logBlock+1, filter.topics, common.BytesToAddress(filter.addr)) if err != nil { return nil, fmt.Errorf("could not read logs: %w", err) } @@ -285,7 +284,7 @@ func (r *logRecoverer) getLogTriggerCheckData(ctx context.Context, proposal ocr2 } func (r *logRecoverer) GetRecoveryProposals(ctx context.Context) ([]ocr2keepers.UpkeepPayload, error) { - latestBlock, err := r.poller.LatestBlock(pg.WithParentCtx(ctx)) + latestBlock, err := r.poller.LatestBlock(ctx) if err != nil { return nil, fmt.Errorf("%w: %s", ErrHeadNotAvailable, err) } @@ -328,7 +327,7 @@ func (r *logRecoverer) GetRecoveryProposals(ctx context.Context) ([]ocr2keepers. 
} func (r *logRecoverer) recover(ctx context.Context) error { - latest, err := r.poller.LatestBlock(pg.WithParentCtx(ctx)) + latest, err := r.poller.LatestBlock(ctx) if err != nil { return fmt.Errorf("%w: %s", ErrHeadNotAvailable, err) } @@ -387,7 +386,7 @@ func (r *logRecoverer) recoverFilter(ctx context.Context, f upkeepFilter, startB end = offsetBlock } // we expect start to be > offsetBlock in any case - logs, err := r.poller.LogsWithSigs(start, end, f.topics, common.BytesToAddress(f.addr), pg.WithParentCtx(ctx)) + logs, err := r.poller.LogsWithSigs(start, end, f.topics, common.BytesToAddress(f.addr)) if err != nil { return fmt.Errorf("could not read logs: %w", err) } @@ -602,7 +601,7 @@ func (r *logRecoverer) clean(ctx context.Context) { } func (r *logRecoverer) tryExpire(ctx context.Context, ids ...string) error { - latestBlock, err := r.poller.LatestBlock(pg.WithParentCtx(ctx)) + latestBlock, err := r.poller.LatestBlock(ctx) if err != nil { return fmt.Errorf("failed to get latest block: %w", err) } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go index fd7bfa91d7f..4e55a646fe4 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go @@ -35,7 +35,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" "github.com/smartcontractkit/chainlink/v2/core/utils" ) @@ -339,11 +338,11 @@ func (r *EvmRegistry) refreshLogTriggerUpkeepsBatch(logTriggerIDs []*big.Int) er logTriggerHashes = append(logTriggerHashes, common.BigToHash(id)) } - 
unpausedLogs, err := r.poller.IndexedLogs(iregistry21.IKeeperRegistryMasterUpkeepUnpaused{}.Topic(), r.addr, 1, logTriggerHashes, logpoller.Confirmations(r.finalityDepth), pg.WithParentCtx(r.ctx)) + unpausedLogs, err := r.poller.IndexedLogs(iregistry21.IKeeperRegistryMasterUpkeepUnpaused{}.Topic(), r.addr, 1, logTriggerHashes, logpoller.Confirmations(r.finalityDepth)) if err != nil { return err } - configSetLogs, err := r.poller.IndexedLogs(iregistry21.IKeeperRegistryMasterUpkeepTriggerConfigSet{}.Topic(), r.addr, 1, logTriggerHashes, logpoller.Confirmations(r.finalityDepth), pg.WithParentCtx(r.ctx)) + configSetLogs, err := r.poller.IndexedLogs(iregistry21.IKeeperRegistryMasterUpkeepTriggerConfigSet{}.Topic(), r.addr, 1, logTriggerHashes, logpoller.Confirmations(r.finalityDepth)) if err != nil { return err } @@ -406,7 +405,7 @@ func (r *EvmRegistry) pollUpkeepStateLogs() error { var end logpoller.LogPollerBlock var err error - if end, err = r.poller.LatestBlock(pg.WithParentCtx(r.ctx)); err != nil { + if end, err = r.poller.LatestBlock(r.ctx); err != nil { return fmt.Errorf("%w: %s", ErrHeadNotAvailable, err) } @@ -426,7 +425,6 @@ func (r *EvmRegistry) pollUpkeepStateLogs() error { end.BlockNumber, upkeepStateEvents, r.addr, - pg.WithParentCtx(r.ctx), ); err != nil { return fmt.Errorf("%w: %s", ErrLogReadFailure, err) } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider.go index eb8dc1793c1..ea4a2f58d09 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider.go @@ -19,7 +19,6 @@ import ( iregistry21 "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" "github.com/smartcontractkit/chainlink/v2/core/logger" 
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) var _ types.TransmitEventProvider = &EventProvider{} @@ -136,7 +135,7 @@ func (c *EventProvider) HealthReport() map[string]error { } func (c *EventProvider) GetLatestEvents(ctx context.Context) ([]ocr2keepers.TransmitEvent, error) { - end, err := c.logPoller.LatestBlock(pg.WithParentCtx(ctx)) + end, err := c.logPoller.LatestBlock(ctx) if err != nil { return nil, fmt.Errorf("%w: failed to get latest block from log poller", err) } @@ -153,7 +152,6 @@ func (c *EventProvider) GetLatestEvents(ctx context.Context) ([]ocr2keepers.Tran iregistry21.IKeeperRegistryMasterInsufficientFundsUpkeepReport{}.Topic(), }, c.registryAddress, - pg.WithParentCtx(ctx), ) if err != nil { return nil, fmt.Errorf("%w: failed to collect logs from log poller", err) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/scanner.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/scanner.go index 30a50977d17..a0009ae65c5 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/scanner.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/scanner.go @@ -12,7 +12,6 @@ import ( iregistry21 "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) var ( @@ -79,7 +78,7 @@ func (s *performedEventsScanner) ScanWorkIDs(ctx context.Context, workID ...stri end = len(ids) } batch := ids[i:end] - batchLogs, err := s.poller.IndexedLogs(iregistry21.IKeeperRegistryMasterDedupKeyAdded{}.Topic(), s.registryAddress, 1, batch, logpoller.Confirmations(s.finalityDepth), pg.WithParentCtx(ctx)) + 
batchLogs, err := s.poller.IndexedLogs(iregistry21.IKeeperRegistryMasterDedupKeyAdded{}.Topic(), s.registryAddress, 1, batch, logpoller.Confirmations(s.finalityDepth)) if err != nil { return nil, fmt.Errorf("error fetching logs: %w", err) } diff --git a/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator.go b/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator.go index 88d6544d8c4..9c84adea700 100644 --- a/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator.go +++ b/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator.go @@ -39,7 +39,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/job" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ethkey" ocr2vrfconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2vrf/config" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) var _ ocr2vrftypes.CoordinatorInterface = &coordinator{} @@ -226,7 +225,7 @@ func New( } func (c *coordinator) CurrentChainHeight(ctx context.Context) (uint64, error) { - head, err := c.lp.LatestBlock(pg.WithParentCtx(ctx)) + head, err := c.lp.LatestBlock(ctx) if err != nil { return 0, err } @@ -263,8 +262,7 @@ func (c *coordinator) ReportIsOnchain( []common.Hash{ enrTopic, }, - 1, - pg.WithParentCtx(ctx)) + 1) if err != nil { return false, errors.Wrap(err, "log poller IndexedLogs") } @@ -350,8 +348,7 @@ func (c *coordinator) ReportBlocks( c.randomWordsFulfilledTopic, c.outputsServedTopic, }, - c.coordinatorAddress, - pg.WithParentCtx(ctx)) + c.coordinatorAddress) if err != nil { err = errors.Wrapf(err, "logs with topics. 
address: %s", c.coordinatorAddress) return @@ -548,7 +545,7 @@ func (c *coordinator) getBlockhashesMapping( return blockNumbers[a] < blockNumbers[b] }) - heads, err := c.lp.GetBlocksRange(ctx, blockNumbers, pg.WithParentCtx(ctx)) + heads, err := c.lp.GetBlocksRange(ctx, blockNumbers) if err != nil { return nil, errors.Wrap(err, "logpoller.GetBlocks") } @@ -915,7 +912,6 @@ func (c *coordinator) DKGVRFCommittees(ctx context.Context) (dkgCommittee, vrfCo c.configSetTopic, c.beaconAddress, logpoller.Confirmations(c.finalityDepth), - pg.WithParentCtx(ctx), ) if err != nil { err = errors.Wrap(err, "latest vrf ConfigSet by sig with confs") @@ -926,7 +922,6 @@ func (c *coordinator) DKGVRFCommittees(ctx context.Context) (dkgCommittee, vrfCo c.configSetTopic, c.dkgAddress, logpoller.Confirmations(c.finalityDepth), - pg.WithParentCtx(ctx), ) if err != nil { err = errors.Wrap(err, "latest dkg ConfigSet by sig with confs") diff --git a/core/services/relay/evm/config_poller.go b/core/services/relay/evm/config_poller.go index dc75fe037fe..e4cbdb56601 100644 --- a/core/services/relay/evm/config_poller.go +++ b/core/services/relay/evm/config_poller.go @@ -21,7 +21,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/logger" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" evmRelayTypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types" ) @@ -164,7 +163,7 @@ func (cp *configPoller) Replay(ctx context.Context, fromBlock int64) error { // LatestConfigDetails returns the latest config details from the logs func (cp *configPoller) LatestConfigDetails(ctx context.Context) (changedInBlock uint64, configDigest ocrtypes.ConfigDigest, err error) { - latest, err := cp.destChainLogPoller.LatestLogByEventSigWithConfs(ConfigSet, cp.aggregatorContractAddr, 1, pg.WithParentCtx(ctx)) + latest, err := 
cp.destChainLogPoller.LatestLogByEventSigWithConfs(ConfigSet, cp.aggregatorContractAddr, 1) if err != nil { if errors.Is(err, sql.ErrNoRows) { if cp.isConfigStoreAvailable() { @@ -185,7 +184,7 @@ func (cp *configPoller) LatestConfigDetails(ctx context.Context) (changedInBlock // LatestConfig returns the latest config from the logs on a certain block func (cp *configPoller) LatestConfig(ctx context.Context, changedInBlock uint64) (ocrtypes.ContractConfig, error) { - lgs, err := cp.destChainLogPoller.Logs(int64(changedInBlock), int64(changedInBlock), ConfigSet, cp.aggregatorContractAddr, pg.WithParentCtx(ctx)) + lgs, err := cp.destChainLogPoller.Logs(int64(changedInBlock), int64(changedInBlock), ConfigSet, cp.aggregatorContractAddr) if err != nil { return ocrtypes.ContractConfig{}, err } @@ -206,7 +205,7 @@ func (cp *configPoller) LatestConfig(ctx context.Context, changedInBlock uint64) // LatestBlockHeight returns the latest block height from the logs func (cp *configPoller) LatestBlockHeight(ctx context.Context) (blockHeight uint64, err error) { - latest, err := cp.destChainLogPoller.LatestBlock(pg.WithParentCtx(ctx)) + latest, err := cp.destChainLogPoller.LatestBlock(ctx) if err != nil { if errors.Is(err, sql.ErrNoRows) { return 0, nil diff --git a/core/services/relay/evm/contract_transmitter.go b/core/services/relay/evm/contract_transmitter.go index 76360e34e1a..1d0d1753dfd 100644 --- a/core/services/relay/evm/contract_transmitter.go +++ b/core/services/relay/evm/contract_transmitter.go @@ -19,7 +19,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) type ContractTransmitter interface { @@ -182,7 +181,7 @@ func (oc *contractTransmitter) LatestConfigDigestAndEpoch(ctx context.Context) ( return ocrtypes.ConfigDigest{}, 0, err } latest, err := 
oc.lp.LatestLogByEventSigWithConfs( - oc.transmittedEventSig, oc.contractAddress, 1, pg.WithParentCtx(ctx)) + oc.transmittedEventSig, oc.contractAddress, 1) if err != nil { if errors.Is(err, sql.ErrNoRows) { // No transmissions yet diff --git a/core/services/relay/evm/functions/config_poller.go b/core/services/relay/evm/functions/config_poller.go index 7a59d499898..d4d8d12df30 100644 --- a/core/services/relay/evm/functions/config_poller.go +++ b/core/services/relay/evm/functions/config_poller.go @@ -15,7 +15,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/logger" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types" ) @@ -136,7 +135,7 @@ func (cp *configPoller) LatestConfigDetails(ctx context.Context) (changedInBlock return 0, ocrtypes.ConfigDigest{}, nil } - latest, err := cp.destChainLogPoller.LatestLogByEventSigWithConfs(ConfigSet, *contractAddr, 1, pg.WithParentCtx(ctx)) + latest, err := cp.destChainLogPoller.LatestLogByEventSigWithConfs(ConfigSet, *contractAddr, 1) if err != nil { if errors.Is(err, sql.ErrNoRows) { return 0, ocrtypes.ConfigDigest{}, nil @@ -158,7 +157,7 @@ func (cp *configPoller) LatestConfig(ctx context.Context, changedInBlock uint64) return ocrtypes.ContractConfig{}, errors.New("no target contract address set yet") } - lgs, err := cp.destChainLogPoller.Logs(int64(changedInBlock), int64(changedInBlock), ConfigSet, *contractAddr, pg.WithParentCtx(ctx)) + lgs, err := cp.destChainLogPoller.Logs(int64(changedInBlock), int64(changedInBlock), ConfigSet, *contractAddr) if err != nil { return ocrtypes.ContractConfig{}, err } @@ -174,7 +173,7 @@ func (cp *configPoller) LatestConfig(ctx context.Context, changedInBlock uint64) } func (cp *configPoller) LatestBlockHeight(ctx context.Context) (blockHeight uint64, err error) { - latest, err := 
cp.destChainLogPoller.LatestBlock(pg.WithParentCtx(ctx)) + latest, err := cp.destChainLogPoller.LatestBlock(ctx) if err != nil { if errors.Is(err, sql.ErrNoRows) { return 0, nil diff --git a/core/services/relay/evm/functions/contract_transmitter.go b/core/services/relay/evm/functions/contract_transmitter.go index 2a62db31a8c..a9ae2298e36 100644 --- a/core/services/relay/evm/functions/contract_transmitter.go +++ b/core/services/relay/evm/functions/contract_transmitter.go @@ -22,7 +22,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/functions/encoding" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" evmRelayTypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types" ) @@ -228,7 +227,7 @@ func (oc *contractTransmitter) LatestConfigDigestAndEpoch(ctx context.Context) ( return ocrtypes.ConfigDigest{}, 0, err } latest, err := oc.lp.LatestLogByEventSigWithConfs( - oc.transmittedEventSig, *contractAddr, 1, pg.WithParentCtx(ctx)) + oc.transmittedEventSig, *contractAddr, 1) if err != nil { if errors.Is(err, sql.ErrNoRows) { // No transmissions yet diff --git a/core/services/relay/evm/functions/logpoller_wrapper.go b/core/services/relay/evm/functions/logpoller_wrapper.go index e76b567b42b..fb91af0b2ae 100644 --- a/core/services/relay/evm/functions/logpoller_wrapper.go +++ b/core/services/relay/evm/functions/logpoller_wrapper.go @@ -151,7 +151,7 @@ func (l *logPollerWrapper) LatestEvents() ([]evmRelayTypes.OracleRequest, []evmR if l.proposedCoordinator != (common.Address{}) && l.activeCoordinator != l.proposedCoordinator { coordinators = append(coordinators, l.proposedCoordinator) } - latest, err := l.logPoller.LatestBlock() + latest, err := l.logPoller.LatestBlock(context.Background()) if err != nil { l.mu.Unlock() return nil, nil, err diff --git a/core/services/relay/evm/mercury/config_poller.go 
b/core/services/relay/evm/mercury/config_poller.go index 98ef78020c7..78ce76e89b3 100644 --- a/core/services/relay/evm/mercury/config_poller.go +++ b/core/services/relay/evm/mercury/config_poller.go @@ -14,7 +14,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/verifier" "github.com/smartcontractkit/chainlink/v2/core/logger" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils" ) @@ -132,7 +131,7 @@ func (cp *ConfigPoller) Replay(ctx context.Context, fromBlock int64) error { // LatestConfigDetails returns the latest config details from the logs func (cp *ConfigPoller) LatestConfigDetails(ctx context.Context) (changedInBlock uint64, configDigest ocrtypes.ConfigDigest, err error) { cp.lggr.Debugw("LatestConfigDetails", "eventSig", FeedScopedConfigSet, "addr", cp.addr, "topicIndex", feedIdTopicIndex, "feedID", cp.feedId) - logs, err := cp.destChainLogPoller.IndexedLogs(FeedScopedConfigSet, cp.addr, feedIdTopicIndex, []common.Hash{cp.feedId}, 1, pg.WithParentCtx(ctx)) + logs, err := cp.destChainLogPoller.IndexedLogs(FeedScopedConfigSet, cp.addr, feedIdTopicIndex, []common.Hash{cp.feedId}, 1) if err != nil { return 0, ocrtypes.ConfigDigest{}, err } @@ -149,7 +148,7 @@ func (cp *ConfigPoller) LatestConfigDetails(ctx context.Context) (changedInBlock // LatestConfig returns the latest config from the logs on a certain block func (cp *ConfigPoller) LatestConfig(ctx context.Context, changedInBlock uint64) (ocrtypes.ContractConfig, error) { - lgs, err := cp.destChainLogPoller.IndexedLogsByBlockRange(int64(changedInBlock), int64(changedInBlock), FeedScopedConfigSet, cp.addr, feedIdTopicIndex, []common.Hash{cp.feedId}, pg.WithParentCtx(ctx)) + lgs, err := cp.destChainLogPoller.IndexedLogsByBlockRange(int64(changedInBlock), int64(changedInBlock), FeedScopedConfigSet, cp.addr, 
feedIdTopicIndex, []common.Hash{cp.feedId}) if err != nil { return ocrtypes.ContractConfig{}, err } @@ -166,7 +165,7 @@ func (cp *ConfigPoller) LatestConfig(ctx context.Context, changedInBlock uint64) // LatestBlockHeight returns the latest block height from the logs func (cp *ConfigPoller) LatestBlockHeight(ctx context.Context) (blockHeight uint64, err error) { - latest, err := cp.destChainLogPoller.LatestBlock(pg.WithParentCtx(ctx)) + latest, err := cp.destChainLogPoller.LatestBlock(ctx) if err != nil { if errors.Is(err, sql.ErrNoRows) { return 0, nil diff --git a/core/services/vrf/v2/listener_v2_log_listener.go b/core/services/vrf/v2/listener_v2_log_listener.go index 07b4c2c3800..bc53f8aa400 100644 --- a/core/services/vrf/v2/listener_v2_log_listener.go +++ b/core/services/vrf/v2/listener_v2_log_listener.go @@ -14,7 +14,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" "github.com/smartcontractkit/chainlink/v2/core/logger" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" "github.com/smartcontractkit/chainlink/v2/core/services/vrf/vrfcommon" ) @@ -107,7 +106,7 @@ func (lsn *listenerV2) initializeLastProcessedBlock(ctx context.Context) (lastPr start := time.Now() // will retry on error in the runLogListener loop - latestBlock, err := lp.LatestBlock() + latestBlock, err := lp.LatestBlock(ctx) if err != nil { return 0, fmt.Errorf("LogPoller.LatestBlock(): %w", err) } @@ -172,7 +171,7 @@ func (lsn *listenerV2) updateLastProcessedBlock(ctx context.Context, currLastPro lp := lsn.chain.LogPoller() start := time.Now() - latestBlock, err := lp.LatestBlock(pg.WithParentCtx(ctx)) + latestBlock, err := lp.LatestBlock(ctx) if err != nil { lsn.l.Errorw("error getting latest block", "err", err) return 0, fmt.Errorf("LogPoller.LatestBlock(): %w", err) @@ -191,7 +190,6 @@ func (lsn *listenerV2) updateLastProcessedBlock(ctx context.Context, currLastPro 
latestBlock.FinalizedBlockNumber, []common.Hash{lsn.coordinator.RandomWordsFulfilledTopic(), lsn.coordinator.RandomWordsRequestedTopic()}, lsn.coordinator.Address(), - pg.WithParentCtx(ctx), ) if err != nil { return currLastProcessedBlock, fmt.Errorf("LogPoller.LogsWithSigs: %w", err) @@ -228,7 +226,7 @@ func (lsn *listenerV2) pollLogs(ctx context.Context, minConfs uint32, lastProces // latest unfinalized block used on purpose to get bleeding edge logs // we don't really have the luxury to wait for finalization on most chains // if we want to fulfill on time. - latestBlock, err := lp.LatestBlock() + latestBlock, err := lp.LatestBlock(ctx) if err != nil { return nil, fmt.Errorf("LogPoller.LatestBlock(): %w", err) } @@ -250,7 +248,6 @@ func (lsn *listenerV2) pollLogs(ctx context.Context, minConfs uint32, lastProces latestBlock.BlockNumber, []common.Hash{lsn.coordinator.RandomWordsFulfilledTopic(), lsn.coordinator.RandomWordsRequestedTopic()}, lsn.coordinator.Address(), - pg.WithParentCtx(ctx), ) if err != nil { return nil, fmt.Errorf("LogPoller.LogsWithSigs: %w", err) diff --git a/integration-tests/universal/log_poller/helpers.go b/integration-tests/universal/log_poller/helpers.go index db7eaee625b..0624759732e 100644 --- a/integration-tests/universal/log_poller/helpers.go +++ b/integration-tests/universal/log_poller/helpers.go @@ -36,6 +36,8 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" + tc "github.com/smartcontractkit/chainlink/integration-tests/testconfig" + lp_config "github.com/smartcontractkit/chainlink/integration-tests/testconfig/log_poller" "github.com/smartcontractkit/chainlink/integration-tests/types/config/node" evmcfg "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" @@ -43,10 +45,6 
@@ import ( "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/automation_utils_2_1" le "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_emitter" core_logger "github.com/smartcontractkit/chainlink/v2/core/logger" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" - - tc "github.com/smartcontractkit/chainlink/integration-tests/testconfig" - lp_config "github.com/smartcontractkit/chainlink/integration-tests/testconfig/log_poller" ) var ( @@ -147,7 +145,7 @@ func NewOrm(logger core_logger.SugaredLogger, chainID *big.Int, postgresDb *ctf_ } db.MapperFunc(reflectx.CamelToSnakeASCII) - return logpoller.NewORM(chainID, db, logger, pg.NewQConfig(false)), db, nil + return logpoller.NewORM(chainID, db, logger), db, nil } type ExpectedFilter struct { @@ -178,7 +176,7 @@ func NodeHasExpectedFilters(expectedFilters []ExpectedFilter, logger core_logger } defer db.Close() - knownFilters, err := orm.LoadFilters() + knownFilters, err := orm.LoadFilters(context.Background()) if err != nil { return false, "", err } @@ -318,7 +316,7 @@ func LogPollerHasFinalisedEndBlock(endBlock int64, chainID *big.Int, l zerolog.L defer db.Close() - latestBlock, err := orm.SelectLatestBlock() + latestBlock, err := orm.SelectLatestBlock(ctx) if err != nil { r <- boolQueryResult{ nodeName: clNode.ContainerName, @@ -415,7 +413,7 @@ func ClNodesHaveExpectedLogCount(startBlock, endBlock int64, chainID *big.Int, e foundLogsCount := 0 for _, filter := range expectedFilters { - logs, err := orm.SelectLogs(startBlock, endBlock, filter.emitterAddress, filter.topic) + logs, err := orm.SelectLogs(ctx, startBlock, endBlock, filter.emitterAddress, filter.topic) if err != nil { resultChan <- logQueryResult{ nodeName: clNode.ContainerName, @@ -541,7 +539,7 @@ func GetMissingLogs(startBlock, endBlock int64, logEmitters []*contracts.LogEmit for _, event := range cfg.General.EventsToEmit { l.Trace().Str("Event name", event.Name).Str("Emitter address", 
address.String()).Msg("Fetching single emitter's logs") - result, err := orm.SelectLogs(startBlock, endBlock, address, event.ID) + result, err := orm.SelectLogs(ctx, startBlock, endBlock, address, event.ID) if err != nil { r <- dbQueryResult{ err: err, From bbcf2f225f3f8e3835ea11d1d3480ea9fd465fe7 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 14 Feb 2024 10:37:12 -0500 Subject: [PATCH 07/65] Refactor logpoller ORM --- core/chains/evm/logpoller/orm.go | 412 ++++++++------------------ core/chains/evm/logpoller/orm_test.go | 3 +- 2 files changed, 131 insertions(+), 284 deletions(-) diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index ee75b75240d..86e69570fc2 100644 --- a/core/chains/evm/logpoller/orm.go +++ b/core/chains/evm/logpoller/orm.go @@ -81,6 +81,8 @@ func (o *DbORM) InsertBlock(ctx context.Context, blockHash common.Hash, blockNum ON CONFLICT DO NOTHING` ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() + // TODO: Why do we get a transaction failure?? Not using a transaction.. + // TODO: Maybe because the previous transaction failed for SelectLatestLogByEventSigWithConfs _, err := o.db.ExecContext(ctx, query, o.chainID.String(), blockHash, blockNumber, blockTimestamp, finalizedBlock) return err } @@ -90,31 +92,21 @@ func (o *DbORM) InsertBlock(ctx context.Context, blockHash common.Hash, blockNum // Each address/event pair must have a unique job id, so it may be removed when the job is deleted. // If a second job tries to overwrite the same pair, this should fail. func (o *DbORM) InsertFilter(ctx context.Context, filter Filter) (err error) { - args, err := newQueryArgs(o.chainID). - withCustomArg("name", filter.Name). - withCustomArg("retention", filter.Retention). - withAddressArray(filter.Addresses). - withEventSigArray(filter.EventSigs). 
- toArgs() - if err != nil { - return err - } - // '::' has to be escaped in the query string // https://github.com/jmoiron/sqlx/issues/91, https://github.com/jmoiron/sqlx/issues/428 query := ` INSERT INTO evm.log_poller_filters (name, evm_chain_id, retention, created_at, address, event) SELECT * FROM - (SELECT :name, :evm_chain_id ::::NUMERIC, :retention ::::BIGINT, NOW()) x, - (SELECT unnest(:address_array ::::BYTEA[]) addr) a, - (SELECT unnest(:event_sig_array ::::BYTEA[]) ev) e + (SELECT $1, $2 ::NUMERIC, $3 ::BIGINT, NOW()) x, + (SELECT unnest($4 ::BYTEA[]) addr) a, + (SELECT unnest($5 ::BYTEA[]) ev) e ON CONFLICT (name, evm_chain_id, address, event) - DO UPDATE SET retention=:retention ::::BIGINT` + DO UPDATE SET retention=$3 ::BIGINT` ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - _, err = o.db.ExecContext(ctx, query, args) + _, err = o.db.ExecContext(ctx, query, filter.Name, o.chainID.String(), filter.Retention, concatBytes(filter.Addresses), concatBytes(filter.EventSigs)) return err } @@ -190,24 +182,19 @@ func (o *DbORM) SelectLatestBlock(ctx context.Context) (*LogPollerBlock, error) } func (o *DbORM) SelectLatestLogByEventSigWithConfs(ctx context.Context, eventSig common.Hash, address common.Address, confs Confirmations) (*Log, error) { - args, err := newQueryArgsForEvent(o.chainID, address, eventSig). - withConfs(confs). 
- toArgs() - if err != nil { - return nil, err - } query := fmt.Sprintf(` SELECT * FROM evm.logs - WHERE evm_chain_id = :evm_chain_id - AND event_sig = :event_sig - AND address = :address + WHERE evm_chain_id = $1 + AND event_sig = $2 + AND address = $3 AND block_number <= %s - ORDER BY (block_number, log_index) DESC LIMIT 1`, nestedBlockNumberQuery(confs)) + ORDER BY (block_number, log_index) DESC LIMIT 1`, + nestedBlockNumberQuery(confs, o.chainID.String())) var l Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err := o.db.GetContext(ctx, &l, query, args); err != nil { + if err := o.db.GetContext(ctx, &l, query, o.chainID.String(), eventSig, address); err != nil { return nil, err } return &l, nil @@ -359,23 +346,15 @@ func (o *DbORM) validateLogs(logs []Log) error { } func (o *DbORM) SelectLogsByBlockRange(ctx context.Context, start, end int64) ([]Log, error) { - args, err := newQueryArgs(o.chainID). - withStartBlock(start). - withEndBlock(end). - toArgs() - if err != nil { - return nil, err - } - var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - err = o.db.SelectContext(ctx, &logs, ` + err := o.db.SelectContext(ctx, &logs, ` SELECT * FROM evm.logs - WHERE evm_chain_id = :evm_chain_id - AND block_number >= :start_block - AND block_number <= :end_block - ORDER BY (block_number, log_index, created_at)`, args) + WHERE evm_chain_id = $1 + AND block_number >= $2 + AND block_number <= $3 + ORDER BY (block_number, log_index, created_at)`, o.chainID.String(), start, end) if err != nil { return nil, err } @@ -384,25 +363,17 @@ func (o *DbORM) SelectLogsByBlockRange(ctx context.Context, start, end int64) ([ // SelectLogs finds the logs in a given block range. func (o *DbORM) SelectLogs(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash) ([]Log, error) { - args, err := newQueryArgsForEvent(o.chainID, address, eventSig). - withStartBlock(start). - withEndBlock(end). 
- toArgs() - if err != nil { - return nil, err - } - var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - err = o.db.SelectContext(ctx, &logs, ` + err := o.db.SelectContext(ctx, &logs, ` SELECT * FROM evm.logs - WHERE evm_chain_id = :evm_chain_id - AND address = :address - AND event_sig = :event_sig - AND block_number >= :start_block - AND block_number <= :end_block - ORDER BY (block_number, log_index)`, args) + WHERE evm_chain_id = $1 + AND address = $2 + AND event_sig = $3 + AND block_number >= $4 + AND block_number <= $5 + ORDER BY (block_number, log_index)`, o.chainID.String(), address, eventSig, start, end) if err != nil { return nil, err } @@ -411,27 +382,20 @@ func (o *DbORM) SelectLogs(ctx context.Context, start, end int64, address common // SelectLogsCreatedAfter finds logs created after some timestamp. func (o *DbORM) SelectLogsCreatedAfter(ctx context.Context, address common.Address, eventSig common.Hash, after time.Time, confs Confirmations) ([]Log, error) { - args, err := newQueryArgsForEvent(o.chainID, address, eventSig). - withBlockTimestampAfter(after). - withConfs(confs). 
- toArgs() - if err != nil { - return nil, err - } - query := fmt.Sprintf(` SELECT * FROM evm.logs - WHERE evm_chain_id = :evm_chain_id - AND address = :address - AND event_sig = :event_sig - AND block_timestamp > :block_timestamp_after + WHERE evm_chain_id = $1 + AND address = $2 + AND event_sig = $3 + AND block_timestamp > $4 AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) + ORDER BY (block_number, log_index)`, + nestedBlockNumberQuery(confs, o.chainID.String())) var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err = o.db.SelectContext(ctx, &logs, query, args); err != nil { + if err := o.db.SelectContext(ctx, &logs, query, o.chainID.String(), address, eventSig, after); err != nil { return nil, err } return logs, nil @@ -440,25 +404,15 @@ func (o *DbORM) SelectLogsCreatedAfter(ctx context.Context, address common.Addre // SelectLogsWithSigs finds the logs in the given block range with the given event signatures // emitted from the given address. func (o *DbORM) SelectLogsWithSigs(ctx context.Context, start, end int64, address common.Address, eventSigs []common.Hash) (logs []Log, err error) { - args, err := newQueryArgs(o.chainID). - withAddress(address). - withEventSigArray(eventSigs). - withStartBlock(start). - withEndBlock(end). 
- toArgs() - if err != nil { - return nil, err - } - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() err = o.db.SelectContext(ctx, &logs, ` SELECT * FROM evm.logs - WHERE evm_chain_id = :evm_chain_id - AND address = :address - AND event_sig = ANY(:event_sig_array) - AND block_number BETWEEN :start_block AND :end_block - ORDER BY (block_number, log_index)`, args) + WHERE evm_chain_id = $1 + AND address = $2 + AND event_sig = ANY($3) + AND block_number BETWEEN $4 AND $5 + ORDER BY (block_number, log_index)`, o.chainID.String(), address, concatBytes(eventSigs), start, end) if errors.Is(err, sql.ErrNoRows) { return nil, nil } @@ -483,32 +437,23 @@ func (o *DbORM) GetBlocksRange(ctx context.Context, start int64, end int64) ([]L // SelectLatestLogEventSigsAddrsWithConfs finds the latest log by (address, event) combination that matches a list of Addresses and list of events func (o *DbORM) SelectLatestLogEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, addresses []common.Address, eventSigs []common.Hash, confs Confirmations) ([]Log, error) { - args, err := newQueryArgs(o.chainID). - withAddressArray(addresses). - withEventSigArray(eventSigs). - withStartBlock(fromBlock). - withConfs(confs). - toArgs() - if err != nil { - return nil, err - } - + // TODO: cant convert byteArray!? 
query := fmt.Sprintf(` SELECT * FROM evm.logs WHERE (block_number, address, event_sig) IN ( SELECT MAX(block_number), address, event_sig FROM evm.logs - WHERE evm_chain_id = :evm_chain_id - AND event_sig = ANY(:event_sig_array) - AND address = ANY(:address_array) - AND block_number > :start_block + WHERE evm_chain_id = $1 + AND event_sig = ANY($2) + AND address = ANY($3) + AND block_number > $4 AND block_number <= %s GROUP BY event_sig, address ) - ORDER BY block_number ASC`, nestedBlockNumberQuery(confs)) + ORDER BY block_number ASC`, nestedBlockNumberQuery(confs, o.chainID.String())) var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err := o.db.SelectContext(ctx, &logs, query, args); err != nil { + if err := o.db.SelectContext(ctx, &logs, query, o.chainID.String(), concatBytes(eventSigs), concatBytes(addresses), fromBlock); err != nil { return nil, errors.Wrap(err, "failed to execute query") } return logs, nil @@ -516,187 +461,127 @@ func (o *DbORM) SelectLatestLogEventSigsAddrsWithConfs(ctx context.Context, from // SelectLatestBlockByEventSigsAddrsWithConfs finds the latest block number that matches a list of Addresses and list of events. It returns 0 if there is no matching block func (o *DbORM) SelectLatestBlockByEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) (int64, error) { - args, err := newQueryArgs(o.chainID). - withEventSigArray(eventSigs). - withAddressArray(addresses). - withStartBlock(fromBlock). - withConfs(confs). 
- toArgs() - if err != nil { - return 0, err - } query := fmt.Sprintf(` SELECT COALESCE(MAX(block_number), 0) FROM evm.logs - WHERE evm_chain_id = :evm_chain_id - AND event_sig = ANY(:event_sig_array) - AND address = ANY(:address_array) - AND block_number > :start_block - AND block_number <= %s`, nestedBlockNumberQuery(confs)) + WHERE evm_chain_id = $1 + AND event_sig = ANY($2) + AND address = ANY($3) + AND block_number > $4 + AND block_number <= %s`, nestedBlockNumberQuery(confs, o.chainID.String())) var blockNumber int64 ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err := o.db.GetContext(ctx, &blockNumber, query, args); err != nil { + if err := o.db.GetContext(ctx, &blockNumber, query, o.chainID.String(), concatBytes(eventSigs), concatBytes(addresses), fromBlock); err != nil { return 0, err } return blockNumber, nil } func (o *DbORM) SelectLogsDataWordRange(ctx context.Context, address common.Address, eventSig common.Hash, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations) ([]Log, error) { - args, err := newQueryArgsForEvent(o.chainID, address, eventSig). - withWordIndex(wordIndex). - withWordValueMin(wordValueMin). - withWordValueMax(wordValueMax). - withConfs(confs). 
- toArgs() - if err != nil { - return nil, err - } query := fmt.Sprintf(`SELECT * FROM evm.logs - WHERE evm_chain_id = :evm_chain_id - AND address = :address - AND event_sig = :event_sig - AND substring(data from 32*:word_index+1 for 32) >= :word_value_min - AND substring(data from 32*:word_index+1 for 32) <= :word_value_max + WHERE evm_chain_id = $1 + AND address = $2 + AND event_sig = $3 + AND substring(data from 32*$4+1 for 32) >= $5 + AND substring(data from 32*$4+1 for 32) <= $6 AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, o.chainID.String())) var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err := o.db.SelectContext(ctx, &logs, query, args); err != nil { + if err := o.db.SelectContext(ctx, &logs, query, o.chainID.String(), address, eventSig, wordIndex, wordValueMin, wordValueMax); err != nil { return nil, err } return logs, nil } func (o *DbORM) SelectLogsDataWordGreaterThan(ctx context.Context, address common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, confs Confirmations) ([]Log, error) { - args, err := newQueryArgsForEvent(o.chainID, address, eventSig). - withWordIndex(wordIndex). - withWordValueMin(wordValueMin). - withConfs(confs). 
- toArgs() - if err != nil { - return nil, err - } query := fmt.Sprintf(` SELECT * FROM evm.logs - WHERE evm_chain_id = :evm_chain_id - AND address = :address - AND event_sig = :event_sig - AND substring(data from 32*:word_index+1 for 32) >= :word_value_min + WHERE evm_chain_id = $1 + AND address = $2 + AND event_sig = $3 + AND substring(data from 32*$4+1 for 32) >= $5 AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, o.chainID.String())) var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err = o.db.SelectContext(ctx, &logs, query, args); err != nil { + if err := o.db.SelectContext(ctx, &logs, query, o.chainID.String(), address, eventSig, wordIndex, wordValueMin); err != nil { return nil, err } return logs, nil } func (o *DbORM) SelectLogsDataWordBetween(ctx context.Context, address common.Address, eventSig common.Hash, wordIndexMin int, wordIndexMax int, wordValue common.Hash, confs Confirmations) ([]Log, error) { - args, err := newQueryArgsForEvent(o.chainID, address, eventSig). - withWordIndexMin(wordIndexMin). - withWordIndexMax(wordIndexMax). - withWordValue(wordValue). - withConfs(confs). 
- toArgs() - if err != nil { - return nil, err - } query := fmt.Sprintf(` SELECT * FROM evm.logs - WHERE evm_chain_id = :evm_chain_id - AND address = :address - AND event_sig = :event_sig - AND substring(data from 32*:word_index_min+1 for 32) <= :word_value - AND substring(data from 32*:word_index_max+1 for 32) >= :word_value + WHERE evm_chain_id = $1 + AND address = $2 + AND event_sig = $3 + AND substring(data from 32*$4+1 for 32) <= $5 + AND substring(data from 32*$6+1 for 32) >= $5 AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, o.chainID.String())) var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err = o.db.SelectContext(ctx, &logs, query, args); err != nil { + if err := o.db.SelectContext(ctx, &logs, query, o.chainID.String(), address, eventSig, wordIndexMin, wordValue, wordIndexMax); err != nil { return nil, err } return logs, nil } func (o *DbORM) SelectIndexedLogsTopicGreaterThan(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValueMin common.Hash, confs Confirmations) ([]Log, error) { - args, err := newQueryArgsForEvent(o.chainID, address, eventSig). - withTopicIndex(topicIndex). - withTopicValueMin(topicValueMin). - withConfs(confs). 
- toArgs() - if err != nil { - return nil, err - } query := fmt.Sprintf(` SELECT * FROM evm.logs - WHERE evm_chain_id = :evm_chain_id - AND address = :address - AND event_sig = :event_sig - AND topics[:topic_index] >= :topic_value_min + WHERE evm_chain_id = $1 + AND address = $2 + AND event_sig = $3 + AND topics[$4] >= $5 AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, o.chainID.String())) var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err = o.db.SelectContext(ctx, &logs, query, args); err != nil { + if err := o.db.SelectContext(ctx, &logs, query, o.chainID.String(), address, eventSig, topicIndex, topicValueMin); err != nil { return nil, err } return logs, nil } func (o *DbORM) SelectIndexedLogsTopicRange(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValueMin, topicValueMax common.Hash, confs Confirmations) ([]Log, error) { - args, err := newQueryArgsForEvent(o.chainID, address, eventSig). - withTopicIndex(topicIndex). - withTopicValueMin(topicValueMin). - withTopicValueMax(topicValueMax). - withConfs(confs). 
- toArgs() - if err != nil { - return nil, err - } query := fmt.Sprintf(` SELECT * FROM evm.logs - WHERE evm_chain_id = :evm_chain_id - AND address = :address - AND event_sig = :event_sig - AND topics[:topic_index] >= :topic_value_min - AND topics[:topic_index] <= :topic_value_max + WHERE evm_chain_id = $1 + AND address = $2 + AND event_sig = $3 + AND topics[$4] >= $5 + AND topics[$4] <= $6 AND block_number <= %s - ORDER BY (evm.logs.block_number, evm.logs.log_index)`, nestedBlockNumberQuery(confs)) + ORDER BY (evm.logs.block_number, evm.logs.log_index)`, nestedBlockNumberQuery(confs, o.chainID.String())) var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err := o.db.SelectContext(ctx, &logs, query, args); err != nil { + if err := o.db.SelectContext(ctx, &logs, query, o.chainID.String(), address, eventSig, topicIndex, topicValueMin, topicValueMax); err != nil { return nil, err } return logs, nil } func (o *DbORM) SelectIndexedLogs(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, confs Confirmations) ([]Log, error) { - args, err := newQueryArgsForEvent(o.chainID, address, eventSig). - withTopicIndex(topicIndex). - withTopicValues(topicValues). - withConfs(confs). 
- toArgs() - if err != nil { - return nil, err - } query := fmt.Sprintf(` SELECT * FROM evm.logs - WHERE evm_chain_id = :evm_chain_id - AND address = :address - AND event_sig = :event_sig - AND topics[:topic_index] = ANY(:topic_values) + WHERE evm_chain_id = $1 + AND address = $2 + AND event_sig = $3 + AND topics[$4] = ANY($5) AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, o.chainID.String())) var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err := o.db.SelectContext(ctx, &logs, query, args); err != nil { + if err := o.db.SelectContext(ctx, &logs, query, o.chainID.String(), address, eventSig, topicIndex, concatBytes(topicValues)); err != nil { return nil, err } return logs, nil @@ -704,27 +589,19 @@ func (o *DbORM) SelectIndexedLogs(ctx context.Context, address common.Address, e // SelectIndexedLogsByBlockRange finds the indexed logs in a given block range. func (o *DbORM) SelectIndexedLogsByBlockRange(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash) ([]Log, error) { - args, err := newQueryArgsForEvent(o.chainID, address, eventSig). - withTopicIndex(topicIndex). - withTopicValues(topicValues). - withStartBlock(start). - withEndBlock(end). 
- toArgs() - if err != nil { - return nil, err - } var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - err = o.db.SelectContext(ctx, &logs, ` + err := o.db.SelectContext(ctx, &logs, ` SELECT * FROM evm.logs - WHERE evm_chain_id = :evm_chain_id - AND address = :address - AND event_sig = :event_sig - AND topics[:topic_index] = ANY(:topic_values) - AND block_number >= :start_block - AND block_number <= :end_block - ORDER BY (block_number, log_index)`, args) + WHERE evm_chain_id = $1 + AND address = $2 + AND event_sig = $3 + AND topics[$4] = ANY($5) + AND block_number >= $6 + AND block_number <= $7 + ORDER BY (block_number, log_index)`, + o.chainID.String(), address, eventSig, topicIndex, concatBytes(topicValues), start, end) if err != nil { return nil, err } @@ -732,54 +609,36 @@ func (o *DbORM) SelectIndexedLogsByBlockRange(ctx context.Context, start, end in } func (o *DbORM) SelectIndexedLogsCreatedAfter(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations) ([]Log, error) { - args, err := newQueryArgsForEvent(o.chainID, address, eventSig). - withBlockTimestampAfter(after). - withConfs(confs). - withTopicIndex(topicIndex). - withTopicValues(topicValues). 
- toArgs() - if err != nil { - return nil, err - } - query := fmt.Sprintf(` SELECT * FROM evm.logs - WHERE evm_chain_id = :evm_chain_id - AND address = :address - AND event_sig = :event_sig - AND topics[:topic_index] = ANY(:topic_values) - AND block_timestamp > :block_timestamp_after + WHERE evm_chain_id = $1 + AND address = $2 + AND event_sig = $3 + AND topics[$4] = ANY($5) + AND block_timestamp > $6 AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, o.chainID.String())) var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err = o.db.SelectContext(ctx, &logs, query, args); err != nil { + if err := o.db.SelectContext(ctx, &logs, query, o.chainID.String(), address, eventSig, topicIndex, concatBytes(topicValues), after); err != nil { return nil, err } return logs, nil } func (o *DbORM) SelectIndexedLogsByTxHash(ctx context.Context, address common.Address, eventSig common.Hash, txHash common.Hash) ([]Log, error) { - args, err := newQueryArgs(o.chainID). - withTxHash(txHash). - withAddress(address). - withEventSig(eventSig). 
- toArgs() - if err != nil { - return nil, err - } var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - err = o.db.SelectContext(ctx, &logs, ` + err := o.db.SelectContext(ctx, &logs, ` SELECT * FROM evm.logs - WHERE evm_chain_id = :evm_chain_id - AND address = :address - AND event_sig = :event_sig - AND tx_hash = :tx_hash - ORDER BY (block_number, log_index)`, args) + WHERE evm_chain_id = $1 + AND address = $2 + AND event_sig = $3 + AND tx_hash = $4 + ORDER BY (block_number, log_index)`, o.chainID.String(), address, eventSig, txHash) if err != nil { return nil, err } @@ -788,61 +647,48 @@ func (o *DbORM) SelectIndexedLogsByTxHash(ctx context.Context, address common.Ad // SelectIndexedLogsWithSigsExcluding query's for logs that have signature A and exclude logs that have a corresponding signature B, matching is done based on the topic index both logs should be inside the block range and have the minimum number of confirmations func (o *DbORM) SelectIndexedLogsWithSigsExcluding(ctx context.Context, sigA, sigB common.Hash, topicIndex int, address common.Address, startBlock, endBlock int64, confs Confirmations) ([]Log, error) { - args, err := newQueryArgs(o.chainID). - withAddress(address). - withTopicIndex(topicIndex). - withStartBlock(startBlock). - withEndBlock(endBlock). - withCustomHashArg("sigA", sigA). - withCustomHashArg("sigB", sigB). - withConfs(confs). 
- toArgs() - if err != nil { - return nil, err - } - - nestedQuery := nestedBlockNumberQuery(confs) + nestedQuery := nestedBlockNumberQuery(confs, o.chainID.String()) query := fmt.Sprintf(` SELECT * FROM evm.logs - WHERE evm_chain_id = :evm_chain_id - AND address = :address - AND event_sig = :sigA - AND block_number BETWEEN :start_block AND :end_block + WHERE evm_chain_id = $1 + AND address = $2 + AND event_sig = $3 + AND block_number BETWEEN $5 AND $6 AND block_number <= %s EXCEPT SELECT a.* FROM evm.logs AS a INNER JOIN evm.logs B ON a.evm_chain_id = b.evm_chain_id AND a.address = b.address - AND a.topics[:topic_index] = b.topics[:topic_index] - AND a.event_sig = :sigA - AND b.event_sig = :sigB - AND b.block_number BETWEEN :start_block AND :end_block + AND a.topics[$7] = b.topics[$7] + AND a.event_sig = $3 + AND b.event_sig = $4 + AND b.block_number BETWEEN $5 AND $6 AND b.block_number <= %s ORDER BY block_number,log_index ASC`, nestedQuery, nestedQuery) var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err := o.db.SelectContext(ctx, &logs, query, args); err != nil { + if err := o.db.SelectContext(ctx, &logs, query, o.chainID.String(), address, sigA, sigB, startBlock, endBlock, topicIndex); err != nil { return nil, err } return logs, nil } -func nestedBlockNumberQuery(confs Confirmations) string { +func nestedBlockNumberQuery(confs Confirmations, chainID string) string { if confs == Finalized { - return ` + return fmt.Sprintf(` (SELECT finalized_block_number FROM evm.log_poller_blocks - WHERE evm_chain_id = :evm_chain_id - ORDER BY block_number DESC LIMIT 1) ` + WHERE evm_chain_id = %s + ORDER BY block_number DESC LIMIT 1) `, chainID) } // Intentionally wrap with greatest() function and don't return negative block numbers when :confs > :block_number // It doesn't impact logic of the outer query, because block numbers are never less or equal to 0 (guarded by log_poller_blocks_block_number_check) - return ` - (SELECT 
greatest(block_number - :confs, 0) + return fmt.Sprintf(` + (SELECT greatest(block_number - %d, 0) FROM evm.log_poller_blocks - WHERE evm_chain_id = :evm_chain_id - ORDER BY block_number DESC LIMIT 1) ` + WHERE evm_chain_id = %s + ORDER BY block_number DESC LIMIT 1) `, confs, chainID) } diff --git a/core/chains/evm/logpoller/orm_test.go b/core/chains/evm/logpoller/orm_test.go index 400551211de..03145aaaf0a 100644 --- a/core/chains/evm/logpoller/orm_test.go +++ b/core/chains/evm/logpoller/orm_test.go @@ -332,7 +332,7 @@ func TestORM(t *testing.T) { // With no blocks, should be an error _, err = o1.SelectLatestLogByEventSigWithConfs(ctx, topic, common.HexToAddress("0x1234"), 0) require.Error(t, err) - assert.True(t, errors.Is(err, sql.ErrNoRows)) + require.True(t, errors.Is(err, sql.ErrNoRows)) // With block 10, only 0 confs should work require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1234"), 10, time.Now(), 0)) log, err := o1.SelectLatestLogByEventSigWithConfs(ctx, topic, common.HexToAddress("0x1234"), 0) @@ -1393,6 +1393,7 @@ func TestInsertLogsWithBlock(t *testing.T) { logs, logsErr := o.SelectLogs(ctx, 0, math.MaxInt, address, event) block, blockErr := o.SelectLatestBlock(ctx) + fmt.Println("block: ", block.BlockNumber, "blockErr: ", blockErr) if tt.shouldRollback { assert.Error(t, insertError) From 9b0f2a12320bf5b65a4e2dcefe99c6fb8d800495 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 14 Feb 2024 15:11:06 -0500 Subject: [PATCH 08/65] Fix logpoller tests --- core/chains/evm/logpoller/log_poller.go | 2 +- core/chains/evm/logpoller/log_poller_test.go | 15 +- core/chains/evm/logpoller/orm.go | 229 +++++++++++-------- core/chains/evm/logpoller/orm_test.go | 17 +- core/chains/evm/logpoller/query.go | 4 + core/chains/evm/logpoller/query_test.go | 3 + core/web/evm_forwarders_controller.go | 2 +- 7 files changed, 157 insertions(+), 115 deletions(-) diff --git a/core/chains/evm/logpoller/log_poller.go b/core/chains/evm/logpoller/log_poller.go index 
2c1c674fc29..1f9814f11f1 100644 --- a/core/chains/evm/logpoller/log_poller.go +++ b/core/chains/evm/logpoller/log_poller.go @@ -1059,7 +1059,7 @@ func (lp *logPoller) GetBlocksRange(ctx context.Context, numbers []uint64) ([]Lo blocksFound := make(map[uint64]LogPollerBlock) minRequestedBlock := int64(mathutil.Min(numbers[0], numbers[1:]...)) maxRequestedBlock := int64(mathutil.Max(numbers[0], numbers[1:]...)) - lpBlocks, err := lp.orm.GetBlocksRange(lp.ctx, minRequestedBlock, maxRequestedBlock) + lpBlocks, err := lp.orm.GetBlocksRange(ctx, minRequestedBlock, maxRequestedBlock) if err != nil { lp.lggr.Warnw("Error while retrieving blocks from log pollers blocks table. Falling back to RPC...", "requestedBlocks", numbers, "err", err) } else { diff --git a/core/chains/evm/logpoller/log_poller_test.go b/core/chains/evm/logpoller/log_poller_test.go index aa7c966f41a..bbd21eb5af5 100644 --- a/core/chains/evm/logpoller/log_poller_test.go +++ b/core/chains/evm/logpoller/log_poller_test.go @@ -106,7 +106,7 @@ func BenchmarkSelectLogsCreatedAfter(b *testing.B) { func TestPopulateLoadedDB(t *testing.T) { t.Skip("Only for local load testing and query analysis") _, db := heavyweight.FullTestDBV2(t, nil) - ctx := context.Background() + ctx := testutils.Context(t) chainID := big.NewInt(137) o := logpoller.NewORM(big.NewInt(137), db, logger.Test(t)) @@ -127,31 +127,30 @@ func TestPopulateLoadedDB(t *testing.T) { require.NoError(t, o.InsertBlock(ctx, common.HexToHash("0x10"), 1000000, time.Now(), 0)) func() { defer logRuntime(t, time.Now()) - lgs, err1 := o.SelectLogsDataWordRange(ctx, address1, event1, 0, logpoller.EvmWord(500000), logpoller.EvmWord(500020), 0) + lgs, err1 := o.SelectLogsDataWordRange(ctx, address1, event1, 0, logpoller.EvmWord(50000), logpoller.EvmWord(50020), 0) require.NoError(t, err1) // 10 since every other log is for address1 - assert.Equal(t, 10, len(lgs)) + require.Equal(t, 10, len(lgs)) }() func() { defer logRuntime(t, time.Now()) - lgs, err1 := 
o.SelectIndexedLogs(ctx, address2, event1, 1, []common.Hash{logpoller.EvmWord(500000), logpoller.EvmWord(500020)}, 0) + lgs, err1 := o.SelectIndexedLogs(ctx, address2, event1, 1, []common.Hash{logpoller.EvmWord(50000), logpoller.EvmWord(50020)}, 0) require.NoError(t, err1) - assert.Equal(t, 2, len(lgs)) + require.Equal(t, 2, len(lgs)) }() func() { defer logRuntime(t, time.Now()) - lgs, err1 := o.SelectIndexedLogsTopicRange(ctx, address1, event1, 1, logpoller.EvmWord(500000), logpoller.EvmWord(500020), 0) + lgs, err1 := o.SelectIndexedLogsTopicRange(ctx, address1, event1, 1, logpoller.EvmWord(50000), logpoller.EvmWord(50020), 0) require.NoError(t, err1) - assert.Equal(t, 10, len(lgs)) + require.Equal(t, 10, len(lgs)) }() } func TestLogPoller_Integration(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) th.Client.Commit() // Block 2. Ensure we have finality number of blocks - ctx := context.Background() require.NoError(t, th.LogPoller.RegisterFilter(logpoller.Filter{"Integration test", []common.Hash{EmitterABI.Events["Log1"].ID}, []common.Address{th.EmitterAddress1}, 0})) require.Len(t, th.LogPoller.Filter(nil, nil, nil).Addresses, 1) diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index 86e69570fc2..4709a828e29 100644 --- a/core/chains/evm/logpoller/orm.go +++ b/core/chains/evm/logpoller/orm.go @@ -4,6 +4,7 @@ import ( "context" "database/sql" "fmt" + "github.com/lib/pq" "math/big" "time" @@ -66,7 +67,7 @@ type DbORM struct { var _ ORM = &DbORM{} // NewORM creates a DbORM scoped to chainID. 
-func NewORM(chainID *big.Int, db *sqlx.DB, lggr logger.Logger) *DbORM { +func NewORM(chainID *big.Int, db sqlutil.Queryer, lggr logger.Logger) *DbORM { return &DbORM{ chainID: chainID, db: db, @@ -74,6 +75,13 @@ func NewORM(chainID *big.Int, db *sqlx.DB, lggr logger.Logger) *DbORM { } } +func (o *DbORM) Transaction(ctx context.Context, fn func(*DbORM) error) (err error) { + return sqlutil.Transact(ctx, o.new, o.db, nil, fn) +} + +// new returns a NewORM like o, but backed by q. +func (o *DbORM) new(q sqlutil.Queryer) *DbORM { return NewORM(o.chainID, q, o.lggr) } + // InsertBlock is idempotent to support replays. func (o *DbORM) InsertBlock(ctx context.Context, blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, finalizedBlock int64) error { query := `INSERT INTO evm.log_poller_blocks (evm_chain_id, block_hash, block_number, block_timestamp, finalized_block_number, created_at) @@ -81,9 +89,7 @@ func (o *DbORM) InsertBlock(ctx context.Context, blockHash common.Hash, blockNum ON CONFLICT DO NOTHING` ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - // TODO: Why do we get a transaction failure?? Not using a transaction.. 
- // TODO: Maybe because the previous transaction failed for SelectLatestLogByEventSigWithConfs - _, err := o.db.ExecContext(ctx, query, o.chainID.String(), blockHash, blockNumber, blockTimestamp, finalizedBlock) + _, err := o.db.ExecContext(ctx, query, ubig.New(o.chainID), blockHash.Bytes(), blockNumber, blockTimestamp, finalizedBlock) return err } @@ -106,7 +112,7 @@ func (o *DbORM) InsertFilter(ctx context.Context, filter Filter) (err error) { ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - _, err = o.db.ExecContext(ctx, query, filter.Name, o.chainID.String(), filter.Retention, concatBytes(filter.Addresses), concatBytes(filter.EventSigs)) + _, err = o.db.ExecContext(ctx, query, filter.Name, ubig.New(o.chainID), filter.Retention, concatBytes(filter.Addresses), concatBytes(filter.EventSigs)) return err } @@ -123,31 +129,20 @@ func (o *DbORM) DeleteFilter(ctx context.Context, name string) error { // LoadFilters returns all filters for this chain func (o *DbORM) LoadFilters(ctx context.Context) (map[string]Filter, error) { - rows := make([]Filter, 0) - query := `SELECT name, ARRAY_AGG(DISTINCT address)::BYTEA[] AS addresses, ARRAY_AGG(DISTINCT event)::BYTEA[] AS event_sigs, MAX(retention) AS retention FROM evm.log_poller_filters WHERE evm_chain_id = $1 GROUP BY name` - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() + var rows []Filter err := o.db.SelectContext(ctx, &rows, query, ubig.New(o.chainID)) - /* - err := q.Select(&rows, `SELECT name, - ARRAY_AGG(DISTINCT address)::BYTEA[] AS addresses, - ARRAY_AGG(DISTINCT event)::BYTEA[] AS event_sigs, - MAX(retention) AS retention - FROM evm.log_poller_filters WHERE evm_chain_id = $1 - GROUP BY name`, ubig.New(o.chainID)) - */ filters := make(map[string]Filter) for _, filter := range rows { filters[filter.Name] = filter } - return filters, err } @@ -155,7 +150,7 @@ func (o *DbORM) SelectBlockByHash(ctx context.Context, hash common.Hash) (*LogPo var b LogPollerBlock ctx, 
cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err := o.db.GetContext(ctx, &b, `SELECT * FROM evm.log_poller_blocks WHERE block_hash = $1 AND evm_chain_id = $2`, hash, ubig.New(o.chainID)); err != nil { + if err := o.db.GetContext(ctx, &b, `SELECT * FROM evm.log_poller_blocks WHERE block_hash = $1 AND evm_chain_id = $2`, hash.Bytes(), ubig.New(o.chainID)); err != nil { return nil, err } return &b, nil @@ -175,7 +170,7 @@ func (o *DbORM) SelectLatestBlock(ctx context.Context) (*LogPollerBlock, error) var b LogPollerBlock ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err := o.db.GetContext(ctx, &b, `SELECT * FROM evm.log_poller_blocks WHERE evm_chain_id = $1 ORDER BY block_number DESC LIMIT 1`, o.chainID.String()); err != nil { + if err := o.db.GetContext(ctx, &b, `SELECT * FROM evm.log_poller_blocks WHERE evm_chain_id = $1 ORDER BY block_number DESC LIMIT 1`, ubig.New(o.chainID)); err != nil { return nil, err } return &b, nil @@ -189,12 +184,12 @@ func (o *DbORM) SelectLatestLogByEventSigWithConfs(ctx context.Context, eventSig AND address = $3 AND block_number <= %s ORDER BY (block_number, log_index) DESC LIMIT 1`, - nestedBlockNumberQuery(confs, o.chainID.String())) + nestedBlockNumberQuery(confs, ubig.New(o.chainID))) var l Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err := o.db.GetContext(ctx, &l, query, o.chainID.String(), eventSig, address); err != nil { + if err := o.db.GetContext(ctx, &l, query, ubig.New(o.chainID), eventSig.Bytes(), address); err != nil { return nil, err } return &l, nil @@ -213,33 +208,14 @@ func (o *DbORM) DeleteLogsAndBlocksAfter(ctx context.Context, start int64) error // fast and should not slow down the log readers. ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - - // TODO: Is Transact working?? Why are tests failing - performInsert := func(tx *sqlx.Tx) error { - args, err := newQueryArgs(o.chainID). 
- withStartBlock(start). - toArgs() - if err != nil { - o.lggr.Error("Cant build args for DeleteLogsAndBlocksAfter queries", "err", err) - return err - } - - _, err = tx.NamedExec(`DELETE FROM evm.log_poller_blocks WHERE block_number >= :start_block AND evm_chain_id = :evm_chain_id`, args) + return o.Transaction(ctx, func(orm *DbORM) error { + _, err := o.db.ExecContext(ctx, `DELETE FROM evm.logs WHERE block_number >= $1 AND evm_chain_id = $2`, start, ubig.New(o.chainID)) if err != nil { - o.lggr.Warnw("Unable to clear reorged blocks, retrying", "err", err) return err } - - _, err = tx.NamedExec(`DELETE FROM evm.logs WHERE block_number >= :start_block AND evm_chain_id = :evm_chain_id`, args) - if err != nil { - o.lggr.Warnw("Unable to clear reorged logs, retrying", "err", err) - return err - } - return nil - } - return sqlutil.Transact[*sqlx.Tx](ctx, func(q sqlutil.Queryer) *sqlx.Tx { - return q.(*sqlx.Tx) - }, o.db, nil, performInsert) + _, err = o.db.ExecContext(ctx, `DELETE FROM evm.log_poller_blocks WHERE block_number >= $1 AND evm_chain_id = $2`, start, ubig.New(o.chainID)) + return err + }) } type Exp struct { @@ -274,12 +250,9 @@ func (o *DbORM) InsertLogs(ctx context.Context, logs []Log) error { ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - performInsert := func(tx *sqlx.Tx) error { - return o.insertLogsWithinTx(ctx, logs, tx) - } - return sqlutil.Transact[*sqlx.Tx](ctx, func(q sqlutil.Queryer) *sqlx.Tx { - return q.(*sqlx.Tx) - }, o.db, nil, performInsert) + return o.Transaction(ctx, func(orm *DbORM) error { + return o.insertLogsWithinTx(ctx, logs, orm.db.(*sqlx.Tx)) + }) } func (o *DbORM) InsertLogsWithBlock(ctx context.Context, logs []Log, block LogPollerBlock) error { @@ -295,15 +268,22 @@ func (o *DbORM) InsertLogsWithBlock(ctx context.Context, logs []Log, block LogPo ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() // Block and logs goes with the same TX to ensure atomicity - performInsert := func(tx 
*sqlx.Tx) error { - if err := o.InsertBlock(ctx, block.BlockHash, block.BlockNumber, block.BlockTimestamp, block.FinalizedBlockNumber); err != nil { + return o.Transaction(ctx, func(orm *DbORM) error { + if err := o.insertBlockWithinTx(ctx, orm.db.(*sqlx.Tx), block.BlockHash, block.BlockNumber, block.BlockTimestamp, block.FinalizedBlockNumber); err != nil { return err } - return o.insertLogsWithinTx(ctx, logs, tx) - } - return sqlutil.Transact[*sqlx.Tx](ctx, func(q sqlutil.Queryer) *sqlx.Tx { - return q.(*sqlx.Tx) - }, o.db, nil, performInsert) + return o.insertLogsWithinTx(ctx, logs, orm.db.(*sqlx.Tx)) + }) +} + +func (o *DbORM) insertBlockWithinTx(ctx context.Context, tx *sqlx.Tx, blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, finalizedBlock int64) error { + query := `INSERT INTO evm.log_poller_blocks (evm_chain_id, block_hash, block_number, block_timestamp, finalized_block_number, created_at) + VALUES ($1, $2, $3, $4, $5, NOW()) + ON CONFLICT DO NOTHING` + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + _, err := tx.ExecContext(ctx, query, ubig.New(o.chainID), blockHash.Bytes(), blockNumber, blockTimestamp, finalizedBlock) + return err } func (o *DbORM) insertLogsWithinTx(ctx context.Context, logs []Log, tx *sqlx.Tx) error { @@ -354,7 +334,7 @@ func (o *DbORM) SelectLogsByBlockRange(ctx context.Context, start, end int64) ([ WHERE evm_chain_id = $1 AND block_number >= $2 AND block_number <= $3 - ORDER BY (block_number, log_index, created_at)`, o.chainID.String(), start, end) + ORDER BY (block_number, log_index, created_at)`, ubig.New(o.chainID), start, end) if err != nil { return nil, err } @@ -373,7 +353,7 @@ func (o *DbORM) SelectLogs(ctx context.Context, start, end int64, address common AND event_sig = $3 AND block_number >= $4 AND block_number <= $5 - ORDER BY (block_number, log_index)`, o.chainID.String(), address, eventSig, start, end) + ORDER BY (block_number, log_index)`, ubig.New(o.chainID), address, 
eventSig.Bytes(), start, end) if err != nil { return nil, err } @@ -390,12 +370,12 @@ func (o *DbORM) SelectLogsCreatedAfter(ctx context.Context, address common.Addre AND block_timestamp > $4 AND block_number <= %s ORDER BY (block_number, log_index)`, - nestedBlockNumberQuery(confs, o.chainID.String())) + nestedBlockNumberQuery(confs, ubig.New(o.chainID))) var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err := o.db.SelectContext(ctx, &logs, query, o.chainID.String(), address, eventSig, after); err != nil { + if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), after); err != nil { return nil, err } return logs, nil @@ -412,7 +392,7 @@ func (o *DbORM) SelectLogsWithSigs(ctx context.Context, start, end int64, addres AND address = $2 AND event_sig = ANY($3) AND block_number BETWEEN $4 AND $5 - ORDER BY (block_number, log_index)`, o.chainID.String(), address, concatBytes(eventSigs), start, end) + ORDER BY (block_number, log_index)`, ubig.New(o.chainID), address, concatBytes(eventSigs), start, end) if errors.Is(err, sql.ErrNoRows) { return nil, nil } @@ -428,7 +408,7 @@ func (o *DbORM) GetBlocksRange(ctx context.Context, start int64, end int64) ([]L WHERE block_number >= $1 AND block_number <= $2 AND evm_chain_id = $3 - ORDER BY block_number ASC`, start, end, o.chainID.String()) + ORDER BY block_number ASC`, start, end, ubig.New(o.chainID)) if err != nil { return nil, err } @@ -448,12 +428,12 @@ func (o *DbORM) SelectLatestLogEventSigsAddrsWithConfs(ctx context.Context, from AND block_number <= %s GROUP BY event_sig, address ) - ORDER BY block_number ASC`, nestedBlockNumberQuery(confs, o.chainID.String())) + ORDER BY block_number ASC`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err := o.db.SelectContext(ctx, &logs, query, o.chainID.String(), concatBytes(eventSigs), 
concatBytes(addresses), fromBlock); err != nil { + if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), concatBytes(eventSigs), concatBytes(addresses), fromBlock); err != nil { return nil, errors.Wrap(err, "failed to execute query") } return logs, nil @@ -467,29 +447,29 @@ func (o *DbORM) SelectLatestBlockByEventSigsAddrsWithConfs(ctx context.Context, AND event_sig = ANY($2) AND address = ANY($3) AND block_number > $4 - AND block_number <= %s`, nestedBlockNumberQuery(confs, o.chainID.String())) + AND block_number <= %s`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) var blockNumber int64 ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err := o.db.GetContext(ctx, &blockNumber, query, o.chainID.String(), concatBytes(eventSigs), concatBytes(addresses), fromBlock); err != nil { + if err := o.db.GetContext(ctx, &blockNumber, query, ubig.New(o.chainID), concatBytes(eventSigs), concatBytes(addresses), fromBlock); err != nil { return 0, err } return blockNumber, nil } func (o *DbORM) SelectLogsDataWordRange(ctx context.Context, address common.Address, eventSig common.Hash, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations) ([]Log, error) { - query := fmt.Sprintf(`SELECT * FROM evm.logs - WHERE evm_chain_id = $1 - AND address = $2 - AND event_sig = $3 - AND substring(data from 32*$4+1 for 32) >= $5 - AND substring(data from 32*$4+1 for 32) <= $6 - AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, o.chainID.String())) + query := fmt.Sprintf(`SELECT * FROM evm.logs + WHERE evm_chain_id = $1 + AND address = $2 + AND event_sig = $3 + AND substring(data from 32*$4+1 for 32) >= $5 + AND substring(data from 32*$4+1 for 32) <= $6 + AND block_number <= %s + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err := o.db.SelectContext(ctx, 
&logs, query, o.chainID.String(), address, eventSig, wordIndex, wordValueMin, wordValueMax); err != nil { + if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), wordIndex, wordValueMin.Bytes(), wordValueMax.Bytes()); err != nil { return nil, err } return logs, nil @@ -503,11 +483,11 @@ func (o *DbORM) SelectLogsDataWordGreaterThan(ctx context.Context, address commo AND event_sig = $3 AND substring(data from 32*$4+1 for 32) >= $5 AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, o.chainID.String())) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err := o.db.SelectContext(ctx, &logs, query, o.chainID.String(), address, eventSig, wordIndex, wordValueMin); err != nil { + if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), wordIndex, wordValueMin.Bytes()); err != nil { return nil, err } return logs, nil @@ -522,17 +502,22 @@ func (o *DbORM) SelectLogsDataWordBetween(ctx context.Context, address common.Ad AND substring(data from 32*$4+1 for 32) <= $5 AND substring(data from 32*$6+1 for 32) >= $5 AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, o.chainID.String())) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err := o.db.SelectContext(ctx, &logs, query, o.chainID.String(), address, eventSig, wordIndexMin, wordValue, wordIndexMax); err != nil { + if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), wordIndexMin, wordValue.Bytes(), wordIndexMax); err != nil { return nil, err } return logs, nil } func (o *DbORM) SelectIndexedLogsTopicGreaterThan(ctx context.Context, address common.Address, 
eventSig common.Hash, topicIndex int, topicValueMin common.Hash, confs Confirmations) ([]Log, error) { + topicIndex, err := UseTopicIndex(topicIndex) + if err != nil { + return nil, err + } + query := fmt.Sprintf(` SELECT * FROM evm.logs WHERE evm_chain_id = $1 @@ -540,17 +525,22 @@ func (o *DbORM) SelectIndexedLogsTopicGreaterThan(ctx context.Context, address c AND event_sig = $3 AND topics[$4] >= $5 AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, o.chainID.String())) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err := o.db.SelectContext(ctx, &logs, query, o.chainID.String(), address, eventSig, topicIndex, topicValueMin); err != nil { + if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), topicIndex, topicValueMin.Bytes()); err != nil { return nil, err } return logs, nil } func (o *DbORM) SelectIndexedLogsTopicRange(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValueMin, topicValueMax common.Hash, confs Confirmations) ([]Log, error) { + topicIndex, err := UseTopicIndex(topicIndex) + if err != nil { + return nil, err + } + query := fmt.Sprintf(` SELECT * FROM evm.logs WHERE evm_chain_id = $1 @@ -559,17 +549,23 @@ func (o *DbORM) SelectIndexedLogsTopicRange(ctx context.Context, address common. 
AND topics[$4] >= $5 AND topics[$4] <= $6 AND block_number <= %s - ORDER BY (evm.logs.block_number, evm.logs.log_index)`, nestedBlockNumberQuery(confs, o.chainID.String())) + ORDER BY (evm.logs.block_number, evm.logs.log_index)`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) + var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err := o.db.SelectContext(ctx, &logs, query, o.chainID.String(), address, eventSig, topicIndex, topicValueMin, topicValueMax); err != nil { + if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), topicIndex, topicValueMin.Bytes(), topicValueMax.Bytes()); err != nil { return nil, err } return logs, nil } func (o *DbORM) SelectIndexedLogs(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, confs Confirmations) ([]Log, error) { + topicIndex, err := UseTopicIndex(topicIndex) + if err != nil { + return nil, err + } + query := fmt.Sprintf(` SELECT * FROM evm.logs WHERE evm_chain_id = $1 @@ -577,11 +573,12 @@ func (o *DbORM) SelectIndexedLogs(ctx context.Context, address common.Address, e AND event_sig = $3 AND topics[$4] = ANY($5) AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, o.chainID.String())) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) + var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err := o.db.SelectContext(ctx, &logs, query, o.chainID.String(), address, eventSig, topicIndex, concatBytes(topicValues)); err != nil { + if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), topicIndex, concatBytes(topicValues)); err != nil { return nil, err } return logs, nil @@ -589,10 +586,16 @@ func (o *DbORM) SelectIndexedLogs(ctx context.Context, address common.Address, e // SelectIndexedLogsByBlockRange finds the indexed logs in a given 
block range. func (o *DbORM) SelectIndexedLogsByBlockRange(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash) ([]Log, error) { + topicIndex, err := UseTopicIndex(topicIndex) + if err != nil { + return nil, err + } + var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - err := o.db.SelectContext(ctx, &logs, ` + + err = o.db.SelectContext(ctx, &logs, ` SELECT * FROM evm.logs WHERE evm_chain_id = $1 AND address = $2 @@ -601,7 +604,7 @@ func (o *DbORM) SelectIndexedLogsByBlockRange(ctx context.Context, start, end in AND block_number >= $6 AND block_number <= $7 ORDER BY (block_number, log_index)`, - o.chainID.String(), address, eventSig, topicIndex, concatBytes(topicValues), start, end) + ubig.New(o.chainID), address, eventSig.Bytes(), topicIndex, concatBytes(topicValues), start, end) if err != nil { return nil, err } @@ -609,6 +612,11 @@ func (o *DbORM) SelectIndexedLogsByBlockRange(ctx context.Context, start, end in } func (o *DbORM) SelectIndexedLogsCreatedAfter(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations) ([]Log, error) { + topicIndex, err := UseTopicIndex(topicIndex) + if err != nil { + return nil, err + } + query := fmt.Sprintf(` SELECT * FROM evm.logs WHERE evm_chain_id = $1 @@ -617,12 +625,12 @@ func (o *DbORM) SelectIndexedLogsCreatedAfter(ctx context.Context, address commo AND topics[$4] = ANY($5) AND block_timestamp > $6 AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, o.chainID.String())) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err := o.db.SelectContext(ctx, &logs, query, o.chainID.String(), address, eventSig, topicIndex, concatBytes(topicValues), after); err != nil 
{ + if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), topicIndex, concatBytes(topicValues), after); err != nil { return nil, err } return logs, nil @@ -638,7 +646,7 @@ func (o *DbORM) SelectIndexedLogsByTxHash(ctx context.Context, address common.Ad AND address = $2 AND event_sig = $3 AND tx_hash = $4 - ORDER BY (block_number, log_index)`, o.chainID.String(), address, eventSig, txHash) + ORDER BY (block_number, log_index)`, ubig.New(o.chainID), address, eventSig.Bytes(), txHash.Bytes()) if err != nil { return nil, err } @@ -647,7 +655,12 @@ func (o *DbORM) SelectIndexedLogsByTxHash(ctx context.Context, address common.Ad // SelectIndexedLogsWithSigsExcluding query's for logs that have signature A and exclude logs that have a corresponding signature B, matching is done based on the topic index both logs should be inside the block range and have the minimum number of confirmations func (o *DbORM) SelectIndexedLogsWithSigsExcluding(ctx context.Context, sigA, sigB common.Hash, topicIndex int, address common.Address, startBlock, endBlock int64, confs Confirmations) ([]Log, error) { - nestedQuery := nestedBlockNumberQuery(confs, o.chainID.String()) + topicIndex, err := UseTopicIndex(topicIndex) + if err != nil { + return nil, err + } + + nestedQuery := nestedBlockNumberQuery(confs, ubig.New(o.chainID)) query := fmt.Sprintf(` SELECT * FROM evm.logs WHERE evm_chain_id = $1 @@ -666,21 +679,22 @@ func (o *DbORM) SelectIndexedLogsWithSigsExcluding(ctx context.Context, sigA, si AND b.block_number BETWEEN $5 AND $6 AND b.block_number <= %s ORDER BY block_number,log_index ASC`, nestedQuery, nestedQuery) + var logs []Log ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err := o.db.SelectContext(ctx, &logs, query, o.chainID.String(), address, sigA, sigB, startBlock, endBlock, topicIndex); err != nil { + if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, sigA.Bytes(), sigB.Bytes(), 
startBlock, endBlock, topicIndex); err != nil { return nil, err } return logs, nil } -func nestedBlockNumberQuery(confs Confirmations, chainID string) string { +func nestedBlockNumberQuery(confs Confirmations, chainID *ubig.Big) string { if confs == Finalized { return fmt.Sprintf(` (SELECT finalized_block_number FROM evm.log_poller_blocks - WHERE evm_chain_id = %s + WHERE evm_chain_id = %v ORDER BY block_number DESC LIMIT 1) `, chainID) } // Intentionally wrap with greatest() function and don't return negative block numbers when :confs > :block_number @@ -688,7 +702,28 @@ func nestedBlockNumberQuery(confs Confirmations, chainID string) string { return fmt.Sprintf(` (SELECT greatest(block_number - %d, 0) FROM evm.log_poller_blocks - WHERE evm_chain_id = %s + WHERE evm_chain_id = %v ORDER BY block_number DESC LIMIT 1) `, confs, chainID) } + +func UseTopicIndex(index int) (int, error) { + // Only topicIndex 1 through 3 is valid. 0 is the event sig and only 4 total topics are allowed + if !(index == 1 || index == 2 || index == 3) { + return 0, fmt.Errorf("invalid index for topic: %d", index) + } + // Add 1 since postgresql arrays are 1-indexed. + return index + 1, nil +} + +type bytesProducer interface { + Bytes() []byte +} + +func concatBytes[T bytesProducer](byteSlice []T) pq.ByteaArray { + var output [][]byte + for _, b := range byteSlice { + output = append(output, b.Bytes()) + } + return output +} diff --git a/core/chains/evm/logpoller/orm_test.go b/core/chains/evm/logpoller/orm_test.go index 03145aaaf0a..d2edc72c8c3 100644 --- a/core/chains/evm/logpoller/orm_test.go +++ b/core/chains/evm/logpoller/orm_test.go @@ -633,28 +633,28 @@ func TestORM_DataWords(t *testing.T) { // Outside range should fail. 
lgs, err := o1.SelectLogsDataWordRange(ctx, addr, eventSig, 0, logpoller.EvmWord(2), logpoller.EvmWord(2), 0) require.NoError(t, err) - assert.Equal(t, 0, len(lgs)) + require.Equal(t, 0, len(lgs)) // Range including log should succeed lgs, err = o1.SelectLogsDataWordRange(ctx, addr, eventSig, 0, logpoller.EvmWord(1), logpoller.EvmWord(2), 0) require.NoError(t, err) - assert.Equal(t, 1, len(lgs)) + require.Equal(t, 1, len(lgs)) // Range only covering log should succeed lgs, err = o1.SelectLogsDataWordRange(ctx, addr, eventSig, 0, logpoller.EvmWord(1), logpoller.EvmWord(1), 0) require.NoError(t, err) - assert.Equal(t, 1, len(lgs)) + require.Equal(t, 1, len(lgs)) // Cannot query for unconfirmed second log. lgs, err = o1.SelectLogsDataWordRange(ctx, addr, eventSig, 1, logpoller.EvmWord(3), logpoller.EvmWord(3), 0) require.NoError(t, err) - assert.Equal(t, 0, len(lgs)) + require.Equal(t, 0, len(lgs)) // Confirm it, then can query. require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x2"), 2, time.Now(), 0)) lgs, err = o1.SelectLogsDataWordRange(ctx, addr, eventSig, 1, logpoller.EvmWord(3), logpoller.EvmWord(3), 0) require.NoError(t, err) - assert.Equal(t, 1, len(lgs)) - assert.Equal(t, lgs[0].Data, append(logpoller.EvmWord(2).Bytes(), logpoller.EvmWord(3).Bytes()...)) + require.Equal(t, 1, len(lgs)) + require.Equal(t, lgs[0].Data, append(logpoller.EvmWord(2).Bytes(), logpoller.EvmWord(3).Bytes()...)) // Check greater than 1 yields both logs. 
lgs, err = o1.SelectLogsDataWordGreaterThan(ctx, addr, eventSig, 0, logpoller.EvmWord(1), 0) @@ -852,8 +852,10 @@ func BenchmarkLogs(b *testing.B) { require.NoError(b, o.InsertLogs(ctx, lgs)) b.ResetTimer() for n := 0; n < b.N; n++ { - _, err := o.SelectLogsDataWordRange(ctx, addr, EmitterABI.Events["Log1"].ID, 0, logpoller.EvmWord(8000), logpoller.EvmWord(8002), 0) + lgs, err := o.SelectLogsDataWordRange(ctx, addr, EmitterABI.Events["Log1"].ID, 0, logpoller.EvmWord(8000), logpoller.EvmWord(8002), 0) require.NoError(b, err) + // TODO: Why is SelectLogsDataWordRange not returning any logs?! + fmt.Println("len logs:", len(lgs)) } } @@ -1393,7 +1395,6 @@ func TestInsertLogsWithBlock(t *testing.T) { logs, logsErr := o.SelectLogs(ctx, 0, math.MaxInt, address, event) block, blockErr := o.SelectLatestBlock(ctx) - fmt.Println("block: ", block.BlockNumber, "blockErr: ", blockErr) if tt.shouldRollback { assert.Error(t, insertError) diff --git a/core/chains/evm/logpoller/query.go b/core/chains/evm/logpoller/query.go index a37b15b2b2d..d76d619ac8b 100644 --- a/core/chains/evm/logpoller/query.go +++ b/core/chains/evm/logpoller/query.go @@ -1,5 +1,7 @@ package logpoller +/* + import ( "errors" "fmt" @@ -142,3 +144,5 @@ func (q *queryArgs) toArgs() (map[string]interface{}, error) { } return q.args, nil } + +*/ diff --git a/core/chains/evm/logpoller/query_test.go b/core/chains/evm/logpoller/query_test.go index 70ace713228..1c9ce3037de 100644 --- a/core/chains/evm/logpoller/query_test.go +++ b/core/chains/evm/logpoller/query_test.go @@ -1,5 +1,6 @@ package logpoller +/* import ( "math/big" "testing" @@ -81,3 +82,5 @@ func newEmptyArgs() *queryArgs { err: []error{}, } } + +*/ diff --git a/core/web/evm_forwarders_controller.go b/core/web/evm_forwarders_controller.go index 56d1285c88e..111c198b7e0 100644 --- a/core/web/evm_forwarders_controller.go +++ b/core/web/evm_forwarders_controller.go @@ -91,7 +91,7 @@ func (cc *EVMForwardersController) Delete(c *gin.Context) { // handle same as 
non-existent chain id return nil } - return chain.LogPoller().UnregisterFilter(forwarders.FilterName(addr), pg.WithQueryer(tx)) + return chain.LogPoller().UnregisterFilter(forwarders.FilterName(addr)) } orm := forwarders.NewORM(cc.App.GetSqlxDB(), cc.App.GetLogger(), cc.App.GetConfig().Database()) From c970083bccbbde8d34a7c4464c89f7053426a228 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 15 Feb 2024 10:40:49 -0500 Subject: [PATCH 09/65] Update logpoller orm --- core/chains/evm/logpoller/log_poller_test.go | 6 ++--- core/chains/evm/logpoller/orm.go | 27 +++++++++++++++++--- 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/core/chains/evm/logpoller/log_poller_test.go b/core/chains/evm/logpoller/log_poller_test.go index bbd21eb5af5..d5510657499 100644 --- a/core/chains/evm/logpoller/log_poller_test.go +++ b/core/chains/evm/logpoller/log_poller_test.go @@ -1291,9 +1291,10 @@ func TestLogPoller_DBErrorHandling(t *testing.T) { time.Sleep(100 * time.Millisecond) require.NoError(t, lp.Start(ctx)) require.Eventually(t, func() bool { - return observedLogs.Len() >= 5 + return observedLogs.Len() >= 3 }, 2*time.Second, 20*time.Millisecond) - lp.Close() + err = lp.Close() + require.NoError(t, err) logMsgs := make(map[string]int) for _, obs := range observedLogs.All() { @@ -1305,7 +1306,6 @@ func TestLogPoller_DBErrorHandling(t *testing.T) { } } - assert.Contains(t, logMsgs, "SQL ERROR") assert.Contains(t, logMsgs, "Failed loading filters in main logpoller loop, retrying later") assert.Contains(t, logMsgs, "Error executing replay, could not get fromBlock") assert.Contains(t, logMsgs, "Backup log poller ran before filters loaded, skipping") diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index 4709a828e29..ac5f7b7d172 100644 --- a/core/chains/evm/logpoller/orm.go +++ b/core/chains/evm/logpoller/orm.go @@ -209,12 +209,33 @@ func (o *DbORM) DeleteLogsAndBlocksAfter(ctx context.Context, start int64) error ctx, cancel := 
context.WithTimeout(ctx, defaultTimeout) defer cancel() return o.Transaction(ctx, func(orm *DbORM) error { - _, err := o.db.ExecContext(ctx, `DELETE FROM evm.logs WHERE block_number >= $1 AND evm_chain_id = $2`, start, ubig.New(o.chainID)) + // Applying upper bound filter is critical for Postgres performance (especially for evm.logs table) + // because it allows the planner to properly estimate the number of rows to be scanned. + // If not applied, these queries can become very slow. After some critical number + // of logs, Postgres will try to scan all the logs in the index by block_number. + // Latency without upper bound filter can be orders of magnitude higher for large number of logs. + _, err := o.db.ExecContext(ctx, `DELETE FROM evm.log_poller_blocks + WHERE evm_chain_id = $1 + AND block_number >= $2 + AND block_number <= (SELECT MAX(block_number) + FROM evm.log_poller_blocks + WHERE evm_chain_id = $1)`, + ubig.New(o.chainID), start) if err != nil { + o.lggr.Warnw("Unable to clear reorged blocks, retrying", "err", err) return err } - _, err = o.db.ExecContext(ctx, `DELETE FROM evm.log_poller_blocks WHERE block_number >= $1 AND evm_chain_id = $2`, start, ubig.New(o.chainID)) - return err + + _, err = o.db.ExecContext(ctx, `DELETE FROM evm.logs + WHERE evm_chain_id = $1 + AND block_number >= $2 + AND block_number <= (SELECT MAX(block_number) FROM evm.logs WHERE evm_chain_id = $1)`, + ubig.New(o.chainID), start) + if err != nil { + o.lggr.Warnw("Unable to clear reorged logs, retrying", "err", err) + return err + } + return nil }) } From e90ee08e440e4cdb695a636a9ef15949df805b5b Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 15 Feb 2024 10:51:36 -0500 Subject: [PATCH 10/65] Use EventSig --- core/services/relay/evm/config_poller.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/services/relay/evm/config_poller.go b/core/services/relay/evm/config_poller.go index 44e0dda0923..542e6a4c70a 100644 --- 
a/core/services/relay/evm/config_poller.go +++ b/core/services/relay/evm/config_poller.go @@ -124,7 +124,7 @@ func (cp *configPoller) Replay(ctx context.Context, fromBlock int64) error { // LatestConfigDetails returns the latest config details from the logs func (cp *configPoller) LatestConfigDetails(ctx context.Context) (changedInBlock uint64, configDigest ocrtypes.ConfigDigest, err error) { - latest, err := cp.destChainLogPoller.LatestLogByEventSigWithConfs(ConfigSet, cp.aggregatorContractAddr, 1) + latest, err := cp.destChainLogPoller.LatestLogByEventSigWithConfs(cp.ld.EventSig(), cp.aggregatorContractAddr, 1) if err != nil { if errors.Is(err, sql.ErrNoRows) { if cp.isConfigStoreAvailable() { @@ -145,7 +145,7 @@ func (cp *configPoller) LatestConfigDetails(ctx context.Context) (changedInBlock // LatestConfig returns the latest config from the logs on a certain block func (cp *configPoller) LatestConfig(ctx context.Context, changedInBlock uint64) (ocrtypes.ContractConfig, error) { - lgs, err := cp.destChainLogPoller.Logs(int64(changedInBlock), int64(changedInBlock), ConfigSet, cp.aggregatorContractAddr) + lgs, err := cp.destChainLogPoller.Logs(int64(changedInBlock), int64(changedInBlock), cp.ld.EventSig(), cp.aggregatorContractAddr) if err != nil { return ocrtypes.ContractConfig{}, err } From 90c4d8e1f3ebc8ad1d1aac152f1c02ddff3ddc62 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 15 Feb 2024 12:20:38 -0500 Subject: [PATCH 11/65] update logpoller orm --- .../evm/forwarders/forwarder_manager_test.go | 4 +- core/chains/evm/logpoller/log_poller_test.go | 2 +- core/chains/evm/logpoller/mocks/log_poller.go | 485 +++++++----------- core/chains/evm/logpoller/orm.go | 3 +- core/chains/evm/txmgr/txmgr_test.go | 2 +- .../v21/logprovider/integration_test.go | 8 +- .../v21/logprovider/recoverer_test.go | 47 +- .../v21/registry_check_pipeline_test.go | 21 +- .../evmregistry/v21/registry_test.go | 15 +- .../promreporter/prom_reporter_test.go | 2 +- 
core/services/relay/evm/chain_reader_test.go | 2 +- core/services/relay/evm/config_poller_test.go | 3 +- .../relay/evm/functions/config_poller_test.go | 3 +- .../relay/evm/mercury/helpers_test.go | 3 +- .../vrf/v2/listener_v2_log_listener_test.go | 6 +- core/store/migrate/migrate_test.go | 21 +- 16 files changed, 239 insertions(+), 388 deletions(-) diff --git a/core/chains/evm/forwarders/forwarder_manager_test.go b/core/chains/evm/forwarders/forwarder_manager_test.go index 5ef150aa5c3..903ac76322d 100644 --- a/core/chains/evm/forwarders/forwarder_manager_test.go +++ b/core/chains/evm/forwarders/forwarder_manager_test.go @@ -60,7 +60,7 @@ func TestFwdMgr_MaybeForwardTransaction(t *testing.T) { t.Log(authorized) evmClient := client.NewSimulatedBackendClient(t, ec, testutils.FixtureChainID) - lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), evmClient, lggr, 100*time.Millisecond, false, 2, 3, 2, 1000) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr), evmClient, lggr, 100*time.Millisecond, false, 2, 3, 2, 1000) fwdMgr := forwarders.NewFwdMgr(db, evmClient, lp, lggr, evmcfg.EVM(), evmcfg.Database()) fwdMgr.ORM = forwarders.NewORM(db, logger.Test(t), cfg.Database()) @@ -113,7 +113,7 @@ func TestFwdMgr_AccountUnauthorizedToForward_SkipsForwarding(t *testing.T) { ec.Commit() evmClient := client.NewSimulatedBackendClient(t, ec, testutils.FixtureChainID) - lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), evmClient, lggr, 100*time.Millisecond, false, 2, 3, 2, 1000) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr), evmClient, lggr, 100*time.Millisecond, false, 2, 3, 2, 1000) fwdMgr := forwarders.NewFwdMgr(db, evmClient, lp, lggr, evmcfg.EVM(), evmcfg.Database()) fwdMgr.ORM = forwarders.NewORM(db, logger.Test(t), cfg.Database()) diff --git a/core/chains/evm/logpoller/log_poller_test.go 
b/core/chains/evm/logpoller/log_poller_test.go index d5510657499..34c819b7ea7 100644 --- a/core/chains/evm/logpoller/log_poller_test.go +++ b/core/chains/evm/logpoller/log_poller_test.go @@ -1209,7 +1209,7 @@ func TestLogPoller_GetBlocks_Range(t *testing.T) { // but context object is not ctx, cancel = context.WithCancel(testutils.Context(t)) cancel() - _, err = th.LogPoller.GetBlocksRange(testutils.Context(t), blockNums) + _, err = th.LogPoller.GetBlocksRange(ctx, blockNums) require.NoError(t, err) } diff --git a/core/chains/evm/logpoller/mocks/log_poller.go b/core/chains/evm/logpoller/mocks/log_poller.go index 65d808b98d5..796057640d8 100644 --- a/core/chains/evm/logpoller/mocks/log_poller.go +++ b/core/chains/evm/logpoller/mocks/log_poller.go @@ -11,8 +11,6 @@ import ( mock "github.com/stretchr/testify/mock" - pg "github.com/smartcontractkit/chainlink/v2/core/services/pg" - time "time" ) @@ -39,16 +37,9 @@ func (_m *LogPoller) Close() error { return r0 } -// GetBlocksRange provides a mock function with given fields: ctx, numbers, qopts -func (_m *LogPoller) GetBlocksRange(ctx context.Context, numbers []uint64, qopts ...pg.QOpt) ([]logpoller.LogPollerBlock, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, numbers) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) +// GetBlocksRange provides a mock function with given fields: ctx, numbers +func (_m *LogPoller) GetBlocksRange(ctx context.Context, numbers []uint64) ([]logpoller.LogPollerBlock, error) { + ret := _m.Called(ctx, numbers) if len(ret) == 0 { panic("no return value specified for GetBlocksRange") @@ -56,19 +47,19 @@ func (_m *LogPoller) GetBlocksRange(ctx context.Context, numbers []uint64, qopts var r0 []logpoller.LogPollerBlock var r1 error - if rf, ok := ret.Get(0).(func(context.Context, []uint64, ...pg.QOpt) ([]logpoller.LogPollerBlock, error)); ok { - return rf(ctx, numbers, qopts...) 
+ if rf, ok := ret.Get(0).(func(context.Context, []uint64) ([]logpoller.LogPollerBlock, error)); ok { + return rf(ctx, numbers) } - if rf, ok := ret.Get(0).(func(context.Context, []uint64, ...pg.QOpt) []logpoller.LogPollerBlock); ok { - r0 = rf(ctx, numbers, qopts...) + if rf, ok := ret.Get(0).(func(context.Context, []uint64) []logpoller.LogPollerBlock); ok { + r0 = rf(ctx, numbers) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.LogPollerBlock) } } - if rf, ok := ret.Get(1).(func(context.Context, []uint64, ...pg.QOpt) error); ok { - r1 = rf(ctx, numbers, qopts...) + if rf, ok := ret.Get(1).(func(context.Context, []uint64) error); ok { + r1 = rf(ctx, numbers) } else { r1 = ret.Error(1) } @@ -114,16 +105,9 @@ func (_m *LogPoller) HealthReport() map[string]error { return r0 } -// IndexedLogs provides a mock function with given fields: eventSig, address, topicIndex, topicValues, confs, qopts -func (_m *LogPoller) IndexedLogs(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, eventSig, address, topicIndex, topicValues, confs) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+// IndexedLogs provides a mock function with given fields: eventSig, address, topicIndex, topicValues, confs +func (_m *LogPoller) IndexedLogs(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { + ret := _m.Called(eventSig, address, topicIndex, topicValues, confs) if len(ret) == 0 { panic("no return value specified for IndexedLogs") @@ -131,19 +115,19 @@ func (_m *LogPoller) IndexedLogs(eventSig common.Hash, address common.Address, t var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, []common.Hash, logpoller.Confirmations, ...pg.QOpt) ([]logpoller.Log, error)); ok { - return rf(eventSig, address, topicIndex, topicValues, confs, qopts...) + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, []common.Hash, logpoller.Confirmations) ([]logpoller.Log, error)); ok { + return rf(eventSig, address, topicIndex, topicValues, confs) } - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, []common.Hash, logpoller.Confirmations, ...pg.QOpt) []logpoller.Log); ok { - r0 = rf(eventSig, address, topicIndex, topicValues, confs, qopts...) + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, []common.Hash, logpoller.Confirmations) []logpoller.Log); ok { + r0 = rf(eventSig, address, topicIndex, topicValues, confs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, []common.Hash, logpoller.Confirmations, ...pg.QOpt) error); ok { - r1 = rf(eventSig, address, topicIndex, topicValues, confs, qopts...) 
+ if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, []common.Hash, logpoller.Confirmations) error); ok { + r1 = rf(eventSig, address, topicIndex, topicValues, confs) } else { r1 = ret.Error(1) } @@ -151,16 +135,9 @@ func (_m *LogPoller) IndexedLogs(eventSig common.Hash, address common.Address, t return r0, r1 } -// IndexedLogsByBlockRange provides a mock function with given fields: start, end, eventSig, address, topicIndex, topicValues, qopts -func (_m *LogPoller) IndexedLogsByBlockRange(start int64, end int64, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, qopts ...pg.QOpt) ([]logpoller.Log, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, start, end, eventSig, address, topicIndex, topicValues) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) +// IndexedLogsByBlockRange provides a mock function with given fields: start, end, eventSig, address, topicIndex, topicValues +func (_m *LogPoller) IndexedLogsByBlockRange(start int64, end int64, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash) ([]logpoller.Log, error) { + ret := _m.Called(start, end, eventSig, address, topicIndex, topicValues) if len(ret) == 0 { panic("no return value specified for IndexedLogsByBlockRange") @@ -168,19 +145,19 @@ func (_m *LogPoller) IndexedLogsByBlockRange(start int64, end int64, eventSig co var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(int64, int64, common.Hash, common.Address, int, []common.Hash, ...pg.QOpt) ([]logpoller.Log, error)); ok { - return rf(start, end, eventSig, address, topicIndex, topicValues, qopts...) 
+ if rf, ok := ret.Get(0).(func(int64, int64, common.Hash, common.Address, int, []common.Hash) ([]logpoller.Log, error)); ok { + return rf(start, end, eventSig, address, topicIndex, topicValues) } - if rf, ok := ret.Get(0).(func(int64, int64, common.Hash, common.Address, int, []common.Hash, ...pg.QOpt) []logpoller.Log); ok { - r0 = rf(start, end, eventSig, address, topicIndex, topicValues, qopts...) + if rf, ok := ret.Get(0).(func(int64, int64, common.Hash, common.Address, int, []common.Hash) []logpoller.Log); ok { + r0 = rf(start, end, eventSig, address, topicIndex, topicValues) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(int64, int64, common.Hash, common.Address, int, []common.Hash, ...pg.QOpt) error); ok { - r1 = rf(start, end, eventSig, address, topicIndex, topicValues, qopts...) + if rf, ok := ret.Get(1).(func(int64, int64, common.Hash, common.Address, int, []common.Hash) error); ok { + r1 = rf(start, end, eventSig, address, topicIndex, topicValues) } else { r1 = ret.Error(1) } @@ -188,16 +165,9 @@ func (_m *LogPoller) IndexedLogsByBlockRange(start int64, end int64, eventSig co return r0, r1 } -// IndexedLogsByTxHash provides a mock function with given fields: eventSig, address, txHash, qopts -func (_m *LogPoller) IndexedLogsByTxHash(eventSig common.Hash, address common.Address, txHash common.Hash, qopts ...pg.QOpt) ([]logpoller.Log, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, eventSig, address, txHash) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+// IndexedLogsByTxHash provides a mock function with given fields: eventSig, address, txHash +func (_m *LogPoller) IndexedLogsByTxHash(eventSig common.Hash, address common.Address, txHash common.Hash) ([]logpoller.Log, error) { + ret := _m.Called(eventSig, address, txHash) if len(ret) == 0 { panic("no return value specified for IndexedLogsByTxHash") @@ -205,19 +175,19 @@ func (_m *LogPoller) IndexedLogsByTxHash(eventSig common.Hash, address common.Ad var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, common.Hash, ...pg.QOpt) ([]logpoller.Log, error)); ok { - return rf(eventSig, address, txHash, qopts...) + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, common.Hash) ([]logpoller.Log, error)); ok { + return rf(eventSig, address, txHash) } - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, common.Hash, ...pg.QOpt) []logpoller.Log); ok { - r0 = rf(eventSig, address, txHash, qopts...) + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, common.Hash) []logpoller.Log); ok { + r0 = rf(eventSig, address, txHash) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(common.Hash, common.Address, common.Hash, ...pg.QOpt) error); ok { - r1 = rf(eventSig, address, txHash, qopts...) 
+ if rf, ok := ret.Get(1).(func(common.Hash, common.Address, common.Hash) error); ok { + r1 = rf(eventSig, address, txHash) } else { r1 = ret.Error(1) } @@ -225,16 +195,9 @@ func (_m *LogPoller) IndexedLogsByTxHash(eventSig common.Hash, address common.Ad return r0, r1 } -// IndexedLogsCreatedAfter provides a mock function with given fields: eventSig, address, topicIndex, topicValues, after, confs, qopts -func (_m *LogPoller) IndexedLogsCreatedAfter(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, after time.Time, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, eventSig, address, topicIndex, topicValues, after, confs) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) +// IndexedLogsCreatedAfter provides a mock function with given fields: eventSig, address, topicIndex, topicValues, after, confs +func (_m *LogPoller) IndexedLogsCreatedAfter(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, after time.Time, confs logpoller.Confirmations) ([]logpoller.Log, error) { + ret := _m.Called(eventSig, address, topicIndex, topicValues, after, confs) if len(ret) == 0 { panic("no return value specified for IndexedLogsCreatedAfter") @@ -242,19 +205,19 @@ func (_m *LogPoller) IndexedLogsCreatedAfter(eventSig common.Hash, address commo var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, []common.Hash, time.Time, logpoller.Confirmations, ...pg.QOpt) ([]logpoller.Log, error)); ok { - return rf(eventSig, address, topicIndex, topicValues, after, confs, qopts...) 
+ if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, []common.Hash, time.Time, logpoller.Confirmations) ([]logpoller.Log, error)); ok { + return rf(eventSig, address, topicIndex, topicValues, after, confs) } - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, []common.Hash, time.Time, logpoller.Confirmations, ...pg.QOpt) []logpoller.Log); ok { - r0 = rf(eventSig, address, topicIndex, topicValues, after, confs, qopts...) + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, []common.Hash, time.Time, logpoller.Confirmations) []logpoller.Log); ok { + r0 = rf(eventSig, address, topicIndex, topicValues, after, confs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, []common.Hash, time.Time, logpoller.Confirmations, ...pg.QOpt) error); ok { - r1 = rf(eventSig, address, topicIndex, topicValues, after, confs, qopts...) + if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, []common.Hash, time.Time, logpoller.Confirmations) error); ok { + r1 = rf(eventSig, address, topicIndex, topicValues, after, confs) } else { r1 = ret.Error(1) } @@ -262,16 +225,9 @@ func (_m *LogPoller) IndexedLogsCreatedAfter(eventSig common.Hash, address commo return r0, r1 } -// IndexedLogsTopicGreaterThan provides a mock function with given fields: eventSig, address, topicIndex, topicValueMin, confs, qopts -func (_m *LogPoller) IndexedLogsTopicGreaterThan(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, eventSig, address, topicIndex, topicValueMin, confs) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+// IndexedLogsTopicGreaterThan provides a mock function with given fields: eventSig, address, topicIndex, topicValueMin, confs +func (_m *LogPoller) IndexedLogsTopicGreaterThan(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { + ret := _m.Called(eventSig, address, topicIndex, topicValueMin, confs) if len(ret) == 0 { panic("no return value specified for IndexedLogsTopicGreaterThan") @@ -279,19 +235,19 @@ func (_m *LogPoller) IndexedLogsTopicGreaterThan(eventSig common.Hash, address c var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations, ...pg.QOpt) ([]logpoller.Log, error)); ok { - return rf(eventSig, address, topicIndex, topicValueMin, confs, qopts...) + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations) ([]logpoller.Log, error)); ok { + return rf(eventSig, address, topicIndex, topicValueMin, confs) } - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations, ...pg.QOpt) []logpoller.Log); ok { - r0 = rf(eventSig, address, topicIndex, topicValueMin, confs, qopts...) + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations) []logpoller.Log); ok { + r0 = rf(eventSig, address, topicIndex, topicValueMin, confs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations, ...pg.QOpt) error); ok { - r1 = rf(eventSig, address, topicIndex, topicValueMin, confs, qopts...) 
+ if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations) error); ok { + r1 = rf(eventSig, address, topicIndex, topicValueMin, confs) } else { r1 = ret.Error(1) } @@ -299,16 +255,9 @@ func (_m *LogPoller) IndexedLogsTopicGreaterThan(eventSig common.Hash, address c return r0, r1 } -// IndexedLogsTopicRange provides a mock function with given fields: eventSig, address, topicIndex, topicValueMin, topicValueMax, confs, qopts -func (_m *LogPoller) IndexedLogsTopicRange(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, topicValueMax common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, eventSig, address, topicIndex, topicValueMin, topicValueMax, confs) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) +// IndexedLogsTopicRange provides a mock function with given fields: eventSig, address, topicIndex, topicValueMin, topicValueMax, confs +func (_m *LogPoller) IndexedLogsTopicRange(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, topicValueMax common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { + ret := _m.Called(eventSig, address, topicIndex, topicValueMin, topicValueMax, confs) if len(ret) == 0 { panic("no return value specified for IndexedLogsTopicRange") @@ -316,19 +265,19 @@ func (_m *LogPoller) IndexedLogsTopicRange(eventSig common.Hash, address common. var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations, ...pg.QOpt) ([]logpoller.Log, error)); ok { - return rf(eventSig, address, topicIndex, topicValueMin, topicValueMax, confs, qopts...) 
+ if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations) ([]logpoller.Log, error)); ok { + return rf(eventSig, address, topicIndex, topicValueMin, topicValueMax, confs) } - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations, ...pg.QOpt) []logpoller.Log); ok { - r0 = rf(eventSig, address, topicIndex, topicValueMin, topicValueMax, confs, qopts...) + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations) []logpoller.Log); ok { + r0 = rf(eventSig, address, topicIndex, topicValueMin, topicValueMax, confs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations, ...pg.QOpt) error); ok { - r1 = rf(eventSig, address, topicIndex, topicValueMin, topicValueMax, confs, qopts...) + if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations) error); ok { + r1 = rf(eventSig, address, topicIndex, topicValueMin, topicValueMax, confs) } else { r1 = ret.Error(1) } @@ -336,16 +285,9 @@ func (_m *LogPoller) IndexedLogsTopicRange(eventSig common.Hash, address common. return r0, r1 } -// IndexedLogsWithSigsExcluding provides a mock function with given fields: address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs, qopts -func (_m *LogPoller) IndexedLogsWithSigsExcluding(address common.Address, eventSigA common.Hash, eventSigB common.Hash, topicIndex int, fromBlock int64, toBlock int64, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs) - _ca = append(_ca, _va...) 
- ret := _m.Called(_ca...) +// IndexedLogsWithSigsExcluding provides a mock function with given fields: address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs +func (_m *LogPoller) IndexedLogsWithSigsExcluding(address common.Address, eventSigA common.Hash, eventSigB common.Hash, topicIndex int, fromBlock int64, toBlock int64, confs logpoller.Confirmations) ([]logpoller.Log, error) { + ret := _m.Called(address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs) if len(ret) == 0 { panic("no return value specified for IndexedLogsWithSigsExcluding") @@ -353,19 +295,19 @@ func (_m *LogPoller) IndexedLogsWithSigsExcluding(address common.Address, eventS var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(common.Address, common.Hash, common.Hash, int, int64, int64, logpoller.Confirmations, ...pg.QOpt) ([]logpoller.Log, error)); ok { - return rf(address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs, qopts...) + if rf, ok := ret.Get(0).(func(common.Address, common.Hash, common.Hash, int, int64, int64, logpoller.Confirmations) ([]logpoller.Log, error)); ok { + return rf(address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs) } - if rf, ok := ret.Get(0).(func(common.Address, common.Hash, common.Hash, int, int64, int64, logpoller.Confirmations, ...pg.QOpt) []logpoller.Log); ok { - r0 = rf(address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs, qopts...) + if rf, ok := ret.Get(0).(func(common.Address, common.Hash, common.Hash, int, int64, int64, logpoller.Confirmations) []logpoller.Log); ok { + r0 = rf(address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(common.Address, common.Hash, common.Hash, int, int64, int64, logpoller.Confirmations, ...pg.QOpt) error); ok { - r1 = rf(address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs, qopts...) 
+ if rf, ok := ret.Get(1).(func(common.Address, common.Hash, common.Hash, int, int64, int64, logpoller.Confirmations) error); ok { + r1 = rf(address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs) } else { r1 = ret.Error(1) } @@ -373,15 +315,9 @@ func (_m *LogPoller) IndexedLogsWithSigsExcluding(address common.Address, eventS return r0, r1 } -// LatestBlock provides a mock function with given fields: qopts -func (_m *LogPoller) LatestBlock(qopts ...pg.QOpt) (logpoller.LogPollerBlock, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) +// LatestBlock provides a mock function with given fields: ctx +func (_m *LogPoller) LatestBlock(ctx context.Context) (logpoller.LogPollerBlock, error) { + ret := _m.Called(ctx) if len(ret) == 0 { panic("no return value specified for LatestBlock") @@ -389,17 +325,17 @@ func (_m *LogPoller) LatestBlock(qopts ...pg.QOpt) (logpoller.LogPollerBlock, er var r0 logpoller.LogPollerBlock var r1 error - if rf, ok := ret.Get(0).(func(...pg.QOpt) (logpoller.LogPollerBlock, error)); ok { - return rf(qopts...) + if rf, ok := ret.Get(0).(func(context.Context) (logpoller.LogPollerBlock, error)); ok { + return rf(ctx) } - if rf, ok := ret.Get(0).(func(...pg.QOpt) logpoller.LogPollerBlock); ok { - r0 = rf(qopts...) + if rf, ok := ret.Get(0).(func(context.Context) logpoller.LogPollerBlock); ok { + r0 = rf(ctx) } else { r0 = ret.Get(0).(logpoller.LogPollerBlock) } - if rf, ok := ret.Get(1).(func(...pg.QOpt) error); ok { - r1 = rf(qopts...) 
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) } else { r1 = ret.Error(1) } @@ -407,16 +343,9 @@ func (_m *LogPoller) LatestBlock(qopts ...pg.QOpt) (logpoller.LogPollerBlock, er return r0, r1 } -// LatestBlockByEventSigsAddrsWithConfs provides a mock function with given fields: fromBlock, eventSigs, addresses, confs, qopts -func (_m *LogPoller) LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs logpoller.Confirmations, qopts ...pg.QOpt) (int64, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, fromBlock, eventSigs, addresses, confs) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) +// LatestBlockByEventSigsAddrsWithConfs provides a mock function with given fields: fromBlock, eventSigs, addresses, confs +func (_m *LogPoller) LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs logpoller.Confirmations) (int64, error) { + ret := _m.Called(fromBlock, eventSigs, addresses, confs) if len(ret) == 0 { panic("no return value specified for LatestBlockByEventSigsAddrsWithConfs") @@ -424,17 +353,17 @@ func (_m *LogPoller) LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, event var r0 int64 var r1 error - if rf, ok := ret.Get(0).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations, ...pg.QOpt) (int64, error)); ok { - return rf(fromBlock, eventSigs, addresses, confs, qopts...) + if rf, ok := ret.Get(0).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations) (int64, error)); ok { + return rf(fromBlock, eventSigs, addresses, confs) } - if rf, ok := ret.Get(0).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations, ...pg.QOpt) int64); ok { - r0 = rf(fromBlock, eventSigs, addresses, confs, qopts...) 
+ if rf, ok := ret.Get(0).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations) int64); ok { + r0 = rf(fromBlock, eventSigs, addresses, confs) } else { r0 = ret.Get(0).(int64) } - if rf, ok := ret.Get(1).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations, ...pg.QOpt) error); ok { - r1 = rf(fromBlock, eventSigs, addresses, confs, qopts...) + if rf, ok := ret.Get(1).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations) error); ok { + r1 = rf(fromBlock, eventSigs, addresses, confs) } else { r1 = ret.Error(1) } @@ -442,16 +371,9 @@ func (_m *LogPoller) LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, event return r0, r1 } -// LatestLogByEventSigWithConfs provides a mock function with given fields: eventSig, address, confs, qopts -func (_m *LogPoller) LatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs logpoller.Confirmations, qopts ...pg.QOpt) (*logpoller.Log, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, eventSig, address, confs) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) +// LatestLogByEventSigWithConfs provides a mock function with given fields: eventSig, address, confs +func (_m *LogPoller) LatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs logpoller.Confirmations) (*logpoller.Log, error) { + ret := _m.Called(eventSig, address, confs) if len(ret) == 0 { panic("no return value specified for LatestLogByEventSigWithConfs") @@ -459,19 +381,19 @@ func (_m *LogPoller) LatestLogByEventSigWithConfs(eventSig common.Hash, address var r0 *logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, logpoller.Confirmations, ...pg.QOpt) (*logpoller.Log, error)); ok { - return rf(eventSig, address, confs, qopts...) 
+ if rf, ok := ret.Get(0).(func(common.Hash, common.Address, logpoller.Confirmations) (*logpoller.Log, error)); ok { + return rf(eventSig, address, confs) } - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, logpoller.Confirmations, ...pg.QOpt) *logpoller.Log); ok { - r0 = rf(eventSig, address, confs, qopts...) + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, logpoller.Confirmations) *logpoller.Log); ok { + r0 = rf(eventSig, address, confs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*logpoller.Log) } } - if rf, ok := ret.Get(1).(func(common.Hash, common.Address, logpoller.Confirmations, ...pg.QOpt) error); ok { - r1 = rf(eventSig, address, confs, qopts...) + if rf, ok := ret.Get(1).(func(common.Hash, common.Address, logpoller.Confirmations) error); ok { + r1 = rf(eventSig, address, confs) } else { r1 = ret.Error(1) } @@ -479,16 +401,9 @@ func (_m *LogPoller) LatestLogByEventSigWithConfs(eventSig common.Hash, address return r0, r1 } -// LatestLogEventSigsAddrsWithConfs provides a mock function with given fields: fromBlock, eventSigs, addresses, confs, qopts -func (_m *LogPoller) LatestLogEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, fromBlock, eventSigs, addresses, confs) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+// LatestLogEventSigsAddrsWithConfs provides a mock function with given fields: fromBlock, eventSigs, addresses, confs +func (_m *LogPoller) LatestLogEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs logpoller.Confirmations) ([]logpoller.Log, error) { + ret := _m.Called(fromBlock, eventSigs, addresses, confs) if len(ret) == 0 { panic("no return value specified for LatestLogEventSigsAddrsWithConfs") @@ -496,19 +411,19 @@ func (_m *LogPoller) LatestLogEventSigsAddrsWithConfs(fromBlock int64, eventSigs var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations, ...pg.QOpt) ([]logpoller.Log, error)); ok { - return rf(fromBlock, eventSigs, addresses, confs, qopts...) + if rf, ok := ret.Get(0).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations) ([]logpoller.Log, error)); ok { + return rf(fromBlock, eventSigs, addresses, confs) } - if rf, ok := ret.Get(0).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations, ...pg.QOpt) []logpoller.Log); ok { - r0 = rf(fromBlock, eventSigs, addresses, confs, qopts...) + if rf, ok := ret.Get(0).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations) []logpoller.Log); ok { + r0 = rf(fromBlock, eventSigs, addresses, confs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations, ...pg.QOpt) error); ok { - r1 = rf(fromBlock, eventSigs, addresses, confs, qopts...) 
+ if rf, ok := ret.Get(1).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations) error); ok { + r1 = rf(fromBlock, eventSigs, addresses, confs) } else { r1 = ret.Error(1) } @@ -516,16 +431,9 @@ func (_m *LogPoller) LatestLogEventSigsAddrsWithConfs(fromBlock int64, eventSigs return r0, r1 } -// Logs provides a mock function with given fields: start, end, eventSig, address, qopts -func (_m *LogPoller) Logs(start int64, end int64, eventSig common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, start, end, eventSig, address) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) +// Logs provides a mock function with given fields: start, end, eventSig, address +func (_m *LogPoller) Logs(start int64, end int64, eventSig common.Hash, address common.Address) ([]logpoller.Log, error) { + ret := _m.Called(start, end, eventSig, address) if len(ret) == 0 { panic("no return value specified for Logs") @@ -533,19 +441,19 @@ func (_m *LogPoller) Logs(start int64, end int64, eventSig common.Hash, address var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(int64, int64, common.Hash, common.Address, ...pg.QOpt) ([]logpoller.Log, error)); ok { - return rf(start, end, eventSig, address, qopts...) + if rf, ok := ret.Get(0).(func(int64, int64, common.Hash, common.Address) ([]logpoller.Log, error)); ok { + return rf(start, end, eventSig, address) } - if rf, ok := ret.Get(0).(func(int64, int64, common.Hash, common.Address, ...pg.QOpt) []logpoller.Log); ok { - r0 = rf(start, end, eventSig, address, qopts...) 
+ if rf, ok := ret.Get(0).(func(int64, int64, common.Hash, common.Address) []logpoller.Log); ok { + r0 = rf(start, end, eventSig, address) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(int64, int64, common.Hash, common.Address, ...pg.QOpt) error); ok { - r1 = rf(start, end, eventSig, address, qopts...) + if rf, ok := ret.Get(1).(func(int64, int64, common.Hash, common.Address) error); ok { + r1 = rf(start, end, eventSig, address) } else { r1 = ret.Error(1) } @@ -553,16 +461,9 @@ func (_m *LogPoller) Logs(start int64, end int64, eventSig common.Hash, address return r0, r1 } -// LogsCreatedAfter provides a mock function with given fields: eventSig, address, _a2, confs, qopts -func (_m *LogPoller) LogsCreatedAfter(eventSig common.Hash, address common.Address, _a2 time.Time, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, eventSig, address, _a2, confs) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) +// LogsCreatedAfter provides a mock function with given fields: eventSig, address, _a2, confs +func (_m *LogPoller) LogsCreatedAfter(eventSig common.Hash, address common.Address, _a2 time.Time, confs logpoller.Confirmations) ([]logpoller.Log, error) { + ret := _m.Called(eventSig, address, _a2, confs) if len(ret) == 0 { panic("no return value specified for LogsCreatedAfter") @@ -570,19 +471,19 @@ func (_m *LogPoller) LogsCreatedAfter(eventSig common.Hash, address common.Addre var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, time.Time, logpoller.Confirmations, ...pg.QOpt) ([]logpoller.Log, error)); ok { - return rf(eventSig, address, _a2, confs, qopts...) 
+ if rf, ok := ret.Get(0).(func(common.Hash, common.Address, time.Time, logpoller.Confirmations) ([]logpoller.Log, error)); ok { + return rf(eventSig, address, _a2, confs) } - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, time.Time, logpoller.Confirmations, ...pg.QOpt) []logpoller.Log); ok { - r0 = rf(eventSig, address, _a2, confs, qopts...) + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, time.Time, logpoller.Confirmations) []logpoller.Log); ok { + r0 = rf(eventSig, address, _a2, confs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(common.Hash, common.Address, time.Time, logpoller.Confirmations, ...pg.QOpt) error); ok { - r1 = rf(eventSig, address, _a2, confs, qopts...) + if rf, ok := ret.Get(1).(func(common.Hash, common.Address, time.Time, logpoller.Confirmations) error); ok { + r1 = rf(eventSig, address, _a2, confs) } else { r1 = ret.Error(1) } @@ -590,16 +491,9 @@ func (_m *LogPoller) LogsCreatedAfter(eventSig common.Hash, address common.Addre return r0, r1 } -// LogsDataWordBetween provides a mock function with given fields: eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs, qopts -func (_m *LogPoller) LogsDataWordBetween(eventSig common.Hash, address common.Address, wordIndexMin int, wordIndexMax int, wordValue common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+// LogsDataWordBetween provides a mock function with given fields: eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs +func (_m *LogPoller) LogsDataWordBetween(eventSig common.Hash, address common.Address, wordIndexMin int, wordIndexMax int, wordValue common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { + ret := _m.Called(eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs) if len(ret) == 0 { panic("no return value specified for LogsDataWordBetween") @@ -607,19 +501,19 @@ func (_m *LogPoller) LogsDataWordBetween(eventSig common.Hash, address common.Ad var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, int, common.Hash, logpoller.Confirmations, ...pg.QOpt) ([]logpoller.Log, error)); ok { - return rf(eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs, qopts...) + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, int, common.Hash, logpoller.Confirmations) ([]logpoller.Log, error)); ok { + return rf(eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs) } - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, int, common.Hash, logpoller.Confirmations, ...pg.QOpt) []logpoller.Log); ok { - r0 = rf(eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs, qopts...) + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, int, common.Hash, logpoller.Confirmations) []logpoller.Log); ok { + r0 = rf(eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, int, common.Hash, logpoller.Confirmations, ...pg.QOpt) error); ok { - r1 = rf(eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs, qopts...) 
+ if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, int, common.Hash, logpoller.Confirmations) error); ok { + r1 = rf(eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs) } else { r1 = ret.Error(1) } @@ -627,16 +521,9 @@ func (_m *LogPoller) LogsDataWordBetween(eventSig common.Hash, address common.Ad return r0, r1 } -// LogsDataWordGreaterThan provides a mock function with given fields: eventSig, address, wordIndex, wordValueMin, confs, qopts -func (_m *LogPoller) LogsDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, eventSig, address, wordIndex, wordValueMin, confs) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) +// LogsDataWordGreaterThan provides a mock function with given fields: eventSig, address, wordIndex, wordValueMin, confs +func (_m *LogPoller) LogsDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { + ret := _m.Called(eventSig, address, wordIndex, wordValueMin, confs) if len(ret) == 0 { panic("no return value specified for LogsDataWordGreaterThan") @@ -644,19 +531,19 @@ func (_m *LogPoller) LogsDataWordGreaterThan(eventSig common.Hash, address commo var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations, ...pg.QOpt) ([]logpoller.Log, error)); ok { - return rf(eventSig, address, wordIndex, wordValueMin, confs, qopts...) 
+ if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations) ([]logpoller.Log, error)); ok { + return rf(eventSig, address, wordIndex, wordValueMin, confs) } - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations, ...pg.QOpt) []logpoller.Log); ok { - r0 = rf(eventSig, address, wordIndex, wordValueMin, confs, qopts...) + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations) []logpoller.Log); ok { + r0 = rf(eventSig, address, wordIndex, wordValueMin, confs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations, ...pg.QOpt) error); ok { - r1 = rf(eventSig, address, wordIndex, wordValueMin, confs, qopts...) + if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations) error); ok { + r1 = rf(eventSig, address, wordIndex, wordValueMin, confs) } else { r1 = ret.Error(1) } @@ -664,16 +551,9 @@ func (_m *LogPoller) LogsDataWordGreaterThan(eventSig common.Hash, address commo return r0, r1 } -// LogsDataWordRange provides a mock function with given fields: eventSig, address, wordIndex, wordValueMin, wordValueMax, confs, qopts -func (_m *LogPoller) LogsDataWordRange(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, wordValueMax common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, eventSig, address, wordIndex, wordValueMin, wordValueMax, confs) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+// LogsDataWordRange provides a mock function with given fields: eventSig, address, wordIndex, wordValueMin, wordValueMax, confs +func (_m *LogPoller) LogsDataWordRange(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, wordValueMax common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { + ret := _m.Called(eventSig, address, wordIndex, wordValueMin, wordValueMax, confs) if len(ret) == 0 { panic("no return value specified for LogsDataWordRange") @@ -681,19 +561,19 @@ func (_m *LogPoller) LogsDataWordRange(eventSig common.Hash, address common.Addr var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations, ...pg.QOpt) ([]logpoller.Log, error)); ok { - return rf(eventSig, address, wordIndex, wordValueMin, wordValueMax, confs, qopts...) + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations) ([]logpoller.Log, error)); ok { + return rf(eventSig, address, wordIndex, wordValueMin, wordValueMax, confs) } - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations, ...pg.QOpt) []logpoller.Log); ok { - r0 = rf(eventSig, address, wordIndex, wordValueMin, wordValueMax, confs, qopts...) + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations) []logpoller.Log); ok { + r0 = rf(eventSig, address, wordIndex, wordValueMin, wordValueMax, confs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations, ...pg.QOpt) error); ok { - r1 = rf(eventSig, address, wordIndex, wordValueMin, wordValueMax, confs, qopts...) 
+ if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations) error); ok { + r1 = rf(eventSig, address, wordIndex, wordValueMin, wordValueMax, confs) } else { r1 = ret.Error(1) } @@ -701,16 +581,9 @@ func (_m *LogPoller) LogsDataWordRange(eventSig common.Hash, address common.Addr return r0, r1 } -// LogsWithSigs provides a mock function with given fields: start, end, eventSigs, address, qopts -func (_m *LogPoller) LogsWithSigs(start int64, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, start, end, eventSigs, address) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) +// LogsWithSigs provides a mock function with given fields: start, end, eventSigs, address +func (_m *LogPoller) LogsWithSigs(start int64, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + ret := _m.Called(start, end, eventSigs, address) if len(ret) == 0 { panic("no return value specified for LogsWithSigs") @@ -718,19 +591,19 @@ func (_m *LogPoller) LogsWithSigs(start int64, end int64, eventSigs []common.Has var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(int64, int64, []common.Hash, common.Address, ...pg.QOpt) ([]logpoller.Log, error)); ok { - return rf(start, end, eventSigs, address, qopts...) + if rf, ok := ret.Get(0).(func(int64, int64, []common.Hash, common.Address) ([]logpoller.Log, error)); ok { + return rf(start, end, eventSigs, address) } - if rf, ok := ret.Get(0).(func(int64, int64, []common.Hash, common.Address, ...pg.QOpt) []logpoller.Log); ok { - r0 = rf(start, end, eventSigs, address, qopts...) 
+ if rf, ok := ret.Get(0).(func(int64, int64, []common.Hash, common.Address) []logpoller.Log); ok { + r0 = rf(start, end, eventSigs, address) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(int64, int64, []common.Hash, common.Address, ...pg.QOpt) error); ok { - r1 = rf(start, end, eventSigs, address, qopts...) + if rf, ok := ret.Get(1).(func(int64, int64, []common.Hash, common.Address) error); ok { + r1 = rf(start, end, eventSigs, address) } else { r1 = ret.Error(1) } @@ -774,24 +647,17 @@ func (_m *LogPoller) Ready() error { return r0 } -// RegisterFilter provides a mock function with given fields: filter, qopts -func (_m *LogPoller) RegisterFilter(filter logpoller.Filter, qopts ...pg.QOpt) error { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, filter) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) +// RegisterFilter provides a mock function with given fields: filter +func (_m *LogPoller) RegisterFilter(filter logpoller.Filter) error { + ret := _m.Called(filter) if len(ret) == 0 { panic("no return value specified for RegisterFilter") } var r0 error - if rf, ok := ret.Get(0).(func(logpoller.Filter, ...pg.QOpt) error); ok { - r0 = rf(filter, qopts...) + if rf, ok := ret.Get(0).(func(logpoller.Filter) error); ok { + r0 = rf(filter) } else { r0 = ret.Error(0) } @@ -840,24 +706,17 @@ func (_m *LogPoller) Start(_a0 context.Context) error { return r0 } -// UnregisterFilter provides a mock function with given fields: name, qopts -func (_m *LogPoller) UnregisterFilter(name string, qopts ...pg.QOpt) error { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, name) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
+// UnregisterFilter provides a mock function with given fields: name +func (_m *LogPoller) UnregisterFilter(name string) error { + ret := _m.Called(name) if len(ret) == 0 { panic("no return value specified for UnregisterFilter") } var r0 error - if rf, ok := ret.Get(0).(func(string, ...pg.QOpt) error); ok { - r0 = rf(name, qopts...) + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(name) } else { r0 = ret.Error(0) } diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index 17b8394bc26..d7efdf11442 100644 --- a/core/chains/evm/logpoller/orm.go +++ b/core/chains/evm/logpoller/orm.go @@ -4,10 +4,11 @@ import ( "context" "database/sql" "fmt" - "github.com/lib/pq" "math/big" "time" + "github.com/lib/pq" + "github.com/ethereum/go-ethereum/common" "github.com/jmoiron/sqlx" "github.com/pkg/errors" diff --git a/core/chains/evm/txmgr/txmgr_test.go b/core/chains/evm/txmgr/txmgr_test.go index 0e28f2948ee..94fea8432fa 100644 --- a/core/chains/evm/txmgr/txmgr_test.go +++ b/core/chains/evm/txmgr/txmgr_test.go @@ -50,7 +50,7 @@ import ( func makeTestEvmTxm( t *testing.T, db *sqlx.DB, ethClient evmclient.Client, estimator gas.EvmFeeEstimator, ccfg txmgr.ChainConfig, fcfg txmgr.FeeConfig, txConfig evmconfig.Transactions, dbConfig txmgr.DatabaseConfig, listenerConfig txmgr.ListenerConfig, keyStore keystore.Eth) (txmgr.TxManager, error) { lggr := logger.Test(t) - lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), ethClient, lggr, 100*time.Millisecond, false, 2, 3, 2, 1000) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr), ethClient, lggr, 100*time.Millisecond, false, 2, 3, 2, 1000) // logic for building components (from evm/evm_txm.go) ------- lggr.Infow("Initializing EVM transaction manager", diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go index 5ef06f1bd08..355ec529b8e 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go @@ -30,12 +30,10 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/internal/cltest" "github.com/smartcontractkit/chainlink/v2/core/internal/cltest/heavyweight" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" - "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" evmregistry21 "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) func TestIntegration_LogEventProvider(t *testing.T) { @@ -315,7 +313,7 @@ func TestIntegration_LogEventProvider_RateLimit(t *testing.T) { { // total block history at this point should be 566 var minimumBlockCount int64 = 500 - latestBlock, _ := lp.LatestBlock() + latestBlock, _ := lp.LatestBlock(ctx) assert.GreaterOrEqual(t, latestBlock.BlockNumber, minimumBlockCount, "to ensure the integrety of the test, the minimum block count before the test should be %d but got %d", minimumBlockCount, latestBlock) } @@ -562,7 +560,7 @@ func waitLogPoller(ctx context.Context, t *testing.T, backend *backends.Simulate require.NoError(t, err) latestBlock := b.Number().Int64() for { - latestPolled, lberr := lp.LatestBlock(pg.WithParentCtx(ctx)) + latestPolled, lberr := lp.LatestBlock(ctx) require.NoError(t, lberr) if latestPolled.BlockNumber >= latestBlock { break @@ -660,7 +658,7 @@ func setupDependencies(t *testing.T, db *sqlx.DB, backend *backends.SimulatedBac 
ethClient := evmclient.NewSimulatedBackendClient(t, backend, big.NewInt(1337)) pollerLggr := logger.TestLogger(t) pollerLggr.SetLogLevel(zapcore.WarnLevel) - lorm := logpoller.NewORM(big.NewInt(1337), db, pollerLggr, pgtest.NewQConfig(false)) + lorm := logpoller.NewORM(big.NewInt(1337), db, pollerLggr) lp := logpoller.NewLogPoller(lorm, ethClient, pollerLggr, 100*time.Millisecond, false, 1, 2, 2, 1000) return lp, ethClient } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go index eadd0446da8..b07698d3172 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go @@ -28,7 +28,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/mocks" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) func TestLogRecoverer_GetRecoverables(t *testing.T) { @@ -607,7 +606,7 @@ func TestLogRecoverer_GetProposalData(t *testing.T) { }, }, logPoller: &mockLogPoller{ - LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + LatestBlockFn: func(ctx context.Context) (int64, error) { return 0, errors.New("latest block boom") }, }, @@ -630,7 +629,7 @@ func TestLogRecoverer_GetProposalData(t *testing.T) { }, }, logPoller: &mockLogPoller{ - LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + LatestBlockFn: func(ctx context.Context) (int64, error) { return 100, nil }, }, @@ -658,7 +657,7 @@ func TestLogRecoverer_GetProposalData(t *testing.T) { }, }, logPoller: &mockLogPoller{ - LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + LatestBlockFn: func(ctx context.Context) (int64, error) { return 100, nil 
}, }, @@ -686,7 +685,7 @@ func TestLogRecoverer_GetProposalData(t *testing.T) { }, }, logPoller: &mockLogPoller{ - LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + LatestBlockFn: func(ctx context.Context) (int64, error) { return 100, nil }, }, @@ -716,7 +715,7 @@ func TestLogRecoverer_GetProposalData(t *testing.T) { }, }, logPoller: &mockLogPoller{ - LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + LatestBlockFn: func(ctx context.Context) (int64, error) { return 100, nil }, }, @@ -747,7 +746,7 @@ func TestLogRecoverer_GetProposalData(t *testing.T) { }, }, logPoller: &mockLogPoller{ - LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + LatestBlockFn: func(ctx context.Context) (int64, error) { return 100, nil }, }, @@ -778,7 +777,7 @@ func TestLogRecoverer_GetProposalData(t *testing.T) { }, }, logPoller: &mockLogPoller{ - LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + LatestBlockFn: func(ctx context.Context) (int64, error) { return 300, nil }, }, @@ -813,7 +812,7 @@ func TestLogRecoverer_GetProposalData(t *testing.T) { }, }, logPoller: &mockLogPoller{ - LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + LatestBlockFn: func(ctx context.Context) (int64, error) { return 300, nil }, }, @@ -853,7 +852,7 @@ func TestLogRecoverer_GetProposalData(t *testing.T) { }, }, logPoller: &mockLogPoller{ - LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + LatestBlockFn: func(ctx context.Context) (int64, error) { return 300, nil }, }, @@ -885,10 +884,10 @@ func TestLogRecoverer_GetProposalData(t *testing.T) { }, }, logPoller: &mockLogPoller{ - LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + LatestBlockFn: func(ctx context.Context) (int64, error) { return 300, nil }, - LogsWithSigsFn: func(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) { + LogsWithSigsFn: func(start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { return nil, 
errors.New("logs with sigs boom") }, }, @@ -920,10 +919,10 @@ func TestLogRecoverer_GetProposalData(t *testing.T) { }, }, logPoller: &mockLogPoller{ - LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + LatestBlockFn: func(ctx context.Context) (int64, error) { return 300, nil }, - LogsWithSigsFn: func(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) { + LogsWithSigsFn: func(start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { return []logpoller.Log{ { BlockNumber: 80, @@ -968,10 +967,10 @@ func TestLogRecoverer_GetProposalData(t *testing.T) { WorkID: "7f775793422d178c90e99c3bbdf05181bc6bb6ce13170e87c92ac396bb7ddda0", }, logPoller: &mockLogPoller{ - LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + LatestBlockFn: func(ctx context.Context) (int64, error) { return 300, nil }, - LogsWithSigsFn: func(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) { + LogsWithSigsFn: func(start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { return []logpoller.Log{ { BlockNumber: 80, @@ -1019,10 +1018,10 @@ func TestLogRecoverer_GetProposalData(t *testing.T) { WorkID: "7f775793422d178c90e99c3bbdf05181bc6bb6ce13170e87c92ac396bb7ddda0", }, logPoller: &mockLogPoller{ - LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + LatestBlockFn: func(ctx context.Context) (int64, error) { return 300, nil }, - LogsWithSigsFn: func(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) { + LogsWithSigsFn: func(start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { return []logpoller.Log{ { EvmChainId: ubig.New(big.NewInt(1)), @@ -1200,15 +1199,15 @@ func (s *mockFilterStore) Has(id *big.Int) bool { type mockLogPoller struct { logpoller.LogPoller - LatestBlockFn func(qopts ...pg.QOpt) (int64, 
error) - LogsWithSigsFn func(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) + LatestBlockFn func(ctx context.Context) (int64, error) + LogsWithSigsFn func(start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) } -func (p *mockLogPoller) LogsWithSigs(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) { - return p.LogsWithSigsFn(start, end, eventSigs, address, qopts...) +func (p *mockLogPoller) LogsWithSigs(start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + return p.LogsWithSigsFn(start, end, eventSigs, address) } -func (p *mockLogPoller) LatestBlock(qopts ...pg.QOpt) (logpoller.LogPollerBlock, error) { - block, err := p.LatestBlockFn(qopts...) +func (p *mockLogPoller) LatestBlock(ctx context.Context) (logpoller.LogPollerBlock, error) { + block, err := p.LatestBlockFn(ctx) return logpoller.LogPollerBlock{BlockNumber: block}, err } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_check_pipeline_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_check_pipeline_test.go index e6b61be8d0a..54e90ddc0b4 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_check_pipeline_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_check_pipeline_test.go @@ -32,7 +32,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mocks" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) func TestRegistry_GetBlockAndUpkeepId(t *testing.T) { @@ -109,7 +108,7 @@ func TestRegistry_VerifyCheckBlock(t *testing.T) { WorkID: "work", }, 
poller: &mockLogPoller{ - GetBlocksRangeFn: func(ctx context.Context, numbers []uint64, qopts ...pg.QOpt) ([]logpoller.LogPollerBlock, error) { + GetBlocksRangeFn: func(ctx context.Context, numbers []uint64) ([]logpoller.LogPollerBlock, error) { return []logpoller.LogPollerBlock{ { BlockHash: common.HexToHash("abcdef"), @@ -133,7 +132,7 @@ func TestRegistry_VerifyCheckBlock(t *testing.T) { WorkID: "work", }, poller: &mockLogPoller{ - GetBlocksRangeFn: func(ctx context.Context, numbers []uint64, qopts ...pg.QOpt) ([]logpoller.LogPollerBlock, error) { + GetBlocksRangeFn: func(ctx context.Context, numbers []uint64) ([]logpoller.LogPollerBlock, error) { return []logpoller.LogPollerBlock{ { BlockHash: common.HexToHash("0x5bff03de234fe771ac0d685f9ee0fb0b757ea02ec9e6f10e8e2ee806db1b6b83"), @@ -157,7 +156,7 @@ func TestRegistry_VerifyCheckBlock(t *testing.T) { WorkID: "work", }, poller: &mockLogPoller{ - GetBlocksRangeFn: func(ctx context.Context, numbers []uint64, qopts ...pg.QOpt) ([]logpoller.LogPollerBlock, error) { + GetBlocksRangeFn: func(ctx context.Context, numbers []uint64) ([]logpoller.LogPollerBlock, error) { return []logpoller.LogPollerBlock{ { BlockHash: common.HexToHash("0xcba5cf9e2bb32373c76015384e1098912d9510a72481c78057fcb088209167de"), @@ -215,16 +214,16 @@ func TestRegistry_VerifyCheckBlock(t *testing.T) { type mockLogPoller struct { logpoller.LogPoller - GetBlocksRangeFn func(ctx context.Context, numbers []uint64, qopts ...pg.QOpt) ([]logpoller.LogPollerBlock, error) - IndexedLogsFn func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) + GetBlocksRangeFn func(ctx context.Context, numbers []uint64) ([]logpoller.LogPollerBlock, error) + IndexedLogsFn func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) } -func (p *mockLogPoller) GetBlocksRange(ctx 
context.Context, numbers []uint64, qopts ...pg.QOpt) ([]logpoller.LogPollerBlock, error) { - return p.GetBlocksRangeFn(ctx, numbers, qopts...) +func (p *mockLogPoller) GetBlocksRange(ctx context.Context, numbers []uint64) ([]logpoller.LogPollerBlock, error) { + return p.GetBlocksRangeFn(ctx, numbers) } -func (p *mockLogPoller) IndexedLogs(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { - return p.IndexedLogsFn(eventSig, address, topicIndex, topicValues, confs, qopts...) +func (p *mockLogPoller) IndexedLogs(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { + return p.IndexedLogsFn(eventSig, address, topicIndex, topicValues, confs) } func TestRegistry_VerifyLogExists(t *testing.T) { @@ -486,7 +485,7 @@ func TestRegistry_CheckUpkeeps(t *testing.T) { }, receipts: map[string]*types.Receipt{}, poller: &mockLogPoller{ - GetBlocksRangeFn: func(ctx context.Context, numbers []uint64, qopts ...pg.QOpt) ([]logpoller.LogPollerBlock, error) { + GetBlocksRangeFn: func(ctx context.Context, numbers []uint64) ([]logpoller.LogPollerBlock, error) { return []logpoller.LogPollerBlock{ { BlockHash: common.HexToHash("0xcba5cf9e2bb32373c76015384e1098912d9510a72481c78057fcb088209167de"), diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go index dc48c3d75f6..263dcbc67c1 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go @@ -27,7 +27,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding" 
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) func TestPollLogs(t *testing.T) { @@ -221,7 +220,7 @@ func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { }, }, poller: &mockLogPoller{ - IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { if eventSig == (iregistry21.IKeeperRegistryMasterUpkeepUnpaused{}.Topic()) { return nil, errors.New("indexed logs boom") } @@ -246,7 +245,7 @@ func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { }, }, poller: &mockLogPoller{ - IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { if eventSig == (iregistry21.IKeeperRegistryMasterUpkeepTriggerConfigSet{}.Topic()) { return nil, errors.New("indexed logs boom") } @@ -271,7 +270,7 @@ func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { }, }, poller: &mockLogPoller{ - IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { return []logpoller.Log{ {}, }, nil @@ -303,7 +302,7 @@ func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { }, }, poller: &mockLogPoller{ 
- IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { return []logpoller.Log{ { BlockNumber: 1, @@ -357,7 +356,7 @@ func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { }, }, poller: &mockLogPoller{ - IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { return []logpoller.Log{ { BlockNumber: 2, @@ -409,7 +408,7 @@ func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { }, }, poller: &mockLogPoller{ - IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { return []logpoller.Log{ { BlockNumber: 2, @@ -463,7 +462,7 @@ func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { }, }, poller: &mockLogPoller{ - IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { return []logpoller.Log{ { BlockNumber: 2, diff --git a/core/services/promreporter/prom_reporter_test.go 
b/core/services/promreporter/prom_reporter_test.go index 60d6d9388fa..a64c52aceaa 100644 --- a/core/services/promreporter/prom_reporter_test.go +++ b/core/services/promreporter/prom_reporter_test.go @@ -38,7 +38,7 @@ func newLegacyChainContainer(t *testing.T, db *sqlx.DB) legacyevm.LegacyChainCon ethClient := evmtest.NewEthClientMockWithDefaultChain(t) estimator := gas.NewEstimator(logger.TestLogger(t), ethClient, config, evmConfig.GasEstimator()) lggr := logger.TestLogger(t) - lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), ethClient, lggr, 100*time.Millisecond, false, 2, 3, 2, 1000) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr), ethClient, lggr, 100*time.Millisecond, false, 2, 3, 2, 1000) txm, err := txmgr.NewTxm( db, diff --git a/core/services/relay/evm/chain_reader_test.go b/core/services/relay/evm/chain_reader_test.go index 02e9d4e3f6a..aaa636051ed 100644 --- a/core/services/relay/evm/chain_reader_test.go +++ b/core/services/relay/evm/chain_reader_test.go @@ -257,7 +257,7 @@ func (it *chainReaderInterfaceTester) GetChainReader(t *testing.T) clcommontypes lggr := logger.NullLogger db := pgtest.NewSqlxDB(t) - lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.SimulatedChainID, db, lggr, pgtest.NewQConfig(true)), it.chain.Client(), lggr, time.Millisecond, false, 0, 1, 1, 10000) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.SimulatedChainID, db, lggr), it.chain.Client(), lggr, time.Millisecond, false, 0, 1, 1, 10000) require.NoError(t, lp.Start(ctx)) it.chain.On("LogPoller").Return(lp) cr, err := evm.NewChainReaderService(lggr, lp, it.chain, it.chainConfig) diff --git a/core/services/relay/evm/config_poller_test.go b/core/services/relay/evm/config_poller_test.go index cd66e5479bf..9caf09612e6 100644 --- a/core/services/relay/evm/config_poller_test.go +++ b/core/services/relay/evm/config_poller_test.go @@ -86,9 +86,8 @@ func TestConfigPoller(t 
*testing.T) { b.Commit() db := pgtest.NewSqlxDB(t) - cfg := pgtest.NewQConfig(false) ethClient = evmclient.NewSimulatedBackendClient(t, b, testutils.SimulatedChainID) - lorm := logpoller.NewORM(testutils.SimulatedChainID, db, lggr, cfg) + lorm := logpoller.NewORM(testutils.SimulatedChainID, db, lggr) lp = logpoller.NewLogPoller(lorm, ethClient, lggr, 100*time.Millisecond, false, 1, 2, 2, 1000) servicetest.Run(t, lp) } diff --git a/core/services/relay/evm/functions/config_poller_test.go b/core/services/relay/evm/functions/config_poller_test.go index 2cf373d2e86..ef49f7bec42 100644 --- a/core/services/relay/evm/functions/config_poller_test.go +++ b/core/services/relay/evm/functions/config_poller_test.go @@ -76,11 +76,10 @@ func runTest(t *testing.T, pluginType functions.FunctionsPluginType, expectedDig b.Commit() db := pgtest.NewSqlxDB(t) defer db.Close() - cfg := pgtest.NewQConfig(false) ethClient := evmclient.NewSimulatedBackendClient(t, b, big.NewInt(1337)) defer ethClient.Close() lggr := logger.TestLogger(t) - lorm := logpoller.NewORM(big.NewInt(1337), db, lggr, cfg) + lorm := logpoller.NewORM(big.NewInt(1337), db, lggr) lp := logpoller.NewLogPoller(lorm, ethClient, lggr, 100*time.Millisecond, false, 1, 2, 2, 1000) servicetest.Run(t, lp) configPoller, err := functions.NewFunctionsConfigPoller(pluginType, lp, lggr) diff --git a/core/services/relay/evm/mercury/helpers_test.go b/core/services/relay/evm/mercury/helpers_test.go index f1686ee00c8..86645b78f6b 100644 --- a/core/services/relay/evm/mercury/helpers_test.go +++ b/core/services/relay/evm/mercury/helpers_test.go @@ -163,10 +163,9 @@ func SetupTH(t *testing.T, feedID common.Hash) TestHarness { b.Commit() db := pgtest.NewSqlxDB(t) - cfg := pgtest.NewQConfig(false) ethClient := evmclient.NewSimulatedBackendClient(t, b, big.NewInt(1337)) lggr := logger.TestLogger(t) - lorm := logpoller.NewORM(big.NewInt(1337), db, lggr, cfg) + lorm := logpoller.NewORM(big.NewInt(1337), db, lggr) lp := logpoller.NewLogPoller(lorm, 
ethClient, lggr, 100*time.Millisecond, false, 1, 2, 2, 1000) servicetest.Run(t, lp) diff --git a/core/services/vrf/v2/listener_v2_log_listener_test.go b/core/services/vrf/v2/listener_v2_log_listener_test.go index 6f5177c230a..c21af8db14c 100644 --- a/core/services/vrf/v2/listener_v2_log_listener_test.go +++ b/core/services/vrf/v2/listener_v2_log_listener_test.go @@ -30,7 +30,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/keystore" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" "github.com/smartcontractkit/chainlink/v2/core/services/vrf/vrfcommon" "github.com/smartcontractkit/chainlink/v2/core/testdata/testspecs" "github.com/smartcontractkit/chainlink/v2/core/utils" @@ -68,7 +67,7 @@ func setupVRFLogPollerListenerTH(t *testing.T, chainID := testutils.NewRandomEVMChainID() db := pgtest.NewSqlxDB(t) - o := logpoller.NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) + o := logpoller.NewORM(chainID, db, lggr) owner := testutils.MustNewSimTransactor(t) ethDB := rawdb.NewMemoryDatabase() ec := backends.NewSimulatedBackendWithDatabase(ethDB, map[common.Address]core.GenesisAccount{ @@ -211,8 +210,7 @@ func TestInitProcessedBlock_NoVRFReqs(t *testing.T) { require.NoError(t, th.LogPoller.Replay(testutils.Context(t), 4)) // Should return logs from block 5 to 7 (inclusive) - logs, err := th.LogPoller.Logs(4, 7, emitterABI.Events["Log1"].ID, th.EmitterAddress, - pg.WithParentCtx(testutils.Context(t))) + logs, err := th.LogPoller.Logs(4, 7, emitterABI.Events["Log1"].ID, th.EmitterAddress) require.NoError(t, err) require.Equal(t, 3, len(logs)) diff --git a/core/store/migrate/migrate_test.go b/core/store/migrate/migrate_test.go index 56d1fe41eb5..70bc651fa0a 100644 --- a/core/store/migrate/migrate_test.go +++ b/core/store/migrate/migrate_test.go @@ -445,21 +445,22 @@ func TestSetMigrationENVVars(t *testing.T) { func 
TestDatabaseBackFillWithMigration202(t *testing.T) { _, db := heavyweight.FullTestDBEmptyV2(t, nil) + ctx := testutils.Context(t) err := goose.UpTo(db.DB, migrationDir, 201) require.NoError(t, err) - simulatedOrm := logpoller.NewORM(testutils.SimulatedChainID, db, logger.TestLogger(t), pgtest.NewQConfig(true)) - require.NoError(t, simulatedOrm.InsertBlock(testutils.Random32Byte(), 10, time.Now(), 0), err) - require.NoError(t, simulatedOrm.InsertBlock(testutils.Random32Byte(), 51, time.Now(), 0), err) - require.NoError(t, simulatedOrm.InsertBlock(testutils.Random32Byte(), 90, time.Now(), 0), err) - require.NoError(t, simulatedOrm.InsertBlock(testutils.Random32Byte(), 120, time.Now(), 23), err) + simulatedOrm := logpoller.NewORM(testutils.SimulatedChainID, db, logger.TestLogger(t)) + require.NoError(t, simulatedOrm.InsertBlock(ctx, testutils.Random32Byte(), 10, time.Now(), 0), err) + require.NoError(t, simulatedOrm.InsertBlock(ctx, testutils.Random32Byte(), 51, time.Now(), 0), err) + require.NoError(t, simulatedOrm.InsertBlock(ctx, testutils.Random32Byte(), 90, time.Now(), 0), err) + require.NoError(t, simulatedOrm.InsertBlock(ctx, testutils.Random32Byte(), 120, time.Now(), 23), err) - baseOrm := logpoller.NewORM(big.NewInt(int64(84531)), db, logger.TestLogger(t), pgtest.NewQConfig(true)) - require.NoError(t, baseOrm.InsertBlock(testutils.Random32Byte(), 400, time.Now(), 0), err) + baseOrm := logpoller.NewORM(big.NewInt(int64(84531)), db, logger.TestLogger(t)) + require.NoError(t, baseOrm.InsertBlock(ctx, testutils.Random32Byte(), 400, time.Now(), 0), err) - klaytnOrm := logpoller.NewORM(big.NewInt(int64(1001)), db, logger.TestLogger(t), pgtest.NewQConfig(true)) - require.NoError(t, klaytnOrm.InsertBlock(testutils.Random32Byte(), 100, time.Now(), 0), err) + klaytnOrm := logpoller.NewORM(big.NewInt(int64(1001)), db, logger.TestLogger(t)) + require.NoError(t, klaytnOrm.InsertBlock(ctx, testutils.Random32Byte(), 100, time.Now(), 0), err) err = goose.UpTo(db.DB, 
migrationDir, 202) require.NoError(t, err) @@ -509,7 +510,7 @@ func TestDatabaseBackFillWithMigration202(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - block, err := tt.orm.SelectBlockByNumber(tt.blockNumber) + block, err := tt.orm.SelectBlockByNumber(ctx, tt.blockNumber) require.NoError(t, err) require.Equal(t, tt.expectedFinalizedBlock, block.FinalizedBlockNumber) }) From 3598fb920de05b4e029f29c339b5468a94faf39b Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 15 Feb 2024 13:00:04 -0500 Subject: [PATCH 12/65] Update orm.go --- core/chains/evm/headtracker/orm.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/core/chains/evm/headtracker/orm.go b/core/chains/evm/headtracker/orm.go index dcd949b3425..218789b02d5 100644 --- a/core/chains/evm/headtracker/orm.go +++ b/core/chains/evm/headtracker/orm.go @@ -82,9 +82,7 @@ func (orm *orm) LatestHead(ctx context.Context) (head *evmtypes.Head, err error) head = new(evmtypes.Head) ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() - if err = orm.db.GetContext(ctx, head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC, created_at DESC, id DESC LIMIT 1`, orm.chainID); err != nil { - return nil, err - } + err = orm.db.GetContext(ctx, head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC, created_at DESC, id DESC LIMIT 1`, orm.chainID) if errors.Is(err, sql.ErrNoRows) { return nil, nil } From 3794e2f271e2498d584ed8665e6a5fb9f68983e3 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 15 Feb 2024 13:30:33 -0500 Subject: [PATCH 13/65] Update logpoller_wrapper_test.go --- core/services/relay/evm/functions/logpoller_wrapper_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/core/services/relay/evm/functions/logpoller_wrapper_test.go b/core/services/relay/evm/functions/logpoller_wrapper_test.go index 9df285b4c25..c8c41bf4d4b 100644 --- 
a/core/services/relay/evm/functions/logpoller_wrapper_test.go +++ b/core/services/relay/evm/functions/logpoller_wrapper_test.go @@ -86,7 +86,7 @@ func getMockedRequestLog(t *testing.T) logpoller.Log { func TestLogPollerWrapper_SingleSubscriberEmptyEvents(t *testing.T) { t.Parallel() lp, lpWrapper, client := setUp(t, 100_000) // check only once - lp.On("LatestBlock").Return(logpoller.LogPollerBlock{BlockNumber: int64(100)}, nil) + lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{BlockNumber: int64(100)}, nil) lp.On("Logs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]logpoller.Log{}, nil) client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(addr(t, "01"), nil) @@ -106,7 +106,7 @@ func TestLogPollerWrapper_SingleSubscriberEmptyEvents(t *testing.T) { func TestLogPollerWrapper_ErrorOnZeroAddresses(t *testing.T) { t.Parallel() lp, lpWrapper, client := setUp(t, 100_000) // check only once - lp.On("LatestBlock").Return(logpoller.LogPollerBlock{BlockNumber: int64(100)}, nil) + lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{BlockNumber: int64(100)}, nil) client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(addr(t, "00"), nil) @@ -118,7 +118,7 @@ func TestLogPollerWrapper_ErrorOnZeroAddresses(t *testing.T) { func TestLogPollerWrapper_LatestEvents_ReorgHandling(t *testing.T) { t.Parallel() lp, lpWrapper, client := setUp(t, 100_000) - lp.On("LatestBlock").Return(logpoller.LogPollerBlock{BlockNumber: int64(100)}, nil) + lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{BlockNumber: int64(100)}, nil) client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(addr(t, "01"), nil) lp.On("RegisterFilter", mock.Anything).Return(nil) subscriber := newSubscriber(1) From 99eb21cf095d26669bc695c4a05369a3c47e0f9a Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 15 Feb 2024 13:50:26 -0500 Subject: [PATCH 14/65] Update 
log_poller_test.go --- core/chains/evm/logpoller/log_poller_test.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/core/chains/evm/logpoller/log_poller_test.go b/core/chains/evm/logpoller/log_poller_test.go index 34c819b7ea7..b3d89d7ca61 100644 --- a/core/chains/evm/logpoller/log_poller_test.go +++ b/core/chains/evm/logpoller/log_poller_test.go @@ -1205,12 +1205,11 @@ func TestLogPoller_GetBlocks_Range(t *testing.T) { require.Error(t, err) assert.Contains(t, err.Error(), "context canceled") - // test still works when qopts is cancelled - // but context object is not + // test canceled ctx ctx, cancel = context.WithCancel(testutils.Context(t)) cancel() _, err = th.LogPoller.GetBlocksRange(ctx, blockNums) - require.NoError(t, err) + require.Equal(t, err, context.Canceled) } func TestGetReplayFromBlock(t *testing.T) { From 0d830ec97a9a381b01204d18b6e57cd2bc79d0f8 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 15 Feb 2024 15:08:26 -0500 Subject: [PATCH 15/65] Remove query --- core/chains/evm/logpoller/query.go | 148 ------------------------ core/chains/evm/logpoller/query_test.go | 86 -------------- 2 files changed, 234 deletions(-) delete mode 100644 core/chains/evm/logpoller/query.go delete mode 100644 core/chains/evm/logpoller/query_test.go diff --git a/core/chains/evm/logpoller/query.go b/core/chains/evm/logpoller/query.go deleted file mode 100644 index d76d619ac8b..00000000000 --- a/core/chains/evm/logpoller/query.go +++ /dev/null @@ -1,148 +0,0 @@ -package logpoller - -/* - -import ( - "errors" - "fmt" - "math/big" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/lib/pq" - - ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" -) - -type bytesProducer interface { - Bytes() []byte -} - -func concatBytes[T bytesProducer](byteSlice []T) pq.ByteaArray { - var output [][]byte - for _, b := range byteSlice { - output = append(output, b.Bytes()) - } - return output -} - -// queryArgs is a helper 
for building the arguments to a postgres query created by DbORM -// Besides the convenience methods, it also keeps track of arguments validation and sanitization. -type queryArgs struct { - args map[string]interface{} - err []error -} - -func newQueryArgs(chainId *big.Int) *queryArgs { - return &queryArgs{ - args: map[string]interface{}{ - "evm_chain_id": ubig.New(chainId), - }, - err: []error{}, - } -} - -func newQueryArgsForEvent(chainId *big.Int, address common.Address, eventSig common.Hash) *queryArgs { - return newQueryArgs(chainId). - withAddress(address). - withEventSig(eventSig) -} - -func (q *queryArgs) withEventSig(eventSig common.Hash) *queryArgs { - return q.withCustomHashArg("event_sig", eventSig) -} - -func (q *queryArgs) withEventSigArray(eventSigs []common.Hash) *queryArgs { - return q.withCustomArg("event_sig_array", concatBytes(eventSigs)) -} - -func (q *queryArgs) withAddress(address common.Address) *queryArgs { - return q.withCustomArg("address", address) -} - -func (q *queryArgs) withAddressArray(addresses []common.Address) *queryArgs { - return q.withCustomArg("address_array", concatBytes(addresses)) -} - -func (q *queryArgs) withStartBlock(startBlock int64) *queryArgs { - return q.withCustomArg("start_block", startBlock) -} - -func (q *queryArgs) withEndBlock(endBlock int64) *queryArgs { - return q.withCustomArg("end_block", endBlock) -} - -func (q *queryArgs) withWordIndex(wordIndex int) *queryArgs { - return q.withCustomArg("word_index", wordIndex) -} - -func (q *queryArgs) withWordValueMin(wordValueMin common.Hash) *queryArgs { - return q.withCustomHashArg("word_value_min", wordValueMin) -} - -func (q *queryArgs) withWordValueMax(wordValueMax common.Hash) *queryArgs { - return q.withCustomHashArg("word_value_max", wordValueMax) -} - -func (q *queryArgs) withWordIndexMin(wordIndex int) *queryArgs { - return q.withCustomArg("word_index_min", wordIndex) -} - -func (q *queryArgs) withWordIndexMax(wordIndex int) *queryArgs { - return 
q.withCustomArg("word_index_max", wordIndex) -} - -func (q *queryArgs) withWordValue(wordValue common.Hash) *queryArgs { - return q.withCustomHashArg("word_value", wordValue) -} - -func (q *queryArgs) withConfs(confs Confirmations) *queryArgs { - return q.withCustomArg("confs", confs) -} - -func (q *queryArgs) withTopicIndex(index int) *queryArgs { - // Only topicIndex 1 through 3 is valid. 0 is the event sig and only 4 total topics are allowed - if !(index == 1 || index == 2 || index == 3) { - q.err = append(q.err, fmt.Errorf("invalid index for topic: %d", index)) - } - // Add 1 since postgresql arrays are 1-indexed. - return q.withCustomArg("topic_index", index+1) -} - -func (q *queryArgs) withTopicValueMin(valueMin common.Hash) *queryArgs { - return q.withCustomHashArg("topic_value_min", valueMin) -} - -func (q *queryArgs) withTopicValueMax(valueMax common.Hash) *queryArgs { - return q.withCustomHashArg("topic_value_max", valueMax) -} - -func (q *queryArgs) withTopicValues(values []common.Hash) *queryArgs { - return q.withCustomArg("topic_values", concatBytes(values)) -} - -func (q *queryArgs) withBlockTimestampAfter(after time.Time) *queryArgs { - return q.withCustomArg("block_timestamp_after", after) -} - -func (q *queryArgs) withTxHash(hash common.Hash) *queryArgs { - return q.withCustomHashArg("tx_hash", hash) -} - -func (q *queryArgs) withCustomHashArg(name string, arg common.Hash) *queryArgs { - return q.withCustomArg(name, arg.Bytes()) -} - -func (q *queryArgs) withCustomArg(name string, arg any) *queryArgs { - q.args[name] = arg - return q -} - -func (q *queryArgs) toArgs() (map[string]interface{}, error) { - if len(q.err) > 0 { - return nil, errors.Join(q.err...) 
- } - return q.args, nil -} - -*/ diff --git a/core/chains/evm/logpoller/query_test.go b/core/chains/evm/logpoller/query_test.go deleted file mode 100644 index 1c9ce3037de..00000000000 --- a/core/chains/evm/logpoller/query_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package logpoller - -/* -import ( - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/lib/pq" - "github.com/stretchr/testify/require" - - "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" - ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" -) - -func Test_QueryArgs(t *testing.T) { - tests := []struct { - name string - queryArgs *queryArgs - want map[string]interface{} - wantErr bool - }{ - { - name: "valid arguments", - queryArgs: newQueryArgs(big.NewInt(20)).withAddress(utils.ZeroAddress), - want: map[string]interface{}{ - "evm_chain_id": ubig.NewI(20), - "address": utils.ZeroAddress, - }, - }, - { - name: "invalid topic index", - queryArgs: newQueryArgs(big.NewInt(20)).withTopicIndex(0), - wantErr: true, - }, - { - name: "custom argument", - queryArgs: newEmptyArgs().withCustomArg("arg", "value"), - want: map[string]interface{}{ - "arg": "value", - }, - }, - { - name: "hash converted to bytes", - queryArgs: newEmptyArgs().withCustomHashArg("hash", common.Hash{}), - want: map[string]interface{}{ - "hash": make([]byte, 32), - }, - }, - { - name: "hash array converted to bytes array", - queryArgs: newEmptyArgs().withEventSigArray([]common.Hash{{}, {}}), - want: map[string]interface{}{ - "event_sig_array": pq.ByteaArray{make([]byte, 32), make([]byte, 32)}, - }, - }, - { - name: "topic index incremented", - queryArgs: newEmptyArgs().withTopicIndex(2), - want: map[string]interface{}{ - "topic_index": 3, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - args, err := tt.queryArgs.toArgs() - if tt.wantErr { - require.Error(t, err) - } else { - require.NoError(t, err) - require.Equal(t, tt.want, args) - } - }) 
- } -} - -func newEmptyArgs() *queryArgs { - return &queryArgs{ - args: map[string]interface{}{}, - err: []error{}, - } -} - -*/ From 759a2fc136b3e820f04b185bb06b075ec23940bd Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 21 Feb 2024 13:07:53 -0500 Subject: [PATCH 16/65] Remove ORM timeouts --- core/chains/evm/headtracker/orm.go | 19 +------- core/chains/evm/logpoller/orm.go | 69 ------------------------------ 2 files changed, 1 insertion(+), 87 deletions(-) diff --git a/core/chains/evm/headtracker/orm.go b/core/chains/evm/headtracker/orm.go index 218789b02d5..7991ebecb73 100644 --- a/core/chains/evm/headtracker/orm.go +++ b/core/chains/evm/headtracker/orm.go @@ -3,11 +3,9 @@ package headtracker import ( "context" "database/sql" - "math/big" - "time" - "github.com/ethereum/go-ethereum/common" "github.com/pkg/errors" + "math/big" "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" @@ -30,9 +28,6 @@ type ORM interface { var _ ORM = &orm{} -// TODO: Set a reasonable timeout -const defaultTimeout = 100 * time.Millisecond - type orm struct { chainID ubig.Big db sqlutil.Queryer @@ -52,17 +47,11 @@ func (orm *orm) IdempotentInsertHead(ctx context.Context, head *evmtypes.Head) e INSERT INTO evm.heads (hash, number, parent_hash, created_at, timestamp, l1_block_number, evm_chain_id, base_fee_per_gas) VALUES ( $1, $2, $3, $4, $5, $6, $7, $8) ON CONFLICT (evm_chain_id, hash) DO NOTHING` - - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() _, err := orm.db.ExecContext(ctx, query, head.Hash, head.Number, head.ParentHash, head.CreatedAt, head.Timestamp, head.L1BlockNumber, orm.chainID, head.BaseFeePerGas) - return errors.Wrap(err, "IdempotentInsertHead failed to insert head") } func (orm *orm) TrimOldHeads(ctx context.Context, n uint) (err error) { - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() _, err = orm.db.ExecContext(ctx, ` DELETE 
FROM evm.heads WHERE evm_chain_id = $1 AND number < ( @@ -80,8 +69,6 @@ func (orm *orm) TrimOldHeads(ctx context.Context, n uint) (err error) { func (orm *orm) LatestHead(ctx context.Context) (head *evmtypes.Head, err error) { head = new(evmtypes.Head) - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() err = orm.db.GetContext(ctx, head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC, created_at DESC, id DESC LIMIT 1`, orm.chainID) if errors.Is(err, sql.ErrNoRows) { return nil, nil @@ -91,8 +78,6 @@ func (orm *orm) LatestHead(ctx context.Context) (head *evmtypes.Head, err error) } func (orm *orm) LatestHeads(ctx context.Context, limit uint) (heads []*evmtypes.Head, err error) { - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() err = orm.db.SelectContext(ctx, &heads, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC, created_at DESC, id DESC LIMIT $2`, orm.chainID, limit) err = errors.Wrap(err, "LatestHeads failed") return @@ -100,8 +85,6 @@ func (orm *orm) LatestHeads(ctx context.Context, limit uint) (heads []*evmtypes. func (orm *orm) HeadByHash(ctx context.Context, hash common.Hash) (head *evmtypes.Head, err error) { head = new(evmtypes.Head) - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() err = orm.db.GetContext(ctx, head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 AND hash = $2`, orm.chainID, hash) if errors.Is(err, sql.ErrNoRows) { return nil, nil diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index d7efdf11442..2c79e8ed326 100644 --- a/core/chains/evm/logpoller/orm.go +++ b/core/chains/evm/logpoller/orm.go @@ -19,9 +19,6 @@ import ( ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" ) -// TODO: Set a reasonable timeout -const defaultTimeout = 10 * time.Second - // ORM represents the persistent data access layer used by the log poller. 
At this moment, it's a bit leaky abstraction, because // it exposes some of the database implementation details (e.g. pg.Q). Ideally it should be agnostic and could be applied to any persistence layer. // What is more, LogPoller should not be aware of the underlying database implementation and delegate all the queries to the ORM. @@ -89,8 +86,6 @@ func (o *DbORM) InsertBlock(ctx context.Context, blockHash common.Hash, blockNum query := `INSERT INTO evm.log_poller_blocks (evm_chain_id, block_hash, block_number, block_timestamp, finalized_block_number, created_at) VALUES ($1, $2, $3, $4, $5, NOW()) ON CONFLICT DO NOTHING` - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() _, err := o.db.ExecContext(ctx, query, ubig.New(o.chainID), blockHash.Bytes(), blockNumber, blockTimestamp, finalizedBlock) return err } @@ -112,16 +107,12 @@ func (o *DbORM) InsertFilter(ctx context.Context, filter Filter) (err error) { ON CONFLICT (name, evm_chain_id, address, event) DO UPDATE SET retention=$3 ::BIGINT` - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() _, err = o.db.ExecContext(ctx, query, filter.Name, ubig.New(o.chainID), filter.Retention, concatBytes(filter.Addresses), concatBytes(filter.EventSigs)) return err } // DeleteFilter removes all events,address pairs associated with the Filter func (o *DbORM) DeleteFilter(ctx context.Context, name string) error { - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() _, err := o.db.ExecContext(ctx, `DELETE FROM evm.log_poller_filters WHERE name = $1 AND evm_chain_id = $2`, name, ubig.New(o.chainID)) @@ -137,8 +128,6 @@ func (o *DbORM) LoadFilters(ctx context.Context) (map[string]Filter, error) { MAX(retention) AS retention FROM evm.log_poller_filters WHERE evm_chain_id = $1 GROUP BY name` - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() var rows []Filter err := o.db.SelectContext(ctx, &rows, query, ubig.New(o.chainID)) filters := 
make(map[string]Filter) @@ -150,8 +139,6 @@ func (o *DbORM) LoadFilters(ctx context.Context) (map[string]Filter, error) { func (o *DbORM) SelectBlockByHash(ctx context.Context, hash common.Hash) (*LogPollerBlock, error) { var b LogPollerBlock - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() if err := o.db.GetContext(ctx, &b, `SELECT * FROM evm.log_poller_blocks WHERE block_hash = $1 AND evm_chain_id = $2`, hash.Bytes(), ubig.New(o.chainID)); err != nil { return nil, err } @@ -160,8 +147,6 @@ func (o *DbORM) SelectBlockByHash(ctx context.Context, hash common.Hash) (*LogPo func (o *DbORM) SelectBlockByNumber(ctx context.Context, n int64) (*LogPollerBlock, error) { var b LogPollerBlock - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() if err := o.db.GetContext(ctx, &b, `SELECT * FROM evm.log_poller_blocks WHERE block_number = $1 AND evm_chain_id = $2`, n, ubig.New(o.chainID)); err != nil { return nil, err } @@ -170,8 +155,6 @@ func (o *DbORM) SelectBlockByNumber(ctx context.Context, n int64) (*LogPollerBlo func (o *DbORM) SelectLatestBlock(ctx context.Context) (*LogPollerBlock, error) { var b LogPollerBlock - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() if err := o.db.GetContext(ctx, &b, `SELECT * FROM evm.log_poller_blocks WHERE evm_chain_id = $1 ORDER BY block_number DESC LIMIT 1`, ubig.New(o.chainID)); err != nil { return nil, err } @@ -189,8 +172,6 @@ func (o *DbORM) SelectLatestLogByEventSigWithConfs(ctx context.Context, eventSig nestedBlockNumberQuery(confs, ubig.New(o.chainID))) var l Log - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() if err := o.db.GetContext(ctx, &l, query, ubig.New(o.chainID), eventSig.Bytes(), address); err != nil { return nil, err } @@ -199,8 +180,6 @@ func (o *DbORM) SelectLatestLogByEventSigWithConfs(ctx context.Context, eventSig // DeleteBlocksBefore delete all blocks before and including end. 
func (o *DbORM) DeleteBlocksBefore(ctx context.Context, end int64) error { - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() _, err := o.db.ExecContext(ctx, `DELETE FROM evm.log_poller_blocks WHERE block_number <= $1 AND evm_chain_id = $2`, end, ubig.New(o.chainID)) return err } @@ -208,8 +187,6 @@ func (o *DbORM) DeleteBlocksBefore(ctx context.Context, end int64) error { func (o *DbORM) DeleteLogsAndBlocksAfter(ctx context.Context, start int64) error { // These deletes are bounded by reorg depth, so they are // fast and should not slow down the log readers. - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() return o.Transaction(ctx, func(orm *DbORM) error { // Applying upper bound filter is critical for Postgres performance (especially for evm.logs table) // because it allows the planner to properly estimate the number of rows to be scanned. @@ -250,10 +227,6 @@ type Exp struct { } func (o *DbORM) DeleteExpiredLogs(ctx context.Context) error { - // TODO: LongQueryTimeout? 
- ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() - _, err := o.db.ExecContext(ctx, `WITH r AS ( SELECT address, event, MAX(retention) AS retention FROM evm.log_poller_filters WHERE evm_chain_id=$1 @@ -270,9 +243,6 @@ func (o *DbORM) InsertLogs(ctx context.Context, logs []Log) error { if err := o.validateLogs(logs); err != nil { return err } - - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() return o.Transaction(ctx, func(orm *DbORM) error { return o.insertLogsWithinTx(ctx, logs, orm.db.(*sqlx.Tx)) }) @@ -288,8 +258,6 @@ func (o *DbORM) InsertLogsWithBlock(ctx context.Context, logs []Log, block LogPo return err } - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() // Block and logs goes with the same TX to ensure atomicity return o.Transaction(ctx, func(orm *DbORM) error { if err := o.insertBlockWithinTx(ctx, orm.db.(*sqlx.Tx), block.BlockHash, block.BlockNumber, block.BlockTimestamp, block.FinalizedBlockNumber); err != nil { @@ -303,8 +271,6 @@ func (o *DbORM) insertBlockWithinTx(ctx context.Context, tx *sqlx.Tx, blockHash query := `INSERT INTO evm.log_poller_blocks (evm_chain_id, block_hash, block_number, block_timestamp, finalized_block_number, created_at) VALUES ($1, $2, $3, $4, $5, NOW()) ON CONFLICT DO NOTHING` - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() _, err := tx.ExecContext(ctx, query, ubig.New(o.chainID), blockHash.Bytes(), blockNumber, blockTimestamp, finalizedBlock) return err } @@ -350,8 +316,6 @@ func (o *DbORM) validateLogs(logs []Log) error { func (o *DbORM) SelectLogsByBlockRange(ctx context.Context, start, end int64) ([]Log, error) { var logs []Log - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() err := o.db.SelectContext(ctx, &logs, ` SELECT * FROM evm.logs WHERE evm_chain_id = $1 @@ -367,8 +331,6 @@ func (o *DbORM) SelectLogsByBlockRange(ctx context.Context, start, end int64) ([ // SelectLogs finds the logs in a 
given block range. func (o *DbORM) SelectLogs(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash) ([]Log, error) { var logs []Log - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() err := o.db.SelectContext(ctx, &logs, ` SELECT * FROM evm.logs WHERE evm_chain_id = $1 @@ -396,8 +358,6 @@ func (o *DbORM) SelectLogsCreatedAfter(ctx context.Context, address common.Addre nestedBlockNumberQuery(confs, ubig.New(o.chainID))) var logs []Log - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), after); err != nil { return nil, err } @@ -407,8 +367,6 @@ func (o *DbORM) SelectLogsCreatedAfter(ctx context.Context, address common.Addre // SelectLogsWithSigs finds the logs in the given block range with the given event signatures // emitted from the given address. func (o *DbORM) SelectLogsWithSigs(ctx context.Context, start, end int64, address common.Address, eventSigs []common.Hash) (logs []Log, err error) { - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() err = o.db.SelectContext(ctx, &logs, ` SELECT * FROM evm.logs WHERE evm_chain_id = $1 @@ -424,8 +382,6 @@ func (o *DbORM) SelectLogsWithSigs(ctx context.Context, start, end int64, addres func (o *DbORM) GetBlocksRange(ctx context.Context, start int64, end int64) ([]LogPollerBlock, error) { var blocks []LogPollerBlock - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() err := o.db.SelectContext(ctx, &blocks, ` SELECT * FROM evm.log_poller_blocks WHERE block_number >= $1 @@ -454,8 +410,6 @@ func (o *DbORM) SelectLatestLogEventSigsAddrsWithConfs(ctx context.Context, from ORDER BY block_number ASC`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) var logs []Log - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), 
concatBytes(eventSigs), concatBytes(addresses), fromBlock); err != nil { return nil, errors.Wrap(err, "failed to execute query") } @@ -472,8 +426,6 @@ func (o *DbORM) SelectLatestBlockByEventSigsAddrsWithConfs(ctx context.Context, AND block_number > $4 AND block_number <= %s`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) var blockNumber int64 - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() if err := o.db.GetContext(ctx, &blockNumber, query, ubig.New(o.chainID), concatBytes(eventSigs), concatBytes(addresses), fromBlock); err != nil { return 0, err } @@ -490,8 +442,6 @@ func (o *DbORM) SelectLogsDataWordRange(ctx context.Context, address common.Addr AND block_number <= %s ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) var logs []Log - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), wordIndex, wordValueMin.Bytes(), wordValueMax.Bytes()); err != nil { return nil, err } @@ -508,8 +458,6 @@ func (o *DbORM) SelectLogsDataWordGreaterThan(ctx context.Context, address commo AND block_number <= %s ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) var logs []Log - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), wordIndex, wordValueMin.Bytes()); err != nil { return nil, err } @@ -527,8 +475,6 @@ func (o *DbORM) SelectLogsDataWordBetween(ctx context.Context, address common.Ad AND block_number <= %s ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) var logs []Log - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), wordIndexMin, wordValue.Bytes(), wordIndexMax); err != nil { 
return nil, err } @@ -550,8 +496,6 @@ func (o *DbORM) SelectIndexedLogsTopicGreaterThan(ctx context.Context, address c AND block_number <= %s ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) var logs []Log - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), topicIndex, topicValueMin.Bytes()); err != nil { return nil, err } @@ -575,8 +519,6 @@ func (o *DbORM) SelectIndexedLogsTopicRange(ctx context.Context, address common. ORDER BY (evm.logs.block_number, evm.logs.log_index)`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) var logs []Log - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), topicIndex, topicValueMin.Bytes(), topicValueMax.Bytes()); err != nil { return nil, err } @@ -599,8 +541,6 @@ func (o *DbORM) SelectIndexedLogs(ctx context.Context, address common.Address, e ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) var logs []Log - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), topicIndex, concatBytes(topicValues)); err != nil { return nil, err } @@ -615,9 +555,6 @@ func (o *DbORM) SelectIndexedLogsByBlockRange(ctx context.Context, start, end in } var logs []Log - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() - err = o.db.SelectContext(ctx, &logs, ` SELECT * FROM evm.logs WHERE evm_chain_id = $1 @@ -651,8 +588,6 @@ func (o *DbORM) SelectIndexedLogsCreatedAfter(ctx context.Context, address commo ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) var logs []Log - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() if err := 
o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), topicIndex, concatBytes(topicValues), after); err != nil { return nil, err } @@ -661,8 +596,6 @@ func (o *DbORM) SelectIndexedLogsCreatedAfter(ctx context.Context, address commo func (o *DbORM) SelectIndexedLogsByTxHash(ctx context.Context, address common.Address, eventSig common.Hash, txHash common.Hash) ([]Log, error) { var logs []Log - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() err := o.db.SelectContext(ctx, &logs, ` SELECT * FROM evm.logs WHERE evm_chain_id = $1 @@ -704,8 +637,6 @@ func (o *DbORM) SelectIndexedLogsWithSigsExcluding(ctx context.Context, sigA, si ORDER BY block_number,log_index ASC`, nestedQuery, nestedQuery) var logs []Log - ctx, cancel := context.WithTimeout(ctx, defaultTimeout) - defer cancel() if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, sigA.Bytes(), sigB.Bytes(), startBlock, endBlock, topicIndex); err != nil { return nil, err } From 0d7636055c9e457e415776ac23e426b8e056f58e Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 21 Feb 2024 14:11:46 -0500 Subject: [PATCH 17/65] Add context --- .../evm/forwarders/forwarder_manager.go | 2 + core/chains/evm/headtracker/orm.go | 3 +- core/chains/evm/logpoller/disabled.go | 36 +- core/chains/evm/logpoller/log_poller.go | 115 ++++--- .../evm/logpoller/log_poller_internal_test.go | 20 +- core/chains/evm/logpoller/log_poller_test.go | 70 ++-- core/chains/evm/logpoller/mocks/log_poller.go | 308 +++++++++--------- core/services/blockhashstore/coordinators.go | 12 +- core/services/blockhashstore/feeder_test.go | 11 +- core/services/ocr2/delegate.go | 5 +- .../evmregistry/v20/log_provider.go | 8 +- .../ocr2keeper/evmregistry/v20/registry.go | 7 +- .../evmregistry/v21/logprovider/provider.go | 2 +- .../v21/logprovider/provider_life_cycle.go | 8 +- .../evmregistry/v21/logprovider/recoverer.go | 4 +- .../v21/logprovider/recoverer_test.go | 14 +- 
.../ocr2keeper/evmregistry/v21/registry.go | 13 +- .../v21/registry_check_pipeline_test.go | 6 +- .../evmregistry/v21/registry_test.go | 14 +- .../v21/transmit/event_provider.go | 5 +- .../evmregistry/v21/upkeepstate/scanner.go | 6 +- .../ocr2vrf/coordinator/coordinator.go | 8 +- core/services/relay/evm/config_poller.go | 8 +- .../relay/evm/contract_transmitter.go | 7 +- core/services/relay/evm/event_binding.go | 12 +- .../relay/evm/functions/config_poller.go | 10 +- .../evm/functions/contract_transmitter.go | 7 +- .../relay/evm/functions/logpoller_wrapper.go | 10 +- .../relay/evm/mercury/config_poller.go | 8 +- .../vrf/v2/listener_v2_log_listener.go | 6 +- .../vrf/v2/listener_v2_log_listener_test.go | 6 +- core/web/evm_forwarders_controller.go | 5 +- 32 files changed, 417 insertions(+), 339 deletions(-) diff --git a/core/chains/evm/forwarders/forwarder_manager.go b/core/chains/evm/forwarders/forwarder_manager.go index 53f8ed86356..8b68ee3ba83 100644 --- a/core/chains/evm/forwarders/forwarder_manager.go +++ b/core/chains/evm/forwarders/forwarder_manager.go @@ -211,6 +211,7 @@ func (f *FwdMgr) subscribeSendersChangedLogs(addr common.Address) error { } err := f.logpoller.RegisterFilter( + f.ctx, evmlogpoller.Filter{ Name: FilterName(addr), EventSigs: []common.Hash{authChangedTopic}, @@ -251,6 +252,7 @@ func (f *FwdMgr) runLoop() { } logs, err := f.logpoller.LatestLogEventSigsAddrsWithConfs( + f.ctx, f.latestBlock, []common.Hash{authChangedTopic}, addrs, diff --git a/core/chains/evm/headtracker/orm.go b/core/chains/evm/headtracker/orm.go index 7991ebecb73..7bc630f9d28 100644 --- a/core/chains/evm/headtracker/orm.go +++ b/core/chains/evm/headtracker/orm.go @@ -3,9 +3,10 @@ package headtracker import ( "context" "database/sql" + "math/big" + "github.com/ethereum/go-ethereum/common" "github.com/pkg/errors" - "math/big" "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" diff --git 
a/core/chains/evm/logpoller/disabled.go b/core/chains/evm/logpoller/disabled.go index 15d86bf23d2..b05c456f565 100644 --- a/core/chains/evm/logpoller/disabled.go +++ b/core/chains/evm/logpoller/disabled.go @@ -31,9 +31,9 @@ func (disabled) Replay(ctx context.Context, fromBlock int64) error { return ErrD func (disabled) ReplayAsync(fromBlock int64) {} -func (disabled) RegisterFilter(filter Filter) error { return ErrDisabled } +func (disabled) RegisterFilter(ctx context.Context, filter Filter) error { return ErrDisabled } -func (disabled) UnregisterFilter(name string) error { return ErrDisabled } +func (disabled) UnregisterFilter(ctx context.Context, name string) error { return ErrDisabled } func (disabled) HasFilter(name string) bool { return false } @@ -45,66 +45,66 @@ func (disabled) GetBlocksRange(ctx context.Context, numbers []uint64) ([]LogPoll return nil, ErrDisabled } -func (disabled) Logs(start, end int64, eventSig common.Hash, address common.Address) ([]Log, error) { +func (disabled) Logs(ctx context.Context, start, end int64, eventSig common.Hash, address common.Address) ([]Log, error) { return nil, ErrDisabled } -func (disabled) LogsWithSigs(start, end int64, eventSigs []common.Hash, address common.Address) ([]Log, error) { +func (disabled) LogsWithSigs(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]Log, error) { return nil, ErrDisabled } -func (disabled) LatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs Confirmations) (*Log, error) { +func (disabled) LatestLogByEventSigWithConfs(ctx context.Context, eventSig common.Hash, address common.Address, confs Confirmations) (*Log, error) { return nil, ErrDisabled } -func (disabled) LatestLogEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) ([]Log, error) { +func (disabled) LatestLogEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, eventSigs []common.Hash, 
addresses []common.Address, confs Confirmations) ([]Log, error) { +func (disabled) LatestLogEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) ([]Log, error) { return nil, ErrDisabled } -func (disabled) IndexedLogs(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs Confirmations) ([]Log, error) { +func (disabled) IndexedLogs(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs Confirmations) ([]Log, error) { return nil, ErrDisabled } -func (disabled) IndexedLogsByBlockRange(start, end int64, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash) ([]Log, error) { +func (disabled) IndexedLogsByBlockRange(ctx context.Context, start, end int64, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash) ([]Log, error) { return nil, ErrDisabled } -func (d disabled) IndexedLogsByTxHash(eventSig common.Hash, address common.Address, txHash common.Hash) ([]Log, error) { +func (d disabled) IndexedLogsByTxHash(ctx context.Context, eventSig common.Hash, address common.Address, txHash common.Hash) ([]Log, error) { return nil, ErrDisabled } -func (disabled) IndexedLogsTopicGreaterThan(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, confs Confirmations) ([]Log, error) { +func (disabled) IndexedLogsTopicGreaterThan(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, confs Confirmations) ([]Log, error) { return nil, ErrDisabled } -func (disabled) IndexedLogsTopicRange(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, topicValueMax common.Hash, confs Confirmations) ([]Log, error) { +func (disabled) IndexedLogsTopicRange(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, topicValueMax common.Hash, confs Confirmations) ([]Log, error) { return nil, ErrDisabled } -func (disabled) LogsDataWordRange(eventSig common.Hash, 
address common.Address, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations) ([]Log, error) { +func (disabled) LogsDataWordRange(ctx context.Context, eventSig common.Hash, address common.Address, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations) ([]Log, error) { return nil, ErrDisabled } -func (disabled) LogsDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, confs Confirmations) ([]Log, error) { +func (disabled) LogsDataWordGreaterThan(ctx context.Context, eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, confs Confirmations) ([]Log, error) { return nil, ErrDisabled } -func (d disabled) IndexedLogsWithSigsExcluding(address common.Address, eventSigA, eventSigB common.Hash, topicIndex int, fromBlock, toBlock int64, confs Confirmations) ([]Log, error) { +func (d disabled) IndexedLogsWithSigsExcluding(ctx context.Context, address common.Address, eventSigA, eventSigB common.Hash, topicIndex int, fromBlock, toBlock int64, confs Confirmations) ([]Log, error) { return nil, ErrDisabled } -func (d disabled) LogsCreatedAfter(eventSig common.Hash, address common.Address, time time.Time, confs Confirmations) ([]Log, error) { +func (d disabled) LogsCreatedAfter(ctx context.Context, eventSig common.Hash, address common.Address, time time.Time, confs Confirmations) ([]Log, error) { return nil, ErrDisabled } -func (d disabled) IndexedLogsCreatedAfter(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations) ([]Log, error) { +func (d disabled) IndexedLogsCreatedAfter(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations) ([]Log, error) { return nil, ErrDisabled } -func (d disabled) LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses 
[]common.Address, confs Confirmations) (int64, error) { +func (d disabled) LatestBlockByEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) (int64, error) { return 0, ErrDisabled } -func (d disabled) LogsDataWordBetween(eventSig common.Hash, address common.Address, wordIndexMin, wordIndexMax int, wordValue common.Hash, confs Confirmations) ([]Log, error) { +func (d disabled) LogsDataWordBetween(ctx context.Context, eventSig common.Hash, address common.Address, wordIndexMin, wordIndexMax int, wordValue common.Hash, confs Confirmations) ([]Log, error) { return nil, ErrDisabled } diff --git a/core/chains/evm/logpoller/log_poller.go b/core/chains/evm/logpoller/log_poller.go index 1f9814f11f1..6fcb3ee97e2 100644 --- a/core/chains/evm/logpoller/log_poller.go +++ b/core/chains/evm/logpoller/log_poller.go @@ -35,31 +35,31 @@ type LogPoller interface { services.Service Replay(ctx context.Context, fromBlock int64) error ReplayAsync(fromBlock int64) - RegisterFilter(filter Filter) error - UnregisterFilter(name string) error + RegisterFilter(ctx context.Context, filter Filter) error + UnregisterFilter(ctx context.Context, name string) error HasFilter(name string) bool LatestBlock(ctx context.Context) (LogPollerBlock, error) GetBlocksRange(ctx context.Context, numbers []uint64) ([]LogPollerBlock, error) // General querying - Logs(start, end int64, eventSig common.Hash, address common.Address) ([]Log, error) - LogsWithSigs(start, end int64, eventSigs []common.Hash, address common.Address) ([]Log, error) - LogsCreatedAfter(eventSig common.Hash, address common.Address, time time.Time, confs Confirmations) ([]Log, error) - LatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs Confirmations) (*Log, error) - LatestLogEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) ([]Log, error) - 
LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) (int64, error) + Logs(ctx context.Context, start, end int64, eventSig common.Hash, address common.Address) ([]Log, error) + LogsWithSigs(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]Log, error) + LogsCreatedAfter(ctx context.Context, eventSig common.Hash, address common.Address, time time.Time, confs Confirmations) ([]Log, error) + LatestLogByEventSigWithConfs(ctx context.Context, eventSig common.Hash, address common.Address, confs Confirmations) (*Log, error) + LatestLogEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) ([]Log, error) + LatestBlockByEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) (int64, error) // Content based querying - IndexedLogs(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs Confirmations) ([]Log, error) - IndexedLogsByBlockRange(start, end int64, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash) ([]Log, error) - IndexedLogsCreatedAfter(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations) ([]Log, error) - IndexedLogsByTxHash(eventSig common.Hash, address common.Address, txHash common.Hash) ([]Log, error) - IndexedLogsTopicGreaterThan(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, confs Confirmations) ([]Log, error) - IndexedLogsTopicRange(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, topicValueMax common.Hash, confs Confirmations) ([]Log, error) - IndexedLogsWithSigsExcluding(address common.Address, eventSigA, eventSigB common.Hash, topicIndex int, fromBlock, 
toBlock int64, confs Confirmations) ([]Log, error) - LogsDataWordRange(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations) ([]Log, error) - LogsDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, confs Confirmations) ([]Log, error) - LogsDataWordBetween(eventSig common.Hash, address common.Address, wordIndexMin, wordIndexMax int, wordValue common.Hash, confs Confirmations) ([]Log, error) + IndexedLogs(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs Confirmations) ([]Log, error) + IndexedLogsByBlockRange(ctx context.Context, start, end int64, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash) ([]Log, error) + IndexedLogsCreatedAfter(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations) ([]Log, error) + IndexedLogsByTxHash(ctx context.Context, eventSig common.Hash, address common.Address, txHash common.Hash) ([]Log, error) + IndexedLogsTopicGreaterThan(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, confs Confirmations) ([]Log, error) + IndexedLogsTopicRange(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, topicValueMax common.Hash, confs Confirmations) ([]Log, error) + IndexedLogsWithSigsExcluding(ctx context.Context, address common.Address, eventSigA, eventSigB common.Hash, topicIndex int, fromBlock, toBlock int64, confs Confirmations) ([]Log, error) + LogsDataWordRange(ctx context.Context, eventSig common.Hash, address common.Address, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations) ([]Log, error) + LogsDataWordGreaterThan(ctx context.Context, eventSig common.Hash, address common.Address, wordIndex 
int, wordValueMin common.Hash, confs Confirmations) ([]Log, error) + LogsDataWordBetween(ctx context.Context, eventSig common.Hash, address common.Address, wordIndexMin, wordIndexMax int, wordValue common.Hash, confs Confirmations) ([]Log, error) } type Confirmations int @@ -217,7 +217,7 @@ func (filter *Filter) Contains(other *Filter) bool { // which means that anonymous events are not supported and log.Topics >= 1 always (log.Topics[0] is the event signature). // The filter may be unregistered later by Filter.Name // Warnings/debug information is keyed by filter name. -func (lp *logPoller) RegisterFilter(filter Filter) error { +func (lp *logPoller) RegisterFilter(ctx context.Context, filter Filter) error { if len(filter.Addresses) == 0 { return errors.Errorf("at least one address must be specified") } @@ -248,7 +248,7 @@ func (lp *logPoller) RegisterFilter(filter Filter) error { lp.lggr.Warnw("Updating existing filter with more events or addresses", "name", filter.Name, "filter", filter) } - if err := lp.orm.InsertFilter(lp.ctx, filter); err != nil { + if err := lp.orm.InsertFilter(ctx, filter); err != nil { return errors.Wrap(err, "error inserting filter") } lp.filters[filter.Name] = filter @@ -259,7 +259,7 @@ func (lp *logPoller) RegisterFilter(filter Filter) error { // UnregisterFilter will remove the filter with the given name. // If the name does not exist, it will log an error but not return an error. // Warnings/debug information is keyed by filter name. 
-func (lp *logPoller) UnregisterFilter(name string) error { +func (lp *logPoller) UnregisterFilter(ctx context.Context, name string) error { lp.filterMu.Lock() defer lp.filterMu.Unlock() @@ -269,7 +269,7 @@ func (lp *logPoller) UnregisterFilter(name string) error { return nil } - if err := lp.orm.DeleteFilter(lp.ctx, name); err != nil { + if err := lp.orm.DeleteFilter(ctx, name); err != nil { return errors.Wrap(err, "error deleting filter") } delete(lp.filters, name) @@ -413,7 +413,7 @@ func (lp *logPoller) HealthReport() map[string]error { } func (lp *logPoller) GetReplayFromBlock(ctx context.Context, requested int64) (int64, error) { - lastProcessed, err := lp.orm.SelectLatestBlock(lp.ctx) + lastProcessed, err := lp.orm.SelectLatestBlock(ctx) if err != nil { if !errors.Is(err, sql.ErrNoRows) { // Real DB error @@ -641,7 +641,6 @@ func (lp *logPoller) blocksFromLogs(ctx context.Context, logs []types.Log) (bloc for _, log := range logs { numbers = append(numbers, log.BlockNumber) } - return lp.GetBlocksRange(ctx, numbers) } @@ -948,54 +947,54 @@ func (lp *logPoller) PruneOldBlocks(ctx context.Context) error { // Logs returns logs matching topics and address (exactly) in the given block range, // which are canonical at time of query. 
-func (lp *logPoller) Logs(start, end int64, eventSig common.Hash, address common.Address) ([]Log, error) { - return lp.orm.SelectLogs(lp.ctx, start, end, address, eventSig) +func (lp *logPoller) Logs(ctx context.Context, start, end int64, eventSig common.Hash, address common.Address) ([]Log, error) { + return lp.orm.SelectLogs(ctx, start, end, address, eventSig) } -func (lp *logPoller) LogsWithSigs(start, end int64, eventSigs []common.Hash, address common.Address) ([]Log, error) { - return lp.orm.SelectLogsWithSigs(lp.ctx, start, end, address, eventSigs) +func (lp *logPoller) LogsWithSigs(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]Log, error) { + return lp.orm.SelectLogsWithSigs(ctx, start, end, address, eventSigs) } -func (lp *logPoller) LogsCreatedAfter(eventSig common.Hash, address common.Address, after time.Time, confs Confirmations) ([]Log, error) { - return lp.orm.SelectLogsCreatedAfter(lp.ctx, address, eventSig, after, confs) +func (lp *logPoller) LogsCreatedAfter(ctx context.Context, eventSig common.Hash, address common.Address, after time.Time, confs Confirmations) ([]Log, error) { + return lp.orm.SelectLogsCreatedAfter(ctx, address, eventSig, after, confs) } // IndexedLogs finds all the logs that have a topic value in topicValues at index topicIndex. 
-func (lp *logPoller) IndexedLogs(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs Confirmations) ([]Log, error) { - return lp.orm.SelectIndexedLogs(lp.ctx, address, eventSig, topicIndex, topicValues, confs) +func (lp *logPoller) IndexedLogs(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs Confirmations) ([]Log, error) { + return lp.orm.SelectIndexedLogs(ctx, address, eventSig, topicIndex, topicValues, confs) } // IndexedLogsByBlockRange finds all the logs that have a topic value in topicValues at index topicIndex within the block range -func (lp *logPoller) IndexedLogsByBlockRange(start, end int64, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash) ([]Log, error) { - return lp.orm.SelectIndexedLogsByBlockRange(lp.ctx, start, end, address, eventSig, topicIndex, topicValues) +func (lp *logPoller) IndexedLogsByBlockRange(ctx context.Context, start, end int64, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash) ([]Log, error) { + return lp.orm.SelectIndexedLogsByBlockRange(ctx, start, end, address, eventSig, topicIndex, topicValues) } -func (lp *logPoller) IndexedLogsCreatedAfter(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations) ([]Log, error) { - return lp.orm.SelectIndexedLogsCreatedAfter(lp.ctx, address, eventSig, topicIndex, topicValues, after, confs) +func (lp *logPoller) IndexedLogsCreatedAfter(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations) ([]Log, error) { + return lp.orm.SelectIndexedLogsCreatedAfter(ctx, address, eventSig, topicIndex, topicValues, after, confs) } -func (lp *logPoller) IndexedLogsByTxHash(eventSig common.Hash, address common.Address, txHash common.Hash) ([]Log, error) { - return 
lp.orm.SelectIndexedLogsByTxHash(lp.ctx, address, eventSig, txHash) +func (lp *logPoller) IndexedLogsByTxHash(ctx context.Context, eventSig common.Hash, address common.Address, txHash common.Hash) ([]Log, error) { + return lp.orm.SelectIndexedLogsByTxHash(ctx, address, eventSig, txHash) } // LogsDataWordGreaterThan note index is 0 based. -func (lp *logPoller) LogsDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, confs Confirmations) ([]Log, error) { - return lp.orm.SelectLogsDataWordGreaterThan(lp.ctx, address, eventSig, wordIndex, wordValueMin, confs) +func (lp *logPoller) LogsDataWordGreaterThan(ctx context.Context, eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, confs Confirmations) ([]Log, error) { + return lp.orm.SelectLogsDataWordGreaterThan(ctx, address, eventSig, wordIndex, wordValueMin, confs) } // LogsDataWordRange note index is 0 based. -func (lp *logPoller) LogsDataWordRange(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations) ([]Log, error) { - return lp.orm.SelectLogsDataWordRange(lp.ctx, address, eventSig, wordIndex, wordValueMin, wordValueMax, confs) +func (lp *logPoller) LogsDataWordRange(ctx context.Context, eventSig common.Hash, address common.Address, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations) ([]Log, error) { + return lp.orm.SelectLogsDataWordRange(ctx, address, eventSig, wordIndex, wordValueMin, wordValueMax, confs) } // IndexedLogsTopicGreaterThan finds all the logs that have a topic value greater than topicValueMin at index topicIndex. // Only works for integer topics. 
-func (lp *logPoller) IndexedLogsTopicGreaterThan(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, confs Confirmations) ([]Log, error) { - return lp.orm.SelectIndexedLogsTopicGreaterThan(lp.ctx, address, eventSig, topicIndex, topicValueMin, confs) +func (lp *logPoller) IndexedLogsTopicGreaterThan(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, confs Confirmations) ([]Log, error) { + return lp.orm.SelectIndexedLogsTopicGreaterThan(ctx, address, eventSig, topicIndex, topicValueMin, confs) } -func (lp *logPoller) IndexedLogsTopicRange(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, topicValueMax common.Hash, confs Confirmations) ([]Log, error) { - return lp.orm.SelectIndexedLogsTopicRange(lp.ctx, address, eventSig, topicIndex, topicValueMin, topicValueMax, confs) +func (lp *logPoller) IndexedLogsTopicRange(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, topicValueMax common.Hash, confs Confirmations) ([]Log, error) { + return lp.orm.SelectIndexedLogsTopicRange(ctx, address, eventSig, topicIndex, topicValueMin, topicValueMax, confs) } // LatestBlock returns the latest block the log poller is on. It tracks blocks to be able @@ -1009,21 +1008,21 @@ func (lp *logPoller) LatestBlock(ctx context.Context) (LogPollerBlock, error) { return *b, nil } -func (lp *logPoller) BlockByNumber(n int64) (*LogPollerBlock, error) { - return lp.orm.SelectBlockByNumber(lp.ctx, n) +func (lp *logPoller) BlockByNumber(ctx context.Context, n int64) (*LogPollerBlock, error) { + return lp.orm.SelectBlockByNumber(ctx, n) } // LatestLogByEventSigWithConfs finds the latest log that has confs number of blocks on top of the log. 
-func (lp *logPoller) LatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs Confirmations) (*Log, error) { - return lp.orm.SelectLatestLogByEventSigWithConfs(lp.ctx, eventSig, address, confs) +func (lp *logPoller) LatestLogByEventSigWithConfs(ctx context.Context, eventSig common.Hash, address common.Address, confs Confirmations) (*Log, error) { + return lp.orm.SelectLatestLogByEventSigWithConfs(ctx, eventSig, address, confs) } -func (lp *logPoller) LatestLogEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) ([]Log, error) { - return lp.orm.SelectLatestLogEventSigsAddrsWithConfs(lp.ctx, fromBlock, addresses, eventSigs, confs) +func (lp *logPoller) LatestLogEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) ([]Log, error) { + return lp.orm.SelectLatestLogEventSigsAddrsWithConfs(ctx, fromBlock, addresses, eventSigs, confs) } -func (lp *logPoller) LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) (int64, error) { - return lp.orm.SelectLatestBlockByEventSigsAddrsWithConfs(lp.ctx, fromBlock, eventSigs, addresses, confs) +func (lp *logPoller) LatestBlockByEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) (int64, error) { + return lp.orm.SelectLatestBlockByEventSigsAddrsWithConfs(ctx, fromBlock, eventSigs, addresses, confs) } // LogsDataWordBetween retrieves a slice of Log records that match specific criteria. @@ -1035,8 +1034,8 @@ func (lp *logPoller) LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, event // // This function is particularly useful for filtering logs by data word values and their positions within the event data. // It returns an empty slice if no logs match the provided criteria. 
-func (lp *logPoller) LogsDataWordBetween(eventSig common.Hash, address common.Address, wordIndexMin, wordIndexMax int, wordValue common.Hash, confs Confirmations) ([]Log, error) { - return lp.orm.SelectLogsDataWordBetween(lp.ctx, address, eventSig, wordIndexMin, wordIndexMax, wordValue, confs) +func (lp *logPoller) LogsDataWordBetween(ctx context.Context, eventSig common.Hash, address common.Address, wordIndexMin, wordIndexMax int, wordValue common.Hash, confs Confirmations) ([]Log, error) { + return lp.orm.SelectLogsDataWordBetween(ctx, address, eventSig, wordIndexMin, wordIndexMax, wordValue, confs) } // GetBlocksRange tries to get the specified block numbers from the log pollers @@ -1184,8 +1183,8 @@ func (lp *logPoller) batchFetchBlocks(ctx context.Context, blocksRequested []str // // For example, query to retrieve unfulfilled requests by querying request log events without matching fulfillment log events. // The order of events is not significant. Both logs must be inside the block range and have the minimum number of confirmations -func (lp *logPoller) IndexedLogsWithSigsExcluding(address common.Address, eventSigA, eventSigB common.Hash, topicIndex int, fromBlock, toBlock int64, confs Confirmations) ([]Log, error) { - return lp.orm.SelectIndexedLogsWithSigsExcluding(lp.ctx, eventSigA, eventSigB, topicIndex, address, fromBlock, toBlock, confs) +func (lp *logPoller) IndexedLogsWithSigsExcluding(ctx context.Context, address common.Address, eventSigA, eventSigB common.Hash, topicIndex int, fromBlock, toBlock int64, confs Confirmations) ([]Log, error) { + return lp.orm.SelectIndexedLogsWithSigsExcluding(ctx, eventSigA, eventSigB, topicIndex, address, fromBlock, toBlock, confs) } func EvmWord(i uint64) common.Hash { diff --git a/core/chains/evm/logpoller/log_poller_internal_test.go b/core/chains/evm/logpoller/log_poller_internal_test.go index df6c41a9702..011bdc05789 100644 --- a/core/chains/evm/logpoller/log_poller_internal_test.go +++ 
b/core/chains/evm/logpoller/log_poller_internal_test.go @@ -71,36 +71,36 @@ func TestLogPoller_RegisterFilter(t *testing.T) { require.Equal(t, 1, len(f.Addresses)) assert.Equal(t, common.HexToAddress("0x0000000000000000000000000000000000000000"), f.Addresses[0]) - err := lp.RegisterFilter(Filter{"Emitter Log 1", []common.Hash{EmitterABI.Events["Log1"].ID}, []common.Address{a1}, 0}) + err := lp.RegisterFilter(testutils.Context(t), Filter{"Emitter Log 1", []common.Hash{EmitterABI.Events["Log1"].ID}, []common.Address{a1}, 0}) require.NoError(t, err) assert.Equal(t, []common.Address{a1}, lp.Filter(nil, nil, nil).Addresses) assert.Equal(t, [][]common.Hash{{EmitterABI.Events["Log1"].ID}}, lp.Filter(nil, nil, nil).Topics) validateFiltersTable(t, lp, orm) // Should de-dupe EventSigs - err = lp.RegisterFilter(Filter{"Emitter Log 1 + 2", []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, []common.Address{a2}, 0}) + err = lp.RegisterFilter(testutils.Context(t), Filter{"Emitter Log 1 + 2", []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, []common.Address{a2}, 0}) require.NoError(t, err) assert.Equal(t, []common.Address{a1, a2}, lp.Filter(nil, nil, nil).Addresses) assert.Equal(t, [][]common.Hash{{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}}, lp.Filter(nil, nil, nil).Topics) validateFiltersTable(t, lp, orm) // Should de-dupe Addresses - err = lp.RegisterFilter(Filter{"Emitter Log 1 + 2 dupe", []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, []common.Address{a2}, 0}) + err = lp.RegisterFilter(testutils.Context(t), Filter{"Emitter Log 1 + 2 dupe", []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, []common.Address{a2}, 0}) require.NoError(t, err) assert.Equal(t, []common.Address{a1, a2}, lp.Filter(nil, nil, nil).Addresses) assert.Equal(t, [][]common.Hash{{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}}, lp.Filter(nil, nil, nil).Topics) 
validateFiltersTable(t, lp, orm) // Address required. - err = lp.RegisterFilter(Filter{"no address", []common.Hash{EmitterABI.Events["Log1"].ID}, []common.Address{}, 0}) + err = lp.RegisterFilter(testutils.Context(t), Filter{"no address", []common.Hash{EmitterABI.Events["Log1"].ID}, []common.Address{}, 0}) require.Error(t, err) // Event required - err = lp.RegisterFilter(Filter{"No event", []common.Hash{}, []common.Address{a1}, 0}) + err = lp.RegisterFilter(testutils.Context(t), Filter{"No event", []common.Hash{}, []common.Address{a1}, 0}) require.Error(t, err) validateFiltersTable(t, lp, orm) // Removing non-existence Filter should log error but return nil - err = lp.UnregisterFilter("Filter doesn't exist") + err = lp.UnregisterFilter(testutils.Context(t), "Filter doesn't exist") require.NoError(t, err) require.Equal(t, observedLogs.Len(), 1) require.Contains(t, observedLogs.TakeAll()[0].Entry.Message, "not found") @@ -114,16 +114,16 @@ func TestLogPoller_RegisterFilter(t *testing.T) { require.True(t, ok, "'Emitter Log 1 + 2 dupe' Filter missing") // Removing an existing Filter should remove it from both memory and db - err = lp.UnregisterFilter("Emitter Log 1 + 2") + err = lp.UnregisterFilter(testutils.Context(t), "Emitter Log 1 + 2") require.NoError(t, err) _, ok = lp.filters["Emitter Log 1 + 2"] require.False(t, ok, "'Emitter Log 1 Filter' should have been removed by UnregisterFilter()") require.Len(t, lp.filters, 2) validateFiltersTable(t, lp, orm) - err = lp.UnregisterFilter("Emitter Log 1 + 2 dupe") + err = lp.UnregisterFilter(testutils.Context(t), "Emitter Log 1 + 2 dupe") require.NoError(t, err) - err = lp.UnregisterFilter("Emitter Log 1") + err = lp.UnregisterFilter(testutils.Context(t), "Emitter Log 1") require.NoError(t, err) assert.Len(t, lp.filters, 0) filters, err := lp.orm.LoadFilters(lp.ctx) @@ -516,7 +516,7 @@ func benchmarkFilter(b *testing.B, nFilters, nAddresses, nEvents int) { for j := 0; j < nEvents; j++ { events = append(events, 
common.BigToHash(big.NewInt(int64(j+1)))) } - err := lp.RegisterFilter(Filter{Name: "my Filter", EventSigs: events, Addresses: addresses}) + err := lp.RegisterFilter(testutils.Context(b), Filter{Name: "my Filter", EventSigs: events, Addresses: addresses}) require.NoError(b, err) } b.ResetTimer() diff --git a/core/chains/evm/logpoller/log_poller_test.go b/core/chains/evm/logpoller/log_poller_test.go index b3d89d7ca61..3f52b69c0e6 100644 --- a/core/chains/evm/logpoller/log_poller_test.go +++ b/core/chains/evm/logpoller/log_poller_test.go @@ -151,8 +151,9 @@ func TestPopulateLoadedDB(t *testing.T) { func TestLogPoller_Integration(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) th.Client.Commit() // Block 2. Ensure we have finality number of blocks + ctx := testutils.Context(t) - require.NoError(t, th.LogPoller.RegisterFilter(logpoller.Filter{"Integration test", []common.Hash{EmitterABI.Events["Log1"].ID}, []common.Address{th.EmitterAddress1}, 0})) + require.NoError(t, th.LogPoller.RegisterFilter(ctx, logpoller.Filter{"Integration test", []common.Hash{EmitterABI.Events["Log1"].ID}, []common.Address{th.EmitterAddress1}, 0})) require.Len(t, th.LogPoller.Filter(nil, nil, nil).Addresses, 1) require.Len(t, th.LogPoller.Filter(nil, nil, nil).Topics, 1) @@ -176,19 +177,19 @@ func TestLogPoller_Integration(t *testing.T) { require.NoError(t, th.LogPoller.Replay(testutils.Context(t), 4)) // We should immediately have at least logs 4-7 - logs, err := th.LogPoller.Logs(4, 7, EmitterABI.Events["Log1"].ID, th.EmitterAddress1) + logs, err := th.LogPoller.Logs(ctx, 4, 7, EmitterABI.Events["Log1"].ID, th.EmitterAddress1) require.NoError(t, err) require.Equal(t, 4, len(logs)) // Once the backup poller runs we should also have the log from block 3 testutils.AssertEventually(t, func() bool { - l, err2 := th.LogPoller.Logs(3, 3, EmitterABI.Events["Log1"].ID, th.EmitterAddress1) + l, err2 := th.LogPoller.Logs(ctx, 3, 3, EmitterABI.Events["Log1"].ID, th.EmitterAddress1) 
require.NoError(t, err2) return len(l) == 1 }) // Now let's update the Filter and replay to get Log2 logs. - err = th.LogPoller.RegisterFilter(logpoller.Filter{ + err = th.LogPoller.RegisterFilter(ctx, logpoller.Filter{ "Emitter - log2", []common.Hash{EmitterABI.Events["Log2"].ID}, []common.Address{th.EmitterAddress1}, 0, }) @@ -198,7 +199,7 @@ func TestLogPoller_Integration(t *testing.T) { assert.Error(t, th.LogPoller.Replay(testutils.Context(t), 20)) // Still shouldn't have any Log2 logs yet - logs, err = th.LogPoller.Logs(2, 7, EmitterABI.Events["Log2"].ID, th.EmitterAddress1) + logs, err = th.LogPoller.Logs(ctx, 2, 7, EmitterABI.Events["Log2"].ID, th.EmitterAddress1) require.NoError(t, err) require.Len(t, logs, 0) @@ -206,7 +207,7 @@ func TestLogPoller_Integration(t *testing.T) { require.NoError(t, th.LogPoller.Replay(testutils.Context(t), 4)) // We should immediately see 4 logs2 logs. - logs, err = th.LogPoller.Logs(2, 7, EmitterABI.Events["Log2"].ID, th.EmitterAddress1) + logs, err = th.LogPoller.Logs(ctx, 2, 7, EmitterABI.Events["Log2"].ID, th.EmitterAddress1) require.NoError(t, err) assert.Equal(t, 4, len(logs)) @@ -259,7 +260,7 @@ func Test_BackupLogPoller(t *testing.T) { EmitterABI.Events["Log2"].ID}, []common.Address{th.EmitterAddress1}, 0} - err := th.LogPoller.RegisterFilter(filter1) + err := th.LogPoller.RegisterFilter(ctx, filter1) require.NoError(t, err) filters, err := th.ORM.LoadFilters(ctx) @@ -268,16 +269,17 @@ func Test_BackupLogPoller(t *testing.T) { require.Equal(t, filter1, filters["filter1"]) err = th.LogPoller.RegisterFilter( + ctx, logpoller.Filter{"filter2", []common.Hash{EmitterABI.Events["Log1"].ID}, []common.Address{th.EmitterAddress2}, 0}) require.NoError(t, err) defer func() { - assert.NoError(t, th.LogPoller.UnregisterFilter("filter1")) + assert.NoError(t, th.LogPoller.UnregisterFilter(ctx, "filter1")) }() defer func() { - assert.NoError(t, th.LogPoller.UnregisterFilter("filter2")) + assert.NoError(t, 
th.LogPoller.UnregisterFilter(ctx, "filter2")) }() // generate some tx's with logs @@ -333,7 +335,7 @@ func Test_BackupLogPoller(t *testing.T) { require.Equal(t, 32, len(fLogs)) // logs shouldn't show up yet - logs, err := th.LogPoller.Logs(34, 34, EmitterABI.Events["Log1"].ID, th.EmitterAddress1) + logs, err := th.LogPoller.Logs(ctx, 34, 34, EmitterABI.Events["Log1"].ID, th.EmitterAddress1) require.NoError(t, err) assert.Equal(t, 0, len(logs)) @@ -351,7 +353,7 @@ func Test_BackupLogPoller(t *testing.T) { // logs still shouldn't show up, because we don't want to backfill the last finalized log // to help with reorg detection - logs, err = th.LogPoller.Logs(34, 34, EmitterABI.Events["Log1"].ID, th.EmitterAddress1) + logs, err = th.LogPoller.Logs(ctx, 34, 34, EmitterABI.Events["Log1"].ID, th.EmitterAddress1) require.NoError(t, err) assert.Equal(t, 0, len(logs)) th.Client.Commit() @@ -365,13 +367,13 @@ func Test_BackupLogPoller(t *testing.T) { require.Equal(t, int64(38), currentBlock.BlockNumber+1) // all 3 logs in block 34 should show up now, thanks to backup logger - logs, err = th.LogPoller.Logs(30, 37, EmitterABI.Events["Log1"].ID, th.EmitterAddress1) + logs, err = th.LogPoller.Logs(ctx, 30, 37, EmitterABI.Events["Log1"].ID, th.EmitterAddress1) require.NoError(t, err) assert.Equal(t, 5, len(logs)) - logs, err = th.LogPoller.Logs(34, 34, EmitterABI.Events["Log2"].ID, th.EmitterAddress1) + logs, err = th.LogPoller.Logs(ctx, 34, 34, EmitterABI.Events["Log2"].ID, th.EmitterAddress1) require.NoError(t, err) assert.Equal(t, 1, len(logs)) - logs, err = th.LogPoller.Logs(32, 36, EmitterABI.Events["Log1"].ID, th.EmitterAddress2) + logs, err = th.LogPoller.Logs(ctx, 32, 36, EmitterABI.Events["Log1"].ID, th.EmitterAddress2) require.NoError(t, err) assert.Equal(t, 1, len(logs)) }) @@ -405,7 +407,7 @@ func TestLogPoller_BackupPollAndSaveLogsWithPollerNotWorking(t *testing.T) { // 0 -> 1 -> 2 -> ... -> currentBlock - 10 (finalized) -> .. 
-> currentBlock markBlockAsFinalized(t, th, currentBlock-10) - err = th.LogPoller.RegisterFilter(logpoller.Filter{ + err = th.LogPoller.RegisterFilter(ctx, logpoller.Filter{ Name: "Test Emitter", EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, Addresses: []common.Address{th.EmitterAddress1}, @@ -418,6 +420,7 @@ func TestLogPoller_BackupPollAndSaveLogsWithPollerNotWorking(t *testing.T) { require.NoError(t, err) logs, err := th.LogPoller.Logs( + ctx, 0, currentBlock, EmitterABI.Events["Log1"].ID, @@ -433,6 +436,7 @@ func TestLogPoller_BackupPollAndSaveLogsWithPollerNotWorking(t *testing.T) { // All emitted logs should be backfilled logs, err = th.LogPoller.Logs( + ctx, 0, currentBlock+1, EmitterABI.Events["Log1"].ID, @@ -473,7 +477,7 @@ func TestLogPoller_BackupPollAndSaveLogsWithDeepBlockDelay(t *testing.T) { assert.Equal(t, latestBlock.BlockHash, header.Hash()) // Register filter - err = th.LogPoller.RegisterFilter(logpoller.Filter{ + err = th.LogPoller.RegisterFilter(ctx, logpoller.Filter{ Name: "Test Emitter", EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, Addresses: []common.Address{th.EmitterAddress1}, @@ -486,6 +490,7 @@ func TestLogPoller_BackupPollAndSaveLogsWithDeepBlockDelay(t *testing.T) { // All emitted logs should be backfilled logs, err := th.LogPoller.Logs( + ctx, 0, header.Number.Int64()+1, EmitterABI.Events["Log1"].ID, @@ -530,7 +535,7 @@ func TestLogPoller_BackupPollAndSaveLogsSkippingLogsThatAreTooOld(t *testing.T) markBlockAsFinalized(t, th, secondBatchBlock) // Register filter - err := th.LogPoller.RegisterFilter(logpoller.Filter{ + err := th.LogPoller.RegisterFilter(ctx, logpoller.Filter{ Name: "Test Emitter", EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, Addresses: []common.Address{th.EmitterAddress1}, @@ -544,6 +549,7 @@ func TestLogPoller_BackupPollAndSaveLogsSkippingLogsThatAreTooOld(t *testing.T) // Only the 2nd batch + 1 log from a previous batch should be backfilled, because we perform backfill starting // from 
one block behind the latest finalized block logs, err := th.LogPoller.Logs( + ctx, 0, secondBatchBlock, EmitterABI.Events["Log1"].ID, @@ -562,7 +568,7 @@ func TestLogPoller_BlockTimestamps(t *testing.T) { addresses := []common.Address{th.EmitterAddress1, th.EmitterAddress2} topics := []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID} - err := th.LogPoller.RegisterFilter(logpoller.Filter{"convertLogs", topics, addresses, 0}) + err := th.LogPoller.RegisterFilter(ctx, logpoller.Filter{"convertLogs", topics, addresses, 0}) require.NoError(t, err) blk, err := th.Client.BlockByNumber(ctx, nil) @@ -619,9 +625,9 @@ func TestLogPoller_BlockTimestamps(t *testing.T) { lb, _ := th.LogPoller.LatestBlock(ctx) th.PollAndSaveLogs(ctx, lb.BlockNumber+1) - lg1, err := th.LogPoller.Logs(0, 20, EmitterABI.Events["Log1"].ID, th.EmitterAddress1) + lg1, err := th.LogPoller.Logs(ctx, 0, 20, EmitterABI.Events["Log1"].ID, th.EmitterAddress1) require.NoError(t, err) - lg2, err := th.LogPoller.Logs(0, 20, EmitterABI.Events["Log2"].ID, th.EmitterAddress2) + lg2, err := th.LogPoller.Logs(ctx, 0, 20, EmitterABI.Events["Log2"].ID, th.EmitterAddress2) require.NoError(t, err) // Logs should have correct timestamps @@ -673,7 +679,7 @@ func TestLogPoller_SynchronizedWithGeth(t *testing.T) { latest, err1 := ec.BlockByNumber(testutils.Context(t), nil) require.NoError(t, err1) for i := 1; i < int(latest.NumberU64()); i++ { - ourBlock, err1 := lp.BlockByNumber(int64(i)) + ourBlock, err1 := lp.BlockByNumber(testutils.Context(t), int64(i)) require.NoError(t, err1) gethBlock, err1 := ec.BlockByNumber(testutils.Context(t), big.NewInt(int64(i))) require.NoError(t, err1) @@ -750,7 +756,7 @@ func TestLogPoller_PollAndSaveLogs(t *testing.T) { th := SetupTH(t, tt.finalityTag, tt.finalityDepth, 3, 2, 1000) // Set up a log poller listening for log emitter logs. 
- err := th.LogPoller.RegisterFilter(logpoller.Filter{ + err := th.LogPoller.RegisterFilter(testutils.Context(t), logpoller.Filter{ "Test Emitter 1 & 2", []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, []common.Address{th.EmitterAddress1, th.EmitterAddress2}, 0, }) @@ -998,7 +1004,7 @@ func TestLogPoller_PollAndSaveLogsDeepReorg(t *testing.T) { th := SetupTH(t, tt.finalityTag, tt.finalityDepth, 3, 2, 1000) // Set up a log poller listening for log emitter logs. - err := th.LogPoller.RegisterFilter(logpoller.Filter{ + err := th.LogPoller.RegisterFilter(testutils.Context(t), logpoller.Filter{ Name: "Test Emitter", EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, Addresses: []common.Address{th.EmitterAddress1}, @@ -1069,11 +1075,11 @@ func TestLogPoller_LoadFilters(t *testing.T) { assert.False(t, filter2.Contains(&filter1)) assert.True(t, filter1.Contains(&filter3)) - err := th.LogPoller.RegisterFilter(filter1) + err := th.LogPoller.RegisterFilter(testutils.Context(t), filter1) require.NoError(t, err) - err = th.LogPoller.RegisterFilter(filter2) + err = th.LogPoller.RegisterFilter(testutils.Context(t), filter2) require.NoError(t, err) - err = th.LogPoller.RegisterFilter(filter3) + err = th.LogPoller.RegisterFilter(testutils.Context(t), filter3) require.NoError(t, err) filters, err := th.ORM.LoadFilters(testutils.Context(t)) @@ -1108,7 +1114,7 @@ func TestLogPoller_GetBlocks_Range(t *testing.T) { t.Parallel() th := SetupTH(t, false, 2, 3, 2, 1000) - err := th.LogPoller.RegisterFilter(logpoller.Filter{"GetBlocks Test", []common.Hash{ + err := th.LogPoller.RegisterFilter(testutils.Context(t), logpoller.Filter{"GetBlocks Test", []common.Hash{ EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, []common.Address{th.EmitterAddress1, th.EmitterAddress2}, 0}, ) require.NoError(t, err) @@ -1352,7 +1358,7 @@ func TestTooManyLogResults(t *testing.T) { }) addr := testutils.NewAddress() - err := 
lp.RegisterFilter(logpoller.Filter{"Integration test", []common.Hash{EmitterABI.Events["Log1"].ID}, []common.Address{addr}, 0}) + err := lp.RegisterFilter(testutils.Context(t), logpoller.Filter{"Integration test", []common.Hash{EmitterABI.Events["Log1"].ID}, []common.Address{addr}, 0}) require.NoError(t, err) lp.PollAndSaveLogs(ctx, 5) block, err2 := o.SelectLatestBlock(testutils.Context(t)) @@ -1409,7 +1415,7 @@ func Test_PollAndQueryFinalizedBlocks(t *testing.T) { th := SetupTH(t, true, 2, 3, 2, 1000) eventSig := EmitterABI.Events["Log1"].ID - err := th.LogPoller.RegisterFilter(logpoller.Filter{ + err := th.LogPoller.RegisterFilter(testutils.Context(t), logpoller.Filter{ Name: "GetBlocks Test", EventSigs: []common.Hash{eventSig}, Addresses: []common.Address{th.EmitterAddress1}}, @@ -1438,6 +1444,7 @@ func Test_PollAndQueryFinalizedBlocks(t *testing.T) { require.Equal(t, int(currentBlock), firstBatchLen+secondBatchLen+2) finalizedLogs, err := th.LogPoller.LogsDataWordGreaterThan( + testutils.Context(t), eventSig, th.EmitterAddress1, 0, @@ -1449,6 +1456,7 @@ func Test_PollAndQueryFinalizedBlocks(t *testing.T) { numberOfConfirmations := 1 logsByConfs, err := th.LogPoller.LogsDataWordGreaterThan( + testutils.Context(t), eventSig, th.EmitterAddress1, 0, @@ -1554,7 +1562,7 @@ func Test_CreatedAfterQueriesWithBackfill(t *testing.T) { // First PollAndSave, no filters are registered currentBlock := th.PollAndSaveLogs(ctx, 1) - err = th.LogPoller.RegisterFilter(logpoller.Filter{ + err = th.LogPoller.RegisterFilter(ctx, logpoller.Filter{ Name: "Test Emitter", EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, Addresses: []common.Address{th.EmitterAddress1}, @@ -1573,6 +1581,7 @@ func Test_CreatedAfterQueriesWithBackfill(t *testing.T) { // Make sure that all logs are backfilled logs, err := th.LogPoller.Logs( + ctx, 0, currentBlock, EmitterABI.Events["Log1"].ID, @@ -1583,6 +1592,7 @@ func Test_CreatedAfterQueriesWithBackfill(t *testing.T) { // We should get all the logs 
by the block_timestamp logs, err = th.LogPoller.LogsCreatedAfter( + ctx, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, genesisBlockTime, diff --git a/core/chains/evm/logpoller/mocks/log_poller.go b/core/chains/evm/logpoller/mocks/log_poller.go index 796057640d8..2bf24881405 100644 --- a/core/chains/evm/logpoller/mocks/log_poller.go +++ b/core/chains/evm/logpoller/mocks/log_poller.go @@ -105,9 +105,9 @@ func (_m *LogPoller) HealthReport() map[string]error { return r0 } -// IndexedLogs provides a mock function with given fields: eventSig, address, topicIndex, topicValues, confs -func (_m *LogPoller) IndexedLogs(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { - ret := _m.Called(eventSig, address, topicIndex, topicValues, confs) +// IndexedLogs provides a mock function with given fields: ctx, eventSig, address, topicIndex, topicValues, confs +func (_m *LogPoller) IndexedLogs(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { + ret := _m.Called(ctx, eventSig, address, topicIndex, topicValues, confs) if len(ret) == 0 { panic("no return value specified for IndexedLogs") @@ -115,19 +115,19 @@ func (_m *LogPoller) IndexedLogs(eventSig common.Hash, address common.Address, t var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, []common.Hash, logpoller.Confirmations) ([]logpoller.Log, error)); ok { - return rf(eventSig, address, topicIndex, topicValues, confs) + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, common.Address, int, []common.Hash, logpoller.Confirmations) ([]logpoller.Log, error)); ok { + return rf(ctx, eventSig, address, topicIndex, topicValues, confs) } - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, []common.Hash, logpoller.Confirmations) []logpoller.Log); ok { - r0 
= rf(eventSig, address, topicIndex, topicValues, confs) + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, common.Address, int, []common.Hash, logpoller.Confirmations) []logpoller.Log); ok { + r0 = rf(ctx, eventSig, address, topicIndex, topicValues, confs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, []common.Hash, logpoller.Confirmations) error); ok { - r1 = rf(eventSig, address, topicIndex, topicValues, confs) + if rf, ok := ret.Get(1).(func(context.Context, common.Hash, common.Address, int, []common.Hash, logpoller.Confirmations) error); ok { + r1 = rf(ctx, eventSig, address, topicIndex, topicValues, confs) } else { r1 = ret.Error(1) } @@ -135,9 +135,9 @@ func (_m *LogPoller) IndexedLogs(eventSig common.Hash, address common.Address, t return r0, r1 } -// IndexedLogsByBlockRange provides a mock function with given fields: start, end, eventSig, address, topicIndex, topicValues -func (_m *LogPoller) IndexedLogsByBlockRange(start int64, end int64, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash) ([]logpoller.Log, error) { - ret := _m.Called(start, end, eventSig, address, topicIndex, topicValues) +// IndexedLogsByBlockRange provides a mock function with given fields: ctx, start, end, eventSig, address, topicIndex, topicValues +func (_m *LogPoller) IndexedLogsByBlockRange(ctx context.Context, start int64, end int64, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash) ([]logpoller.Log, error) { + ret := _m.Called(ctx, start, end, eventSig, address, topicIndex, topicValues) if len(ret) == 0 { panic("no return value specified for IndexedLogsByBlockRange") @@ -145,19 +145,19 @@ func (_m *LogPoller) IndexedLogsByBlockRange(start int64, end int64, eventSig co var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(int64, int64, common.Hash, common.Address, int, []common.Hash) 
([]logpoller.Log, error)); ok { - return rf(start, end, eventSig, address, topicIndex, topicValues) + if rf, ok := ret.Get(0).(func(context.Context, int64, int64, common.Hash, common.Address, int, []common.Hash) ([]logpoller.Log, error)); ok { + return rf(ctx, start, end, eventSig, address, topicIndex, topicValues) } - if rf, ok := ret.Get(0).(func(int64, int64, common.Hash, common.Address, int, []common.Hash) []logpoller.Log); ok { - r0 = rf(start, end, eventSig, address, topicIndex, topicValues) + if rf, ok := ret.Get(0).(func(context.Context, int64, int64, common.Hash, common.Address, int, []common.Hash) []logpoller.Log); ok { + r0 = rf(ctx, start, end, eventSig, address, topicIndex, topicValues) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(int64, int64, common.Hash, common.Address, int, []common.Hash) error); ok { - r1 = rf(start, end, eventSig, address, topicIndex, topicValues) + if rf, ok := ret.Get(1).(func(context.Context, int64, int64, common.Hash, common.Address, int, []common.Hash) error); ok { + r1 = rf(ctx, start, end, eventSig, address, topicIndex, topicValues) } else { r1 = ret.Error(1) } @@ -165,9 +165,9 @@ func (_m *LogPoller) IndexedLogsByBlockRange(start int64, end int64, eventSig co return r0, r1 } -// IndexedLogsByTxHash provides a mock function with given fields: eventSig, address, txHash -func (_m *LogPoller) IndexedLogsByTxHash(eventSig common.Hash, address common.Address, txHash common.Hash) ([]logpoller.Log, error) { - ret := _m.Called(eventSig, address, txHash) +// IndexedLogsByTxHash provides a mock function with given fields: ctx, eventSig, address, txHash +func (_m *LogPoller) IndexedLogsByTxHash(ctx context.Context, eventSig common.Hash, address common.Address, txHash common.Hash) ([]logpoller.Log, error) { + ret := _m.Called(ctx, eventSig, address, txHash) if len(ret) == 0 { panic("no return value specified for IndexedLogsByTxHash") @@ -175,19 +175,19 @@ func (_m *LogPoller) 
IndexedLogsByTxHash(eventSig common.Hash, address common.Ad var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, common.Hash) ([]logpoller.Log, error)); ok { - return rf(eventSig, address, txHash) + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, common.Address, common.Hash) ([]logpoller.Log, error)); ok { + return rf(ctx, eventSig, address, txHash) } - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, common.Hash) []logpoller.Log); ok { - r0 = rf(eventSig, address, txHash) + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, common.Address, common.Hash) []logpoller.Log); ok { + r0 = rf(ctx, eventSig, address, txHash) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(common.Hash, common.Address, common.Hash) error); ok { - r1 = rf(eventSig, address, txHash) + if rf, ok := ret.Get(1).(func(context.Context, common.Hash, common.Address, common.Hash) error); ok { + r1 = rf(ctx, eventSig, address, txHash) } else { r1 = ret.Error(1) } @@ -195,9 +195,9 @@ func (_m *LogPoller) IndexedLogsByTxHash(eventSig common.Hash, address common.Ad return r0, r1 } -// IndexedLogsCreatedAfter provides a mock function with given fields: eventSig, address, topicIndex, topicValues, after, confs -func (_m *LogPoller) IndexedLogsCreatedAfter(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, after time.Time, confs logpoller.Confirmations) ([]logpoller.Log, error) { - ret := _m.Called(eventSig, address, topicIndex, topicValues, after, confs) +// IndexedLogsCreatedAfter provides a mock function with given fields: ctx, eventSig, address, topicIndex, topicValues, after, confs +func (_m *LogPoller) IndexedLogsCreatedAfter(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, after time.Time, confs logpoller.Confirmations) ([]logpoller.Log, error) { + ret := _m.Called(ctx, 
eventSig, address, topicIndex, topicValues, after, confs) if len(ret) == 0 { panic("no return value specified for IndexedLogsCreatedAfter") @@ -205,19 +205,19 @@ func (_m *LogPoller) IndexedLogsCreatedAfter(eventSig common.Hash, address commo var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, []common.Hash, time.Time, logpoller.Confirmations) ([]logpoller.Log, error)); ok { - return rf(eventSig, address, topicIndex, topicValues, after, confs) + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, common.Address, int, []common.Hash, time.Time, logpoller.Confirmations) ([]logpoller.Log, error)); ok { + return rf(ctx, eventSig, address, topicIndex, topicValues, after, confs) } - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, []common.Hash, time.Time, logpoller.Confirmations) []logpoller.Log); ok { - r0 = rf(eventSig, address, topicIndex, topicValues, after, confs) + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, common.Address, int, []common.Hash, time.Time, logpoller.Confirmations) []logpoller.Log); ok { + r0 = rf(ctx, eventSig, address, topicIndex, topicValues, after, confs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, []common.Hash, time.Time, logpoller.Confirmations) error); ok { - r1 = rf(eventSig, address, topicIndex, topicValues, after, confs) + if rf, ok := ret.Get(1).(func(context.Context, common.Hash, common.Address, int, []common.Hash, time.Time, logpoller.Confirmations) error); ok { + r1 = rf(ctx, eventSig, address, topicIndex, topicValues, after, confs) } else { r1 = ret.Error(1) } @@ -225,9 +225,9 @@ func (_m *LogPoller) IndexedLogsCreatedAfter(eventSig common.Hash, address commo return r0, r1 } -// IndexedLogsTopicGreaterThan provides a mock function with given fields: eventSig, address, topicIndex, topicValueMin, confs -func (_m *LogPoller) IndexedLogsTopicGreaterThan(eventSig 
common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { - ret := _m.Called(eventSig, address, topicIndex, topicValueMin, confs) +// IndexedLogsTopicGreaterThan provides a mock function with given fields: ctx, eventSig, address, topicIndex, topicValueMin, confs +func (_m *LogPoller) IndexedLogsTopicGreaterThan(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { + ret := _m.Called(ctx, eventSig, address, topicIndex, topicValueMin, confs) if len(ret) == 0 { panic("no return value specified for IndexedLogsTopicGreaterThan") @@ -235,19 +235,19 @@ func (_m *LogPoller) IndexedLogsTopicGreaterThan(eventSig common.Hash, address c var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations) ([]logpoller.Log, error)); ok { - return rf(eventSig, address, topicIndex, topicValueMin, confs) + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, common.Address, int, common.Hash, logpoller.Confirmations) ([]logpoller.Log, error)); ok { + return rf(ctx, eventSig, address, topicIndex, topicValueMin, confs) } - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations) []logpoller.Log); ok { - r0 = rf(eventSig, address, topicIndex, topicValueMin, confs) + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, common.Address, int, common.Hash, logpoller.Confirmations) []logpoller.Log); ok { + r0 = rf(ctx, eventSig, address, topicIndex, topicValueMin, confs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations) error); ok { - r1 = rf(eventSig, address, topicIndex, topicValueMin, confs) + if rf, ok := ret.Get(1).(func(context.Context, 
common.Hash, common.Address, int, common.Hash, logpoller.Confirmations) error); ok { + r1 = rf(ctx, eventSig, address, topicIndex, topicValueMin, confs) } else { r1 = ret.Error(1) } @@ -255,9 +255,9 @@ func (_m *LogPoller) IndexedLogsTopicGreaterThan(eventSig common.Hash, address c return r0, r1 } -// IndexedLogsTopicRange provides a mock function with given fields: eventSig, address, topicIndex, topicValueMin, topicValueMax, confs -func (_m *LogPoller) IndexedLogsTopicRange(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, topicValueMax common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { - ret := _m.Called(eventSig, address, topicIndex, topicValueMin, topicValueMax, confs) +// IndexedLogsTopicRange provides a mock function with given fields: ctx, eventSig, address, topicIndex, topicValueMin, topicValueMax, confs +func (_m *LogPoller) IndexedLogsTopicRange(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, topicValueMax common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { + ret := _m.Called(ctx, eventSig, address, topicIndex, topicValueMin, topicValueMax, confs) if len(ret) == 0 { panic("no return value specified for IndexedLogsTopicRange") @@ -265,19 +265,19 @@ func (_m *LogPoller) IndexedLogsTopicRange(eventSig common.Hash, address common. 
var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations) ([]logpoller.Log, error)); ok { - return rf(eventSig, address, topicIndex, topicValueMin, topicValueMax, confs) + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations) ([]logpoller.Log, error)); ok { + return rf(ctx, eventSig, address, topicIndex, topicValueMin, topicValueMax, confs) } - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations) []logpoller.Log); ok { - r0 = rf(eventSig, address, topicIndex, topicValueMin, topicValueMax, confs) + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations) []logpoller.Log); ok { + r0 = rf(ctx, eventSig, address, topicIndex, topicValueMin, topicValueMax, confs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations) error); ok { - r1 = rf(eventSig, address, topicIndex, topicValueMin, topicValueMax, confs) + if rf, ok := ret.Get(1).(func(context.Context, common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations) error); ok { + r1 = rf(ctx, eventSig, address, topicIndex, topicValueMin, topicValueMax, confs) } else { r1 = ret.Error(1) } @@ -285,9 +285,9 @@ func (_m *LogPoller) IndexedLogsTopicRange(eventSig common.Hash, address common. 
return r0, r1 } -// IndexedLogsWithSigsExcluding provides a mock function with given fields: address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs -func (_m *LogPoller) IndexedLogsWithSigsExcluding(address common.Address, eventSigA common.Hash, eventSigB common.Hash, topicIndex int, fromBlock int64, toBlock int64, confs logpoller.Confirmations) ([]logpoller.Log, error) { - ret := _m.Called(address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs) +// IndexedLogsWithSigsExcluding provides a mock function with given fields: ctx, address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs +func (_m *LogPoller) IndexedLogsWithSigsExcluding(ctx context.Context, address common.Address, eventSigA common.Hash, eventSigB common.Hash, topicIndex int, fromBlock int64, toBlock int64, confs logpoller.Confirmations) ([]logpoller.Log, error) { + ret := _m.Called(ctx, address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs) if len(ret) == 0 { panic("no return value specified for IndexedLogsWithSigsExcluding") @@ -295,19 +295,19 @@ func (_m *LogPoller) IndexedLogsWithSigsExcluding(address common.Address, eventS var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(common.Address, common.Hash, common.Hash, int, int64, int64, logpoller.Confirmations) ([]logpoller.Log, error)); ok { - return rf(address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs) + if rf, ok := ret.Get(0).(func(context.Context, common.Address, common.Hash, common.Hash, int, int64, int64, logpoller.Confirmations) ([]logpoller.Log, error)); ok { + return rf(ctx, address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs) } - if rf, ok := ret.Get(0).(func(common.Address, common.Hash, common.Hash, int, int64, int64, logpoller.Confirmations) []logpoller.Log); ok { - r0 = rf(address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs) + if rf, ok := ret.Get(0).(func(context.Context, common.Address, common.Hash, common.Hash, 
int, int64, int64, logpoller.Confirmations) []logpoller.Log); ok { + r0 = rf(ctx, address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(common.Address, common.Hash, common.Hash, int, int64, int64, logpoller.Confirmations) error); ok { - r1 = rf(address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs) + if rf, ok := ret.Get(1).(func(context.Context, common.Address, common.Hash, common.Hash, int, int64, int64, logpoller.Confirmations) error); ok { + r1 = rf(ctx, address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs) } else { r1 = ret.Error(1) } @@ -343,9 +343,9 @@ func (_m *LogPoller) LatestBlock(ctx context.Context) (logpoller.LogPollerBlock, return r0, r1 } -// LatestBlockByEventSigsAddrsWithConfs provides a mock function with given fields: fromBlock, eventSigs, addresses, confs -func (_m *LogPoller) LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs logpoller.Confirmations) (int64, error) { - ret := _m.Called(fromBlock, eventSigs, addresses, confs) +// LatestBlockByEventSigsAddrsWithConfs provides a mock function with given fields: ctx, fromBlock, eventSigs, addresses, confs +func (_m *LogPoller) LatestBlockByEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs logpoller.Confirmations) (int64, error) { + ret := _m.Called(ctx, fromBlock, eventSigs, addresses, confs) if len(ret) == 0 { panic("no return value specified for LatestBlockByEventSigsAddrsWithConfs") @@ -353,17 +353,17 @@ func (_m *LogPoller) LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, event var r0 int64 var r1 error - if rf, ok := ret.Get(0).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations) (int64, error)); ok { - return rf(fromBlock, eventSigs, addresses, confs) + if rf, ok := 
ret.Get(0).(func(context.Context, int64, []common.Hash, []common.Address, logpoller.Confirmations) (int64, error)); ok { + return rf(ctx, fromBlock, eventSigs, addresses, confs) } - if rf, ok := ret.Get(0).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations) int64); ok { - r0 = rf(fromBlock, eventSigs, addresses, confs) + if rf, ok := ret.Get(0).(func(context.Context, int64, []common.Hash, []common.Address, logpoller.Confirmations) int64); ok { + r0 = rf(ctx, fromBlock, eventSigs, addresses, confs) } else { r0 = ret.Get(0).(int64) } - if rf, ok := ret.Get(1).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations) error); ok { - r1 = rf(fromBlock, eventSigs, addresses, confs) + if rf, ok := ret.Get(1).(func(context.Context, int64, []common.Hash, []common.Address, logpoller.Confirmations) error); ok { + r1 = rf(ctx, fromBlock, eventSigs, addresses, confs) } else { r1 = ret.Error(1) } @@ -371,9 +371,9 @@ func (_m *LogPoller) LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, event return r0, r1 } -// LatestLogByEventSigWithConfs provides a mock function with given fields: eventSig, address, confs -func (_m *LogPoller) LatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs logpoller.Confirmations) (*logpoller.Log, error) { - ret := _m.Called(eventSig, address, confs) +// LatestLogByEventSigWithConfs provides a mock function with given fields: ctx, eventSig, address, confs +func (_m *LogPoller) LatestLogByEventSigWithConfs(ctx context.Context, eventSig common.Hash, address common.Address, confs logpoller.Confirmations) (*logpoller.Log, error) { + ret := _m.Called(ctx, eventSig, address, confs) if len(ret) == 0 { panic("no return value specified for LatestLogByEventSigWithConfs") @@ -381,19 +381,19 @@ func (_m *LogPoller) LatestLogByEventSigWithConfs(eventSig common.Hash, address var r0 *logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, logpoller.Confirmations) 
(*logpoller.Log, error)); ok { - return rf(eventSig, address, confs) + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, common.Address, logpoller.Confirmations) (*logpoller.Log, error)); ok { + return rf(ctx, eventSig, address, confs) } - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, logpoller.Confirmations) *logpoller.Log); ok { - r0 = rf(eventSig, address, confs) + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, common.Address, logpoller.Confirmations) *logpoller.Log); ok { + r0 = rf(ctx, eventSig, address, confs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*logpoller.Log) } } - if rf, ok := ret.Get(1).(func(common.Hash, common.Address, logpoller.Confirmations) error); ok { - r1 = rf(eventSig, address, confs) + if rf, ok := ret.Get(1).(func(context.Context, common.Hash, common.Address, logpoller.Confirmations) error); ok { + r1 = rf(ctx, eventSig, address, confs) } else { r1 = ret.Error(1) } @@ -401,9 +401,9 @@ func (_m *LogPoller) LatestLogByEventSigWithConfs(eventSig common.Hash, address return r0, r1 } -// LatestLogEventSigsAddrsWithConfs provides a mock function with given fields: fromBlock, eventSigs, addresses, confs -func (_m *LogPoller) LatestLogEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs logpoller.Confirmations) ([]logpoller.Log, error) { - ret := _m.Called(fromBlock, eventSigs, addresses, confs) +// LatestLogEventSigsAddrsWithConfs provides a mock function with given fields: ctx, fromBlock, eventSigs, addresses, confs +func (_m *LogPoller) LatestLogEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs logpoller.Confirmations) ([]logpoller.Log, error) { + ret := _m.Called(ctx, fromBlock, eventSigs, addresses, confs) if len(ret) == 0 { panic("no return value specified for LatestLogEventSigsAddrsWithConfs") @@ -411,19 +411,19 @@ func (_m *LogPoller) LatestLogEventSigsAddrsWithConfs(fromBlock 
int64, eventSigs var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations) ([]logpoller.Log, error)); ok { - return rf(fromBlock, eventSigs, addresses, confs) + if rf, ok := ret.Get(0).(func(context.Context, int64, []common.Hash, []common.Address, logpoller.Confirmations) ([]logpoller.Log, error)); ok { + return rf(ctx, fromBlock, eventSigs, addresses, confs) } - if rf, ok := ret.Get(0).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations) []logpoller.Log); ok { - r0 = rf(fromBlock, eventSigs, addresses, confs) + if rf, ok := ret.Get(0).(func(context.Context, int64, []common.Hash, []common.Address, logpoller.Confirmations) []logpoller.Log); ok { + r0 = rf(ctx, fromBlock, eventSigs, addresses, confs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations) error); ok { - r1 = rf(fromBlock, eventSigs, addresses, confs) + if rf, ok := ret.Get(1).(func(context.Context, int64, []common.Hash, []common.Address, logpoller.Confirmations) error); ok { + r1 = rf(ctx, fromBlock, eventSigs, addresses, confs) } else { r1 = ret.Error(1) } @@ -431,9 +431,9 @@ func (_m *LogPoller) LatestLogEventSigsAddrsWithConfs(fromBlock int64, eventSigs return r0, r1 } -// Logs provides a mock function with given fields: start, end, eventSig, address -func (_m *LogPoller) Logs(start int64, end int64, eventSig common.Hash, address common.Address) ([]logpoller.Log, error) { - ret := _m.Called(start, end, eventSig, address) +// Logs provides a mock function with given fields: ctx, start, end, eventSig, address +func (_m *LogPoller) Logs(ctx context.Context, start int64, end int64, eventSig common.Hash, address common.Address) ([]logpoller.Log, error) { + ret := _m.Called(ctx, start, end, eventSig, address) if len(ret) == 0 { panic("no return value specified for Logs") @@ -441,19 +441,19 @@ func (_m 
*LogPoller) Logs(start int64, end int64, eventSig common.Hash, address var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(int64, int64, common.Hash, common.Address) ([]logpoller.Log, error)); ok { - return rf(start, end, eventSig, address) + if rf, ok := ret.Get(0).(func(context.Context, int64, int64, common.Hash, common.Address) ([]logpoller.Log, error)); ok { + return rf(ctx, start, end, eventSig, address) } - if rf, ok := ret.Get(0).(func(int64, int64, common.Hash, common.Address) []logpoller.Log); ok { - r0 = rf(start, end, eventSig, address) + if rf, ok := ret.Get(0).(func(context.Context, int64, int64, common.Hash, common.Address) []logpoller.Log); ok { + r0 = rf(ctx, start, end, eventSig, address) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(int64, int64, common.Hash, common.Address) error); ok { - r1 = rf(start, end, eventSig, address) + if rf, ok := ret.Get(1).(func(context.Context, int64, int64, common.Hash, common.Address) error); ok { + r1 = rf(ctx, start, end, eventSig, address) } else { r1 = ret.Error(1) } @@ -461,9 +461,9 @@ func (_m *LogPoller) Logs(start int64, end int64, eventSig common.Hash, address return r0, r1 } -// LogsCreatedAfter provides a mock function with given fields: eventSig, address, _a2, confs -func (_m *LogPoller) LogsCreatedAfter(eventSig common.Hash, address common.Address, _a2 time.Time, confs logpoller.Confirmations) ([]logpoller.Log, error) { - ret := _m.Called(eventSig, address, _a2, confs) +// LogsCreatedAfter provides a mock function with given fields: ctx, eventSig, address, _a3, confs +func (_m *LogPoller) LogsCreatedAfter(ctx context.Context, eventSig common.Hash, address common.Address, _a3 time.Time, confs logpoller.Confirmations) ([]logpoller.Log, error) { + ret := _m.Called(ctx, eventSig, address, _a3, confs) if len(ret) == 0 { panic("no return value specified for LogsCreatedAfter") @@ -471,19 +471,19 @@ func (_m *LogPoller) 
LogsCreatedAfter(eventSig common.Hash, address common.Addre var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, time.Time, logpoller.Confirmations) ([]logpoller.Log, error)); ok { - return rf(eventSig, address, _a2, confs) + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, common.Address, time.Time, logpoller.Confirmations) ([]logpoller.Log, error)); ok { + return rf(ctx, eventSig, address, _a3, confs) } - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, time.Time, logpoller.Confirmations) []logpoller.Log); ok { - r0 = rf(eventSig, address, _a2, confs) + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, common.Address, time.Time, logpoller.Confirmations) []logpoller.Log); ok { + r0 = rf(ctx, eventSig, address, _a3, confs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(common.Hash, common.Address, time.Time, logpoller.Confirmations) error); ok { - r1 = rf(eventSig, address, _a2, confs) + if rf, ok := ret.Get(1).(func(context.Context, common.Hash, common.Address, time.Time, logpoller.Confirmations) error); ok { + r1 = rf(ctx, eventSig, address, _a3, confs) } else { r1 = ret.Error(1) } @@ -491,9 +491,9 @@ func (_m *LogPoller) LogsCreatedAfter(eventSig common.Hash, address common.Addre return r0, r1 } -// LogsDataWordBetween provides a mock function with given fields: eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs -func (_m *LogPoller) LogsDataWordBetween(eventSig common.Hash, address common.Address, wordIndexMin int, wordIndexMax int, wordValue common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { - ret := _m.Called(eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs) +// LogsDataWordBetween provides a mock function with given fields: ctx, eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs +func (_m *LogPoller) LogsDataWordBetween(ctx context.Context, eventSig common.Hash, address 
common.Address, wordIndexMin int, wordIndexMax int, wordValue common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { + ret := _m.Called(ctx, eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs) if len(ret) == 0 { panic("no return value specified for LogsDataWordBetween") @@ -501,19 +501,19 @@ func (_m *LogPoller) LogsDataWordBetween(eventSig common.Hash, address common.Ad var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, int, common.Hash, logpoller.Confirmations) ([]logpoller.Log, error)); ok { - return rf(eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs) + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, common.Address, int, int, common.Hash, logpoller.Confirmations) ([]logpoller.Log, error)); ok { + return rf(ctx, eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs) } - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, int, common.Hash, logpoller.Confirmations) []logpoller.Log); ok { - r0 = rf(eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs) + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, common.Address, int, int, common.Hash, logpoller.Confirmations) []logpoller.Log); ok { + r0 = rf(ctx, eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, int, common.Hash, logpoller.Confirmations) error); ok { - r1 = rf(eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs) + if rf, ok := ret.Get(1).(func(context.Context, common.Hash, common.Address, int, int, common.Hash, logpoller.Confirmations) error); ok { + r1 = rf(ctx, eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs) } else { r1 = ret.Error(1) } @@ -521,9 +521,9 @@ func (_m *LogPoller) LogsDataWordBetween(eventSig common.Hash, address common.Ad return r0, r1 } -// LogsDataWordGreaterThan 
provides a mock function with given fields: eventSig, address, wordIndex, wordValueMin, confs -func (_m *LogPoller) LogsDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { - ret := _m.Called(eventSig, address, wordIndex, wordValueMin, confs) +// LogsDataWordGreaterThan provides a mock function with given fields: ctx, eventSig, address, wordIndex, wordValueMin, confs +func (_m *LogPoller) LogsDataWordGreaterThan(ctx context.Context, eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { + ret := _m.Called(ctx, eventSig, address, wordIndex, wordValueMin, confs) if len(ret) == 0 { panic("no return value specified for LogsDataWordGreaterThan") @@ -531,19 +531,19 @@ func (_m *LogPoller) LogsDataWordGreaterThan(eventSig common.Hash, address commo var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations) ([]logpoller.Log, error)); ok { - return rf(eventSig, address, wordIndex, wordValueMin, confs) + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, common.Address, int, common.Hash, logpoller.Confirmations) ([]logpoller.Log, error)); ok { + return rf(ctx, eventSig, address, wordIndex, wordValueMin, confs) } - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations) []logpoller.Log); ok { - r0 = rf(eventSig, address, wordIndex, wordValueMin, confs) + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, common.Address, int, common.Hash, logpoller.Confirmations) []logpoller.Log); ok { + r0 = rf(ctx, eventSig, address, wordIndex, wordValueMin, confs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations) error); ok { - 
r1 = rf(eventSig, address, wordIndex, wordValueMin, confs) + if rf, ok := ret.Get(1).(func(context.Context, common.Hash, common.Address, int, common.Hash, logpoller.Confirmations) error); ok { + r1 = rf(ctx, eventSig, address, wordIndex, wordValueMin, confs) } else { r1 = ret.Error(1) } @@ -551,9 +551,9 @@ func (_m *LogPoller) LogsDataWordGreaterThan(eventSig common.Hash, address commo return r0, r1 } -// LogsDataWordRange provides a mock function with given fields: eventSig, address, wordIndex, wordValueMin, wordValueMax, confs -func (_m *LogPoller) LogsDataWordRange(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, wordValueMax common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { - ret := _m.Called(eventSig, address, wordIndex, wordValueMin, wordValueMax, confs) +// LogsDataWordRange provides a mock function with given fields: ctx, eventSig, address, wordIndex, wordValueMin, wordValueMax, confs +func (_m *LogPoller) LogsDataWordRange(ctx context.Context, eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, wordValueMax common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { + ret := _m.Called(ctx, eventSig, address, wordIndex, wordValueMin, wordValueMax, confs) if len(ret) == 0 { panic("no return value specified for LogsDataWordRange") @@ -561,19 +561,19 @@ func (_m *LogPoller) LogsDataWordRange(eventSig common.Hash, address common.Addr var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations) ([]logpoller.Log, error)); ok { - return rf(eventSig, address, wordIndex, wordValueMin, wordValueMax, confs) + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations) ([]logpoller.Log, error)); ok { + return rf(ctx, eventSig, address, wordIndex, wordValueMin, wordValueMax, confs) } - if rf, ok := 
ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations) []logpoller.Log); ok { - r0 = rf(eventSig, address, wordIndex, wordValueMin, wordValueMax, confs) + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations) []logpoller.Log); ok { + r0 = rf(ctx, eventSig, address, wordIndex, wordValueMin, wordValueMax, confs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations) error); ok { - r1 = rf(eventSig, address, wordIndex, wordValueMin, wordValueMax, confs) + if rf, ok := ret.Get(1).(func(context.Context, common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations) error); ok { + r1 = rf(ctx, eventSig, address, wordIndex, wordValueMin, wordValueMax, confs) } else { r1 = ret.Error(1) } @@ -581,9 +581,9 @@ func (_m *LogPoller) LogsDataWordRange(eventSig common.Hash, address common.Addr return r0, r1 } -// LogsWithSigs provides a mock function with given fields: start, end, eventSigs, address -func (_m *LogPoller) LogsWithSigs(start int64, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { - ret := _m.Called(start, end, eventSigs, address) +// LogsWithSigs provides a mock function with given fields: ctx, start, end, eventSigs, address +func (_m *LogPoller) LogsWithSigs(ctx context.Context, start int64, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + ret := _m.Called(ctx, start, end, eventSigs, address) if len(ret) == 0 { panic("no return value specified for LogsWithSigs") @@ -591,19 +591,19 @@ func (_m *LogPoller) LogsWithSigs(start int64, end int64, eventSigs []common.Has var r0 []logpoller.Log var r1 error - if rf, ok := ret.Get(0).(func(int64, int64, []common.Hash, common.Address) ([]logpoller.Log, error)); ok { - return 
rf(start, end, eventSigs, address) + if rf, ok := ret.Get(0).(func(context.Context, int64, int64, []common.Hash, common.Address) ([]logpoller.Log, error)); ok { + return rf(ctx, start, end, eventSigs, address) } - if rf, ok := ret.Get(0).(func(int64, int64, []common.Hash, common.Address) []logpoller.Log); ok { - r0 = rf(start, end, eventSigs, address) + if rf, ok := ret.Get(0).(func(context.Context, int64, int64, []common.Hash, common.Address) []logpoller.Log); ok { + r0 = rf(ctx, start, end, eventSigs, address) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]logpoller.Log) } } - if rf, ok := ret.Get(1).(func(int64, int64, []common.Hash, common.Address) error); ok { - r1 = rf(start, end, eventSigs, address) + if rf, ok := ret.Get(1).(func(context.Context, int64, int64, []common.Hash, common.Address) error); ok { + r1 = rf(ctx, start, end, eventSigs, address) } else { r1 = ret.Error(1) } @@ -647,17 +647,17 @@ func (_m *LogPoller) Ready() error { return r0 } -// RegisterFilter provides a mock function with given fields: filter -func (_m *LogPoller) RegisterFilter(filter logpoller.Filter) error { - ret := _m.Called(filter) +// RegisterFilter provides a mock function with given fields: ctx, filter +func (_m *LogPoller) RegisterFilter(ctx context.Context, filter logpoller.Filter) error { + ret := _m.Called(ctx, filter) if len(ret) == 0 { panic("no return value specified for RegisterFilter") } var r0 error - if rf, ok := ret.Get(0).(func(logpoller.Filter) error); ok { - r0 = rf(filter) + if rf, ok := ret.Get(0).(func(context.Context, logpoller.Filter) error); ok { + r0 = rf(ctx, filter) } else { r0 = ret.Error(0) } @@ -706,17 +706,17 @@ func (_m *LogPoller) Start(_a0 context.Context) error { return r0 } -// UnregisterFilter provides a mock function with given fields: name -func (_m *LogPoller) UnregisterFilter(name string) error { - ret := _m.Called(name) +// UnregisterFilter provides a mock function with given fields: ctx, name +func (_m *LogPoller) 
UnregisterFilter(ctx context.Context, name string) error { + ret := _m.Called(ctx, name) if len(ret) == 0 { panic("no return value specified for UnregisterFilter") } var r0 error - if rf, ok := ret.Get(0).(func(string) error); ok { - r0 = rf(name) + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, name) } else { r0 = ret.Error(0) } diff --git a/core/services/blockhashstore/coordinators.go b/core/services/blockhashstore/coordinators.go index 64e0f0550f5..7e4a0c5dc02 100644 --- a/core/services/blockhashstore/coordinators.go +++ b/core/services/blockhashstore/coordinators.go @@ -71,7 +71,7 @@ type V1Coordinator struct { // NewV1Coordinator creates a new V1Coordinator from the given contract. func NewV1Coordinator(c v1.VRFCoordinatorInterface, lp logpoller.LogPoller) (*V1Coordinator, error) { - err := lp.RegisterFilter(logpoller.Filter{ + err := lp.RegisterFilter(context.Background(), logpoller.Filter{ Name: logpoller.FilterName("VRFv1CoordinatorFeeder", c.Address()), EventSigs: []common.Hash{ v1.VRFCoordinatorRandomnessRequest{}.Topic(), @@ -91,6 +91,7 @@ func (v *V1Coordinator) Requests( toBlock uint64, ) ([]Event, error) { logs, err := v.lp.LogsWithSigs( + ctx, int64(fromBlock), int64(toBlock), []common.Hash{ @@ -125,6 +126,7 @@ func (v *V1Coordinator) Fulfillments(ctx context.Context, fromBlock uint64) ([]E } logs, err := v.lp.LogsWithSigs( + ctx, int64(fromBlock), toBlock.BlockNumber, []common.Hash{ @@ -158,7 +160,7 @@ type V2Coordinator struct { // NewV2Coordinator creates a new V2Coordinator from the given contract. 
func NewV2Coordinator(c v2.VRFCoordinatorV2Interface, lp logpoller.LogPoller) (*V2Coordinator, error) { - err := lp.RegisterFilter(logpoller.Filter{ + err := lp.RegisterFilter(context.Background(), logpoller.Filter{ Name: logpoller.FilterName("VRFv2CoordinatorFeeder", c.Address()), EventSigs: []common.Hash{ v2.VRFCoordinatorV2RandomWordsRequested{}.Topic(), @@ -180,6 +182,7 @@ func (v *V2Coordinator) Requests( toBlock uint64, ) ([]Event, error) { logs, err := v.lp.LogsWithSigs( + ctx, int64(fromBlock), int64(toBlock), []common.Hash{ @@ -214,6 +217,7 @@ func (v *V2Coordinator) Fulfillments(ctx context.Context, fromBlock uint64) ([]E } logs, err := v.lp.LogsWithSigs( + ctx, int64(fromBlock), toBlock.BlockNumber, []common.Hash{ @@ -247,7 +251,7 @@ type V2PlusCoordinator struct { // NewV2Coordinator creates a new V2Coordinator from the given contract. func NewV2PlusCoordinator(c v2plus.IVRFCoordinatorV2PlusInternalInterface, lp logpoller.LogPoller) (*V2PlusCoordinator, error) { - err := lp.RegisterFilter(logpoller.Filter{ + err := lp.RegisterFilter(context.Background(), logpoller.Filter{ Name: logpoller.FilterName("VRFv2PlusCoordinatorFeeder", c.Address()), EventSigs: []common.Hash{ v2plus.IVRFCoordinatorV2PlusInternalRandomWordsRequested{}.Topic(), @@ -269,6 +273,7 @@ func (v *V2PlusCoordinator) Requests( toBlock uint64, ) ([]Event, error) { logs, err := v.lp.LogsWithSigs( + ctx, int64(fromBlock), int64(toBlock), []common.Hash{ @@ -303,6 +308,7 @@ func (v *V2PlusCoordinator) Fulfillments(ctx context.Context, fromBlock uint64) } logs, err := v.lp.LogsWithSigs( + ctx, int64(fromBlock), toBlock.BlockNumber, []common.Hash{ diff --git a/core/services/blockhashstore/feeder_test.go b/core/services/blockhashstore/feeder_test.go index 3266b7d92ea..fabde77c786 100644 --- a/core/services/blockhashstore/feeder_test.go +++ b/core/services/blockhashstore/feeder_test.go @@ -410,7 +410,7 @@ func (test testCase) testFeederWithLogPollerVRFv1(t *testing.T) { // Instantiate log poller & 
coordinator. lp := &mocklp.LogPoller{} - lp.On("RegisterFilter", mock.Anything).Return(nil) + lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) c, err := solidity_vrf_coordinator_interface.NewVRFCoordinator(coordinatorAddress, nil) require.NoError(t, err) coordinator := &V1Coordinator{ @@ -449,6 +449,7 @@ func (test testCase) testFeederWithLogPollerVRFv1(t *testing.T) { Return(logpoller.LogPollerBlock{BlockNumber: latest}, nil) lp.On( "LogsWithSigs", + mock.Anything, fromBlock, toBlock, []common.Hash{ @@ -458,6 +459,7 @@ func (test testCase) testFeederWithLogPollerVRFv1(t *testing.T) { mock.Anything, ).Return(requestLogs, nil) lp.On( "LogsWithSigs", + mock.Anything, fromBlock, latest, @@ -504,7 +506,7 @@ func (test testCase) testFeederWithLogPollerVRFv2(t *testing.T) { // Instantiate log poller & coordinator. lp := &mocklp.LogPoller{} - lp.On("RegisterFilter", mock.Anything).Return(nil) + lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) c, err := vrf_coordinator_v2.NewVRFCoordinatorV2(coordinatorAddress, nil) require.NoError(t, err) coordinator := &V2Coordinator{ @@ -547,6 +549,7 @@ func (test testCase) testFeederWithLogPollerVRFv2(t *testing.T) { Return(logpoller.LogPollerBlock{BlockNumber: latest}, nil) lp.On( "LogsWithSigs", + mock.Anything, fromBlock, toBlock, []common.Hash{ @@ -557,6 +560,7 @@ func (test testCase) testFeederWithLogPollerVRFv2(t *testing.T) { ).Return(requestLogs, nil) lp.On( "LogsWithSigs", + mock.Anything, fromBlock, latest, []common.Hash{ @@ -602,7 +606,7 @@ func (test testCase) testFeederWithLogPollerVRFv2Plus(t *testing.T) { // Instantiate log poller & coordinator. 
lp := &mocklp.LogPoller{} - lp.On("RegisterFilter", mock.Anything).Return(nil) + lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) c, err := vrf_coordinator_v2plus_interface.NewIVRFCoordinatorV2PlusInternal(coordinatorAddress, nil) require.NoError(t, err) coordinator := &V2PlusCoordinator{ @@ -655,6 +659,7 @@ func (test testCase) testFeederWithLogPollerVRFv2Plus(t *testing.T) { ).Return(requestLogs, nil) lp.On( "LogsWithSigs", + mock.Anything, fromBlock, latest, []common.Hash{ diff --git a/core/services/ocr2/delegate.go b/core/services/ocr2/delegate.go index 63f49ab8b2c..9442dbc6a18 100644 --- a/core/services/ocr2/delegate.go +++ b/core/services/ocr2/delegate.go @@ -332,10 +332,11 @@ func (d *Delegate) cleanupEVM(jb job.Job, q pg.Queryer, relayID relay.ID) error } filters = append(filters, relayFilters...) - + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() for _, filter := range filters { d.lggr.Debugf("Unregistering %s filter", filter) - err = lp.UnregisterFilter(filter) + err = lp.UnregisterFilter(ctx, filter) if err != nil { return errors.Wrapf(err, "Failed to unregister filter %s", filter) } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/log_provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/log_provider.go index 79bfd86e9d2..f7f3fcd65b0 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/log_provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/log_provider.go @@ -68,7 +68,9 @@ func NewLogProvider( // Add log filters for the log poller so that it can poll and find the logs that // we need. 
- err = logPoller.RegisterFilter(logpoller.Filter{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err = logPoller.RegisterFilter(ctx, logpoller.Filter{ Name: LogProviderFilterName(contract.Address()), EventSigs: []common.Hash{ registry.KeeperRegistryUpkeepPerformed{}.Topic(), @@ -151,6 +153,7 @@ func (c *LogProvider) PerformLogs(ctx context.Context) ([]ocr2keepers.PerformLog // always check the last lookback number of blocks and rebroadcast // this allows the plugin to make decisions based on event confirmations logs, err := c.logPoller.LogsWithSigs( + ctx, end.BlockNumber-c.lookbackBlocks, end.BlockNumber, []common.Hash{ @@ -193,6 +196,7 @@ func (c *LogProvider) StaleReportLogs(ctx context.Context) ([]ocr2keepers.StaleR // ReorgedUpkeepReportLogs logs, err := c.logPoller.LogsWithSigs( + ctx, end.BlockNumber-c.lookbackBlocks, end.BlockNumber, []common.Hash{ @@ -210,6 +214,7 @@ func (c *LogProvider) StaleReportLogs(ctx context.Context) ([]ocr2keepers.StaleR // StaleUpkeepReportLogs logs, err = c.logPoller.LogsWithSigs( + ctx, end.BlockNumber-c.lookbackBlocks, end.BlockNumber, []common.Hash{ @@ -227,6 +232,7 @@ func (c *LogProvider) StaleReportLogs(ctx context.Context) ([]ocr2keepers.StaleR // InsufficientFundsUpkeepReportLogs logs, err = c.logPoller.LogsWithSigs( + ctx, end.BlockNumber-c.lookbackBlocks, end.BlockNumber, []common.Hash{ diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go index 0b60fba6b95..71d45fcb203 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go @@ -367,7 +367,10 @@ func (r *EvmRegistry) pollLogs() error { { var logs []logpoller.Log + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() if logs, err = r.poller.LogsWithSigs( + ctx, end.BlockNumber-logEventLookback, end.BlockNumber, upkeepStateEvents, @@ 
-391,7 +394,9 @@ func UpkeepFilterName(addr common.Address) string { func (r *EvmRegistry) registerEvents(chainID uint64, addr common.Address) error { // Add log filters for the log poller so that it can poll and find the logs that // we need - return r.poller.RegisterFilter(logpoller.Filter{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + return r.poller.RegisterFilter(ctx, logpoller.Filter{ Name: UpkeepFilterName(addr), EventSigs: append(upkeepStateEvents, upkeepActiveEvents...), Addresses: []common.Address{addr}, diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index e8e8c3716ed..ef0490e2d8f 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -377,7 +377,7 @@ func (p *logEventProvider) readLogs(ctx context.Context, latest int64, filters [ start = configUpdateBlock } // query logs based on contract address, event sig, and blocks - logs, err := p.poller.LogsWithSigs(start, latest, []common.Hash{filter.topics[0]}, common.BytesToAddress(filter.addr)) + logs, err := p.poller.LogsWithSigs(ctx, start, latest, []common.Hash{filter.topics[0]}, common.BytesToAddress(filter.addr)) if err != nil { // cancel limit reservation as we failed to get logs resv.Cancel() diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go index a35200734eb..9109890392f 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go @@ -114,12 +114,12 @@ func (p *logEventProvider) register(ctx context.Context, lpFilter logpoller.Filt if filterStoreHasFilter { 
// removing filter in case of an update so we can recreate it with updated values lggr.Debugw("Upserting upkeep filter") - err := p.poller.UnregisterFilter(lpFilter.Name) + err := p.poller.UnregisterFilter(ctx, lpFilter.Name) if err != nil { return fmt.Errorf("failed to upsert (unregister) upkeep filter %s: %w", ufilter.upkeepID.String(), err) } } - if err := p.poller.RegisterFilter(lpFilter); err != nil { + if err := p.poller.RegisterFilter(ctx, lpFilter); err != nil { return err } p.filterStore.AddActiveUpkeeps(ufilter) @@ -146,7 +146,9 @@ func (p *logEventProvider) register(ctx context.Context, lpFilter logpoller.Filt func (p *logEventProvider) UnregisterFilter(upkeepID *big.Int) error { // Filter might have been unregistered already, only try to unregister if it exists if p.poller.HasFilter(p.filterName(upkeepID)) { - if err := p.poller.UnregisterFilter(p.filterName(upkeepID)); err != nil { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if err := p.poller.UnregisterFilter(ctx, p.filterName(upkeepID)); err != nil { return fmt.Errorf("failed to unregister upkeep filter %s: %w", upkeepID.String(), err) } } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go index c7d4495dc89..a63499152c1 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go @@ -259,7 +259,7 @@ func (r *logRecoverer) getLogTriggerCheckData(ctx context.Context, proposal ocr2 return nil, fmt.Errorf("log block %d is before the filter configUpdateBlock %d for upkeepID %s", logBlock, filter.configUpdateBlock, proposal.UpkeepID.String()) } - logs, err := r.poller.LogsWithSigs(logBlock-1, logBlock+1, filter.topics, common.BytesToAddress(filter.addr)) + logs, err := r.poller.LogsWithSigs(ctx, logBlock-1, logBlock+1, filter.topics, 
common.BytesToAddress(filter.addr)) if err != nil { return nil, fmt.Errorf("could not read logs: %w", err) } @@ -386,7 +386,7 @@ func (r *logRecoverer) recoverFilter(ctx context.Context, f upkeepFilter, startB end = offsetBlock } // we expect start to be > offsetBlock in any case - logs, err := r.poller.LogsWithSigs(start, end, f.topics, common.BytesToAddress(f.addr)) + logs, err := r.poller.LogsWithSigs(ctx, start, end, f.topics, common.BytesToAddress(f.addr)) if err != nil { return fmt.Errorf("could not read logs: %w", err) } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go index b07698d3172..54338207190 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go @@ -887,7 +887,7 @@ func TestLogRecoverer_GetProposalData(t *testing.T) { LatestBlockFn: func(ctx context.Context) (int64, error) { return 300, nil }, - LogsWithSigsFn: func(start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + LogsWithSigsFn: func(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { return nil, errors.New("logs with sigs boom") }, }, @@ -922,7 +922,7 @@ func TestLogRecoverer_GetProposalData(t *testing.T) { LatestBlockFn: func(ctx context.Context) (int64, error) { return 300, nil }, - LogsWithSigsFn: func(start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + LogsWithSigsFn: func(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { return []logpoller.Log{ { BlockNumber: 80, @@ -970,7 +970,7 @@ func TestLogRecoverer_GetProposalData(t *testing.T) { LatestBlockFn: func(ctx context.Context) (int64, error) { return 300, nil }, - 
LogsWithSigsFn: func(start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + LogsWithSigsFn: func(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { return []logpoller.Log{ { BlockNumber: 80, @@ -1021,7 +1021,7 @@ func TestLogRecoverer_GetProposalData(t *testing.T) { LatestBlockFn: func(ctx context.Context) (int64, error) { return 300, nil }, - LogsWithSigsFn: func(start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + LogsWithSigsFn: func(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { return []logpoller.Log{ { EvmChainId: ubig.New(big.NewInt(1)), @@ -1200,11 +1200,11 @@ func (s *mockFilterStore) Has(id *big.Int) bool { type mockLogPoller struct { logpoller.LogPoller LatestBlockFn func(ctx context.Context) (int64, error) - LogsWithSigsFn func(start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) + LogsWithSigsFn func(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) } -func (p *mockLogPoller) LogsWithSigs(start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { - return p.LogsWithSigsFn(start, end, eventSigs, address) +func (p *mockLogPoller) LogsWithSigs(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + return p.LogsWithSigsFn(ctx, start, end, eventSigs, address) } func (p *mockLogPoller) LatestBlock(ctx context.Context) (logpoller.LogPollerBlock, error) { block, err := p.LatestBlockFn(ctx) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go index 4e55a646fe4..100acdc5dd3 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go +++ 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go @@ -338,11 +338,13 @@ func (r *EvmRegistry) refreshLogTriggerUpkeepsBatch(logTriggerIDs []*big.Int) er logTriggerHashes = append(logTriggerHashes, common.BigToHash(id)) } - unpausedLogs, err := r.poller.IndexedLogs(iregistry21.IKeeperRegistryMasterUpkeepUnpaused{}.Topic(), r.addr, 1, logTriggerHashes, logpoller.Confirmations(r.finalityDepth)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + unpausedLogs, err := r.poller.IndexedLogs(ctx, iregistry21.IKeeperRegistryMasterUpkeepUnpaused{}.Topic(), r.addr, 1, logTriggerHashes, logpoller.Confirmations(r.finalityDepth)) if err != nil { return err } - configSetLogs, err := r.poller.IndexedLogs(iregistry21.IKeeperRegistryMasterUpkeepTriggerConfigSet{}.Topic(), r.addr, 1, logTriggerHashes, logpoller.Confirmations(r.finalityDepth)) + configSetLogs, err := r.poller.IndexedLogs(ctx, iregistry21.IKeeperRegistryMasterUpkeepTriggerConfigSet{}.Topic(), r.addr, 1, logTriggerHashes, logpoller.Confirmations(r.finalityDepth)) if err != nil { return err } @@ -420,7 +422,10 @@ func (r *EvmRegistry) pollUpkeepStateLogs() error { } var logs []logpoller.Log + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() if logs, err = r.poller.LogsWithSigs( + ctx, end.BlockNumber-logEventLookback, end.BlockNumber, upkeepStateEvents, @@ -503,7 +508,9 @@ func RegistryUpkeepFilterName(addr common.Address) string { // registerEvents registers upkeep state events from keeper registry on log poller func (r *EvmRegistry) registerEvents(_ uint64, addr common.Address) error { // Add log filters for the log poller so that it can poll and find the logs that we need - return r.poller.RegisterFilter(logpoller.Filter{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + return r.poller.RegisterFilter(ctx, logpoller.Filter{ Name: RegistryUpkeepFilterName(addr), EventSigs: upkeepStateEvents, Addresses: []common.Address{addr}, 
diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_check_pipeline_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_check_pipeline_test.go index 54e90ddc0b4..ea548249648 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_check_pipeline_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_check_pipeline_test.go @@ -215,15 +215,15 @@ func TestRegistry_VerifyCheckBlock(t *testing.T) { type mockLogPoller struct { logpoller.LogPoller GetBlocksRangeFn func(ctx context.Context, numbers []uint64) ([]logpoller.LogPollerBlock, error) - IndexedLogsFn func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) + IndexedLogsFn func(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) } func (p *mockLogPoller) GetBlocksRange(ctx context.Context, numbers []uint64) ([]logpoller.LogPollerBlock, error) { return p.GetBlocksRangeFn(ctx, numbers) } -func (p *mockLogPoller) IndexedLogs(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { - return p.IndexedLogsFn(eventSig, address, topicIndex, topicValues, confs) +func (p *mockLogPoller) IndexedLogs(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { + return p.IndexedLogsFn(ctx, eventSig, address, topicIndex, topicValues, confs) } func TestRegistry_VerifyLogExists(t *testing.T) { diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go index 263dcbc67c1..2ffb14b61ca 100644 --- 
a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go @@ -220,7 +220,7 @@ func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { }, }, poller: &mockLogPoller{ - IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { + IndexedLogsFn: func(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { if eventSig == (iregistry21.IKeeperRegistryMasterUpkeepUnpaused{}.Topic()) { return nil, errors.New("indexed logs boom") } @@ -245,7 +245,7 @@ func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { }, }, poller: &mockLogPoller{ - IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { + IndexedLogsFn: func(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { if eventSig == (iregistry21.IKeeperRegistryMasterUpkeepTriggerConfigSet{}.Topic()) { return nil, errors.New("indexed logs boom") } @@ -270,7 +270,7 @@ func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { }, }, poller: &mockLogPoller{ - IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { + IndexedLogsFn: func(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { return []logpoller.Log{ {}, }, nil @@ -302,7 +302,7 @@ func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { }, }, poller: &mockLogPoller{ - IndexedLogsFn: func(eventSig common.Hash, 
address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { + IndexedLogsFn: func(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { return []logpoller.Log{ { BlockNumber: 1, @@ -356,7 +356,7 @@ func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { }, }, poller: &mockLogPoller{ - IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { + IndexedLogsFn: func(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { return []logpoller.Log{ { BlockNumber: 2, @@ -408,7 +408,7 @@ func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { }, }, poller: &mockLogPoller{ - IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { + IndexedLogsFn: func(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { return []logpoller.Log{ { BlockNumber: 2, @@ -462,7 +462,7 @@ func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { }, }, poller: &mockLogPoller{ - IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { + IndexedLogsFn: func(ctx context.Context, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations) ([]logpoller.Log, error) { return []logpoller.Log{ { BlockNumber: 2, diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider.go 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider.go index ea4a2f58d09..a627fdd0b1e 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider.go @@ -60,7 +60,9 @@ func NewTransmitEventProvider( if err != nil { return nil, err } - err = logPoller.RegisterFilter(logpoller.Filter{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err = logPoller.RegisterFilter(ctx, logpoller.Filter{ Name: EventProviderFilterName(contract.Address()), EventSigs: []common.Hash{ // These are the events that are emitted when a node transmits a report @@ -143,6 +145,7 @@ func (c *EventProvider) GetLatestEvents(ctx context.Context) ([]ocr2keepers.Tran // always check the last lookback number of blocks and rebroadcast // this allows the plugin to make decisions based on event confirmations logs, err := c.logPoller.LogsWithSigs( + ctx, end.BlockNumber-c.lookbackBlocks, end.BlockNumber, []common.Hash{ diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/scanner.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/scanner.go index a0009ae65c5..5d7b8d73d30 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/scanner.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/scanner.go @@ -49,8 +49,8 @@ func NewPerformedEventsScanner( } } -func (s *performedEventsScanner) Start(_ context.Context) error { - return s.poller.RegisterFilter(logpoller.Filter{ +func (s *performedEventsScanner) Start(ctx context.Context) error { + return s.poller.RegisterFilter(ctx, logpoller.Filter{ Name: dedupFilterName(s.registryAddress), EventSigs: []common.Hash{ // listening to dedup key added event @@ -78,7 +78,7 @@ func (s *performedEventsScanner) ScanWorkIDs(ctx context.Context, workID ...stri end = len(ids) } batch := ids[i:end] - batchLogs, err := 
s.poller.IndexedLogs(iregistry21.IKeeperRegistryMasterDedupKeyAdded{}.Topic(), s.registryAddress, 1, batch, logpoller.Confirmations(s.finalityDepth)) + batchLogs, err := s.poller.IndexedLogs(ctx, iregistry21.IKeeperRegistryMasterDedupKeyAdded{}.Topic(), s.registryAddress, 1, batch, logpoller.Confirmations(s.finalityDepth)) if err != nil { return nil, fmt.Errorf("error fetching logs: %w", err) } diff --git a/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator.go b/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator.go index 6f3696a4871..8c62872c6c8 100644 --- a/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator.go +++ b/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator.go @@ -182,7 +182,9 @@ func New( // Add log filters for the log poller so that it can poll and find the logs that // we need. - err = logPoller.RegisterFilter(logpoller.Filter{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err = logPoller.RegisterFilter(ctx, logpoller.Filter{ Name: filterName(beaconAddress, coordinatorAddress, dkgAddress), EventSigs: []common.Hash{ t.randomnessRequestedTopic, @@ -256,6 +258,7 @@ func (c *coordinator) ReportIsOnchain( c.lggr.Info(fmt.Sprintf("epoch and round: %s %s", epochAndRound.String(), enrTopic.String())) logs, err := c.lp.IndexedLogs( + ctx, c.topics.newTransmissionTopic, c.beaconAddress, 2, @@ -340,6 +343,7 @@ func (c *coordinator) ReportBlocks( c.lggr.Infow("current chain height", "currentHeight", currentHeight) logs, err := c.lp.LogsWithSigs( + ctx, int64(currentHeight-c.coordinatorConfig.LookbackBlocks), int64(currentHeight), []common.Hash{ @@ -908,6 +912,7 @@ func (c *coordinator) DKGVRFCommittees(ctx context.Context) (dkgCommittee, vrfCo defer c.logAndEmitFunctionDuration("DKGVRFCommittees", startTime) latestVRF, err := c.lp.LatestLogByEventSigWithConfs( + ctx, c.configSetTopic, c.beaconAddress, logpoller.Confirmations(c.finalityDepth), @@ -918,6 +923,7 @@ func (c *coordinator) 
DKGVRFCommittees(ctx context.Context) (dkgCommittee, vrfCo } latestDKG, err := c.lp.LatestLogByEventSigWithConfs( + ctx, c.configSetTopic, c.dkgAddress, logpoller.Confirmations(c.finalityDepth), diff --git a/core/services/relay/evm/config_poller.go b/core/services/relay/evm/config_poller.go index 542e6a4c70a..63977295248 100644 --- a/core/services/relay/evm/config_poller.go +++ b/core/services/relay/evm/config_poller.go @@ -75,7 +75,9 @@ func NewConfigPoller(lggr logger.Logger, cfg CPConfig) (evmRelayTypes.ConfigPoll } func newConfigPoller(lggr logger.Logger, client client.Client, destChainPoller logpoller.LogPoller, aggregatorContractAddr common.Address, configStoreAddr *common.Address, ld LogDecoder) (*configPoller, error) { - err := destChainPoller.RegisterFilter(logpoller.Filter{Name: configPollerFilterName(aggregatorContractAddr), EventSigs: []common.Hash{ld.EventSig()}, Addresses: []common.Address{aggregatorContractAddr}}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err := destChainPoller.RegisterFilter(ctx, logpoller.Filter{Name: configPollerFilterName(aggregatorContractAddr), EventSigs: []common.Hash{ld.EventSig()}, Addresses: []common.Address{aggregatorContractAddr}}) if err != nil { return nil, err } @@ -124,7 +126,7 @@ func (cp *configPoller) Replay(ctx context.Context, fromBlock int64) error { // LatestConfigDetails returns the latest config details from the logs func (cp *configPoller) LatestConfigDetails(ctx context.Context) (changedInBlock uint64, configDigest ocrtypes.ConfigDigest, err error) { - latest, err := cp.destChainLogPoller.LatestLogByEventSigWithConfs(cp.ld.EventSig(), cp.aggregatorContractAddr, 1) + latest, err := cp.destChainLogPoller.LatestLogByEventSigWithConfs(ctx, cp.ld.EventSig(), cp.aggregatorContractAddr, 1) if err != nil { if errors.Is(err, sql.ErrNoRows) { if cp.isConfigStoreAvailable() { @@ -145,7 +147,7 @@ func (cp *configPoller) LatestConfigDetails(ctx context.Context) (changedInBlock // 
LatestConfig returns the latest config from the logs on a certain block func (cp *configPoller) LatestConfig(ctx context.Context, changedInBlock uint64) (ocrtypes.ContractConfig, error) { - lgs, err := cp.destChainLogPoller.Logs(int64(changedInBlock), int64(changedInBlock), cp.ld.EventSig(), cp.aggregatorContractAddr) + lgs, err := cp.destChainLogPoller.Logs(ctx, int64(changedInBlock), int64(changedInBlock), cp.ld.EventSig(), cp.aggregatorContractAddr) if err != nil { return ocrtypes.ContractConfig{}, err } diff --git a/core/services/relay/evm/contract_transmitter.go b/core/services/relay/evm/contract_transmitter.go index 1d0d1753dfd..ad673236f65 100644 --- a/core/services/relay/evm/contract_transmitter.go +++ b/core/services/relay/evm/contract_transmitter.go @@ -68,7 +68,9 @@ func NewOCRContractTransmitter( return nil, errors.New("invalid ABI, missing transmitted") } - err := lp.RegisterFilter(logpoller.Filter{Name: transmitterFilterName(address), EventSigs: []common.Hash{transmitted.ID}, Addresses: []common.Address{address}}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err := lp.RegisterFilter(ctx, logpoller.Filter{Name: transmitterFilterName(address), EventSigs: []common.Hash{transmitted.ID}, Addresses: []common.Address{address}}) if err != nil { return nil, err } @@ -180,8 +182,7 @@ func (oc *contractTransmitter) LatestConfigDigestAndEpoch(ctx context.Context) ( if err != nil { return ocrtypes.ConfigDigest{}, 0, err } - latest, err := oc.lp.LatestLogByEventSigWithConfs( - oc.transmittedEventSig, oc.contractAddress, 1) + latest, err := oc.lp.LatestLogByEventSigWithConfs(ctx, oc.transmittedEventSig, oc.contractAddress, 1) if err != nil { if errors.Is(err, sql.ErrNoRows) { // No transmissions yet diff --git a/core/services/relay/evm/event_binding.go b/core/services/relay/evm/event_binding.go index b7148348e4b..6124df55475 100644 --- a/core/services/relay/evm/event_binding.go +++ b/core/services/relay/evm/event_binding.go @@ -52,7 
+52,9 @@ func (e *eventBinding) Register() error { return nil } - if err := e.lp.RegisterFilter(logpoller.Filter{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if err := e.lp.RegisterFilter(ctx, logpoller.Filter{ Name: e.id, EventSigs: evmtypes.HashArray{e.hash}, Addresses: evmtypes.AddressArray{e.address}, @@ -70,7 +72,9 @@ func (e *eventBinding) Unregister() error { return nil } - if err := e.lp.UnregisterFilter(e.id); err != nil { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if err := e.lp.UnregisterFilter(ctx, e.id); err != nil { return fmt.Errorf("%w: %w", commontypes.ErrInternal, err) } return nil @@ -109,7 +113,7 @@ func (e *eventBinding) Bind(binding commontypes.BoundContract) error { } func (e *eventBinding) getLatestValueWithoutFilters(ctx context.Context, confs logpoller.Confirmations, into any) error { - log, err := e.lp.LatestLogByEventSigWithConfs(e.hash, e.address, confs) + log, err := e.lp.LatestLogByEventSigWithConfs(ctx, e.hash, e.address, confs) if err = wrapInternalErr(err); err != nil { return err } @@ -142,7 +146,7 @@ func (e *eventBinding) getLatestValueWithFilters( fai := filtersAndIndices[0] remainingFilters := filtersAndIndices[1:] - logs, err := e.lp.IndexedLogs(e.hash, e.address, 1, []common.Hash{fai}, confs) + logs, err := e.lp.IndexedLogs(ctx, e.hash, e.address, 1, []common.Hash{fai}, confs) if err != nil { return wrapInternalErr(err) } diff --git a/core/services/relay/evm/functions/config_poller.go b/core/services/relay/evm/functions/config_poller.go index d4d8d12df30..8d2d7257db3 100644 --- a/core/services/relay/evm/functions/config_poller.go +++ b/core/services/relay/evm/functions/config_poller.go @@ -135,7 +135,7 @@ func (cp *configPoller) LatestConfigDetails(ctx context.Context) (changedInBlock return 0, ocrtypes.ConfigDigest{}, nil } - latest, err := cp.destChainLogPoller.LatestLogByEventSigWithConfs(ConfigSet, *contractAddr, 1) + latest, err := 
cp.destChainLogPoller.LatestLogByEventSigWithConfs(ctx, ConfigSet, *contractAddr, 1) if err != nil { if errors.Is(err, sql.ErrNoRows) { return 0, ocrtypes.ConfigDigest{}, nil @@ -157,7 +157,7 @@ func (cp *configPoller) LatestConfig(ctx context.Context, changedInBlock uint64) return ocrtypes.ContractConfig{}, errors.New("no target contract address set yet") } - lgs, err := cp.destChainLogPoller.Logs(int64(changedInBlock), int64(changedInBlock), ConfigSet, *contractAddr) + lgs, err := cp.destChainLogPoller.Logs(ctx, int64(changedInBlock), int64(changedInBlock), ConfigSet, *contractAddr) if err != nil { return ocrtypes.ContractConfig{}, err } @@ -187,11 +187,13 @@ func (cp *configPoller) LatestBlockHeight(ctx context.Context) (blockHeight uint func (cp *configPoller) UpdateRoutes(activeCoordinator common.Address, proposedCoordinator common.Address) error { cp.targetContract.Store(&activeCoordinator) // Register filters for both active and proposed - err := cp.destChainLogPoller.RegisterFilter(logpoller.Filter{Name: configPollerFilterName(activeCoordinator), EventSigs: []common.Hash{ConfigSet}, Addresses: []common.Address{activeCoordinator}}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err := cp.destChainLogPoller.RegisterFilter(ctx, logpoller.Filter{Name: configPollerFilterName(activeCoordinator), EventSigs: []common.Hash{ConfigSet}, Addresses: []common.Address{activeCoordinator}}) if err != nil { return err } - err = cp.destChainLogPoller.RegisterFilter(logpoller.Filter{Name: configPollerFilterName(proposedCoordinator), EventSigs: []common.Hash{ConfigSet}, Addresses: []common.Address{activeCoordinator}}) + err = cp.destChainLogPoller.RegisterFilter(ctx, logpoller.Filter{Name: configPollerFilterName(proposedCoordinator), EventSigs: []common.Hash{ConfigSet}, Addresses: []common.Address{activeCoordinator}}) if err != nil { return err } diff --git a/core/services/relay/evm/functions/contract_transmitter.go 
b/core/services/relay/evm/functions/contract_transmitter.go index 76b03afabda..352240233b8 100644 --- a/core/services/relay/evm/functions/contract_transmitter.go +++ b/core/services/relay/evm/functions/contract_transmitter.go @@ -227,8 +227,7 @@ func (oc *contractTransmitter) LatestConfigDigestAndEpoch(ctx context.Context) ( if err != nil { return ocrtypes.ConfigDigest{}, 0, err } - latest, err := oc.lp.LatestLogByEventSigWithConfs( - oc.transmittedEventSig, *contractAddr, 1) + latest, err := oc.lp.LatestLogByEventSigWithConfs(ctx, oc.transmittedEventSig, *contractAddr, 1) if err != nil { if errors.Is(err, sql.ErrNoRows) { // No transmissions yet @@ -261,7 +260,9 @@ func (oc *contractTransmitter) UpdateRoutes(activeCoordinator common.Address, pr return nil } oc.lggr.Debugw("FunctionsContractTransmitter: updating routes", "previousContract", previousContract, "activeCoordinator", activeCoordinator) - err := oc.lp.RegisterFilter(logpoller.Filter{Name: transmitterFilterName(activeCoordinator), EventSigs: []common.Hash{oc.transmittedEventSig}, Addresses: []common.Address{activeCoordinator}}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err := oc.lp.RegisterFilter(ctx, logpoller.Filter{Name: transmitterFilterName(activeCoordinator), EventSigs: []common.Hash{oc.transmittedEventSig}, Addresses: []common.Address{activeCoordinator}}) if err != nil { return err } diff --git a/core/services/relay/evm/functions/logpoller_wrapper.go b/core/services/relay/evm/functions/logpoller_wrapper.go index 71036d819d7..687d44c3578 100644 --- a/core/services/relay/evm/functions/logpoller_wrapper.go +++ b/core/services/relay/evm/functions/logpoller_wrapper.go @@ -171,9 +171,12 @@ func (l *logPollerWrapper) LatestEvents() ([]evmRelayTypes.OracleRequest, []evmR return resultsReq, resultsResp, errors.New("no non-zero coordinators to check") } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for _, coordinator := range coordinators { 
requestEndBlock := latestBlockNum - l.requestBlockOffset - requestLogs, err := l.logPoller.Logs(startBlockNum, requestEndBlock, functions_coordinator.FunctionsCoordinatorOracleRequest{}.Topic(), coordinator) + requestLogs, err := l.logPoller.Logs(ctx, startBlockNum, requestEndBlock, functions_coordinator.FunctionsCoordinatorOracleRequest{}.Topic(), coordinator) if err != nil { l.lggr.Errorw("LatestEvents: fetching request logs from LogPoller failed", "startBlock", startBlockNum, "endBlock", requestEndBlock) return nil, nil, err @@ -181,7 +184,7 @@ func (l *logPollerWrapper) LatestEvents() ([]evmRelayTypes.OracleRequest, []evmR l.lggr.Debugw("LatestEvents: fetched request logs", "nRequestLogs", len(requestLogs), "latestBlock", latest, "startBlock", startBlockNum, "endBlock", requestEndBlock) requestLogs = l.filterPreviouslyDetectedEvents(requestLogs, &l.detectedRequests, "requests") responseEndBlock := latestBlockNum - l.responseBlockOffset - responseLogs, err := l.logPoller.Logs(startBlockNum, responseEndBlock, functions_coordinator.FunctionsCoordinatorOracleResponse{}.Topic(), coordinator) + responseLogs, err := l.logPoller.Logs(ctx, startBlockNum, responseEndBlock, functions_coordinator.FunctionsCoordinatorOracleResponse{}.Topic(), coordinator) if err != nil { l.lggr.Errorw("LatestEvents: fetching response logs from LogPoller failed", "startBlock", startBlockNum, "endBlock", responseEndBlock) return nil, nil, err @@ -428,7 +431,10 @@ func (l *logPollerWrapper) registerFilters(coordinatorAddress common.Address) er if (coordinatorAddress == common.Address{}) { return nil } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() return l.logPoller.RegisterFilter( + ctx, logpoller.Filter{ Name: filterName(coordinatorAddress), EventSigs: []common.Hash{ diff --git a/core/services/relay/evm/mercury/config_poller.go b/core/services/relay/evm/mercury/config_poller.go index 78ce76e89b3..1501db1337a 100644 --- 
a/core/services/relay/evm/mercury/config_poller.go +++ b/core/services/relay/evm/mercury/config_poller.go @@ -98,7 +98,9 @@ func FilterName(addr common.Address, feedID common.Hash) string { // NewConfigPoller creates a new Mercury ConfigPoller func NewConfigPoller(lggr logger.Logger, destChainPoller logpoller.LogPoller, addr common.Address, feedId common.Hash) (*ConfigPoller, error) { - err := destChainPoller.RegisterFilter(logpoller.Filter{Name: FilterName(addr, feedId), EventSigs: []common.Hash{FeedScopedConfigSet}, Addresses: []common.Address{addr}}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err := destChainPoller.RegisterFilter(ctx, logpoller.Filter{Name: FilterName(addr, feedId), EventSigs: []common.Hash{FeedScopedConfigSet}, Addresses: []common.Address{addr}}) if err != nil { return nil, err } @@ -131,7 +133,7 @@ func (cp *ConfigPoller) Replay(ctx context.Context, fromBlock int64) error { // LatestConfigDetails returns the latest config details from the logs func (cp *ConfigPoller) LatestConfigDetails(ctx context.Context) (changedInBlock uint64, configDigest ocrtypes.ConfigDigest, err error) { cp.lggr.Debugw("LatestConfigDetails", "eventSig", FeedScopedConfigSet, "addr", cp.addr, "topicIndex", feedIdTopicIndex, "feedID", cp.feedId) - logs, err := cp.destChainLogPoller.IndexedLogs(FeedScopedConfigSet, cp.addr, feedIdTopicIndex, []common.Hash{cp.feedId}, 1) + logs, err := cp.destChainLogPoller.IndexedLogs(ctx, FeedScopedConfigSet, cp.addr, feedIdTopicIndex, []common.Hash{cp.feedId}, 1) if err != nil { return 0, ocrtypes.ConfigDigest{}, err } @@ -148,7 +150,7 @@ func (cp *ConfigPoller) LatestConfigDetails(ctx context.Context) (changedInBlock // LatestConfig returns the latest config from the logs on a certain block func (cp *ConfigPoller) LatestConfig(ctx context.Context, changedInBlock uint64) (ocrtypes.ContractConfig, error) { - lgs, err := cp.destChainLogPoller.IndexedLogsByBlockRange(int64(changedInBlock), 
int64(changedInBlock), FeedScopedConfigSet, cp.addr, feedIdTopicIndex, []common.Hash{cp.feedId}) + lgs, err := cp.destChainLogPoller.IndexedLogsByBlockRange(ctx, int64(changedInBlock), int64(changedInBlock), FeedScopedConfigSet, cp.addr, feedIdTopicIndex, []common.Hash{cp.feedId}) if err != nil { return ocrtypes.ContractConfig{}, err } diff --git a/core/services/vrf/v2/listener_v2_log_listener.go b/core/services/vrf/v2/listener_v2_log_listener.go index bc53f8aa400..e495eac5d8b 100644 --- a/core/services/vrf/v2/listener_v2_log_listener.go +++ b/core/services/vrf/v2/listener_v2_log_listener.go @@ -40,7 +40,7 @@ func (lsn *listenerV2) runLogListener( lsn.l.Debugw("log listener loop") // Filter registration is idempotent, so we can just call it every time // and retry on errors using the ticker. - err := lsn.chain.LogPoller().RegisterFilter(logpoller.Filter{ + err := lsn.chain.LogPoller().RegisterFilter(ctx, logpoller.Filter{ Name: logpoller.FilterName( "VRFListener", "version", lsn.coordinator.Version(), @@ -130,6 +130,7 @@ func (lsn *listenerV2) initializeLastProcessedBlock(ctx context.Context) (lastPr // get randomness requested logs with the appropriate keyhash // keyhash is specified in topic1 requests, err := lp.IndexedLogsCreatedAfter( + ctx, lsn.coordinator.RandomWordsRequestedTopic(), // event sig lsn.coordinator.Address(), // address 1, // topic index @@ -144,6 +145,7 @@ func (lsn *listenerV2) initializeLastProcessedBlock(ctx context.Context) (lastPr // fulfillments don't have keyhash indexed, we'll have to get all of them // TODO: can we instead write a single query that joins on request id's somehow? 
fulfillments, err := lp.LogsCreatedAfter( + ctx, lsn.coordinator.RandomWordsFulfilledTopic(), // event sig lsn.coordinator.Address(), // address fromTimestamp, // from time @@ -186,6 +188,7 @@ func (lsn *listenerV2) updateLastProcessedBlock(ctx context.Context, currLastPro }() logs, err := lp.LogsWithSigs( + ctx, currLastProcessedBlock, latestBlock.FinalizedBlockNumber, []common.Hash{lsn.coordinator.RandomWordsFulfilledTopic(), lsn.coordinator.RandomWordsRequestedTopic()}, @@ -244,6 +247,7 @@ func (lsn *listenerV2) pollLogs(ctx context.Context, minConfs uint32, lastProces // We don't specify confs because each request can have a different conf above // the minimum. So we do all conf handling in getConfirmedAt. logs, err := lp.LogsWithSigs( + ctx, lastProcessedBlock, latestBlock.BlockNumber, []common.Hash{lsn.coordinator.RandomWordsFulfilledTopic(), lsn.coordinator.RandomWordsRequestedTopic()}, diff --git a/core/services/vrf/v2/listener_v2_log_listener_test.go b/core/services/vrf/v2/listener_v2_log_listener_test.go index c21af8db14c..9aeaa5a7ed6 100644 --- a/core/services/vrf/v2/listener_v2_log_listener_test.go +++ b/core/services/vrf/v2/listener_v2_log_listener_test.go @@ -125,7 +125,7 @@ func setupVRFLogPollerListenerTH(t *testing.T, // Filter registration is idempotent, so we can just call it every time // and retry on errors using the ticker. 
- err = lp.RegisterFilter(logpoller.Filter{ + err = lp.RegisterFilter(ctx, logpoller.Filter{ Name: fmt.Sprintf("vrf_%s_keyhash_%s_job_%d", "v2", listener.job.VRFSpec.PublicKey.MustHash().String(), listener.job.ID), EventSigs: evmtypes.HashArray{ vrf_log_emitter.VRFLogEmitterRandomWordsRequested{}.Topic(), @@ -137,7 +137,7 @@ func setupVRFLogPollerListenerTH(t *testing.T, }, }) require.Nil(t, err) - require.NoError(t, lp.RegisterFilter(logpoller.Filter{ + require.NoError(t, lp.RegisterFilter(ctx, logpoller.Filter{ Name: "Integration test", EventSigs: []common.Hash{emitterABI.Events["Log1"].ID}, Addresses: []common.Address{emitterAddress1}, @@ -210,7 +210,7 @@ func TestInitProcessedBlock_NoVRFReqs(t *testing.T) { require.NoError(t, th.LogPoller.Replay(testutils.Context(t), 4)) // Should return logs from block 5 to 7 (inclusive) - logs, err := th.LogPoller.Logs(4, 7, emitterABI.Events["Log1"].ID, th.EmitterAddress) + logs, err := th.LogPoller.Logs(testutils.Context(t), 4, 7, emitterABI.Events["Log1"].ID, th.EmitterAddress) require.NoError(t, err) require.Equal(t, 3, len(logs)) diff --git a/core/web/evm_forwarders_controller.go b/core/web/evm_forwarders_controller.go index 111c198b7e0..b94f99688f4 100644 --- a/core/web/evm_forwarders_controller.go +++ b/core/web/evm_forwarders_controller.go @@ -1,6 +1,7 @@ package web import ( + "context" "math/big" "net/http" @@ -91,7 +92,9 @@ func (cc *EVMForwardersController) Delete(c *gin.Context) { // handle same as non-existent chain id return nil } - return chain.LogPoller().UnregisterFilter(forwarders.FilterName(addr)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + return chain.LogPoller().UnregisterFilter(ctx, forwarders.FilterName(addr)) } orm := forwarders.NewORM(cc.App.GetSqlxDB(), cc.App.GetLogger(), cc.App.GetConfig().Database()) From 76910bbae4a5d1ec58f908935e33bef899ab3143 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 21 Feb 2024 14:18:28 -0500 Subject: [PATCH 18/65] Use 
testutils for context --- core/chains/evm/logpoller/log_poller_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/chains/evm/logpoller/log_poller_test.go b/core/chains/evm/logpoller/log_poller_test.go index 3f52b69c0e6..3f86ae846d4 100644 --- a/core/chains/evm/logpoller/log_poller_test.go +++ b/core/chains/evm/logpoller/log_poller_test.go @@ -49,7 +49,7 @@ func populateDatabase(t testing.TB, o *logpoller.DbORM, chainID *big.Int) (commo address1 := common.HexToAddress("0x2ab9a2Dc53736b361b72d900CdF9F78F9406fbbb") address2 := common.HexToAddress("0x6E225058950f237371261C985Db6bDe26df2200E") startDate := time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC) - ctx := context.Background() + ctx := testutils.Context(t) for j := 1; j < 100; j++ { var logs []logpoller.Log @@ -86,7 +86,7 @@ func populateDatabase(t testing.TB, o *logpoller.DbORM, chainID *big.Int) (commo func BenchmarkSelectLogsCreatedAfter(b *testing.B) { chainId := big.NewInt(137) - ctx := context.Background() + ctx := testutils.Context(b) _, db := heavyweight.FullTestDBV2(b, nil) o := logpoller.NewORM(chainId, db, logger.Test(b)) event, address, _ := populateDatabase(b, o, chainId) From 2e6da0269b038326ca2f80c62c616fb9b7c47dd0 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 21 Feb 2024 14:24:12 -0500 Subject: [PATCH 19/65] Use testutils context --- core/chains/evm/logpoller/orm_test.go | 41 +++++++++++++-------------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/core/chains/evm/logpoller/orm_test.go b/core/chains/evm/logpoller/orm_test.go index 26ea4e559de..dc187e59de0 100644 --- a/core/chains/evm/logpoller/orm_test.go +++ b/core/chains/evm/logpoller/orm_test.go @@ -2,7 +2,6 @@ package logpoller_test import ( "bytes" - "context" "database/sql" "fmt" "math" @@ -67,7 +66,7 @@ func GenLogWithData(chainID *big.Int, address common.Address, eventSig common.Ha func TestLogPoller_Batching(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := 
testutils.Context(t) th := SetupTH(t, false, 2, 3, 2, 1000) var logs []logpoller.Log // Inserts are limited to 65535 parameters. A log being 10 parameters this results in @@ -86,7 +85,7 @@ func TestLogPoller_Batching(t *testing.T) { func TestORM_GetBlocks_From_Range(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) o1 := th.ORM - ctx := context.Background() + ctx := testutils.Context(t) // Insert many blocks and read them back together blocks := []block{ { @@ -142,7 +141,7 @@ func TestORM_GetBlocks_From_Range(t *testing.T) { func TestORM_GetBlocks_From_Range_Recent_Blocks(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) o1 := th.ORM - ctx := context.Background() + ctx := testutils.Context(t) // Insert many blocks and read them back together var recentBlocks []block for i := 1; i <= 256; i++ { @@ -176,7 +175,7 @@ func TestORM(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) o1 := th.ORM o2 := th.ORM2 - ctx := context.Background() + ctx := testutils.Context(t) // Insert and read back a block. 
require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1234"), 10, time.Now(), 0)) b, err := o1.SelectBlockByHash(ctx, common.HexToHash("0x1234")) @@ -466,13 +465,13 @@ func insertLogsTopicValueRange(t *testing.T, chainID *big.Int, o *logpoller.DbOR Data: []byte("hello"), }) } - require.NoError(t, o.InsertLogs(context.Background(), lgs)) + require.NoError(t, o.InsertLogs(testutils.Context(t), lgs)) } func TestORM_IndexedLogs(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) o1 := th.ORM - ctx := context.Background() + ctx := testutils.Context(t) eventSig := common.HexToHash("0x1599") addr := common.HexToAddress("0x1234") require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1"), 1, time.Now(), 0)) @@ -534,7 +533,7 @@ func TestORM_IndexedLogs(t *testing.T) { func TestORM_SelectIndexedLogsByTxHash(t *testing.T) { th := SetupTH(t, false, 0, 3, 2, 1000) o1 := th.ORM - ctx := context.Background() + ctx := testutils.Context(t) eventSig := common.HexToHash("0x1599") txHash := common.HexToHash("0x1888") addr := common.HexToAddress("0x1234") @@ -601,7 +600,7 @@ func TestORM_SelectIndexedLogsByTxHash(t *testing.T) { func TestORM_DataWords(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) o1 := th.ORM - ctx := context.Background() + ctx := testutils.Context(t) eventSig := common.HexToHash("0x1599") addr := common.HexToAddress("0x1234") require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1"), 1, time.Now(), 0)) @@ -665,7 +664,7 @@ func TestORM_DataWords(t *testing.T) { func TestORM_SelectLogsWithSigsByBlockRangeFilter(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) o1 := th.ORM - ctx := context.Background() + ctx := testutils.Context(t) // Insert logs on different topics, should be able to read them // back using SelectLogsWithSigs and specifying @@ -760,7 +759,7 @@ func TestORM_SelectLogsWithSigsByBlockRangeFilter(t *testing.T) { func TestORM_DeleteBlocksBefore(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) o1 := th.ORM - ctx := 
context.Background() + ctx := testutils.Context(t) require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1234"), 1, time.Now(), 0)) require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1235"), 2, time.Now(), 0)) require.NoError(t, o1.DeleteBlocksBefore(ctx, 1)) @@ -783,7 +782,7 @@ func TestORM_DeleteBlocksBefore(t *testing.T) { func TestLogPoller_Logs(t *testing.T) { t.Parallel() th := SetupTH(t, false, 2, 3, 2, 1000) - ctx := context.Background() + ctx := testutils.Context(t) event1 := EmitterABI.Events["Log1"].ID event2 := EmitterABI.Events["Log2"].ID address1 := common.HexToAddress("0x2ab9a2Dc53736b361b72d900CdF9F78F9406fbbb") @@ -833,7 +832,7 @@ func TestLogPoller_Logs(t *testing.T) { func BenchmarkLogs(b *testing.B) { th := SetupTH(b, false, 2, 3, 2, 1000) o := th.ORM - ctx := context.Background() + ctx := testutils.Context(b) var lgs []logpoller.Log addr := common.HexToAddress("0x1234") for i := 0; i < 10_000; i++ { @@ -862,7 +861,7 @@ func BenchmarkLogs(b *testing.B) { func TestSelectLogsWithSigsExcluding(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) orm := th.ORM - ctx := context.Background() + ctx := testutils.Context(t) addressA := common.HexToAddress("0x11111") addressB := common.HexToAddress("0x22222") addressC := common.HexToAddress("0x33333") @@ -1108,7 +1107,7 @@ func TestSelectLogsWithSigsExcluding(t *testing.T) { func TestSelectLatestBlockNumberEventSigsAddrsWithConfs(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) - ctx := context.Background() + ctx := testutils.Context(t) event1 := EmitterABI.Events["Log1"].ID event2 := EmitterABI.Events["Log2"].ID address1 := utils.RandomAddress() @@ -1206,7 +1205,7 @@ func TestSelectLatestBlockNumberEventSigsAddrsWithConfs(t *testing.T) { func TestSelectLogsCreatedAfter(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) - ctx := context.Background() + ctx := testutils.Context(t) event := EmitterABI.Events["Log1"].ID address := utils.RandomAddress() @@ -1311,7 +1310,7 @@ func 
TestSelectLogsCreatedAfter(t *testing.T) { func TestNestedLogPollerBlocksQuery(t *testing.T) { th := SetupTH(t, false, 2, 3, 2, 1000) - ctx := context.Background() + ctx := testutils.Context(t) event := EmitterABI.Events["Log1"].ID address := utils.RandomAddress() @@ -1342,7 +1341,7 @@ func TestInsertLogsWithBlock(t *testing.T) { chainID := testutils.NewRandomEVMChainID() event := utils.RandomBytes32() address := utils.RandomAddress() - ctx := context.Background() + ctx := testutils.Context(t) // We need full db here, because we want to test transaction rollbacks. // Using pgtest.NewSqlxDB(t) will run all tests in TXs which is not desired for this type of test @@ -1421,7 +1420,7 @@ func TestInsertLogsInTx(t *testing.T) { event := utils.RandomBytes32() address := utils.RandomAddress() maxLogsSize := 9000 - ctx := context.Background() + ctx := testutils.Context(t) // We need full db here, because we want to test transaction rollbacks. _, db := heavyweight.FullTestDBV2(t, nil) @@ -1471,7 +1470,7 @@ func TestInsertLogsInTx(t *testing.T) { } func TestSelectLogsDataWordBetween(t *testing.T) { - ctx := context.Background() + ctx := testutils.Context(t) address := utils.RandomAddress() eventSig := utils.RandomBytes32() th := SetupTH(t, false, 2, 3, 2, 1000) @@ -1537,7 +1536,7 @@ func Benchmark_LogsDataWordBetween(b *testing.B) { chainId := big.NewInt(137) _, db := heavyweight.FullTestDBV2(b, nil) o := logpoller.NewORM(chainId, db, logger.Test(b)) - ctx := context.Background() + ctx := testutils.Context(b) numberOfReports := 100_000 numberOfMessagesPerReport := 256 From 76ab35c1da98eb07892cfdcada31452aa4dbfd67 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 21 Feb 2024 14:37:43 -0500 Subject: [PATCH 20/65] Use testutils context --- core/chains/evm/logpoller/log_poller_internal_test.go | 4 ++-- core/chains/evm/logpoller/orm.go | 4 +--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/core/chains/evm/logpoller/log_poller_internal_test.go 
b/core/chains/evm/logpoller/log_poller_internal_test.go index 011bdc05789..a35e01d7af6 100644 --- a/core/chains/evm/logpoller/log_poller_internal_test.go +++ b/core/chains/evm/logpoller/log_poller_internal_test.go @@ -126,7 +126,7 @@ func TestLogPoller_RegisterFilter(t *testing.T) { err = lp.UnregisterFilter(testutils.Context(t), "Emitter Log 1") require.NoError(t, err) assert.Len(t, lp.filters, 0) - filters, err := lp.orm.LoadFilters(lp.ctx) + filters, err := lp.orm.LoadFilters(testutils.Context(t)) require.NoError(t, err) assert.Len(t, filters, 0) @@ -262,7 +262,7 @@ func TestLogPoller_Replay(t *testing.T) { // process 1 log in block 3 lp.PollAndSaveLogs(testutils.Context(t), 4) - latest, err := lp.LatestBlock(lp.ctx) + latest, err := lp.LatestBlock(testutils.Context(t)) require.NoError(t, err) require.Equal(t, int64(4), latest.BlockNumber) diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index 2c79e8ed326..296abfcdfc6 100644 --- a/core/chains/evm/logpoller/orm.go +++ b/core/chains/evm/logpoller/orm.go @@ -7,8 +7,6 @@ import ( "math/big" "time" - "github.com/lib/pq" - "github.com/ethereum/go-ethereum/common" "github.com/jmoiron/sqlx" "github.com/pkg/errors" @@ -674,7 +672,7 @@ type bytesProducer interface { Bytes() []byte } -func concatBytes[T bytesProducer](byteSlice []T) pq.ByteaArray { +func concatBytes[T bytesProducer](byteSlice []T) [][]byte { var output [][]byte for _, b := range byteSlice { output = append(output, b.Bytes()) From e55455e5d3209fdc180f13b3631439eec853036d Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 21 Feb 2024 14:39:41 -0500 Subject: [PATCH 21/65] Use ctx --- .../plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index ef0490e2d8f..14b640c870a 100644 --- 
a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -195,7 +195,7 @@ func (p *logEventProvider) ReadLogs(pctx context.Context, ids ...*big.Int) error ctx, cancel := context.WithTimeout(pctx, readLogsTimeout) defer cancel() - latest, err := p.poller.LatestBlock(pctx) + latest, err := p.poller.LatestBlock(ctx) if err != nil { return fmt.Errorf("%w: %s", ErrHeadNotAvailable, err) } From 3183b2105980df7d9b57d4ab851b791f706e7f83 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 21 Feb 2024 15:45:38 -0500 Subject: [PATCH 22/65] Refactor forwarder ORM --- .../evm/forwarders/forwarder_manager.go | 10 ++- .../evm/forwarders/forwarder_manager_test.go | 25 +++--- core/chains/evm/forwarders/orm.go | 79 ++++++++++--------- core/chains/evm/forwarders/orm_test.go | 20 ++--- core/chains/evm/txmgr/builder.go | 2 +- core/chains/evm/txmgr/txmgr_test.go | 4 +- core/cmd/ocr2vrf_configure_commands.go | 4 +- core/internal/features/features_test.go | 4 +- .../features/ocr2/features_ocr2_test.go | 4 +- core/services/keeper/integration_test.go | 4 +- .../plugins/ocr2keeper/integration_test.go | 8 +- .../internal/ocr2vrf_integration_test.go | 5 +- core/web/evm_forwarders_controller.go | 26 +++--- 13 files changed, 108 insertions(+), 87 deletions(-) diff --git a/core/chains/evm/forwarders/forwarder_manager.go b/core/chains/evm/forwarders/forwarder_manager.go index cabedf79aee..4d0257e87e2 100644 --- a/core/chains/evm/forwarders/forwarder_manager.go +++ b/core/chains/evm/forwarders/forwarder_manager.go @@ -56,13 +56,13 @@ type FwdMgr struct { wg sync.WaitGroup } -func NewFwdMgr(db *sqlx.DB, client evmclient.Client, logpoller evmlogpoller.LogPoller, l logger.Logger, cfg Config, dbConfig pg.QConfig) *FwdMgr { +func NewFwdMgr(db *sqlx.DB, client evmclient.Client, logpoller evmlogpoller.LogPoller, l logger.Logger, cfg Config) *FwdMgr { lggr := logger.Sugared(logger.Named(l, 
"EVMForwarderManager")) fwdMgr := FwdMgr{ logger: lggr, cfg: cfg, evmClient: client, - ORM: NewORM(db, lggr, dbConfig), + ORM: NewORM(db), logpoller: logpoller, sendersCache: make(map[common.Address][]common.Address), } @@ -80,7 +80,7 @@ func (f *FwdMgr) Start(ctx context.Context) error { f.logger.Debug("Initializing EVM forwarder manager") chainId := f.evmClient.ConfiguredChainID() - fwdrs, err := f.ORM.FindForwardersByChain(big.Big(*chainId)) + fwdrs, err := f.ORM.FindForwardersByChain(ctx, big.Big(*chainId)) if err != nil { return errors.Wrapf(err, "Failed to retrieve forwarders for chain %d", chainId) } @@ -113,7 +113,9 @@ func FilterName(addr common.Address) string { func (f *FwdMgr) ForwarderFor(addr common.Address) (forwarder common.Address, err error) { // Gets forwarders for current chain. - fwdrs, err := f.ORM.FindForwardersByChain(big.Big(*f.evmClient.ConfiguredChainID())) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fwdrs, err := f.ORM.FindForwardersByChain(ctx, big.Big(*f.evmClient.ConfiguredChainID())) if err != nil { return common.Address{}, err } diff --git a/core/chains/evm/forwarders/forwarder_manager_test.go b/core/chains/evm/forwarders/forwarder_manager_test.go index 5ef150aa5c3..a602a5c823f 100644 --- a/core/chains/evm/forwarders/forwarder_manager_test.go +++ b/core/chains/evm/forwarders/forwarder_manager_test.go @@ -5,6 +5,8 @@ import ( "testing" "time" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" @@ -26,7 +28,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/evmtest" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) var GetAuthorisedSendersABI = 
evmtypes.MustGetABI(authorized_receiver.AuthorizedReceiverABI).Methods["getAuthorizedSenders"] @@ -39,6 +40,7 @@ func TestFwdMgr_MaybeForwardTransaction(t *testing.T) { cfg := configtest.NewTestGeneralConfig(t) evmcfg := evmtest.NewChainScopedConfig(t, cfg) owner := testutils.MustNewSimTransactor(t) + ctx := testutils.Context(t) ec := backends.NewSimulatedBackend(map[common.Address]core.GenesisAccount{ owner.From: { @@ -61,12 +63,12 @@ func TestFwdMgr_MaybeForwardTransaction(t *testing.T) { evmClient := client.NewSimulatedBackendClient(t, ec, testutils.FixtureChainID) lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), evmClient, lggr, 100*time.Millisecond, false, 2, 3, 2, 1000) - fwdMgr := forwarders.NewFwdMgr(db, evmClient, lp, lggr, evmcfg.EVM(), evmcfg.Database()) - fwdMgr.ORM = forwarders.NewORM(db, logger.Test(t), cfg.Database()) + fwdMgr := forwarders.NewFwdMgr(db, evmClient, lp, lggr, evmcfg.EVM()) + fwdMgr.ORM = forwarders.NewORM(db) - fwd, err := fwdMgr.ORM.CreateForwarder(forwarderAddr, ubig.Big(*testutils.FixtureChainID)) + fwd, err := fwdMgr.ORM.CreateForwarder(ctx, forwarderAddr, ubig.Big(*testutils.FixtureChainID)) require.NoError(t, err) - lst, err := fwdMgr.ORM.FindForwardersByChain(ubig.Big(*testutils.FixtureChainID)) + lst, err := fwdMgr.ORM.FindForwardersByChain(ctx, ubig.Big(*testutils.FixtureChainID)) require.NoError(t, err) require.Equal(t, len(lst), 1) require.Equal(t, lst[0].Address, forwarderAddr) @@ -79,7 +81,7 @@ func TestFwdMgr_MaybeForwardTransaction(t *testing.T) { require.NoError(t, err) cleanupCalled := false - cleanup := func(tx pg.Queryer, evmChainId int64, addr common.Address) error { + cleanup := func(tx sqlutil.Queryer, evmChainId int64, addr common.Address) error { require.Equal(t, testutils.FixtureChainID.Int64(), evmChainId) require.Equal(t, forwarderAddr, addr) require.NotNil(t, tx) @@ -87,7 +89,7 @@ func TestFwdMgr_MaybeForwardTransaction(t *testing.T) { return nil } 
- err = fwdMgr.ORM.DeleteForwarder(fwd.ID, cleanup) + err = fwdMgr.ORM.DeleteForwarder(ctx, fwd.ID, cleanup) assert.NoError(t, err) assert.True(t, cleanupCalled) } @@ -95,6 +97,7 @@ func TestFwdMgr_MaybeForwardTransaction(t *testing.T) { func TestFwdMgr_AccountUnauthorizedToForward_SkipsForwarding(t *testing.T) { lggr := logger.Test(t) db := pgtest.NewSqlxDB(t) + ctx := testutils.Context(t) cfg := configtest.NewTestGeneralConfig(t) evmcfg := evmtest.NewChainScopedConfig(t, cfg) owner := testutils.MustNewSimTransactor(t) @@ -114,12 +117,12 @@ func TestFwdMgr_AccountUnauthorizedToForward_SkipsForwarding(t *testing.T) { evmClient := client.NewSimulatedBackendClient(t, ec, testutils.FixtureChainID) lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), evmClient, lggr, 100*time.Millisecond, false, 2, 3, 2, 1000) - fwdMgr := forwarders.NewFwdMgr(db, evmClient, lp, lggr, evmcfg.EVM(), evmcfg.Database()) - fwdMgr.ORM = forwarders.NewORM(db, logger.Test(t), cfg.Database()) + fwdMgr := forwarders.NewFwdMgr(db, evmClient, lp, lggr, evmcfg.EVM()) + fwdMgr.ORM = forwarders.NewORM(db) - _, err = fwdMgr.ORM.CreateForwarder(forwarderAddr, ubig.Big(*testutils.FixtureChainID)) + _, err = fwdMgr.ORM.CreateForwarder(ctx, forwarderAddr, ubig.Big(*testutils.FixtureChainID)) require.NoError(t, err) - lst, err := fwdMgr.ORM.FindForwardersByChain(ubig.Big(*testutils.FixtureChainID)) + lst, err := fwdMgr.ORM.FindForwardersByChain(ctx, ubig.Big(*testutils.FixtureChainID)) require.NoError(t, err) require.Equal(t, len(lst), 1) require.Equal(t, lst[0].Address, forwarderAddr) diff --git a/core/chains/evm/forwarders/orm.go b/core/chains/evm/forwarders/orm.go index 2a455360190..e6ce262ff8f 100644 --- a/core/chains/evm/forwarders/orm.go +++ b/core/chains/evm/forwarders/orm.go @@ -1,105 +1,110 @@ package forwarders import ( + "context" "database/sql" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" + 
"github.com/ethereum/go-ethereum/common" "github.com/jmoiron/sqlx" "github.com/pkg/errors" - "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) //go:generate mockery --quiet --name ORM --output ./mocks/ --case=underscore type ORM interface { - CreateForwarder(addr common.Address, evmChainId big.Big) (fwd Forwarder, err error) - FindForwarders(offset, limit int) ([]Forwarder, int, error) - FindForwardersByChain(evmChainId big.Big) ([]Forwarder, error) - DeleteForwarder(id int64, cleanup func(tx pg.Queryer, evmChainId int64, addr common.Address) error) error - FindForwardersInListByChain(evmChainId big.Big, addrs []common.Address) ([]Forwarder, error) + CreateForwarder(ctx context.Context, addr common.Address, evmChainId big.Big) (fwd Forwarder, err error) + FindForwarders(ctx context.Context, offset, limit int) ([]Forwarder, int, error) + FindForwardersByChain(ctx context.Context, evmChainId big.Big) ([]Forwarder, error) + DeleteForwarder(ctx context.Context, id int64, cleanup func(tx sqlutil.Queryer, evmChainId int64, addr common.Address) error) error + FindForwardersInListByChain(ctx context.Context, evmChainId big.Big, addrs []common.Address) ([]Forwarder, error) } -type orm struct { - q pg.Q +type DbORM struct { + db sqlutil.Queryer } -var _ ORM = (*orm)(nil) +var _ ORM = &DbORM{} + +func NewORM(db sqlutil.Queryer) *DbORM { + return &DbORM{db: db} +} -func NewORM(db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig) *orm { - return &orm{pg.NewQ(db, lggr, cfg)} +func (o *DbORM) Transaction(ctx context.Context, fn func(*DbORM) error) (err error) { + return sqlutil.Transact(ctx, o.new, o.db, nil, fn) } +// new returns a NewORM like o, but backed by q. +func (o *DbORM) new(q sqlutil.Queryer) *DbORM { return NewORM(q) } + // CreateForwarder creates the Forwarder address associated with the current EVM chain id. 
-func (o *orm) CreateForwarder(addr common.Address, evmChainId big.Big) (fwd Forwarder, err error) { +func (o *DbORM) CreateForwarder(ctx context.Context, addr common.Address, evmChainId big.Big) (fwd Forwarder, err error) { sql := `INSERT INTO evm.forwarders (address, evm_chain_id, created_at, updated_at) VALUES ($1, $2, now(), now()) RETURNING *` - err = o.q.Get(&fwd, sql, addr, evmChainId) + err = o.db.GetContext(ctx, &fwd, sql, addr, evmChainId) return fwd, err } // DeleteForwarder removes a forwarder address. // If cleanup is non-nil, it can be used to perform any chain- or contract-specific cleanup that need to happen atomically // on forwarder deletion. If cleanup returns an error, forwarder deletion will be aborted. -func (o *orm) DeleteForwarder(id int64, cleanup func(tx pg.Queryer, evmChainID int64, addr common.Address) error) (err error) { +func (o *DbORM) DeleteForwarder(ctx context.Context, id int64, cleanup func(tx sqlutil.Queryer, evmChainID int64, addr common.Address) error) (err error) { var dest struct { EvmChainId int64 Address common.Address } var rowsAffected int64 - err = o.q.Transaction(func(tx pg.Queryer) error { - err = tx.Get(&dest, `SELECT evm_chain_id, address FROM evm.forwarders WHERE id = $1`, id) + return o.Transaction(ctx, func(orm *DbORM) error { + err := orm.db.GetContext(ctx, &dest, `SELECT evm_chain_id, address FROM evm.forwarders WHERE id = $1`, id) if err != nil { return err } if cleanup != nil { - if err = cleanup(tx, dest.EvmChainId, dest.Address); err != nil { + if err = cleanup(orm.db, dest.EvmChainId, dest.Address); err != nil { return err } } - result, err2 := o.q.Exec(`DELETE FROM evm.forwarders WHERE id = $1`, id) + result, err := orm.db.ExecContext(ctx, `DELETE FROM evm.forwarders WHERE id = $1`, id) // If the forwarder wasn't found, we still want to delete the filter. 
// In that case, the transaction must return nil, even though DeleteForwarder // will return sql.ErrNoRows - if err2 != nil && !errors.Is(err2, sql.ErrNoRows) { - return err2 + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return err } - rowsAffected, err2 = result.RowsAffected() - - return err2 + rowsAffected, err = result.RowsAffected() + if err == nil && rowsAffected == 0 { + err = sql.ErrNoRows + } + return err }) - - if err == nil && rowsAffected == 0 { - err = sql.ErrNoRows - } - return err } // FindForwarders returns all forwarder addresses from offset up until limit. -func (o *orm) FindForwarders(offset, limit int) (fwds []Forwarder, count int, err error) { +func (o *DbORM) FindForwarders(ctx context.Context, offset, limit int) (fwds []Forwarder, count int, err error) { sql := `SELECT count(*) FROM evm.forwarders` - if err = o.q.Get(&count, sql); err != nil { + if err = o.db.GetContext(ctx, &count, sql); err != nil { return } sql = `SELECT * FROM evm.forwarders ORDER BY created_at DESC, id DESC LIMIT $1 OFFSET $2` - if err = o.q.Select(&fwds, sql, limit, offset); err != nil { + if err = o.db.SelectContext(ctx, &fwds, sql, limit, offset); err != nil { return } return } // FindForwardersByChain returns all forwarder addresses for a chain. 
-func (o *orm) FindForwardersByChain(evmChainId big.Big) (fwds []Forwarder, err error) { +func (o *DbORM) FindForwardersByChain(ctx context.Context, evmChainId big.Big) (fwds []Forwarder, err error) { sql := `SELECT * FROM evm.forwarders where evm_chain_id = $1 ORDER BY created_at DESC, id DESC` - err = o.q.Select(&fwds, sql, evmChainId) + err = o.db.SelectContext(ctx, &fwds, sql, evmChainId) return } -func (o *orm) FindForwardersInListByChain(evmChainId big.Big, addrs []common.Address) ([]Forwarder, error) { +func (o *DbORM) FindForwardersInListByChain(ctx context.Context, evmChainId big.Big, addrs []common.Address) ([]Forwarder, error) { var fwdrs []Forwarder arg := map[string]interface{}{ @@ -124,8 +129,8 @@ func (o *orm) FindForwardersInListByChain(evmChainId big.Big, addrs []common.Add return nil, errors.Wrap(err, "Failed to run sqlx.IN on query") } - query = o.q.Rebind(query) - err = o.q.Select(&fwdrs, query, args...) + query = o.db.Rebind(query) + err = o.db.SelectContext(ctx, &fwdrs, query, args...) 
if err != nil { return nil, errors.Wrap(err, "Failed to execute query") diff --git a/core/chains/evm/forwarders/orm_test.go b/core/chains/evm/forwarders/orm_test.go index e95ac3778c6..6293471184d 100644 --- a/core/chains/evm/forwarders/orm_test.go +++ b/core/chains/evm/forwarders/orm_test.go @@ -5,17 +5,17 @@ import ( "errors" "testing" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" + "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/jmoiron/sqlx" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" - - "github.com/jmoiron/sqlx" ) type TestORM struct { @@ -27,9 +27,8 @@ func setupORM(t *testing.T) *TestORM { t.Helper() var ( - db = pgtest.NewSqlxDB(t) - lggr = logger.Test(t) - orm = NewORM(db, lggr, pgtest.NewQConfig(true)) + db = pgtest.NewSqlxDB(t) + orm = NewORM(db) ) return &TestORM{ORM: orm, db: db} @@ -41,8 +40,9 @@ func Test_DeleteForwarder(t *testing.T) { orm := setupORM(t) addr := testutils.NewAddress() chainID := testutils.FixtureChainID + ctx := testutils.Context(t) - fwd, err := orm.CreateForwarder(addr, *big.New(chainID)) + fwd, err := orm.CreateForwarder(ctx, addr, *big.New(chainID)) require.NoError(t, err) assert.Equal(t, addr, fwd.Address) @@ -56,14 +56,14 @@ func Test_DeleteForwarder(t *testing.T) { rets := []error{ErrCleaningUp, nil, nil, ErrCleaningUp} expected := []error{ErrCleaningUp, nil, sql.ErrNoRows, sql.ErrNoRows} - testCleanupFn := func(q pg.Queryer, evmChainID int64, addr common.Address) error { + testCleanupFn := func(q sqlutil.Queryer, evmChainID int64, addr common.Address) error { require.Less(t, cleanupCalled, len(rets)) cleanupCalled++ return rets[cleanupCalled-1] 
} for _, expect := range expected { - err = orm.DeleteForwarder(fwd.ID, testCleanupFn) + err = orm.DeleteForwarder(ctx, fwd.ID, testCleanupFn) assert.ErrorIs(t, err, expect) } assert.Equal(t, 2, cleanupCalled) diff --git a/core/chains/evm/txmgr/builder.go b/core/chains/evm/txmgr/builder.go index f0cbcbf8d92..d6b5a59e7de 100644 --- a/core/chains/evm/txmgr/builder.go +++ b/core/chains/evm/txmgr/builder.go @@ -37,7 +37,7 @@ func NewTxm( var fwdMgr FwdMgr if txConfig.ForwardersEnabled() { - fwdMgr = forwarders.NewFwdMgr(db, client, logPoller, lggr, chainConfig, dbConfig) + fwdMgr = forwarders.NewFwdMgr(db, client, logPoller, lggr, chainConfig) } else { lggr.Info("EvmForwarderManager: Disabled") } diff --git a/core/chains/evm/txmgr/txmgr_test.go b/core/chains/evm/txmgr/txmgr_test.go index 0e28f2948ee..3f4b4563cbb 100644 --- a/core/chains/evm/txmgr/txmgr_test.go +++ b/core/chains/evm/txmgr/txmgr_test.go @@ -302,9 +302,9 @@ func TestTxm_CreateTransaction(t *testing.T) { evmConfig.MaxQueued = uint64(1) // Create mock forwarder, mock authorizedsenders call. - form := forwarders.NewORM(db, logger.Test(t), cfg.Database()) + form := forwarders.NewORM(db) fwdrAddr := testutils.NewAddress() - fwdr, err := form.CreateForwarder(fwdrAddr, ubig.Big(cltest.FixtureChainID)) + fwdr, err := form.CreateForwarder(testutils.Context(t), fwdrAddr, ubig.Big(cltest.FixtureChainID)) require.NoError(t, err) require.Equal(t, fwdr.Address, fwdrAddr) diff --git a/core/cmd/ocr2vrf_configure_commands.go b/core/cmd/ocr2vrf_configure_commands.go index 06f26ddb6a4..fc9f2444e40 100644 --- a/core/cmd/ocr2vrf_configure_commands.go +++ b/core/cmd/ocr2vrf_configure_commands.go @@ -342,8 +342,8 @@ func (s *Shell) authorizeForwarder(c *cli.Context, db *sqlx.DB, lggr logger.Logg } // Create forwarder for management in forwarder_manager.go. 
- orm := forwarders.NewORM(db, lggr, s.Config.Database()) - _, err = orm.CreateForwarder(common.HexToAddress(forwarderAddress), *ubig.NewI(chainID)) + orm := forwarders.NewORM(db) + _, err = orm.CreateForwarder(ctx, common.HexToAddress(forwarderAddress), *ubig.NewI(chainID)) if err != nil { return err } diff --git a/core/internal/features/features_test.go b/core/internal/features/features_test.go index 1c4d097d633..aa8274aaa4f 100644 --- a/core/internal/features/features_test.go +++ b/core/internal/features/features_test.go @@ -773,9 +773,9 @@ func setupForwarderEnabledNode(t *testing.T, owner *bind.TransactOpts, portV2 in b.Commit() // add forwarder address to be tracked in db - forwarderORM := forwarders.NewORM(app.GetSqlxDB(), logger.TestLogger(t), config.Database()) + forwarderORM := forwarders.NewORM(app.GetSqlxDB()) chainID := ubig.Big(*b.Blockchain().Config().ChainID) - _, err = forwarderORM.CreateForwarder(forwarder, chainID) + _, err = forwarderORM.CreateForwarder(testutils.Context(t), forwarder, chainID) require.NoError(t, err) return app, p2pKey.PeerID().Raw(), transmitter, forwarder, key diff --git a/core/internal/features/ocr2/features_ocr2_test.go b/core/internal/features/ocr2/features_ocr2_test.go index 938b7aa2a66..0b53d05d48f 100644 --- a/core/internal/features/ocr2/features_ocr2_test.go +++ b/core/internal/features/ocr2/features_ocr2_test.go @@ -171,9 +171,9 @@ func setupNodeOCR2( b.Commit() // add forwarder address to be tracked in db - forwarderORM := forwarders.NewORM(app.GetSqlxDB(), logger.TestLogger(t), config.Database()) + forwarderORM := forwarders.NewORM(app.GetSqlxDB()) chainID := ubig.Big(*b.Blockchain().Config().ChainID) - _, err2 = forwarderORM.CreateForwarder(faddr, chainID) + _, err2 = forwarderORM.CreateForwarder(testutils.Context(t), faddr, chainID) require.NoError(t, err2) effectiveTransmitter = faddr diff --git a/core/services/keeper/integration_test.go b/core/services/keeper/integration_test.go index af95788029f..e92d2c2a58f 
100644 --- a/core/services/keeper/integration_test.go +++ b/core/services/keeper/integration_test.go @@ -413,9 +413,9 @@ func TestKeeperForwarderEthIntegration(t *testing.T) { app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, backend.Backend(), nodeKey) require.NoError(t, app.Start(testutils.Context(t))) - forwarderORM := forwarders.NewORM(db, logger.TestLogger(t), config.Database()) + forwarderORM := forwarders.NewORM(db) chainID := ubig.Big(*backend.ConfiguredChainID()) - _, err = forwarderORM.CreateForwarder(fwdrAddress, chainID) + _, err = forwarderORM.CreateForwarder(testutils.Context(t), fwdrAddress, chainID) require.NoError(t, err) addr, err := app.GetRelayers().LegacyEVMChains().Slice()[0].TxManager().GetForwarderForEOA(nodeAddress) diff --git a/core/services/ocr2/plugins/ocr2keeper/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/integration_test.go index 56467c60abb..b8a64507d43 100644 --- a/core/services/ocr2/plugins/ocr2keeper/integration_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/integration_test.go @@ -13,6 +13,8 @@ import ( "testing" "time" + "golang.org/x/net/context" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/common" @@ -438,9 +440,11 @@ func setupForwarderForNode( backend.Commit() // add forwarder address to be tracked in db - forwarderORM := forwarders.NewORM(app.GetSqlxDB(), logger.TestLogger(t), app.GetConfig().Database()) + forwarderORM := forwarders.NewORM(app.GetSqlxDB()) chainID := ubig.Big(*backend.Blockchain().Config().ChainID) - _, err = forwarderORM.CreateForwarder(faddr, chainID) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, err = forwarderORM.CreateForwarder(ctx, faddr, chainID) require.NoError(t, err) chain, err := app.GetRelayers().LegacyEVMChains().Get((*big.Int)(&chainID).String()) diff --git 
a/core/services/ocr2/plugins/ocr2vrf/internal/ocr2vrf_integration_test.go b/core/services/ocr2/plugins/ocr2vrf/internal/ocr2vrf_integration_test.go index 4a01ee7904f..62a3fd94c56 100644 --- a/core/services/ocr2/plugins/ocr2vrf/internal/ocr2vrf_integration_test.go +++ b/core/services/ocr2/plugins/ocr2vrf/internal/ocr2vrf_integration_test.go @@ -47,7 +47,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/internal/cltest" "github.com/smartcontractkit/chainlink/v2/core/internal/cltest/heavyweight" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" - "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/dkgencryptkey" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/dkgsignkey" @@ -285,9 +284,9 @@ func setupNodeOCR2( b.Commit() // Add the forwarder to the node's forwarder manager. - forwarderORM := forwarders.NewORM(app.GetSqlxDB(), logger.TestLogger(t), config.Database()) + forwarderORM := forwarders.NewORM(app.GetSqlxDB()) chainID := ubig.Big(*b.Blockchain().Config().ChainID) - _, err = forwarderORM.CreateForwarder(faddr, chainID) + _, err = forwarderORM.CreateForwarder(testutils.Context(t), faddr, chainID) require.NoError(t, err) effectiveTransmitter = faddr } diff --git a/core/web/evm_forwarders_controller.go b/core/web/evm_forwarders_controller.go index 56d1285c88e..8bfa26d8470 100644 --- a/core/web/evm_forwarders_controller.go +++ b/core/web/evm_forwarders_controller.go @@ -1,9 +1,12 @@ package web import ( + "context" "math/big" "net/http" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" + "github.com/ethereum/go-ethereum/common" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/forwarders" @@ -11,7 +14,6 @@ import ( ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" "github.com/smartcontractkit/chainlink/v2/core/logger/audit" 
"github.com/smartcontractkit/chainlink/v2/core/services/chainlink" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" "github.com/smartcontractkit/chainlink/v2/core/utils/stringutils" "github.com/smartcontractkit/chainlink/v2/core/web/presenters" @@ -25,8 +27,10 @@ type EVMForwardersController struct { // Index lists EVM forwarders. func (cc *EVMForwardersController) Index(c *gin.Context, size, page, offset int) { - orm := forwarders.NewORM(cc.App.GetSqlxDB(), cc.App.GetLogger(), cc.App.GetConfig().Database()) - fwds, count, err := orm.FindForwarders(0, size) + orm := forwarders.NewORM(cc.App.GetSqlxDB()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fwds, count, err := orm.FindForwarders(ctx, 0, size) if err != nil { jsonAPIError(c, http.StatusBadRequest, err) @@ -55,8 +59,10 @@ func (cc *EVMForwardersController) Track(c *gin.Context) { jsonAPIError(c, http.StatusUnprocessableEntity, err) return } - orm := forwarders.NewORM(cc.App.GetSqlxDB(), cc.App.GetLogger(), cc.App.GetConfig().Database()) - fwd, err := orm.CreateForwarder(request.Address, *request.EVMChainID) + orm := forwarders.NewORM(cc.App.GetSqlxDB()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fwd, err := orm.CreateForwarder(ctx, request.Address, *request.EVMChainID) if err != nil { jsonAPIError(c, http.StatusBadRequest, err) @@ -79,7 +85,7 @@ func (cc *EVMForwardersController) Delete(c *gin.Context) { return } - filterCleanup := func(tx pg.Queryer, evmChainID int64, addr common.Address) error { + filterCleanup := func(tx sqlutil.Queryer, evmChainID int64, addr common.Address) error { chain, err2 := cc.App.GetRelayers().LegacyEVMChains().Get(big.NewInt(evmChainID).String()) if err2 != nil { // If the chain id doesn't even exist, or logpoller is disabled, then there isn't any filter to clean up. 
Returning an error @@ -91,11 +97,13 @@ func (cc *EVMForwardersController) Delete(c *gin.Context) { // handle same as non-existent chain id return nil } - return chain.LogPoller().UnregisterFilter(forwarders.FilterName(addr), pg.WithQueryer(tx)) + return chain.LogPoller().UnregisterFilter(forwarders.FilterName(addr)) } - orm := forwarders.NewORM(cc.App.GetSqlxDB(), cc.App.GetLogger(), cc.App.GetConfig().Database()) - err = orm.DeleteForwarder(id, filterCleanup) + orm := forwarders.NewORM(cc.App.GetSqlxDB()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err = orm.DeleteForwarder(ctx, id, filterCleanup) if err != nil { jsonAPIError(c, http.StatusInternalServerError, err) From 21cb891e7b4c523a97cf28684497e6718520b222 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 21 Feb 2024 15:48:15 -0500 Subject: [PATCH 23/65] Generate tidy --- core/chains/evm/forwarders/mocks/orm.go | 90 +++++++++++++------------ go.mod | 2 +- 2 files changed, 47 insertions(+), 45 deletions(-) diff --git a/core/chains/evm/forwarders/mocks/orm.go b/core/chains/evm/forwarders/mocks/orm.go index 691fbce8e9c..5786a1cd277 100644 --- a/core/chains/evm/forwarders/mocks/orm.go +++ b/core/chains/evm/forwarders/mocks/orm.go @@ -6,11 +6,13 @@ import ( common "github.com/ethereum/go-ethereum/common" big "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" + context "context" + forwarders "github.com/smartcontractkit/chainlink/v2/core/chains/evm/forwarders" mock "github.com/stretchr/testify/mock" - pg "github.com/smartcontractkit/chainlink/v2/core/services/pg" + sqlutil "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" ) // ORM is an autogenerated mock type for the ORM type @@ -18,9 +20,9 @@ type ORM struct { mock.Mock } -// CreateForwarder provides a mock function with given fields: addr, evmChainId -func (_m *ORM) CreateForwarder(addr common.Address, evmChainId big.Big) (forwarders.Forwarder, error) { - ret := _m.Called(addr, evmChainId) +// 
CreateForwarder provides a mock function with given fields: ctx, addr, evmChainId +func (_m *ORM) CreateForwarder(ctx context.Context, addr common.Address, evmChainId big.Big) (forwarders.Forwarder, error) { + ret := _m.Called(ctx, addr, evmChainId) if len(ret) == 0 { panic("no return value specified for CreateForwarder") @@ -28,17 +30,17 @@ func (_m *ORM) CreateForwarder(addr common.Address, evmChainId big.Big) (forward var r0 forwarders.Forwarder var r1 error - if rf, ok := ret.Get(0).(func(common.Address, big.Big) (forwarders.Forwarder, error)); ok { - return rf(addr, evmChainId) + if rf, ok := ret.Get(0).(func(context.Context, common.Address, big.Big) (forwarders.Forwarder, error)); ok { + return rf(ctx, addr, evmChainId) } - if rf, ok := ret.Get(0).(func(common.Address, big.Big) forwarders.Forwarder); ok { - r0 = rf(addr, evmChainId) + if rf, ok := ret.Get(0).(func(context.Context, common.Address, big.Big) forwarders.Forwarder); ok { + r0 = rf(ctx, addr, evmChainId) } else { r0 = ret.Get(0).(forwarders.Forwarder) } - if rf, ok := ret.Get(1).(func(common.Address, big.Big) error); ok { - r1 = rf(addr, evmChainId) + if rf, ok := ret.Get(1).(func(context.Context, common.Address, big.Big) error); ok { + r1 = rf(ctx, addr, evmChainId) } else { r1 = ret.Error(1) } @@ -46,17 +48,17 @@ func (_m *ORM) CreateForwarder(addr common.Address, evmChainId big.Big) (forward return r0, r1 } -// DeleteForwarder provides a mock function with given fields: id, cleanup -func (_m *ORM) DeleteForwarder(id int64, cleanup func(pg.Queryer, int64, common.Address) error) error { - ret := _m.Called(id, cleanup) +// DeleteForwarder provides a mock function with given fields: ctx, id, cleanup +func (_m *ORM) DeleteForwarder(ctx context.Context, id int64, cleanup func(sqlutil.Queryer, int64, common.Address) error) error { + ret := _m.Called(ctx, id, cleanup) if len(ret) == 0 { panic("no return value specified for DeleteForwarder") } var r0 error - if rf, ok := ret.Get(0).(func(int64, 
func(pg.Queryer, int64, common.Address) error) error); ok { - r0 = rf(id, cleanup) + if rf, ok := ret.Get(0).(func(context.Context, int64, func(sqlutil.Queryer, int64, common.Address) error) error); ok { + r0 = rf(ctx, id, cleanup) } else { r0 = ret.Error(0) } @@ -64,9 +66,9 @@ func (_m *ORM) DeleteForwarder(id int64, cleanup func(pg.Queryer, int64, common. return r0 } -// FindForwarders provides a mock function with given fields: offset, limit -func (_m *ORM) FindForwarders(offset int, limit int) ([]forwarders.Forwarder, int, error) { - ret := _m.Called(offset, limit) +// FindForwarders provides a mock function with given fields: ctx, offset, limit +func (_m *ORM) FindForwarders(ctx context.Context, offset int, limit int) ([]forwarders.Forwarder, int, error) { + ret := _m.Called(ctx, offset, limit) if len(ret) == 0 { panic("no return value specified for FindForwarders") @@ -75,25 +77,25 @@ func (_m *ORM) FindForwarders(offset int, limit int) ([]forwarders.Forwarder, in var r0 []forwarders.Forwarder var r1 int var r2 error - if rf, ok := ret.Get(0).(func(int, int) ([]forwarders.Forwarder, int, error)); ok { - return rf(offset, limit) + if rf, ok := ret.Get(0).(func(context.Context, int, int) ([]forwarders.Forwarder, int, error)); ok { + return rf(ctx, offset, limit) } - if rf, ok := ret.Get(0).(func(int, int) []forwarders.Forwarder); ok { - r0 = rf(offset, limit) + if rf, ok := ret.Get(0).(func(context.Context, int, int) []forwarders.Forwarder); ok { + r0 = rf(ctx, offset, limit) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]forwarders.Forwarder) } } - if rf, ok := ret.Get(1).(func(int, int) int); ok { - r1 = rf(offset, limit) + if rf, ok := ret.Get(1).(func(context.Context, int, int) int); ok { + r1 = rf(ctx, offset, limit) } else { r1 = ret.Get(1).(int) } - if rf, ok := ret.Get(2).(func(int, int) error); ok { - r2 = rf(offset, limit) + if rf, ok := ret.Get(2).(func(context.Context, int, int) error); ok { + r2 = rf(ctx, offset, limit) } else { r2 = 
ret.Error(2) } @@ -101,9 +103,9 @@ func (_m *ORM) FindForwarders(offset int, limit int) ([]forwarders.Forwarder, in return r0, r1, r2 } -// FindForwardersByChain provides a mock function with given fields: evmChainId -func (_m *ORM) FindForwardersByChain(evmChainId big.Big) ([]forwarders.Forwarder, error) { - ret := _m.Called(evmChainId) +// FindForwardersByChain provides a mock function with given fields: ctx, evmChainId +func (_m *ORM) FindForwardersByChain(ctx context.Context, evmChainId big.Big) ([]forwarders.Forwarder, error) { + ret := _m.Called(ctx, evmChainId) if len(ret) == 0 { panic("no return value specified for FindForwardersByChain") @@ -111,19 +113,19 @@ func (_m *ORM) FindForwardersByChain(evmChainId big.Big) ([]forwarders.Forwarder var r0 []forwarders.Forwarder var r1 error - if rf, ok := ret.Get(0).(func(big.Big) ([]forwarders.Forwarder, error)); ok { - return rf(evmChainId) + if rf, ok := ret.Get(0).(func(context.Context, big.Big) ([]forwarders.Forwarder, error)); ok { + return rf(ctx, evmChainId) } - if rf, ok := ret.Get(0).(func(big.Big) []forwarders.Forwarder); ok { - r0 = rf(evmChainId) + if rf, ok := ret.Get(0).(func(context.Context, big.Big) []forwarders.Forwarder); ok { + r0 = rf(ctx, evmChainId) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]forwarders.Forwarder) } } - if rf, ok := ret.Get(1).(func(big.Big) error); ok { - r1 = rf(evmChainId) + if rf, ok := ret.Get(1).(func(context.Context, big.Big) error); ok { + r1 = rf(ctx, evmChainId) } else { r1 = ret.Error(1) } @@ -131,9 +133,9 @@ func (_m *ORM) FindForwardersByChain(evmChainId big.Big) ([]forwarders.Forwarder return r0, r1 } -// FindForwardersInListByChain provides a mock function with given fields: evmChainId, addrs -func (_m *ORM) FindForwardersInListByChain(evmChainId big.Big, addrs []common.Address) ([]forwarders.Forwarder, error) { - ret := _m.Called(evmChainId, addrs) +// FindForwardersInListByChain provides a mock function with given fields: ctx, evmChainId, addrs +func 
(_m *ORM) FindForwardersInListByChain(ctx context.Context, evmChainId big.Big, addrs []common.Address) ([]forwarders.Forwarder, error) { + ret := _m.Called(ctx, evmChainId, addrs) if len(ret) == 0 { panic("no return value specified for FindForwardersInListByChain") @@ -141,19 +143,19 @@ func (_m *ORM) FindForwardersInListByChain(evmChainId big.Big, addrs []common.Ad var r0 []forwarders.Forwarder var r1 error - if rf, ok := ret.Get(0).(func(big.Big, []common.Address) ([]forwarders.Forwarder, error)); ok { - return rf(evmChainId, addrs) + if rf, ok := ret.Get(0).(func(context.Context, big.Big, []common.Address) ([]forwarders.Forwarder, error)); ok { + return rf(ctx, evmChainId, addrs) } - if rf, ok := ret.Get(0).(func(big.Big, []common.Address) []forwarders.Forwarder); ok { - r0 = rf(evmChainId, addrs) + if rf, ok := ret.Get(0).(func(context.Context, big.Big, []common.Address) []forwarders.Forwarder); ok { + r0 = rf(ctx, evmChainId, addrs) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]forwarders.Forwarder) } } - if rf, ok := ret.Get(1).(func(big.Big, []common.Address) error); ok { - r1 = rf(evmChainId, addrs) + if rf, ok := ret.Get(1).(func(context.Context, big.Big, []common.Address) error); ok { + r1 = rf(ctx, evmChainId, addrs) } else { r1 = ret.Error(1) } diff --git a/go.mod b/go.mod index c0ef2b819de..621091f56f7 100644 --- a/go.mod +++ b/go.mod @@ -97,6 +97,7 @@ require ( go.uber.org/zap v1.26.0 golang.org/x/crypto v0.19.0 golang.org/x/exp v0.0.0-20240213143201-ec583247a57a + golang.org/x/net v0.21.0 golang.org/x/sync v0.6.0 golang.org/x/term v0.17.0 golang.org/x/text v0.14.0 @@ -316,7 +317,6 @@ require ( go.uber.org/ratelimit v0.2.0 // indirect golang.org/x/arch v0.7.0 // indirect golang.org/x/mod v0.15.0 // indirect - golang.org/x/net v0.21.0 // indirect golang.org/x/oauth2 v0.17.0 // indirect golang.org/x/sys v0.17.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect From f9b166a88c6183bc688eb3a19a77b2251e5c9917 Mon Sep 17 
00:00:00 2001 From: Dylan Tinianov Date: Wed, 21 Feb 2024 16:00:13 -0500 Subject: [PATCH 24/65] Fix logpoller mocks --- core/services/blockhashstore/delegate_test.go | 4 ++-- .../v21/logprovider/provider_life_cycle_test.go | 8 ++++---- .../evmregistry/v21/logprovider/provider_test.go | 2 +- .../evmregistry/v21/transmit/event_provider_test.go | 4 ++-- .../evmregistry/v21/upkeepstate/scanner_test.go | 2 +- core/services/relay/evm/config_poller_test.go | 4 ++-- core/services/relay/evm/contract_transmitter_test.go | 2 +- .../relay/evm/functions/contract_transmitter_test.go | 6 +++--- .../relay/evm/functions/logpoller_wrapper_test.go | 4 ++-- 9 files changed, 18 insertions(+), 18 deletions(-) diff --git a/core/services/blockhashstore/delegate_test.go b/core/services/blockhashstore/delegate_test.go index 6fffcfdd493..da857b5268e 100644 --- a/core/services/blockhashstore/delegate_test.go +++ b/core/services/blockhashstore/delegate_test.go @@ -58,8 +58,8 @@ func createTestDelegate(t *testing.T) (*blockhashstore.Delegate, *testData) { kst := cltest.NewKeyStore(t, db, cfg.Database()).Eth() sendingKey, _ := cltest.MustInsertRandomKey(t, kst) lp := &mocklp.LogPoller{} - lp.On("RegisterFilter", mock.Anything).Return(nil) - lp.On("LatestBlock", mock.Anything, mock.Anything).Return(logpoller.LogPollerBlock{}, nil) + lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) + lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{}, nil) relayExtenders := evmtest.NewChainRelayExtenders( t, diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle_test.go index d978940d297..80db1241a1f 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle_test.go @@ -108,8 +108,8 @@ func 
TestLogEventProvider_LifeCycle(t *testing.T) { if tc.mockPoller { lp := new(mocks.LogPoller) - lp.On("RegisterFilter", mock.Anything).Return(nil) - lp.On("UnregisterFilter", mock.Anything).Return(nil) + lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) + lp.On("UnregisterFilter", mock.Anything, mock.Anything).Return(nil) lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{}, nil) hasFitlerTimes := 1 if tc.unregister { @@ -146,8 +146,8 @@ func TestLogEventProvider_LifeCycle(t *testing.T) { func TestEventLogProvider_RefreshActiveUpkeeps(t *testing.T) { ctx := testutils.Context(t) mp := new(mocks.LogPoller) - mp.On("RegisterFilter", mock.Anything).Return(nil) - mp.On("UnregisterFilter", mock.Anything).Return(nil) + mp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) + mp.On("UnregisterFilter", mock.Anything, mock.Anything).Return(nil) mp.On("HasFilter", mock.Anything).Return(false) mp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{}, nil) mp.On("ReplayAsync", mock.Anything).Return(nil) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go index 464b9aa3ba6..6ed68d4028a 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go @@ -242,7 +242,7 @@ func TestLogEventProvider_ReadLogs(t *testing.T) { mp := new(mocks.LogPoller) - mp.On("RegisterFilter", mock.Anything).Return(nil) + mp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) mp.On("ReplayAsync", mock.Anything).Return() mp.On("HasFilter", mock.Anything).Return(false) mp.On("UnregisterFilter", mock.Anything, mock.Anything).Return(nil) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider_test.go 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider_test.go index 89a49f07807..ac2eb82d49d 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider_test.go @@ -29,7 +29,7 @@ func TestTransmitEventProvider_Sanity(t *testing.T) { lp := new(mocks.LogPoller) - lp.On("RegisterFilter", mock.Anything).Return(nil) + lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) provider, err := NewTransmitEventProvider(logger.TestLogger(t), lp, common.HexToAddress("0x"), client.NewNullClient(big.NewInt(1), logger.TestLogger(t)), 32) require.NoError(t, err) @@ -103,7 +103,7 @@ func TestTransmitEventProvider_Sanity(t *testing.T) { func TestTransmitEventProvider_ProcessLogs(t *testing.T) { lp := new(mocks.LogPoller) - lp.On("RegisterFilter", mock.Anything).Return(nil) + lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) client := evmClientMocks.NewClient(t) provider, err := NewTransmitEventProvider(logger.TestLogger(t), lp, common.HexToAddress("0x"), client, 250) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/scanner_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/scanner_test.go index 9442a5f5d7a..4e710c2fadd 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/scanner_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/scanner_test.go @@ -83,7 +83,7 @@ func TestPerformedEventsScanner(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mp := new(mocks.LogPoller) - mp.On("RegisterFilter", mock.Anything).Return(nil) + mp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) mp.On("UnregisterFilter", mock.Anything, mock.Anything).Return(nil) scanner := NewPerformedEventsScanner(lggr, mp, registryAddr, 100) diff --git 
a/core/services/relay/evm/config_poller_test.go b/core/services/relay/evm/config_poller_test.go index 9caf09612e6..aed6f8ef4e8 100644 --- a/core/services/relay/evm/config_poller_test.go +++ b/core/services/relay/evm/config_poller_test.go @@ -169,7 +169,7 @@ func TestConfigPoller(t *testing.T) { t.Run("LatestConfigDetails, when logs have been pruned and config store contract is configured", func(t *testing.T) { // Give it a log poller that will never return logs mp := new(mocks.LogPoller) - mp.On("RegisterFilter", mock.Anything).Return(nil) + mp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) mp.On("LatestLogByEventSigWithConfs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, sql.ErrNoRows) t.Run("if callLatestConfigDetails succeeds", func(t *testing.T) { @@ -244,7 +244,7 @@ func TestConfigPoller(t *testing.T) { t.Run("LatestConfig, when logs have been pruned and config store contract is configured", func(t *testing.T) { // Give it a log poller that will never return logs mp := mocks.NewLogPoller(t) - mp.On("RegisterFilter", mock.Anything).Return(nil) + mp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) mp.On("Logs", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil) mp.On("LatestLogByEventSigWithConfs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, sql.ErrNoRows) diff --git a/core/services/relay/evm/contract_transmitter_test.go b/core/services/relay/evm/contract_transmitter_test.go index e03c5508247..a51c2fde0bd 100644 --- a/core/services/relay/evm/contract_transmitter_test.go +++ b/core/services/relay/evm/contract_transmitter_test.go @@ -43,7 +43,7 @@ func TestContractTransmitter(t *testing.T) { "0000000000000000000000000000000000000000000000000000000000000002") // epoch c.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(digestAndEpochDontScanLogs, nil).Once() contractABI, _ := 
abi.JSON(strings.NewReader(ocr2aggregator.OCR2AggregatorABI)) - lp.On("RegisterFilter", mock.Anything).Return(nil) + lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) ot, err := NewOCRContractTransmitter(gethcommon.Address{}, c, contractABI, mockTransmitter{}, lp, lggr, func(b []byte) (*txmgr.TxMeta, error) { return &txmgr.TxMeta{}, nil }) diff --git a/core/services/relay/evm/functions/contract_transmitter_test.go b/core/services/relay/evm/functions/contract_transmitter_test.go index c9dc942c5df..aaf4a5715d2 100644 --- a/core/services/relay/evm/functions/contract_transmitter_test.go +++ b/core/services/relay/evm/functions/contract_transmitter_test.go @@ -48,7 +48,7 @@ func TestContractTransmitter_LatestConfigDigestAndEpoch(t *testing.T) { c.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(digestAndEpochDontScanLogs, nil).Once() contractABI, err := abi.JSON(strings.NewReader(ocr2aggregator.OCR2AggregatorABI)) require.NoError(t, err) - lp.On("RegisterFilter", mock.Anything).Return(nil) + lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) functionsTransmitter, err := functions.NewFunctionsContractTransmitter(c, contractABI, &mockTransmitter{}, lp, lggr, func(b []byte) (*txmgr.TxMeta, error) { return &txmgr.TxMeta{}, nil @@ -71,7 +71,7 @@ func TestContractTransmitter_Transmit_V1(t *testing.T) { c := evmclimocks.NewClient(t) lp := lpmocks.NewLogPoller(t) contractABI, _ := abi.JSON(strings.NewReader(ocr2aggregator.OCR2AggregatorABI)) - lp.On("RegisterFilter", mock.Anything).Return(nil) + lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) ocrTransmitter := mockTransmitter{} ot, err := functions.NewFunctionsContractTransmitter(c, contractABI, &ocrTransmitter, lp, lggr, func(b []byte) (*txmgr.TxMeta, error) { @@ -114,7 +114,7 @@ func TestContractTransmitter_Transmit_V1_CoordinatorMismatch(t *testing.T) { c := evmclimocks.NewClient(t) lp := lpmocks.NewLogPoller(t) contractABI, _ := 
abi.JSON(strings.NewReader(ocr2aggregator.OCR2AggregatorABI)) - lp.On("RegisterFilter", mock.Anything).Return(nil) + lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) ocrTransmitter := mockTransmitter{} ot, err := functions.NewFunctionsContractTransmitter(c, contractABI, &ocrTransmitter, lp, lggr, func(b []byte) (*txmgr.TxMeta, error) { diff --git a/core/services/relay/evm/functions/logpoller_wrapper_test.go b/core/services/relay/evm/functions/logpoller_wrapper_test.go index c8c41bf4d4b..b103d7bd5a8 100644 --- a/core/services/relay/evm/functions/logpoller_wrapper_test.go +++ b/core/services/relay/evm/functions/logpoller_wrapper_test.go @@ -90,7 +90,7 @@ func TestLogPollerWrapper_SingleSubscriberEmptyEvents(t *testing.T) { lp.On("Logs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]logpoller.Log{}, nil) client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(addr(t, "01"), nil) - lp.On("RegisterFilter", mock.Anything).Return(nil) + lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) subscriber := newSubscriber(1) lpWrapper.SubscribeToUpdates("mock_subscriber", subscriber) @@ -120,7 +120,7 @@ func TestLogPollerWrapper_LatestEvents_ReorgHandling(t *testing.T) { lp, lpWrapper, client := setUp(t, 100_000) lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{BlockNumber: int64(100)}, nil) client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(addr(t, "01"), nil) - lp.On("RegisterFilter", mock.Anything).Return(nil) + lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) subscriber := newSubscriber(1) lpWrapper.SubscribeToUpdates("mock_subscriber", subscriber) mockedLog := getMockedRequestLog(t) From 6085cb080e9aacec9d0d5db9cd8b2012ada447e7 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 21 Feb 2024 16:18:33 -0500 Subject: [PATCH 25/65] Remove pg dependency --- core/chains/evm/forwarders/forwarder_manager.go | 1 - 1 file changed, 1 
deletion(-) diff --git a/core/chains/evm/forwarders/forwarder_manager.go b/core/chains/evm/forwarders/forwarder_manager.go index cc52e02e31f..3058f716358 100644 --- a/core/chains/evm/forwarders/forwarder_manager.go +++ b/core/chains/evm/forwarders/forwarder_manager.go @@ -23,7 +23,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/authorized_forwarder" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/authorized_receiver" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/offchain_aggregator_wrapper" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) var forwardABI = evmtypes.MustGetABI(authorized_forwarder.AuthorizedForwarderABI).Methods["forward"] From 007a348bf8c5551c27ea93f1a00ec4b922259649 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 21 Feb 2024 16:34:40 -0500 Subject: [PATCH 26/65] Fix mock calls --- .../evmregistry/v21/block_subscriber_test.go | 4 +-- .../ocr2vrf/coordinator/coordinator_test.go | 26 +++++++++---------- .../evm/functions/logpoller_wrapper_test.go | 10 +++---- 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/block_subscriber_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/block_subscriber_test.go index 2be6a6a874c..b984101bc16 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/block_subscriber_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/block_subscriber_test.go @@ -155,7 +155,7 @@ func TestBlockSubscriber_InitializeBlocks(t *testing.T) { for _, tc := range tests { t.Run(tc.Name, func(t *testing.T) { lp := new(mocks.LogPoller) - lp.On("GetBlocksRange", mock.Anything, tc.Blocks, mock.Anything).Return(tc.PollerBlocks, tc.Error) + lp.On("GetBlocksRange", mock.Anything, tc.Blocks).Return(tc.PollerBlocks, tc.Error) bs := NewBlockSubscriber(hb, lp, finality, lggr) bs.blockHistorySize = historySize bs.blockSize = blockSize @@ -299,7 
+299,7 @@ func TestBlockSubscriber_Start(t *testing.T) { }, } - lp.On("GetBlocksRange", mock.Anything, blocks, mock.Anything).Return(pollerBlocks, nil) + lp.On("GetBlocksRange", mock.Anything, blocks).Return(pollerBlocks, nil) bs := NewBlockSubscriber(hb, lp, finality, lggr) bs.blockHistorySize = historySize diff --git a/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator_test.go b/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator_test.go index 096589b2053..f9bd32a7ead 100644 --- a/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator_test.go +++ b/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator_test.go @@ -85,11 +85,11 @@ func TestCoordinator_DKGVRFCommittees(t *testing.T) { coordinatorAddress := newAddress(t) beaconAddress := newAddress(t) dkgAddress := newAddress(t) - lp.On("LatestLogByEventSigWithConfs", tp.configSetTopic, beaconAddress, logpoller.Confirmations(10), mock.Anything). + lp.On("LatestLogByEventSigWithConfs", mock.Anything, tp.configSetTopic, beaconAddress, logpoller.Confirmations(10)). 
Return(&logpoller.Log{ Data: hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000a6fca200010576e704b4a519484d6239ef17f1f5b4a82e330b0daf827ed4dc2789971b0000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001e0000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000002a0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000050000000000000000000000000a8cbea12a06869d3ec432ab9682dab6c761d591000000000000000000000000f4f9db7bb1d16b7cdfb18ec68994c26964f5985300000000000000000000000022fb3f90c539457f00d8484438869135e604a65500000000000000000000000033cbcedccb11c9773ad78e214ba342e979255ab30000000000000000000000006ffaa96256fbc1012325cca88c79f725c33eed80000000000000000000000000000000000000000000000000000000000000000500000000000000000000000074103cf8b436465870b26aa9fa2f62ad62b22e3500000000000000000000000038a6cb196f805cc3041f6645a5a6cec27b64430d00000000000000000000000047d7095cfebf8285bdaa421bc8268d0db87d933c000000000000000000000000a8842be973800ff61d80d2d53fa62c3a685380eb0000000000000000000000003750e31321aee8c024751877070e8d5f704ce98700000000000000000000000000000000000000000000000000000000000000206f3b82406688b8ddb944c6f2e6d808f014c8fa8d568d639c25019568c715fbf000000000000000000000000000000000000000000000000000000000000004220880d88ee16f1080c8afa0251880c8afa025208090dfc04a288090dfc04a30033a05010101010142206c5ca6f74b532222ac927dd3de235d46a943e372c0563393a33b01dcfd3f371c4220855114d25c2ef5e85fffe4f20a365672d8f2dba3b2ec82333f494168a2039c0442200266e835634db00977cbc1caa4db10e1676c1a4c0fcbc6ba7f09300f0d1831824220980cd91f7a73f20f4b0d51d00cd4e00373dc2beafbb299ca3c609757ab98c8304220eb6d36e2af8922085ff510bbe1eb8932a0e3295ca9f047fef25d90e69c5
2948f4a34313244334b6f6f574463364b7232644542684b59326b336e685057694676544565325331703978544532544b74344d7572716f684a34313244334b6f6f574b436e4367724b637743324a3577576a626e355435335068646b6b6f57454e534a39546537544b7836366f4a4a34313244334b6f6f575239616f675948786b357a38636b624c4c56346e426f7a777a747871664a7050586671336d4a7232796452474a34313244334b6f6f5744695444635565675637776b313133473366476a69616259756f54436f3157726f6f53656741343263556f544a34313244334b6f6f574e64687072586b5472665370354d5071736270467a70364167394a53787358694341434442676454424c656652820300050e416c74424e2d3132382047e282810e86e8cf899ae9a1b43e023bbe8825b103659bb8d6d4e54f6a3cfae7b106069c216a812d7616e47f0bd38fa4863f48fbcda6a38af4c58d2233dfa7cf79620947042d09f923e0a2f7a2270391e8b058d8bdb8f79fe082b7b627f025651c7290382fdff97c3181d15d162c146ce87ff752499d2acc2b26011439a12e29571a6f1e1defb1751c3be4258c493984fd9f0f6b4a26c539870b5f15bfed3d8ffac92499eb62dbd2beb7c1524275a8019022f6ce6a7e86c9e65e3099452a2b96fc2432b127a112970e1adf615f823b2b2180754c2f0ee01f1b389e56df55ca09702cd0401b66ff71779d2dd67222503a85ab921b28c329cc1832800b192d0b0247c0776e1b9653dc00df48daa6364287c84c0382f5165e7269fef06d10bc67c1bba252305d1af0dc7bb0fe92558eb4c5f38c23163dee1cfb34a72020669dbdfe337c16f3307472616e736c61746f722066726f6d20416c74424e2d3132382047e2828120746f20416c74424e2d3132382047e282825880ade2046080c8afa0256880c8afa0257080ade204788094ebdc0382019e010a205034214e0bd4373f38e162cf9fc9133e2f3b71441faa4c3d1ac01c1877f1cd2712200e03e975b996f911abba2b79d2596c2150bc94510963c40a1137a03df6edacdb1a107dee1cdb894163813bb3da604c9c133c1a10bb33302eeafbd55d352e35dcc5d2b3311a10d2c658b6b93d74a02d467849b6fe75251a10fea5308cc1fea69e7246eafe7ca8a3a51a1048efe1ad873b6f025ac0243bdef715f8000000000000000000000000000000000000000000000000000000000000"), }, nil) - lp.On("LatestLogByEventSigWithConfs", tp.configSetTopic, dkgAddress, logpoller.Confirmations(10), mock.Anything). 
+ lp.On("LatestLogByEventSigWithConfs", mock.Anything, tp.configSetTopic, dkgAddress, logpoller.Confirmations(10)). Return(&logpoller.Log{ Data: hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000a6fca200010576e704b4a519484d6239ef17f1f5b4a82e330b0daf827ed4dc2789971b0000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001e0000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000002a0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000050000000000000000000000000a8cbea12a06869d3ec432ab9682dab6c761d591000000000000000000000000f4f9db7bb1d16b7cdfb18ec68994c26964f5985300000000000000000000000022fb3f90c539457f00d8484438869135e604a65500000000000000000000000033cbcedccb11c9773ad78e214ba342e979255ab30000000000000000000000006ffaa96256fbc1012325cca88c79f725c33eed80000000000000000000000000000000000000000000000000000000000000000500000000000000000000000074103cf8b436465870b26aa9fa2f62ad62b22e3500000000000000000000000038a6cb196f805cc3041f6645a5a6cec27b64430d00000000000000000000000047d7095cfebf8285bdaa421bc8268d0db87d933c000000000000000000000000a8842be973800ff61d80d2d53fa62c3a685380eb0000000000000000000000003750e31321aee8c024751877070e8d5f704ce98700000000000000000000000000000000000000000000000000000000000000206f3b82406688b8ddb944c6f2e6d808f014c8fa8d568d639c25019568c715fbf000000000000000000000000000000000000000000000000000000000000004220880d88ee16f1080c8afa0251880c8afa025208090dfc04a288090dfc04a30033a05010101010142206c5ca6f74b532222ac927dd3de235d46a943e372c0563393a33b01dcfd3f371c4220855114d25c2ef5e85fffe4f20a365672d8f2dba3b2ec82333f494168a2039c0442200266e835634db00977cbc1caa4db10e1676c1a4c0fcbc6ba7f09300f0d1831824220980cd91f7a7
3f20f4b0d51d00cd4e00373dc2beafbb299ca3c609757ab98c8304220eb6d36e2af8922085ff510bbe1eb8932a0e3295ca9f047fef25d90e69c52948f4a34313244334b6f6f574463364b7232644542684b59326b336e685057694676544565325331703978544532544b74344d7572716f684a34313244334b6f6f574b436e4367724b637743324a3577576a626e355435335068646b6b6f57454e534a39546537544b7836366f4a4a34313244334b6f6f575239616f675948786b357a38636b624c4c56346e426f7a777a747871664a7050586671336d4a7232796452474a34313244334b6f6f5744695444635565675637776b313133473366476a69616259756f54436f3157726f6f53656741343263556f544a34313244334b6f6f574e64687072586b5472665370354d5071736270467a70364167394a53787358694341434442676454424c656652820300050e416c74424e2d3132382047e282810e86e8cf899ae9a1b43e023bbe8825b103659bb8d6d4e54f6a3cfae7b106069c216a812d7616e47f0bd38fa4863f48fbcda6a38af4c58d2233dfa7cf79620947042d09f923e0a2f7a2270391e8b058d8bdb8f79fe082b7b627f025651c7290382fdff97c3181d15d162c146ce87ff752499d2acc2b26011439a12e29571a6f1e1defb1751c3be4258c493984fd9f0f6b4a26c539870b5f15bfed3d8ffac92499eb62dbd2beb7c1524275a8019022f6ce6a7e86c9e65e3099452a2b96fc2432b127a112970e1adf615f823b2b2180754c2f0ee01f1b389e56df55ca09702cd0401b66ff71779d2dd67222503a85ab921b28c329cc1832800b192d0b0247c0776e1b9653dc00df48daa6364287c84c0382f5165e7269fef06d10bc67c1bba252305d1af0dc7bb0fe92558eb4c5f38c23163dee1cfb34a72020669dbdfe337c16f3307472616e736c61746f722066726f6d20416c74424e2d3132382047e2828120746f20416c74424e2d3132382047e282825880ade2046080c8afa0256880c8afa0257080ade204788094ebdc0382019e010a205034214e0bd4373f38e162cf9fc9133e2f3b71441faa4c3d1ac01c1877f1cd2712200e03e975b996f911abba2b79d2596c2150bc94510963c40a1137a03df6edacdb1a107dee1cdb894163813bb3da604c9c133c1a10bb33302eeafbd55d352e35dcc5d2b3311a10d2c658b6b93d74a02d467849b6fe75251a10fea5308cc1fea69e7246eafe7ca8a3a51a1048efe1ad873b6f025ac0243bdef715f8000000000000000000000000000000000000000000000000000000000000"), }, nil) @@ -134,7 +134,7 @@ func TestCoordinator_DKGVRFCommittees(t *testing.T) { tp := newTopics() beaconAddress := 
newAddress(t) - lp.On("LatestLogByEventSigWithConfs", tp.configSetTopic, beaconAddress, logpoller.Confirmations(10), mock.Anything). + lp.On("LatestLogByEventSigWithConfs", mock.Anything, tp.configSetTopic, beaconAddress, logpoller.Confirmations(10)). Return(nil, errors.New("rpc error")) c := &coordinator{ @@ -156,7 +156,7 @@ func TestCoordinator_DKGVRFCommittees(t *testing.T) { beaconAddress := newAddress(t) coordinatorAddress := newAddress(t) dkgAddress := newAddress(t) - lp.On("LatestLogByEventSigWithConfs", tp.configSetTopic, beaconAddress, logpoller.Confirmations(10), mock.Anything). + lp.On("LatestLogByEventSigWithConfs", mock.Anything, tp.configSetTopic, beaconAddress, logpoller.Confirmations(10)). Return(&logpoller.Log{ Data: hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000a6fca200010576e704b4a519484d6239ef17f1f5b4a82e330b0daf827ed4dc2789971b0000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001e0000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000002a0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000050000000000000000000000000a8cbea12a06869d3ec432ab9682dab6c761d591000000000000000000000000f4f9db7bb1d16b7cdfb18ec68994c26964f5985300000000000000000000000022fb3f90c539457f00d8484438869135e604a65500000000000000000000000033cbcedccb11c9773ad78e214ba342e979255ab30000000000000000000000006ffaa96256fbc1012325cca88c79f725c33eed80000000000000000000000000000000000000000000000000000000000000000500000000000000000000000074103cf8b436465870b26aa9fa2f62ad62b22e3500000000000000000000000038a6cb196f805cc3041f6645a5a6cec27b64430d00000000000000000000000047d7095cfebf8285bdaa421bc8268d0db87d933c000000000000000000
000000a8842be973800ff61d80d2d53fa62c3a685380eb0000000000000000000000003750e31321aee8c024751877070e8d5f704ce98700000000000000000000000000000000000000000000000000000000000000206f3b82406688b8ddb944c6f2e6d808f014c8fa8d568d639c25019568c715fbf000000000000000000000000000000000000000000000000000000000000004220880d88ee16f1080c8afa0251880c8afa025208090dfc04a288090dfc04a30033a05010101010142206c5ca6f74b532222ac927dd3de235d46a943e372c0563393a33b01dcfd3f371c4220855114d25c2ef5e85fffe4f20a365672d8f2dba3b2ec82333f494168a2039c0442200266e835634db00977cbc1caa4db10e1676c1a4c0fcbc6ba7f09300f0d1831824220980cd91f7a73f20f4b0d51d00cd4e00373dc2beafbb299ca3c609757ab98c8304220eb6d36e2af8922085ff510bbe1eb8932a0e3295ca9f047fef25d90e69c52948f4a34313244334b6f6f574463364b7232644542684b59326b336e685057694676544565325331703978544532544b74344d7572716f684a34313244334b6f6f574b436e4367724b637743324a3577576a626e355435335068646b6b6f57454e534a39546537544b7836366f4a4a34313244334b6f6f575239616f675948786b357a38636b624c4c56346e426f7a777a747871664a7050586671336d4a7232796452474a34313244334b6f6f5744695444635565675637776b313133473366476a69616259756f54436f3157726f6f53656741343263556f544a34313244334b6f6f574e64687072586b5472665370354d5071736270467a70364167394a53787358694341434442676454424c656652820300050e416c74424e2d3132382047e282810e86e8cf899ae9a1b43e023bbe8825b103659bb8d6d4e54f6a3cfae7b106069c216a812d7616e47f0bd38fa4863f48fbcda6a38af4c58d2233dfa7cf79620947042d09f923e0a2f7a2270391e8b058d8bdb8f79fe082b7b627f025651c7290382fdff97c3181d15d162c146ce87ff752499d2acc2b26011439a12e29571a6f1e1defb1751c3be4258c493984fd9f0f6b4a26c539870b5f15bfed3d8ffac92499eb62dbd2beb7c1524275a8019022f6ce6a7e86c9e65e3099452a2b96fc2432b127a112970e1adf615f823b2b2180754c2f0ee01f1b389e56df55ca09702cd0401b66ff71779d2dd67222503a85ab921b28c329cc1832800b192d0b0247c0776e1b9653dc00df48daa6364287c84c0382f5165e7269fef06d10bc67c1bba252305d1af0dc7bb0fe92558eb4c5f38c23163dee1cfb34a72020669dbdfe337c16f3307472616e736c61746f722066726f6d20416c74424e2d3132382047e282
8120746f20416c74424e2d3132382047e282825880ade2046080c8afa0256880c8afa0257080ade204788094ebdc0382019e010a205034214e0bd4373f38e162cf9fc9133e2f3b71441faa4c3d1ac01c1877f1cd2712200e03e975b996f911abba2b79d2596c2150bc94510963c40a1137a03df6edacdb1a107dee1cdb894163813bb3da604c9c133c1a10bb33302eeafbd55d352e35dcc5d2b3311a10d2c658b6b93d74a02d467849b6fe75251a10fea5308cc1fea69e7246eafe7ca8a3a51a1048efe1ad873b6f025ac0243bdef715f8000000000000000000000000000000000000000000000000000000000000"), }, nil) @@ -1035,10 +1035,11 @@ func TestCoordinator_ReportBlocks(t *testing.T) { lp.On("LatestBlock", mock.Anything). Return(logpoller.LogPollerBlock{BlockNumber: int64(latestHeadNumber)}, nil) - lp.On("GetBlocksRange", mock.Anything, append(requestedBlocks, latestHeadNumber-lookbackBlocks+1, latestHeadNumber), mock.Anything). + lp.On("GetBlocksRange", mock.Anything, append(requestedBlocks, latestHeadNumber-lookbackBlocks+1, latestHeadNumber)). Return(nil, errors.New("GetBlocks error")) lp.On( "LogsWithSigs", + mock.Anything, int64(latestHeadNumber-lookbackBlocks), int64(latestHeadNumber), []common.Hash{ @@ -1048,7 +1049,6 @@ func TestCoordinator_ReportBlocks(t *testing.T) { tp.outputsServedTopic, }, coordinatorAddress, - mock.Anything, ).Return([]logpoller.Log{ newRandomnessRequestedLog(t, 3, 195, 191, 0, coordinatorAddress), newRandomnessFulfillmentRequestedLog(t, 3, 195, 191, 1, 10_000_000, coordinatorAddress), @@ -1218,9 +1218,9 @@ func TestCoordinator_ReportIsOnchain(t *testing.T) { configDigest := common.BigToHash(big.NewInt(1337)) log := newNewTransmissionLog(t, beaconAddress, configDigest) log.BlockNumber = 195 - lp.On("IndexedLogs", tp.newTransmissionTopic, beaconAddress, 2, []common.Hash{ + lp.On("IndexedLogs", mock.Anything, tp.newTransmissionTopic, beaconAddress, 2, []common.Hash{ enrTopic, - }, logpoller.Confirmations(1), mock.Anything).Return([]logpoller.Log{log}, nil) + }, logpoller.Confirmations(1)).Return([]logpoller.Log{log}, nil) c := &coordinator{ lp: lp, @@ -1254,9 
+1254,9 @@ func TestCoordinator_ReportIsOnchain(t *testing.T) { newConfigDigest := common.BigToHash(big.NewInt(8888)) log := newNewTransmissionLog(t, beaconAddress, oldConfigDigest) log.BlockNumber = 195 - lp.On("IndexedLogs", tp.newTransmissionTopic, beaconAddress, 2, []common.Hash{ + lp.On("IndexedLogs", mock.Anything, tp.newTransmissionTopic, beaconAddress, 2, []common.Hash{ enrTopic, - }, logpoller.Confirmations(1), mock.Anything).Return([]logpoller.Log{log}, nil) + }, logpoller.Confirmations(1)).Return([]logpoller.Log{log}, nil) c := &coordinator{ lp: lp, @@ -1281,9 +1281,9 @@ func TestCoordinator_ReportIsOnchain(t *testing.T) { epochAndRound := toEpochAndRoundUint40(epoch, round) enrTopic := common.BytesToHash(common.LeftPadBytes(epochAndRound.Bytes(), 32)) lp := lp_mocks.NewLogPoller(t) - lp.On("IndexedLogs", tp.newTransmissionTopic, beaconAddress, 2, []common.Hash{ + lp.On("IndexedLogs", mock.Anything, tp.newTransmissionTopic, beaconAddress, 2, []common.Hash{ enrTopic, - }, logpoller.Confirmations(1), mock.Anything).Return([]logpoller.Log{}, nil) + }, logpoller.Confirmations(1)).Return([]logpoller.Log{}, nil) c := &coordinator{ lp: lp, @@ -1751,7 +1751,7 @@ func getLogPoller( }) } - lp.On("GetBlocksRange", mock.Anything, requestedBlocks, mock.Anything). + lp.On("GetBlocksRange", mock.Anything, requestedBlocks). 
Return(logPollerBlocks, nil) return lp diff --git a/core/services/relay/evm/functions/logpoller_wrapper_test.go b/core/services/relay/evm/functions/logpoller_wrapper_test.go index b103d7bd5a8..8e7d08410e4 100644 --- a/core/services/relay/evm/functions/logpoller_wrapper_test.go +++ b/core/services/relay/evm/functions/logpoller_wrapper_test.go @@ -88,7 +88,7 @@ func TestLogPollerWrapper_SingleSubscriberEmptyEvents(t *testing.T) { lp, lpWrapper, client := setUp(t, 100_000) // check only once lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{BlockNumber: int64(100)}, nil) - lp.On("Logs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]logpoller.Log{}, nil) + lp.On("Logs", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]logpoller.Log{}, nil) client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(addr(t, "01"), nil) lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) @@ -125,13 +125,13 @@ func TestLogPollerWrapper_LatestEvents_ReorgHandling(t *testing.T) { lpWrapper.SubscribeToUpdates("mock_subscriber", subscriber) mockedLog := getMockedRequestLog(t) // All logPoller queries for responses return none - lp.On("Logs", mock.Anything, mock.Anything, functions_coordinator.FunctionsCoordinatorOracleResponse{}.Topic(), mock.Anything).Return([]logpoller.Log{}, nil) + lp.On("Logs", mock.Anything, mock.Anything, mock.Anything, functions_coordinator.FunctionsCoordinatorOracleResponse{}.Topic(), mock.Anything).Return([]logpoller.Log{}, nil) // On the first logPoller query for requests, the request log appears - lp.On("Logs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]logpoller.Log{mockedLog}, nil).Once() + lp.On("Logs", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]logpoller.Log{mockedLog}, nil).Once() // On the 2nd query, the request log disappears - lp.On("Logs", mock.Anything, mock.Anything, 
functions_coordinator.FunctionsCoordinatorOracleRequest{}.Topic(), mock.Anything).Return([]logpoller.Log{}, nil).Once() + lp.On("Logs", mock.Anything, mock.Anything, mock.Anything, functions_coordinator.FunctionsCoordinatorOracleRequest{}.Topic(), mock.Anything).Return([]logpoller.Log{}, nil).Once() // On the 3rd query, the original request log appears again - lp.On("Logs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]logpoller.Log{mockedLog}, nil).Once() + lp.On("Logs", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]logpoller.Log{mockedLog}, nil).Once() servicetest.Run(t, lpWrapper) subscriber.updates.Wait() From c8c0f3089fc59e62712c4d1db70ef5cc8963c168 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 22 Feb 2024 10:56:14 -0500 Subject: [PATCH 27/65] Fix mock calls --- core/services/blockhashstore/feeder_test.go | 4 +--- .../ocr2/plugins/ocr2keeper/evmregistry/v20/registry_test.go | 2 +- .../ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/core/services/blockhashstore/feeder_test.go b/core/services/blockhashstore/feeder_test.go index fabde77c786..d75ab13bdd8 100644 --- a/core/services/blockhashstore/feeder_test.go +++ b/core/services/blockhashstore/feeder_test.go @@ -456,18 +456,16 @@ func (test testCase) testFeederWithLogPollerVRFv1(t *testing.T) { solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequest{}.Topic(), }, coordinatorAddress, - mock.Anything, ).Return(requestLogs, nil) lp.On( - mock.Anything, "LogsWithSigs", + mock.Anything, fromBlock, latest, []common.Hash{ solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequestFulfilled{}.Topic(), }, coordinatorAddress, - mock.Anything, ).Return(fulfillmentLogs, nil) // Instantiate feeder. 
diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry_test.go index 51448db35cf..3de22e507c7 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry_test.go @@ -195,7 +195,7 @@ func TestPollLogs(t *testing.T) { if test.LogsWithSigs != nil { fc := test.LogsWithSigs - mp.On("LogsWithSigs", fc.InputStart, fc.InputEnd, upkeepStateEvents, test.Address, mock.Anything).Return(fc.OutputLogs, fc.OutputErr) + mp.On("LogsWithSigs", mock.Anything, fc.InputStart, fc.InputEnd, upkeepStateEvents, test.Address).Return(fc.OutputLogs, fc.OutputErr) } rg := &EvmRegistry{ diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go index 2ffb14b61ca..2a147b4faa4 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go @@ -150,7 +150,7 @@ func TestPollLogs(t *testing.T) { if test.LogsWithSigs != nil { fc := test.LogsWithSigs - mp.On("LogsWithSigs", fc.InputStart, fc.InputEnd, upkeepStateEvents, test.Address, mock.Anything).Return(fc.OutputLogs, fc.OutputErr) + mp.On("LogsWithSigs", mock.Anything, fc.InputStart, fc.InputEnd, upkeepStateEvents, test.Address).Return(fc.OutputLogs, fc.OutputErr) } rg := &EvmRegistry{ From 66050eb80678d1882171bbedd622f33699671e80 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 22 Feb 2024 11:02:38 -0500 Subject: [PATCH 28/65] Fix mock calls --- core/services/blockhashstore/feeder_test.go | 9 ++------- .../plugins/ocr2keeper/evmregistry/v20/registry_test.go | 2 +- .../plugins/ocr2keeper/evmregistry/v21/registry_test.go | 2 +- 3 files changed, 4 insertions(+), 9 deletions(-) diff --git a/core/services/blockhashstore/feeder_test.go 
b/core/services/blockhashstore/feeder_test.go index fabde77c786..945359dd81f 100644 --- a/core/services/blockhashstore/feeder_test.go +++ b/core/services/blockhashstore/feeder_test.go @@ -456,18 +456,16 @@ func (test testCase) testFeederWithLogPollerVRFv1(t *testing.T) { solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequest{}.Topic(), }, coordinatorAddress, - mock.Anything, ).Return(requestLogs, nil) lp.On( - mock.Anything, "LogsWithSigs", + mock.Anything, fromBlock, latest, []common.Hash{ solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequestFulfilled{}.Topic(), }, coordinatorAddress, - mock.Anything, ).Return(fulfillmentLogs, nil) // Instantiate feeder. @@ -556,7 +554,6 @@ func (test testCase) testFeederWithLogPollerVRFv2(t *testing.T) { vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested{}.Topic(), }, coordinatorAddress, - mock.Anything, ).Return(requestLogs, nil) lp.On( "LogsWithSigs", @@ -567,7 +564,6 @@ func (test testCase) testFeederWithLogPollerVRFv2(t *testing.T) { vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled{}.Topic(), }, coordinatorAddress, - mock.Anything, ).Return(fulfillmentLogs, nil) // Instantiate feeder. @@ -649,13 +645,13 @@ func (test testCase) testFeederWithLogPollerVRFv2Plus(t *testing.T) { Return(logpoller.LogPollerBlock{BlockNumber: latest}, nil) lp.On( "LogsWithSigs", + mock.Anything, fromBlock, toBlock, []common.Hash{ vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalRandomWordsRequested{}.Topic(), }, coordinatorAddress, - mock.Anything, ).Return(requestLogs, nil) lp.On( "LogsWithSigs", @@ -666,7 +662,6 @@ func (test testCase) testFeederWithLogPollerVRFv2Plus(t *testing.T) { vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalRandomWordsFulfilled{}.Topic(), }, coordinatorAddress, - mock.Anything, ).Return(fulfillmentLogs, nil) // Instantiate feeder. 
diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry_test.go index 51448db35cf..3de22e507c7 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry_test.go @@ -195,7 +195,7 @@ func TestPollLogs(t *testing.T) { if test.LogsWithSigs != nil { fc := test.LogsWithSigs - mp.On("LogsWithSigs", fc.InputStart, fc.InputEnd, upkeepStateEvents, test.Address, mock.Anything).Return(fc.OutputLogs, fc.OutputErr) + mp.On("LogsWithSigs", mock.Anything, fc.InputStart, fc.InputEnd, upkeepStateEvents, test.Address).Return(fc.OutputLogs, fc.OutputErr) } rg := &EvmRegistry{ diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go index 2ffb14b61ca..2a147b4faa4 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go @@ -150,7 +150,7 @@ func TestPollLogs(t *testing.T) { if test.LogsWithSigs != nil { fc := test.LogsWithSigs - mp.On("LogsWithSigs", fc.InputStart, fc.InputEnd, upkeepStateEvents, test.Address, mock.Anything).Return(fc.OutputLogs, fc.OutputErr) + mp.On("LogsWithSigs", mock.Anything, fc.InputStart, fc.InputEnd, upkeepStateEvents, test.Address).Return(fc.OutputLogs, fc.OutputErr) } rg := &EvmRegistry{ From b1a7c57a9567c82b7457091c676819dbbc16d36b Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 22 Feb 2024 11:10:26 -0500 Subject: [PATCH 29/65] Use request context --- core/web/evm_forwarders_controller.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/core/web/evm_forwarders_controller.go b/core/web/evm_forwarders_controller.go index b94f99688f4..62f42838479 100644 --- a/core/web/evm_forwarders_controller.go +++ b/core/web/evm_forwarders_controller.go @@ -1,7 
+1,6 @@ package web import ( - "context" "math/big" "net/http" @@ -92,9 +91,7 @@ func (cc *EVMForwardersController) Delete(c *gin.Context) { // handle same as non-existent chain id return nil } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - return chain.LogPoller().UnregisterFilter(ctx, forwarders.FilterName(addr)) + return chain.LogPoller().UnregisterFilter(c.Request.Context(), forwarders.FilterName(addr)) } orm := forwarders.NewORM(cc.App.GetSqlxDB(), cc.App.GetLogger(), cc.App.GetConfig().Database()) From bae605bebc1545263baa5ce240bfae2141d25370 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 22 Feb 2024 11:17:15 -0500 Subject: [PATCH 30/65] Update context --- core/chains/evm/forwarders/orm.go | 12 +++++------- .../ocr2/plugins/ocr2keeper/integration_test.go | 3 +-- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/core/chains/evm/forwarders/orm.go b/core/chains/evm/forwarders/orm.go index e6ce262ff8f..a1dc1f6d883 100644 --- a/core/chains/evm/forwarders/orm.go +++ b/core/chains/evm/forwarders/orm.go @@ -51,13 +51,11 @@ func (o *DbORM) CreateForwarder(ctx context.Context, addr common.Address, evmCha // If cleanup is non-nil, it can be used to perform any chain- or contract-specific cleanup that need to happen atomically // on forwarder deletion. If cleanup returns an error, forwarder deletion will be aborted. 
func (o *DbORM) DeleteForwarder(ctx context.Context, id int64, cleanup func(tx sqlutil.Queryer, evmChainID int64, addr common.Address) error) (err error) { - var dest struct { - EvmChainId int64 - Address common.Address - } - - var rowsAffected int64 return o.Transaction(ctx, func(orm *DbORM) error { + var dest struct { + EvmChainId int64 + Address common.Address + } err := orm.db.GetContext(ctx, &dest, `SELECT evm_chain_id, address FROM evm.forwarders WHERE id = $1`, id) if err != nil { return err @@ -75,7 +73,7 @@ func (o *DbORM) DeleteForwarder(ctx context.Context, id int64, cleanup func(tx s if err != nil && !errors.Is(err, sql.ErrNoRows) { return err } - rowsAffected, err = result.RowsAffected() + rowsAffected, err := result.RowsAffected() if err == nil && rowsAffected == 0 { err = sql.ErrNoRows } diff --git a/core/services/ocr2/plugins/ocr2keeper/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/integration_test.go index b8a64507d43..c797f80d15d 100644 --- a/core/services/ocr2/plugins/ocr2keeper/integration_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/integration_test.go @@ -1,6 +1,7 @@ package ocr2keeper_test import ( + "context" "crypto/rand" "encoding/hex" "encoding/json" @@ -13,8 +14,6 @@ import ( "testing" "time" - "golang.org/x/net/context" - "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/common" From b0c1e6f8be6b06caf2a587ddb0af8576ffb04e1c Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 22 Feb 2024 11:24:22 -0500 Subject: [PATCH 31/65] Update contexts --- core/chains/evm/forwarders/forwarder_manager.go | 4 +--- core/chains/evm/forwarders/orm_test.go | 4 +--- core/web/evm_forwarders_controller.go | 16 +++++----------- go.mod | 2 +- 4 files changed, 8 insertions(+), 18 deletions(-) diff --git a/core/chains/evm/forwarders/forwarder_manager.go b/core/chains/evm/forwarders/forwarder_manager.go index 3058f716358..8b7690ebed8 100644 
--- a/core/chains/evm/forwarders/forwarder_manager.go +++ b/core/chains/evm/forwarders/forwarder_manager.go @@ -112,9 +112,7 @@ func FilterName(addr common.Address) string { func (f *FwdMgr) ForwarderFor(addr common.Address) (forwarder common.Address, err error) { // Gets forwarders for current chain. - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - fwdrs, err := f.ORM.FindForwardersByChain(ctx, big.Big(*f.evmClient.ConfiguredChainID())) + fwdrs, err := f.ORM.FindForwardersByChain(f.ctx, big.Big(*f.evmClient.ConfiguredChainID())) if err != nil { return common.Address{}, err } diff --git a/core/chains/evm/forwarders/orm_test.go b/core/chains/evm/forwarders/orm_test.go index 6293471184d..a662be80cf3 100644 --- a/core/chains/evm/forwarders/orm_test.go +++ b/core/chains/evm/forwarders/orm_test.go @@ -11,8 +11,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/jmoiron/sqlx" - "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" @@ -20,7 +18,7 @@ import ( type TestORM struct { ORM - db *sqlx.DB + db sqlutil.Queryer } func setupORM(t *testing.T) *TestORM { diff --git a/core/web/evm_forwarders_controller.go b/core/web/evm_forwarders_controller.go index ff6a4b87489..02eb6d7e566 100644 --- a/core/web/evm_forwarders_controller.go +++ b/core/web/evm_forwarders_controller.go @@ -4,10 +4,10 @@ import ( "math/big" "net/http" - "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" - "github.com/ethereum/go-ethereum/common" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/forwarders" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" @@ -27,9 +27,7 @@ type EVMForwardersController 
struct { // Index lists EVM forwarders. func (cc *EVMForwardersController) Index(c *gin.Context, size, page, offset int) { orm := forwarders.NewORM(cc.App.GetSqlxDB()) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - fwds, count, err := orm.FindForwarders(ctx, 0, size) + fwds, count, err := orm.FindForwarders(c.Request.Context(), 0, size) if err != nil { jsonAPIError(c, http.StatusBadRequest, err) @@ -59,9 +57,7 @@ func (cc *EVMForwardersController) Track(c *gin.Context) { return } orm := forwarders.NewORM(cc.App.GetSqlxDB()) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - fwd, err := orm.CreateForwarder(ctx, request.Address, *request.EVMChainID) + fwd, err := orm.CreateForwarder(c.Request.Context(), request.Address, *request.EVMChainID) if err != nil { jsonAPIError(c, http.StatusBadRequest, err) @@ -100,9 +96,7 @@ func (cc *EVMForwardersController) Delete(c *gin.Context) { } orm := forwarders.NewORM(cc.App.GetSqlxDB()) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - err = orm.DeleteForwarder(ctx, id, filterCleanup) + err = orm.DeleteForwarder(c.Request.Context(), id, filterCleanup) if err != nil { jsonAPIError(c, http.StatusInternalServerError, err) diff --git a/go.mod b/go.mod index 621091f56f7..c0ef2b819de 100644 --- a/go.mod +++ b/go.mod @@ -97,7 +97,6 @@ require ( go.uber.org/zap v1.26.0 golang.org/x/crypto v0.19.0 golang.org/x/exp v0.0.0-20240213143201-ec583247a57a - golang.org/x/net v0.21.0 golang.org/x/sync v0.6.0 golang.org/x/term v0.17.0 golang.org/x/text v0.14.0 @@ -317,6 +316,7 @@ require ( go.uber.org/ratelimit v0.2.0 // indirect golang.org/x/arch v0.7.0 // indirect golang.org/x/mod v0.15.0 // indirect + golang.org/x/net v0.21.0 // indirect golang.org/x/oauth2 v0.17.0 // indirect golang.org/x/sys v0.17.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect From edb8ac0667a00c5a6762d530fe6ce01cad1793fd Mon Sep 17 00:00:00 2001 From: Dylan 
Tinianov Date: Thu, 22 Feb 2024 11:32:44 -0500 Subject: [PATCH 32/65] Fix mock call args --- .../ocr2vrf/coordinator/coordinator_test.go | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator_test.go b/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator_test.go index f9bd32a7ead..beee01eaf7a 100644 --- a/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator_test.go +++ b/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator_test.go @@ -160,7 +160,7 @@ func TestCoordinator_DKGVRFCommittees(t *testing.T) { Return(&logpoller.Log{ Data: hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000a6fca200010576e704b4a519484d6239ef17f1f5b4a82e330b0daf827ed4dc2789971b0000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001e0000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000002a0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000050000000000000000000000000a8cbea12a06869d3ec432ab9682dab6c761d591000000000000000000000000f4f9db7bb1d16b7cdfb18ec68994c26964f5985300000000000000000000000022fb3f90c539457f00d8484438869135e604a65500000000000000000000000033cbcedccb11c9773ad78e214ba342e979255ab30000000000000000000000006ffaa96256fbc1012325cca88c79f725c33eed80000000000000000000000000000000000000000000000000000000000000000500000000000000000000000074103cf8b436465870b26aa9fa2f62ad62b22e3500000000000000000000000038a6cb196f805cc3041f6645a5a6cec27b64430d00000000000000000000000047d7095cfebf8285bdaa421bc8268d0db87d933c000000000000000000000000a8842be973800ff61d80d2d53fa62c3a685380eb0000000000000000000000003750e31321aee8c02475187707
0e8d5f704ce98700000000000000000000000000000000000000000000000000000000000000206f3b82406688b8ddb944c6f2e6d808f014c8fa8d568d639c25019568c715fbf000000000000000000000000000000000000000000000000000000000000004220880d88ee16f1080c8afa0251880c8afa025208090dfc04a288090dfc04a30033a05010101010142206c5ca6f74b532222ac927dd3de235d46a943e372c0563393a33b01dcfd3f371c4220855114d25c2ef5e85fffe4f20a365672d8f2dba3b2ec82333f494168a2039c0442200266e835634db00977cbc1caa4db10e1676c1a4c0fcbc6ba7f09300f0d1831824220980cd91f7a73f20f4b0d51d00cd4e00373dc2beafbb299ca3c609757ab98c8304220eb6d36e2af8922085ff510bbe1eb8932a0e3295ca9f047fef25d90e69c52948f4a34313244334b6f6f574463364b7232644542684b59326b336e685057694676544565325331703978544532544b74344d7572716f684a34313244334b6f6f574b436e4367724b637743324a3577576a626e355435335068646b6b6f57454e534a39546537544b7836366f4a4a34313244334b6f6f575239616f675948786b357a38636b624c4c56346e426f7a777a747871664a7050586671336d4a7232796452474a34313244334b6f6f5744695444635565675637776b313133473366476a69616259756f54436f3157726f6f53656741343263556f544a34313244334b6f6f574e64687072586b5472665370354d5071736270467a70364167394a53787358694341434442676454424c656652820300050e416c74424e2d3132382047e282810e86e8cf899ae9a1b43e023bbe8825b103659bb8d6d4e54f6a3cfae7b106069c216a812d7616e47f0bd38fa4863f48fbcda6a38af4c58d2233dfa7cf79620947042d09f923e0a2f7a2270391e8b058d8bdb8f79fe082b7b627f025651c7290382fdff97c3181d15d162c146ce87ff752499d2acc2b26011439a12e29571a6f1e1defb1751c3be4258c493984fd9f0f6b4a26c539870b5f15bfed3d8ffac92499eb62dbd2beb7c1524275a8019022f6ce6a7e86c9e65e3099452a2b96fc2432b127a112970e1adf615f823b2b2180754c2f0ee01f1b389e56df55ca09702cd0401b66ff71779d2dd67222503a85ab921b28c329cc1832800b192d0b0247c0776e1b9653dc00df48daa6364287c84c0382f5165e7269fef06d10bc67c1bba252305d1af0dc7bb0fe92558eb4c5f38c23163dee1cfb34a72020669dbdfe337c16f3307472616e736c61746f722066726f6d20416c74424e2d3132382047e2828120746f20416c74424e2d3132382047e282825880ade2046080c8afa0256880c8afa0257080ade204788094ebdc0382
019e010a205034214e0bd4373f38e162cf9fc9133e2f3b71441faa4c3d1ac01c1877f1cd2712200e03e975b996f911abba2b79d2596c2150bc94510963c40a1137a03df6edacdb1a107dee1cdb894163813bb3da604c9c133c1a10bb33302eeafbd55d352e35dcc5d2b3311a10d2c658b6b93d74a02d467849b6fe75251a10fea5308cc1fea69e7246eafe7ca8a3a51a1048efe1ad873b6f025ac0243bdef715f8000000000000000000000000000000000000000000000000000000000000"), }, nil) - lp.On("LatestLogByEventSigWithConfs", tp.configSetTopic, dkgAddress, logpoller.Confirmations(10), mock.Anything). + lp.On("LatestLogByEventSigWithConfs", mock.Anything, tp.configSetTopic, dkgAddress, logpoller.Confirmations(10)). Return(nil, errors.New("rpc error")) c := &coordinator{ @@ -230,6 +230,7 @@ func TestCoordinator_ReportBlocks(t *testing.T) { lp := getLogPoller(t, []uint64{195}, latestHeadNumber, true, true, lookbackBlocks) lp.On( "LogsWithSigs", + mock.Anything, int64(latestHeadNumber-lookbackBlocks), int64(latestHeadNumber), []common.Hash{ @@ -239,7 +240,6 @@ func TestCoordinator_ReportBlocks(t *testing.T) { tp.outputsServedTopic, }, coordinatorAddress, - mock.Anything, ).Return([]logpoller.Log{ newRandomnessRequestedLog(t, 3, 195, 191, 0, coordinatorAddress), newRandomnessRequestedLog(t, 3, 195, 192, 1, coordinatorAddress), @@ -289,6 +289,7 @@ func TestCoordinator_ReportBlocks(t *testing.T) { lp := getLogPoller(t, []uint64{195}, latestHeadNumber, true, true, lookbackBlocks) lp.On( "LogsWithSigs", + mock.Anything, int64(latestHeadNumber-lookbackBlocks), int64(latestHeadNumber), []common.Hash{ @@ -298,7 +299,6 @@ func TestCoordinator_ReportBlocks(t *testing.T) { tp.outputsServedTopic, }, coordinatorAddress, - mock.Anything, ).Return([]logpoller.Log{ newRandomnessFulfillmentRequestedLog(t, 3, 195, 191, 1, 1000, coordinatorAddress), newRandomnessFulfillmentRequestedLog(t, 3, 195, 192, 2, 1000, coordinatorAddress), @@ -351,6 +351,7 @@ func TestCoordinator_ReportBlocks(t *testing.T) { lp := getLogPoller(t, []uint64{195}, latestHeadNumber, true, true, lookbackBlocks) 
lp.On( "LogsWithSigs", + mock.Anything, int64(latestHeadNumber-lookbackBlocks), int64(latestHeadNumber), []common.Hash{ @@ -360,7 +361,6 @@ func TestCoordinator_ReportBlocks(t *testing.T) { tp.outputsServedTopic, }, coordinatorAddress, - mock.Anything, ).Return([]logpoller.Log{ newRandomnessRequestedLog(t, 3, 195, 191, 0, coordinatorAddress), newRandomnessRequestedLog(t, 3, 195, 192, 1, coordinatorAddress), @@ -420,6 +420,7 @@ func TestCoordinator_ReportBlocks(t *testing.T) { // when a VRF fulfillment happens on chain. lp.On( "LogsWithSigs", + mock.Anything, int64(latestHeadNumber-lookbackBlocks), int64(latestHeadNumber), []common.Hash{ @@ -429,7 +430,6 @@ func TestCoordinator_ReportBlocks(t *testing.T) { tp.outputsServedTopic, }, coordinatorAddress, - mock.Anything, ).Return([]logpoller.Log{ newRandomnessFulfillmentRequestedLog(t, 3, 195, 191, 1, 1000, coordinatorAddress), newRandomnessFulfillmentRequestedLog(t, 3, 195, 192, 2, 1000, coordinatorAddress), @@ -489,6 +489,7 @@ func TestCoordinator_ReportBlocks(t *testing.T) { lp := getLogPoller(t, []uint64{}, latestHeadNumber, true, true, lookbackBlocks) lp.On( "LogsWithSigs", + mock.Anything, int64(latestHeadNumber-lookbackBlocks), int64(latestHeadNumber), []common.Hash{ @@ -498,7 +499,6 @@ func TestCoordinator_ReportBlocks(t *testing.T) { tp.outputsServedTopic, }, coordinatorAddress, - mock.Anything, ).Return([]logpoller.Log{newOutputsServedLog(t, []vrf_coordinator.VRFBeaconTypesOutputServed{ { Height: 195, @@ -600,6 +600,7 @@ func TestCoordinator_ReportBlocks(t *testing.T) { c.lp = lp lp.On( "LogsWithSigs", + mock.Anything, int64(latestHeadNumber-lookbackBlocks), int64(latestHeadNumber), []common.Hash{ @@ -609,7 +610,6 @@ func TestCoordinator_ReportBlocks(t *testing.T) { tp.outputsServedTopic, }, coordinatorAddress, - mock.Anything, ).Return([]logpoller.Log{ newRandomnessFulfillmentRequestedLog(t, 3, 195, 191, 1, 1000, coordinatorAddress), newRandomnessFulfillmentRequestedLog(t, 3, 195, 192, 2, 1000, 
coordinatorAddress), @@ -662,6 +662,7 @@ func TestCoordinator_ReportBlocks(t *testing.T) { lp := getLogPoller(t, requestedBlocks, latestHeadNumber, true, true, blockhashLookback) lp.On( "LogsWithSigs", + mock.Anything, int64(latestHeadNumber-lookbackBlocks), int64(latestHeadNumber), []common.Hash{ @@ -671,7 +672,6 @@ func TestCoordinator_ReportBlocks(t *testing.T) { tp.outputsServedTopic, }, coordinatorAddress, - mock.Anything, ).Return(logs, nil) c := &coordinator{ @@ -724,6 +724,7 @@ func TestCoordinator_ReportBlocks(t *testing.T) { lp := getLogPoller(t, requestedBlocks, latestHeadNumber, true, true, lookbackBlocks) lp.On( "LogsWithSigs", + mock.Anything, int64(latestHeadNumber-lookbackBlocks), int64(latestHeadNumber), []common.Hash{ @@ -733,7 +734,6 @@ func TestCoordinator_ReportBlocks(t *testing.T) { tp.outputsServedTopic, }, coordinatorAddress, - mock.Anything, ).Return([]logpoller.Log{ newRandomnessRequestedLog(t, 3, 195, 191, 0, coordinatorAddress), newRandomnessFulfillmentRequestedLog(t, 3, 195, 191, 1, 2_000_000, coordinatorAddress), @@ -791,6 +791,7 @@ func TestCoordinator_ReportBlocks(t *testing.T) { lp := getLogPoller(t, requestedBlocks, latestHeadNumber, true, true, lookbackBlocks) lp.On( "LogsWithSigs", + mock.Anything, int64(latestHeadNumber-lookbackBlocks), int64(latestHeadNumber), []common.Hash{ @@ -800,7 +801,6 @@ func TestCoordinator_ReportBlocks(t *testing.T) { tp.outputsServedTopic, }, coordinatorAddress, - mock.Anything, ).Return([]logpoller.Log{ newRandomnessRequestedLog(t, 3, 195, 191, 0, coordinatorAddress), newRandomnessFulfillmentRequestedLog(t, 3, 195, 191, 1, 10_000_000, coordinatorAddress), @@ -854,6 +854,7 @@ func TestCoordinator_ReportBlocks(t *testing.T) { lp := getLogPoller(t, requestedBlocks, latestHeadNumber, true, true, lookbackBlocks) lp.On( "LogsWithSigs", + mock.Anything, int64(latestHeadNumber-lookbackBlocks), int64(latestHeadNumber), []common.Hash{ @@ -863,7 +864,6 @@ func TestCoordinator_ReportBlocks(t *testing.T) { 
tp.outputsServedTopic, }, coordinatorAddress, - mock.Anything, ).Return([]logpoller.Log{ newRandomnessRequestedLog(t, 3, 195, 191, 0, coordinatorAddress), newRandomnessFulfillmentRequestedLog(t, 3, 195, 191, 1, 10_000_000, coordinatorAddress), @@ -920,6 +920,7 @@ func TestCoordinator_ReportBlocks(t *testing.T) { lp := getLogPoller(t, requestedBlocks, latestHeadNumber, true, true, lookbackBlocks) lp.On( "LogsWithSigs", + mock.Anything, int64(latestHeadNumber-lookbackBlocks), int64(latestHeadNumber), []common.Hash{ @@ -929,7 +930,6 @@ func TestCoordinator_ReportBlocks(t *testing.T) { tp.outputsServedTopic, }, coordinatorAddress, - mock.Anything, ).Return([]logpoller.Log{}, nil) c := &coordinator{ @@ -977,6 +977,7 @@ func TestCoordinator_ReportBlocks(t *testing.T) { lp := getLogPoller(t, requestedBlocks, latestHeadNumber, true, true, lookbackBlocks) lp.On( "LogsWithSigs", + mock.Anything, int64(latestHeadNumber-lookbackBlocks), int64(latestHeadNumber), []common.Hash{ @@ -986,7 +987,6 @@ func TestCoordinator_ReportBlocks(t *testing.T) { tp.outputsServedTopic, }, coordinatorAddress, - mock.Anything, ).Return([]logpoller.Log{}, nil) c := &coordinator{ From bcd151609bb3245858f0d6f6c7902df2dc61328c Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Fri, 23 Feb 2024 09:54:48 -0500 Subject: [PATCH 33/65] Unexport orm --- core/chains/evm/logpoller/helper_test.go | 2 +- .../evm/logpoller/log_poller_internal_test.go | 2 +- core/chains/evm/logpoller/log_poller_test.go | 2 +- core/chains/evm/logpoller/orm.go | 89 ++++++++++--------- core/chains/evm/logpoller/orm_test.go | 2 +- .../plugins/ocr2keeper/integration_test.go | 5 +- .../vrf/v2/listener_v2_log_listener_test.go | 2 +- core/store/migrate/migrate_test.go | 2 +- .../universal/log_poller/helpers.go | 4 +- 9 files changed, 55 insertions(+), 55 deletions(-) diff --git a/core/chains/evm/logpoller/helper_test.go b/core/chains/evm/logpoller/helper_test.go index cd248cf5e70..a76b96265a9 100644 --- 
a/core/chains/evm/logpoller/helper_test.go +++ b/core/chains/evm/logpoller/helper_test.go @@ -35,7 +35,7 @@ type TestHarness struct { Lggr logger.Logger // Chain2/ORM2 is just a dummy second chain, doesn't have a client. ChainID, ChainID2 *big.Int - ORM, ORM2 *logpoller.DbORM + ORM, ORM2 logpoller.ORM LogPoller logpoller.LogPollerTest Client *backends.SimulatedBackend Owner *bind.TransactOpts diff --git a/core/chains/evm/logpoller/log_poller_internal_test.go b/core/chains/evm/logpoller/log_poller_internal_test.go index a35e01d7af6..de6b66acfe3 100644 --- a/core/chains/evm/logpoller/log_poller_internal_test.go +++ b/core/chains/evm/logpoller/log_poller_internal_test.go @@ -37,7 +37,7 @@ var ( ) // Validate that filters stored in log_filters_table match the filters stored in memory -func validateFiltersTable(t *testing.T, lp *logPoller, orm *DbORM) { +func validateFiltersTable(t *testing.T, lp *logPoller, orm ORM) { ctx := testutils.Context(t) filters, err := orm.LoadFilters(ctx) require.NoError(t, err) diff --git a/core/chains/evm/logpoller/log_poller_test.go b/core/chains/evm/logpoller/log_poller_test.go index 3f86ae846d4..2b0799ee184 100644 --- a/core/chains/evm/logpoller/log_poller_test.go +++ b/core/chains/evm/logpoller/log_poller_test.go @@ -44,7 +44,7 @@ func logRuntime(t testing.TB, start time.Time) { t.Log("runtime", time.Since(start)) } -func populateDatabase(t testing.TB, o *logpoller.DbORM, chainID *big.Int) (common.Hash, common.Address, common.Address) { +func populateDatabase(t testing.TB, o logpoller.ORM, chainID *big.Int) (common.Hash, common.Address, common.Address) { event1 := EmitterABI.Events["Log1"].ID address1 := common.HexToAddress("0x2ab9a2Dc53736b361b72d900CdF9F78F9406fbbb") address2 := common.HexToAddress("0x6E225058950f237371261C985Db6bDe26df2200E") diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index 296abfcdfc6..45b02eabbc8 100644 --- a/core/chains/evm/logpoller/orm.go +++ 
b/core/chains/evm/logpoller/orm.go @@ -28,12 +28,14 @@ type ORM interface { LoadFilters(ctx context.Context) (map[string]Filter, error) DeleteFilter(ctx context.Context, name string) error + InsertBlock(ctx context.Context, blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, finalizedBlock int64) error DeleteBlocksBefore(ctx context.Context, end int64) error DeleteLogsAndBlocksAfter(ctx context.Context, start int64) error DeleteExpiredLogs(ctx context.Context) error GetBlocksRange(ctx context.Context, start int64, end int64) ([]LogPollerBlock, error) SelectBlockByNumber(ctx context.Context, blockNumber int64) (*LogPollerBlock, error) + SelectBlockByHash(ctx context.Context, hash common.Hash) (*LogPollerBlock, error) SelectLatestBlock(ctx context.Context) (*LogPollerBlock, error) SelectLogs(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash) ([]Log, error) @@ -42,6 +44,7 @@ type ORM interface { SelectLatestLogByEventSigWithConfs(ctx context.Context, eventSig common.Hash, address common.Address, confs Confirmations) (*Log, error) SelectLatestLogEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, addresses []common.Address, eventSigs []common.Hash, confs Confirmations) ([]Log, error) SelectLatestBlockByEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) (int64, error) + SelectLogsByBlockRange(ctx context.Context, start, end int64) ([]Log, error) SelectIndexedLogs(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, confs Confirmations) ([]Log, error) SelectIndexedLogsByBlockRange(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash) ([]Log, error) @@ -55,32 +58,32 @@ type ORM interface { SelectLogsDataWordBetween(ctx context.Context, address common.Address, eventSig common.Hash, wordIndexMin int, 
wordIndexMax int, wordValue common.Hash, confs Confirmations) ([]Log, error) } -type DbORM struct { +type orm struct { chainID *big.Int db sqlutil.Queryer lggr logger.Logger } -var _ ORM = &DbORM{} +var _ ORM = &orm{} -// NewORM creates a DbORM scoped to chainID. -func NewORM(chainID *big.Int, db sqlutil.Queryer, lggr logger.Logger) *DbORM { - return &DbORM{ +// NewORM creates an orm scoped to chainID. +func NewORM(chainID *big.Int, db sqlutil.Queryer, lggr logger.Logger) ORM { + return &orm{ chainID: chainID, db: db, lggr: lggr, } } -func (o *DbORM) Transaction(ctx context.Context, fn func(*DbORM) error) (err error) { +func (o *orm) Transaction(ctx context.Context, fn func(*orm) error) (err error) { return sqlutil.Transact(ctx, o.new, o.db, nil, fn) } // new returns a NewORM like o, but backed by q. -func (o *DbORM) new(q sqlutil.Queryer) *DbORM { return NewORM(o.chainID, q, o.lggr) } +func (o *orm) new(q sqlutil.Queryer) *orm { return NewORM(o.chainID, q, o.lggr).(*orm) } // InsertBlock is idempotent to support replays. -func (o *DbORM) InsertBlock(ctx context.Context, blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, finalizedBlock int64) error { +func (o *orm) InsertBlock(ctx context.Context, blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, finalizedBlock int64) error { query := `INSERT INTO evm.log_poller_blocks (evm_chain_id, block_hash, block_number, block_timestamp, finalized_block_number, created_at) VALUES ($1, $2, $3, $4, $5, NOW()) ON CONFLICT DO NOTHING` @@ -92,7 +95,7 @@ func (o *DbORM) InsertBlock(ctx context.Context, blockHash common.Hash, blockNum // // Each address/event pair must have a unique job id, so it may be removed when the job is deleted. // If a second job tries to overwrite the same pair, this should fail. 
-func (o *DbORM) InsertFilter(ctx context.Context, filter Filter) (err error) { +func (o *orm) InsertFilter(ctx context.Context, filter Filter) (err error) { // '::' has to be escaped in the query string // https://github.com/jmoiron/sqlx/issues/91, https://github.com/jmoiron/sqlx/issues/428 query := ` @@ -110,7 +113,7 @@ func (o *DbORM) InsertFilter(ctx context.Context, filter Filter) (err error) { } // DeleteFilter removes all events,address pairs associated with the Filter -func (o *DbORM) DeleteFilter(ctx context.Context, name string) error { +func (o *orm) DeleteFilter(ctx context.Context, name string) error { _, err := o.db.ExecContext(ctx, `DELETE FROM evm.log_poller_filters WHERE name = $1 AND evm_chain_id = $2`, name, ubig.New(o.chainID)) @@ -119,7 +122,7 @@ func (o *DbORM) DeleteFilter(ctx context.Context, name string) error { } // LoadFilters returns all filters for this chain -func (o *DbORM) LoadFilters(ctx context.Context) (map[string]Filter, error) { +func (o *orm) LoadFilters(ctx context.Context) (map[string]Filter, error) { query := `SELECT name, ARRAY_AGG(DISTINCT address)::BYTEA[] AS addresses, ARRAY_AGG(DISTINCT event)::BYTEA[] AS event_sigs, @@ -135,7 +138,7 @@ func (o *DbORM) LoadFilters(ctx context.Context) (map[string]Filter, error) { return filters, err } -func (o *DbORM) SelectBlockByHash(ctx context.Context, hash common.Hash) (*LogPollerBlock, error) { +func (o *orm) SelectBlockByHash(ctx context.Context, hash common.Hash) (*LogPollerBlock, error) { var b LogPollerBlock if err := o.db.GetContext(ctx, &b, `SELECT * FROM evm.log_poller_blocks WHERE block_hash = $1 AND evm_chain_id = $2`, hash.Bytes(), ubig.New(o.chainID)); err != nil { return nil, err @@ -143,7 +146,7 @@ func (o *DbORM) SelectBlockByHash(ctx context.Context, hash common.Hash) (*LogPo return &b, nil } -func (o *DbORM) SelectBlockByNumber(ctx context.Context, n int64) (*LogPollerBlock, error) { +func (o *orm) SelectBlockByNumber(ctx context.Context, n int64) (*LogPollerBlock, 
error) { var b LogPollerBlock if err := o.db.GetContext(ctx, &b, `SELECT * FROM evm.log_poller_blocks WHERE block_number = $1 AND evm_chain_id = $2`, n, ubig.New(o.chainID)); err != nil { return nil, err @@ -151,7 +154,7 @@ func (o *DbORM) SelectBlockByNumber(ctx context.Context, n int64) (*LogPollerBlo return &b, nil } -func (o *DbORM) SelectLatestBlock(ctx context.Context) (*LogPollerBlock, error) { +func (o *orm) SelectLatestBlock(ctx context.Context) (*LogPollerBlock, error) { var b LogPollerBlock if err := o.db.GetContext(ctx, &b, `SELECT * FROM evm.log_poller_blocks WHERE evm_chain_id = $1 ORDER BY block_number DESC LIMIT 1`, ubig.New(o.chainID)); err != nil { return nil, err @@ -159,7 +162,7 @@ func (o *DbORM) SelectLatestBlock(ctx context.Context) (*LogPollerBlock, error) return &b, nil } -func (o *DbORM) SelectLatestLogByEventSigWithConfs(ctx context.Context, eventSig common.Hash, address common.Address, confs Confirmations) (*Log, error) { +func (o *orm) SelectLatestLogByEventSigWithConfs(ctx context.Context, eventSig common.Hash, address common.Address, confs Confirmations) (*Log, error) { query := fmt.Sprintf(` SELECT * FROM evm.logs WHERE evm_chain_id = $1 @@ -177,15 +180,15 @@ func (o *DbORM) SelectLatestLogByEventSigWithConfs(ctx context.Context, eventSig } // DeleteBlocksBefore delete all blocks before and including end. -func (o *DbORM) DeleteBlocksBefore(ctx context.Context, end int64) error { +func (o *orm) DeleteBlocksBefore(ctx context.Context, end int64) error { _, err := o.db.ExecContext(ctx, `DELETE FROM evm.log_poller_blocks WHERE block_number <= $1 AND evm_chain_id = $2`, end, ubig.New(o.chainID)) return err } -func (o *DbORM) DeleteLogsAndBlocksAfter(ctx context.Context, start int64) error { +func (o *orm) DeleteLogsAndBlocksAfter(ctx context.Context, start int64) error { // These deletes are bounded by reorg depth, so they are // fast and should not slow down the log readers. 
- return o.Transaction(ctx, func(orm *DbORM) error { + return o.Transaction(ctx, func(orm *orm) error { // Applying upper bound filter is critical for Postgres performance (especially for evm.logs table) // because it allows the planner to properly estimate the number of rows to be scanned. // If not applied, these queries can become very slow. After some critical number @@ -224,7 +227,7 @@ type Exp struct { ShouldDelete bool } -func (o *DbORM) DeleteExpiredLogs(ctx context.Context) error { +func (o *orm) DeleteExpiredLogs(ctx context.Context) error { _, err := o.db.ExecContext(ctx, `WITH r AS ( SELECT address, event, MAX(retention) AS retention FROM evm.log_poller_filters WHERE evm_chain_id=$1 @@ -237,16 +240,16 @@ func (o *DbORM) DeleteExpiredLogs(ctx context.Context) error { } // InsertLogs is idempotent to support replays. -func (o *DbORM) InsertLogs(ctx context.Context, logs []Log) error { +func (o *orm) InsertLogs(ctx context.Context, logs []Log) error { if err := o.validateLogs(logs); err != nil { return err } - return o.Transaction(ctx, func(orm *DbORM) error { + return o.Transaction(ctx, func(orm *orm) error { return o.insertLogsWithinTx(ctx, logs, orm.db.(*sqlx.Tx)) }) } -func (o *DbORM) InsertLogsWithBlock(ctx context.Context, logs []Log, block LogPollerBlock) error { +func (o *orm) InsertLogsWithBlock(ctx context.Context, logs []Log, block LogPollerBlock) error { // Optimization, don't open TX when there is only a block to be persisted if len(logs) == 0 { return o.InsertBlock(ctx, block.BlockHash, block.BlockNumber, block.BlockTimestamp, block.FinalizedBlockNumber) @@ -257,7 +260,7 @@ func (o *DbORM) InsertLogsWithBlock(ctx context.Context, logs []Log, block LogPo } // Block and logs goes with the same TX to ensure atomicity - return o.Transaction(ctx, func(orm *DbORM) error { + return o.Transaction(ctx, func(orm *orm) error { if err := o.insertBlockWithinTx(ctx, orm.db.(*sqlx.Tx), block.BlockHash, block.BlockNumber, block.BlockTimestamp, 
block.FinalizedBlockNumber); err != nil { return err } @@ -265,7 +268,7 @@ func (o *DbORM) InsertLogsWithBlock(ctx context.Context, logs []Log, block LogPo }) } -func (o *DbORM) insertBlockWithinTx(ctx context.Context, tx *sqlx.Tx, blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, finalizedBlock int64) error { +func (o *orm) insertBlockWithinTx(ctx context.Context, tx *sqlx.Tx, blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, finalizedBlock int64) error { query := `INSERT INTO evm.log_poller_blocks (evm_chain_id, block_hash, block_number, block_timestamp, finalized_block_number, created_at) VALUES ($1, $2, $3, $4, $5, NOW()) ON CONFLICT DO NOTHING` @@ -273,7 +276,7 @@ func (o *DbORM) insertBlockWithinTx(ctx context.Context, tx *sqlx.Tx, blockHash return err } -func (o *DbORM) insertLogsWithinTx(ctx context.Context, logs []Log, tx *sqlx.Tx) error { +func (o *orm) insertLogsWithinTx(ctx context.Context, logs []Log, tx *sqlx.Tx) error { batchInsertSize := 4000 for i := 0; i < len(logs); i += batchInsertSize { start, end := i, i+batchInsertSize @@ -303,7 +306,7 @@ func (o *DbORM) insertLogsWithinTx(ctx context.Context, logs []Log, tx *sqlx.Tx) return nil } -func (o *DbORM) validateLogs(logs []Log) error { +func (o *orm) validateLogs(logs []Log) error { for _, log := range logs { if o.chainID.Cmp(log.EvmChainId.ToInt()) != 0 { return errors.Errorf("invalid chainID in log got %v want %v", log.EvmChainId.ToInt(), o.chainID) @@ -312,7 +315,7 @@ func (o *DbORM) validateLogs(logs []Log) error { return nil } -func (o *DbORM) SelectLogsByBlockRange(ctx context.Context, start, end int64) ([]Log, error) { +func (o *orm) SelectLogsByBlockRange(ctx context.Context, start, end int64) ([]Log, error) { var logs []Log err := o.db.SelectContext(ctx, &logs, ` SELECT * FROM evm.logs @@ -327,7 +330,7 @@ func (o *DbORM) SelectLogsByBlockRange(ctx context.Context, start, end int64) ([ } // SelectLogs finds the logs in a given block range. 
-func (o *DbORM) SelectLogs(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash) ([]Log, error) { +func (o *orm) SelectLogs(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash) ([]Log, error) { var logs []Log err := o.db.SelectContext(ctx, &logs, ` SELECT * FROM evm.logs @@ -344,7 +347,7 @@ func (o *DbORM) SelectLogs(ctx context.Context, start, end int64, address common } // SelectLogsCreatedAfter finds logs created after some timestamp. -func (o *DbORM) SelectLogsCreatedAfter(ctx context.Context, address common.Address, eventSig common.Hash, after time.Time, confs Confirmations) ([]Log, error) { +func (o *orm) SelectLogsCreatedAfter(ctx context.Context, address common.Address, eventSig common.Hash, after time.Time, confs Confirmations) ([]Log, error) { query := fmt.Sprintf(` SELECT * FROM evm.logs WHERE evm_chain_id = $1 @@ -364,7 +367,7 @@ func (o *DbORM) SelectLogsCreatedAfter(ctx context.Context, address common.Addre // SelectLogsWithSigs finds the logs in the given block range with the given event signatures // emitted from the given address. 
-func (o *DbORM) SelectLogsWithSigs(ctx context.Context, start, end int64, address common.Address, eventSigs []common.Hash) (logs []Log, err error) { +func (o *orm) SelectLogsWithSigs(ctx context.Context, start, end int64, address common.Address, eventSigs []common.Hash) (logs []Log, err error) { err = o.db.SelectContext(ctx, &logs, ` SELECT * FROM evm.logs WHERE evm_chain_id = $1 @@ -378,7 +381,7 @@ func (o *DbORM) SelectLogsWithSigs(ctx context.Context, start, end int64, addres return logs, err } -func (o *DbORM) GetBlocksRange(ctx context.Context, start int64, end int64) ([]LogPollerBlock, error) { +func (o *orm) GetBlocksRange(ctx context.Context, start int64, end int64) ([]LogPollerBlock, error) { var blocks []LogPollerBlock err := o.db.SelectContext(ctx, &blocks, ` SELECT * FROM evm.log_poller_blocks @@ -393,7 +396,7 @@ func (o *DbORM) GetBlocksRange(ctx context.Context, start int64, end int64) ([]L } // SelectLatestLogEventSigsAddrsWithConfs finds the latest log by (address, event) combination that matches a list of Addresses and list of events -func (o *DbORM) SelectLatestLogEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, addresses []common.Address, eventSigs []common.Hash, confs Confirmations) ([]Log, error) { +func (o *orm) SelectLatestLogEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, addresses []common.Address, eventSigs []common.Hash, confs Confirmations) ([]Log, error) { // TODO: cant convert byteArray!? query := fmt.Sprintf(` SELECT * FROM evm.logs WHERE (block_number, address, event_sig) IN ( @@ -415,7 +418,7 @@ func (o *DbORM) SelectLatestLogEventSigsAddrsWithConfs(ctx context.Context, from } // SelectLatestBlockByEventSigsAddrsWithConfs finds the latest block number that matches a list of Addresses and list of events. 
It returns 0 if there is no matching block -func (o *DbORM) SelectLatestBlockByEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) (int64, error) { +func (o *orm) SelectLatestBlockByEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) (int64, error) { query := fmt.Sprintf(` SELECT COALESCE(MAX(block_number), 0) FROM evm.logs WHERE evm_chain_id = $1 @@ -430,7 +433,7 @@ func (o *DbORM) SelectLatestBlockByEventSigsAddrsWithConfs(ctx context.Context, return blockNumber, nil } -func (o *DbORM) SelectLogsDataWordRange(ctx context.Context, address common.Address, eventSig common.Hash, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations) ([]Log, error) { +func (o *orm) SelectLogsDataWordRange(ctx context.Context, address common.Address, eventSig common.Hash, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations) ([]Log, error) { query := fmt.Sprintf(`SELECT * FROM evm.logs WHERE evm_chain_id = $1 AND address = $2 @@ -446,7 +449,7 @@ func (o *DbORM) SelectLogsDataWordRange(ctx context.Context, address common.Addr return logs, nil } -func (o *DbORM) SelectLogsDataWordGreaterThan(ctx context.Context, address common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, confs Confirmations) ([]Log, error) { +func (o *orm) SelectLogsDataWordGreaterThan(ctx context.Context, address common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, confs Confirmations) ([]Log, error) { query := fmt.Sprintf(` SELECT * FROM evm.logs WHERE evm_chain_id = $1 @@ -462,7 +465,7 @@ func (o *DbORM) SelectLogsDataWordGreaterThan(ctx context.Context, address commo return logs, nil } -func (o *DbORM) SelectLogsDataWordBetween(ctx context.Context, address common.Address, eventSig common.Hash, wordIndexMin int, wordIndexMax int, wordValue common.Hash, confs 
Confirmations) ([]Log, error) { +func (o *orm) SelectLogsDataWordBetween(ctx context.Context, address common.Address, eventSig common.Hash, wordIndexMin int, wordIndexMax int, wordValue common.Hash, confs Confirmations) ([]Log, error) { query := fmt.Sprintf(` SELECT * FROM evm.logs WHERE evm_chain_id = $1 @@ -479,7 +482,7 @@ func (o *DbORM) SelectLogsDataWordBetween(ctx context.Context, address common.Ad return logs, nil } -func (o *DbORM) SelectIndexedLogsTopicGreaterThan(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValueMin common.Hash, confs Confirmations) ([]Log, error) { +func (o *orm) SelectIndexedLogsTopicGreaterThan(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValueMin common.Hash, confs Confirmations) ([]Log, error) { topicIndex, err := UseTopicIndex(topicIndex) if err != nil { return nil, err @@ -500,7 +503,7 @@ func (o *DbORM) SelectIndexedLogsTopicGreaterThan(ctx context.Context, address c return logs, nil } -func (o *DbORM) SelectIndexedLogsTopicRange(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValueMin, topicValueMax common.Hash, confs Confirmations) ([]Log, error) { +func (o *orm) SelectIndexedLogsTopicRange(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValueMin, topicValueMax common.Hash, confs Confirmations) ([]Log, error) { topicIndex, err := UseTopicIndex(topicIndex) if err != nil { return nil, err @@ -523,7 +526,7 @@ func (o *DbORM) SelectIndexedLogsTopicRange(ctx context.Context, address common. 
return logs, nil } -func (o *DbORM) SelectIndexedLogs(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, confs Confirmations) ([]Log, error) { +func (o *orm) SelectIndexedLogs(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, confs Confirmations) ([]Log, error) { topicIndex, err := UseTopicIndex(topicIndex) if err != nil { return nil, err @@ -546,7 +549,7 @@ func (o *DbORM) SelectIndexedLogs(ctx context.Context, address common.Address, e } // SelectIndexedLogsByBlockRange finds the indexed logs in a given block range. -func (o *DbORM) SelectIndexedLogsByBlockRange(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash) ([]Log, error) { +func (o *orm) SelectIndexedLogsByBlockRange(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash) ([]Log, error) { topicIndex, err := UseTopicIndex(topicIndex) if err != nil { return nil, err @@ -569,7 +572,7 @@ func (o *DbORM) SelectIndexedLogsByBlockRange(ctx context.Context, start, end in return logs, nil } -func (o *DbORM) SelectIndexedLogsCreatedAfter(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations) ([]Log, error) { +func (o *orm) SelectIndexedLogsCreatedAfter(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations) ([]Log, error) { topicIndex, err := UseTopicIndex(topicIndex) if err != nil { return nil, err @@ -592,7 +595,7 @@ func (o *DbORM) SelectIndexedLogsCreatedAfter(ctx context.Context, address commo return logs, nil } -func (o *DbORM) SelectIndexedLogsByTxHash(ctx context.Context, address common.Address, eventSig common.Hash, txHash common.Hash) ([]Log, error) { +func (o 
*orm) SelectIndexedLogsByTxHash(ctx context.Context, address common.Address, eventSig common.Hash, txHash common.Hash) ([]Log, error) { var logs []Log err := o.db.SelectContext(ctx, &logs, ` SELECT * FROM evm.logs @@ -608,7 +611,7 @@ func (o *DbORM) SelectIndexedLogsByTxHash(ctx context.Context, address common.Ad } // SelectIndexedLogsWithSigsExcluding query's for logs that have signature A and exclude logs that have a corresponding signature B, matching is done based on the topic index both logs should be inside the block range and have the minimum number of confirmations -func (o *DbORM) SelectIndexedLogsWithSigsExcluding(ctx context.Context, sigA, sigB common.Hash, topicIndex int, address common.Address, startBlock, endBlock int64, confs Confirmations) ([]Log, error) { +func (o *orm) SelectIndexedLogsWithSigsExcluding(ctx context.Context, sigA, sigB common.Hash, topicIndex int, address common.Address, startBlock, endBlock int64, confs Confirmations) ([]Log, error) { topicIndex, err := UseTopicIndex(topicIndex) if err != nil { return nil, err diff --git a/core/chains/evm/logpoller/orm_test.go b/core/chains/evm/logpoller/orm_test.go index dc187e59de0..34d27d76321 100644 --- a/core/chains/evm/logpoller/orm_test.go +++ b/core/chains/evm/logpoller/orm_test.go @@ -450,7 +450,7 @@ func TestORM(t *testing.T) { require.Zero(t, len(logs)) } -func insertLogsTopicValueRange(t *testing.T, chainID *big.Int, o *logpoller.DbORM, addr common.Address, blockNumber int, eventSig common.Hash, start, stop int) { +func insertLogsTopicValueRange(t *testing.T, chainID *big.Int, o logpoller.ORM, addr common.Address, blockNumber int, eventSig common.Hash, start, stop int) { var lgs []logpoller.Log for i := start; i <= stop; i++ { lgs = append(lgs, logpoller.Log{ diff --git a/core/services/ocr2/plugins/ocr2keeper/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/integration_test.go index c797f80d15d..6df5bb1c00a 100644 --- 
a/core/services/ocr2/plugins/ocr2keeper/integration_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/integration_test.go @@ -1,7 +1,6 @@ package ocr2keeper_test import ( - "context" "crypto/rand" "encoding/hex" "encoding/json" @@ -441,9 +440,7 @@ func setupForwarderForNode( // add forwarder address to be tracked in db forwarderORM := forwarders.NewORM(app.GetSqlxDB()) chainID := ubig.Big(*backend.Blockchain().Config().ChainID) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - _, err = forwarderORM.CreateForwarder(ctx, faddr, chainID) + _, err = forwarderORM.CreateForwarder(testutils.Context(t), faddr, chainID) require.NoError(t, err) chain, err := app.GetRelayers().LegacyEVMChains().Get((*big.Int)(&chainID).String()) diff --git a/core/services/vrf/v2/listener_v2_log_listener_test.go b/core/services/vrf/v2/listener_v2_log_listener_test.go index 9aeaa5a7ed6..1a02ea08c65 100644 --- a/core/services/vrf/v2/listener_v2_log_listener_test.go +++ b/core/services/vrf/v2/listener_v2_log_listener_test.go @@ -43,7 +43,7 @@ var ( type vrfLogPollerListenerTH struct { Lggr logger.Logger ChainID *big.Int - ORM *logpoller.DbORM + ORM logpoller.ORM LogPoller logpoller.LogPollerTest Client *backends.SimulatedBackend Emitter *log_emitter.LogEmitter diff --git a/core/store/migrate/migrate_test.go b/core/store/migrate/migrate_test.go index 70bc651fa0a..286e1b3a295 100644 --- a/core/store/migrate/migrate_test.go +++ b/core/store/migrate/migrate_test.go @@ -469,7 +469,7 @@ func TestDatabaseBackFillWithMigration202(t *testing.T) { name string blockNumber int64 expectedFinalizedBlock int64 - orm *logpoller.DbORM + orm logpoller.ORM }{ { name: "last finalized block not changed if finality is too deep", diff --git a/integration-tests/universal/log_poller/helpers.go b/integration-tests/universal/log_poller/helpers.go index 3a95aeafd19..c7d7adb61a6 100644 --- a/integration-tests/universal/log_poller/helpers.go +++ b/integration-tests/universal/log_poller/helpers.go 
@@ -137,8 +137,8 @@ var registerSingleTopicFilter = func(registry contracts.KeeperRegistry, upkeepID // return nil // } -// NewOrm returns a new logpoller.DbORM instance -func NewOrm(logger core_logger.SugaredLogger, chainID *big.Int, postgresDb *ctf_test_env.PostgresDb) (*logpoller.DbORM, *sqlx.DB, error) { +// NewOrm returns a new logpoller.orm instance +func NewOrm(logger core_logger.SugaredLogger, chainID *big.Int, postgresDb *ctf_test_env.PostgresDb) (logpoller.ORM, *sqlx.DB, error) { dsn := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=disable", "127.0.0.1", postgresDb.ExternalPort, postgresDb.User, postgresDb.Password, postgresDb.DbName) db, err := sqlx.Open("postgres", dsn) if err != nil { From 9a5acacb0b586a56ae2aee2552098bb4fcf010aa Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Fri, 23 Feb 2024 14:03:18 -0500 Subject: [PATCH 34/65] Fix arg name --- core/chains/evm/logpoller/disabled.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/chains/evm/logpoller/disabled.go b/core/chains/evm/logpoller/disabled.go index b05c456f565..b946d85f6dc 100644 --- a/core/chains/evm/logpoller/disabled.go +++ b/core/chains/evm/logpoller/disabled.go @@ -65,7 +65,7 @@ func (disabled) IndexedLogs(ctx context.Context, eventSig common.Hash, address c return nil, ErrDisabled } -func (disabled) IndexedLogsByBlockRange(ctx context.Context, tart, end int64, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash) ([]Log, error) { +func (disabled) IndexedLogsByBlockRange(ctx context.Context, start, end int64, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash) ([]Log, error) { return nil, ErrDisabled } From f61e89a071df56e02f09ceb502c05a7eec78a50a Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 29 Feb 2024 12:51:48 -0500 Subject: [PATCH 35/65] update logpoller --- core/chains/evm/logpoller/helper_test.go | 3 ++- core/chains/evm/logpoller/orm_test.go | 3 ++- 
.../llo/onchain_channel_definition_cache.go | 11 ++++----- ...annel_definition_cache_integration_test.go | 24 +++++++++---------- 4 files changed, 21 insertions(+), 20 deletions(-) diff --git a/core/chains/evm/logpoller/helper_test.go b/core/chains/evm/logpoller/helper_test.go index d21f95cca0d..68a646d644a 100644 --- a/core/chains/evm/logpoller/helper_test.go +++ b/core/chains/evm/logpoller/helper_test.go @@ -3,12 +3,13 @@ package logpoller_test import ( "context" "database/sql" - "github.com/pkg/errors" "math/big" "strings" "testing" "time" + "github.com/pkg/errors" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" diff --git a/core/chains/evm/logpoller/orm_test.go b/core/chains/evm/logpoller/orm_test.go index 1b1c64df8d4..d474cf9d53d 100644 --- a/core/chains/evm/logpoller/orm_test.go +++ b/core/chains/evm/logpoller/orm_test.go @@ -5,12 +5,13 @@ import ( "context" "database/sql" "fmt" - "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" "math" "math/big" "testing" "time" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" + "github.com/ethereum/go-ethereum/common" "github.com/jackc/pgx/v4" "github.com/pkg/errors" diff --git a/core/services/llo/onchain_channel_definition_cache.go b/core/services/llo/onchain_channel_definition_cache.go index af35d237b98..d72079d0b1e 100644 --- a/core/services/llo/onchain_channel_definition_cache.go +++ b/core/services/llo/onchain_channel_definition_cache.go @@ -19,7 +19,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/channel_config_store" "github.com/smartcontractkit/chainlink/v2/core/logger" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" "github.com/smartcontractkit/chainlink/v2/core/utils" ) @@ -89,7 +88,7 @@ func NewChannelDefinitionCache(lggr 
logger.Logger, orm ChannelDefinitionCacheORM func (c *channelDefinitionCache) Start(ctx context.Context) error { // Initial load from DB, then async poll from chain thereafter return c.StartOnce("ChannelDefinitionCache", func() (err error) { - err = c.lp.RegisterFilter(logpoller.Filter{Name: c.filterName, EventSigs: allTopics, Addresses: []common.Address{c.addr}}, pg.WithParentCtx(ctx)) + err = c.lp.RegisterFilter(ctx, logpoller.Filter{Name: c.filterName, EventSigs: allTopics, Addresses: []common.Address{c.addr}}) if err != nil { return err } @@ -140,8 +139,10 @@ func (c *channelDefinitionCache) poll() { func (c *channelDefinitionCache) fetchFromChain() (nLogs int, err error) { // TODO: Pass context + ctx, cancel := services.StopChan(c.chStop).NewCtx() + defer cancel() // https://smartcontract-it.atlassian.net/browse/MERC-3653 - latest, err := c.lp.LatestBlock() + latest, err := c.lp.LatestBlock(ctx) if errors.Is(err, sql.ErrNoRows) { c.lggr.Debug("Logpoller has no logs yet, skipping poll") return 0, nil @@ -156,10 +157,8 @@ func (c *channelDefinitionCache) fetchFromChain() (nLogs int, err error) { return 0, nil } - ctx, cancel := services.StopChan(c.chStop).NewCtx() - defer cancel() // NOTE: We assume that log poller returns logs in ascending order chronologically - logs, err := c.lp.LogsWithSigs(fromBlock, toBlock, allTopics, c.addr, pg.WithParentCtx(ctx)) + logs, err := c.lp.LogsWithSigs(ctx, fromBlock, toBlock, allTopics, c.addr) if err != nil { // TODO: retry? 
// https://smartcontract-it.atlassian.net/browse/MERC-3653 diff --git a/core/services/ocr2/plugins/llo/onchain_channel_definition_cache_integration_test.go b/core/services/ocr2/plugins/llo/onchain_channel_definition_cache_integration_test.go index c24ea46231d..74897b53a8d 100644 --- a/core/services/ocr2/plugins/llo/onchain_channel_definition_cache_integration_test.go +++ b/core/services/ocr2/plugins/llo/onchain_channel_definition_cache_integration_test.go @@ -1,6 +1,7 @@ package llo_test import ( + "context" "math/rand" "testing" "time" @@ -26,7 +27,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/llo" - "github.com/smartcontractkit/chainlink/v2/core/services/pg" ) func Test_ChannelDefinitionCache_Integration(t *testing.T) { @@ -85,7 +85,7 @@ func Test_ChannelDefinitionCache_Integration(t *testing.T) { KeepFinalizedBlocksDepth: 1000, } lp := logpoller.NewLogPoller( - logpoller.NewORM(testutils.SimulatedChainID, db, lggr, pgtest.NewQConfig(true)), ethClient, lggr, lpOpts) + logpoller.NewORM(testutils.SimulatedChainID, db, lggr), ethClient, lggr, lpOpts) servicetest.Run(t, lp) cdc := llo.NewChannelDefinitionCache(lggr, orm, lp, configStoreAddress, 0) @@ -157,11 +157,11 @@ func Test_ChannelDefinitionCache_Integration(t *testing.T) { KeepFinalizedBlocksDepth: 1000, } lp := &mockLogPoller{ - LogPoller: logpoller.NewLogPoller(logpoller.NewORM(testutils.SimulatedChainID, db, lggr, pgtest.NewQConfig(true)), ethClient, lggr, lpOpts), - LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + LogPoller: logpoller.NewLogPoller(logpoller.NewORM(testutils.SimulatedChainID, db, lggr), ethClient, lggr, lpOpts), + LatestBlockFn: func(ctx context.Context) (int64, error) { return 0, nil }, - LogsWithSigsFn: func(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) { + 
LogsWithSigsFn: func(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { return []logpoller.Log{}, nil }, } @@ -198,7 +198,7 @@ func Test_ChannelDefinitionCache_Integration(t *testing.T) { RpcBatchSize: 2, KeepFinalizedBlocksDepth: 1000, } - lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.SimulatedChainID, db, lggr, pgtest.NewQConfig(true)), ethClient, lggr, lpOpts) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.SimulatedChainID, db, lggr), ethClient, lggr, lpOpts) servicetest.Run(t, lp) cdc := llo.NewChannelDefinitionCache(lggr, orm, lp, configStoreAddress, channel2Block.Number().Int64()+1) @@ -222,14 +222,14 @@ func Test_ChannelDefinitionCache_Integration(t *testing.T) { type mockLogPoller struct { logpoller.LogPoller - LatestBlockFn func(qopts ...pg.QOpt) (int64, error) - LogsWithSigsFn func(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) + LatestBlockFn func(ctx context.Context) (int64, error) + LogsWithSigsFn func(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) } -func (p *mockLogPoller) LogsWithSigs(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) { - return p.LogsWithSigsFn(start, end, eventSigs, address, qopts...) +func (p *mockLogPoller) LogsWithSigs(ctx context.Context, start, end int64, eventSigs []common.Hash, address common.Address) ([]logpoller.Log, error) { + return p.LogsWithSigsFn(ctx, start, end, eventSigs, address) } -func (p *mockLogPoller) LatestBlock(qopts ...pg.QOpt) (logpoller.LogPollerBlock, error) { - block, err := p.LatestBlockFn(qopts...) 
+func (p *mockLogPoller) LatestBlock(ctx context.Context) (logpoller.LogPollerBlock, error) { + block, err := p.LatestBlockFn(ctx) return logpoller.LogPollerBlock{BlockNumber: block}, err } From d1f54041a9ac3f97b1085bee7fd0e69bc3f5ba57 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 29 Feb 2024 13:09:52 -0500 Subject: [PATCH 36/65] unexport orm --- core/chains/evm/forwarders/orm.go | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/core/chains/evm/forwarders/orm.go b/core/chains/evm/forwarders/orm.go index a1dc1f6d883..0a8531fc562 100644 --- a/core/chains/evm/forwarders/orm.go +++ b/core/chains/evm/forwarders/orm.go @@ -23,25 +23,25 @@ type ORM interface { FindForwardersInListByChain(ctx context.Context, evmChainId big.Big, addrs []common.Address) ([]Forwarder, error) } -type DbORM struct { +type orm struct { db sqlutil.Queryer } -var _ ORM = &DbORM{} +var _ ORM = &orm{} -func NewORM(db sqlutil.Queryer) *DbORM { - return &DbORM{db: db} +func NewORM(db sqlutil.Queryer) *orm { + return &orm{db: db} } -func (o *DbORM) Transaction(ctx context.Context, fn func(*DbORM) error) (err error) { +func (o *orm) Transaction(ctx context.Context, fn func(*orm) error) (err error) { return sqlutil.Transact(ctx, o.new, o.db, nil, fn) } // new returns a NewORM like o, but backed by q. -func (o *DbORM) new(q sqlutil.Queryer) *DbORM { return NewORM(q) } +func (o *orm) new(q sqlutil.Queryer) *orm { return NewORM(q) } // CreateForwarder creates the Forwarder address associated with the current EVM chain id. 
-func (o *DbORM) CreateForwarder(ctx context.Context, addr common.Address, evmChainId big.Big) (fwd Forwarder, err error) { +func (o *orm) CreateForwarder(ctx context.Context, addr common.Address, evmChainId big.Big) (fwd Forwarder, err error) { sql := `INSERT INTO evm.forwarders (address, evm_chain_id, created_at, updated_at) VALUES ($1, $2, now(), now()) RETURNING *` err = o.db.GetContext(ctx, &fwd, sql, addr, evmChainId) return fwd, err @@ -50,8 +50,8 @@ func (o *DbORM) CreateForwarder(ctx context.Context, addr common.Address, evmCha // DeleteForwarder removes a forwarder address. // If cleanup is non-nil, it can be used to perform any chain- or contract-specific cleanup that need to happen atomically // on forwarder deletion. If cleanup returns an error, forwarder deletion will be aborted. -func (o *DbORM) DeleteForwarder(ctx context.Context, id int64, cleanup func(tx sqlutil.Queryer, evmChainID int64, addr common.Address) error) (err error) { - return o.Transaction(ctx, func(orm *DbORM) error { +func (o *orm) DeleteForwarder(ctx context.Context, id int64, cleanup func(tx sqlutil.Queryer, evmChainID int64, addr common.Address) error) (err error) { + return o.Transaction(ctx, func(orm *orm) error { var dest struct { EvmChainId int64 Address common.Address @@ -82,7 +82,7 @@ func (o *DbORM) DeleteForwarder(ctx context.Context, id int64, cleanup func(tx s } // FindForwarders returns all forwarder addresses from offset up until limit. -func (o *DbORM) FindForwarders(ctx context.Context, offset, limit int) (fwds []Forwarder, count int, err error) { +func (o *orm) FindForwarders(ctx context.Context, offset, limit int) (fwds []Forwarder, count int, err error) { sql := `SELECT count(*) FROM evm.forwarders` if err = o.db.GetContext(ctx, &count, sql); err != nil { return @@ -96,13 +96,13 @@ func (o *DbORM) FindForwarders(ctx context.Context, offset, limit int) (fwds []F } // FindForwardersByChain returns all forwarder addresses for a chain. 
-func (o *DbORM) FindForwardersByChain(ctx context.Context, evmChainId big.Big) (fwds []Forwarder, err error) { +func (o *orm) FindForwardersByChain(ctx context.Context, evmChainId big.Big) (fwds []Forwarder, err error) { sql := `SELECT * FROM evm.forwarders where evm_chain_id = $1 ORDER BY created_at DESC, id DESC` err = o.db.SelectContext(ctx, &fwds, sql, evmChainId) return } -func (o *DbORM) FindForwardersInListByChain(ctx context.Context, evmChainId big.Big, addrs []common.Address) ([]Forwarder, error) { +func (o *orm) FindForwardersInListByChain(ctx context.Context, evmChainId big.Big, addrs []common.Address) ([]Forwarder, error) { var fwdrs []Forwarder arg := map[string]interface{}{ From 8c096daa80cbd092b0d6753dcf5a48d6e09c06de Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 29 Feb 2024 15:10:36 -0500 Subject: [PATCH 37/65] Use query --- core/chains/evm/logpoller/orm.go | 556 ++++++++++++++++-------- core/chains/evm/logpoller/orm_test.go | 2 +- core/chains/evm/logpoller/query.go | 165 +++++++ core/chains/evm/logpoller/query_test.go | 82 ++++ 4 files changed, 622 insertions(+), 183 deletions(-) create mode 100644 core/chains/evm/logpoller/query.go create mode 100644 core/chains/evm/logpoller/query_test.go diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index de10497ab7b..5917b3eef4e 100644 --- a/core/chains/evm/logpoller/orm.go +++ b/core/chains/evm/logpoller/orm.go @@ -4,7 +4,9 @@ import ( "context" "database/sql" "fmt" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" "math/big" + "strings" "time" "github.com/ethereum/go-ethereum/common" @@ -84,10 +86,21 @@ func (o *orm) new(q sqlutil.Queryer) *orm { return NewORM(o.chainID, q, o.lggr). // InsertBlock is idempotent to support replays. 
func (o *orm) InsertBlock(ctx context.Context, blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, finalizedBlock int64) error { - query := `INSERT INTO evm.log_poller_blocks (evm_chain_id, block_hash, block_number, block_timestamp, finalized_block_number, created_at) - VALUES ($1, $2, $3, $4, $5, NOW()) + args, err := newQueryArgs(o.chainID). + withCustomHashArg("block_hash", blockHash). + withCustomArg("block_number", blockNumber). + withCustomArg("block_timestamp", blockTimestamp). + withCustomArg("finalized_block_number", finalizedBlock). + toArgs() + if err != nil { + return err + } + query := `INSERT INTO evm.log_poller_blocks + (evm_chain_id, block_hash, block_number, block_timestamp, finalized_block_number, created_at) + VALUES (:evm_chain_id, :block_hash, :block_number, :block_timestamp, :finalized_block_number, NOW()) ON CONFLICT DO NOTHING` - _, err := o.db.ExecContext(ctx, query, ubig.New(o.chainID), blockHash.Bytes(), blockNumber, blockTimestamp, finalizedBlock) + query, sqlArgs, _ := o.db.BindNamed(query, args) + _, err = o.db.ExecContext(ctx, query, sqlArgs...) 
return err } @@ -98,18 +111,56 @@ func (o *orm) InsertBlock(ctx context.Context, blockHash common.Hash, blockNumbe func (o *orm) InsertFilter(ctx context.Context, filter Filter) (err error) { // '::' has to be escaped in the query string // https://github.com/jmoiron/sqlx/issues/91, https://github.com/jmoiron/sqlx/issues/428 - query := ` + /* + query := ` + INSERT INTO evm.log_poller_filters + (name, evm_chain_id, retention, created_at, address, event) + SELECT * FROM + (SELECT $1, $2 ::NUMERIC, $3 ::BIGINT, NOW()) x, + (SELECT unnest($4 ::BYTEA[]) addr) a, + (SELECT unnest($5 ::BYTEA[]) ev) e + ON CONFLICT (evm.f_log_poller_filter_hash(name, evm_chain_id, address, event, topic2, topic3, topic4)) + DO UPDATE SET retention=$3 ::BIGINT, max_logs_kept=$6 ::NUMERIC, logs_per_block=$7 ::NUMERIC` + */ + + topicArrays := []types.HashArray{filter.Topic2, filter.Topic3, filter.Topic4} + args, err := newQueryArgs(o.chainID). + withCustomArg("name", filter.Name). + withRetention(filter.Retention). + withMaxLogsKept(filter.MaxLogsKept). + withLogsPerBlock(filter.LogsPerBlock). + withAddressArray(filter.Addresses). + withEventSigArray(filter.EventSigs). + withTopicArrays(filter.Topic2, filter.Topic3, filter.Topic4). 
+ toArgs() + if err != nil { + return err + } + // '::' has to be escaped in the query string + // https://github.com/jmoiron/sqlx/issues/91, https://github.com/jmoiron/sqlx/issues/428 + var topicsColumns, topicsSql strings.Builder + for n, topicValues := range topicArrays { + if len(topicValues) != 0 { + topicCol := fmt.Sprintf("topic%d", n+2) + fmt.Fprintf(&topicsColumns, ", %s", topicCol) + fmt.Fprintf(&topicsSql, ",\n(SELECT unnest(:%s ::::BYTEA[]) %s) t%d", topicCol, topicCol, n+2) + } + } + query := fmt.Sprintf(` INSERT INTO evm.log_poller_filters - (name, evm_chain_id, retention, created_at, address, event) + (name, evm_chain_id, retention, max_logs_kept, logs_per_block, created_at, address, event %s) SELECT * FROM - (SELECT $1, $2 ::NUMERIC, $3 ::BIGINT, NOW()) x, - (SELECT unnest($4 ::BYTEA[]) addr) a, - (SELECT unnest($5 ::BYTEA[]) ev) e - ON CONFLICT (evm.f_log_poller_filter_hash(name, evm_chain_id, address, event, topic2, topic3, topic4)) - DO UPDATE SET retention=$3 ::BIGINT, max_logs_kept=$6 ::NUMERIC, logs_per_block=$7 ::NUMERIC` - - _, err = o.db.ExecContext(ctx, query, filter.Name, ubig.New(o.chainID), filter.Retention, - concatBytes(filter.Addresses), concatBytes(filter.EventSigs), filter.MaxLogsKept, filter.LogsPerBlock) + (SELECT :name, :evm_chain_id ::::NUMERIC, :retention ::::BIGINT, :max_logs_kept ::::NUMERIC, :logs_per_block ::::NUMERIC, NOW()) x, + (SELECT unnest(:address_array ::::BYTEA[]) addr) a, + (SELECT unnest(:event_sig_array ::::BYTEA[]) ev) e + %s + ON CONFLICT (evm.f_log_poller_filter_hash(name, evm_chain_id, address, event, topic2, topic3, topic4)) + DO UPDATE SET retention=:retention ::::BIGINT, max_logs_kept=:max_logs_kept ::::NUMERIC, logs_per_block=:logs_per_block ::::NUMERIC`, + topicsColumns.String(), + topicsSql.String()) + + query, sqlArgs, _ := o.db.BindNamed(query, args) + _, err = o.db.ExecContext(ctx, query, sqlArgs...) 
return err } @@ -169,17 +220,23 @@ func (o *orm) SelectLatestBlock(ctx context.Context) (*LogPollerBlock, error) { } func (o *orm) SelectLatestLogByEventSigWithConfs(ctx context.Context, eventSig common.Hash, address common.Address, confs Confirmations) (*Log, error) { + args, err := newQueryArgsForEvent(o.chainID, address, eventSig). + withConfs(confs). + toArgs() + if err != nil { + return nil, err + } query := fmt.Sprintf(` SELECT * FROM evm.logs - WHERE evm_chain_id = $1 - AND event_sig = $2 - AND address = $3 + WHERE evm_chain_id = :evm_chain_id + AND event_sig = :event_sig + AND address = :address AND block_number <= %s - ORDER BY (block_number, log_index) DESC LIMIT 1`, - nestedBlockNumberQuery(confs, ubig.New(o.chainID))) + ORDER BY (block_number, log_index) DESC LIMIT 1`, nestedBlockNumberQuery(confs)) var l Log - if err := o.db.GetContext(ctx, &l, query, ubig.New(o.chainID), eventSig.Bytes(), address); err != nil { + query, sqlArgs, _ := o.db.BindNamed(query, args) + if err := o.db.GetContext(ctx, &l, query, sqlArgs...); err != nil { return nil, err } return &l, nil @@ -187,10 +244,8 @@ func (o *orm) SelectLatestLogByEventSigWithConfs(ctx context.Context, eventSig c // DeleteBlocksBefore delete all blocks before and including end. 
func (o *orm) DeleteBlocksBefore(ctx context.Context, limit int64, end int64) (int64, error) { - var err error - var result sql.Result if limit > 0 { - result, err = o.db.ExecContext(ctx, + result, err := o.db.ExecContext(ctx, `DELETE FROM evm.log_poller_blocks WHERE block_number IN ( SELECT block_number FROM evm.log_poller_blocks @@ -200,14 +255,13 @@ func (o *orm) DeleteBlocksBefore(ctx context.Context, limit int64, end int64) (i ) AND evm_chain_id = $2`, end, ubig.New(o.chainID), limit) - } else { - result, err = o.db.ExecContext(ctx, `DELETE FROM evm.log_poller_blocks WHERE block_number <= $1 AND evm_chain_id = $2`, end, ubig.New(o.chainID)) - } - - rowsAffected, affectedErr := result.RowsAffected() - if affectedErr != nil { - err = errors.Wrap(err, affectedErr.Error()) + rowsAffected, _ := result.RowsAffected() + return rowsAffected, err } + fmt.Println("Deleting all blocks before", end) + result, err := o.db.ExecContext(ctx, `DELETE FROM evm.log_poller_blocks + WHERE block_number <= $1 AND evm_chain_id = $2`, end, ubig.New(o.chainID)) + rowsAffected, _ := result.RowsAffected() return rowsAffected, err } @@ -283,10 +337,7 @@ func (o *orm) DeleteExpiredLogs(ctx context.Context, limit int64) (int64, error) ubig.New(o.chainID)) } - rowsAffected, affectedErr := result.RowsAffected() - if affectedErr != nil { - err = errors.Wrap(err, affectedErr.Error()) - } + rowsAffected, _ := result.RowsAffected() return rowsAffected, err } @@ -367,13 +418,23 @@ func (o *orm) validateLogs(logs []Log) error { } func (o *orm) SelectLogsByBlockRange(ctx context.Context, start, end int64) ([]Log, error) { + args, err := newQueryArgs(o.chainID). + withStartBlock(start). + withEndBlock(end). 
+ toArgs() + if err != nil { + return nil, err + } + + query := `SELECT * FROM evm.logs + WHERE evm_chain_id = :evm_chain_id + AND block_number >= :start_block + AND block_number <= :end_block + ORDER BY (block_number, log_index)` + var logs []Log - err := o.db.SelectContext(ctx, &logs, ` - SELECT * FROM evm.logs - WHERE evm_chain_id = $1 - AND block_number >= $2 - AND block_number <= $3 - ORDER BY (block_number, log_index, created_at)`, ubig.New(o.chainID), start, end) + query, sqlArgs, _ := o.db.BindNamed(query, args) + err = o.db.SelectContext(ctx, &logs, query, sqlArgs...) if err != nil { return nil, err } @@ -382,15 +443,25 @@ func (o *orm) SelectLogsByBlockRange(ctx context.Context, start, end int64) ([]L // SelectLogs finds the logs in a given block range. func (o *orm) SelectLogs(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash) ([]Log, error) { + args, err := newQueryArgsForEvent(o.chainID, address, eventSig). + withStartBlock(start). + withEndBlock(end). + toArgs() + if err != nil { + return nil, err + } + + query := `SELECT * FROM evm.logs + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :event_sig + AND block_number >= :start_block + AND block_number <= :end_block + ORDER BY (block_number, log_index)` + var logs []Log - err := o.db.SelectContext(ctx, &logs, ` - SELECT * FROM evm.logs - WHERE evm_chain_id = $1 - AND address = $2 - AND event_sig = $3 - AND block_number >= $4 - AND block_number <= $5 - ORDER BY (block_number, log_index)`, ubig.New(o.chainID), address, eventSig.Bytes(), start, end) + query, sqlArgs, _ := o.db.BindNamed(query, args) + err = o.db.SelectContext(ctx, &logs, query, sqlArgs...) if err != nil { return nil, err } @@ -399,18 +470,26 @@ func (o *orm) SelectLogs(ctx context.Context, start, end int64, address common.A // SelectLogsCreatedAfter finds logs created after some timestamp. 
func (o *orm) SelectLogsCreatedAfter(ctx context.Context, address common.Address, eventSig common.Hash, after time.Time, confs Confirmations) ([]Log, error) { + args, err := newQueryArgsForEvent(o.chainID, address, eventSig). + withBlockTimestampAfter(after). + withConfs(confs). + toArgs() + if err != nil { + return nil, err + } + query := fmt.Sprintf(` SELECT * FROM evm.logs - WHERE evm_chain_id = $1 - AND address = $2 - AND event_sig = $3 - AND block_timestamp > $4 + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :event_sig + AND block_timestamp > :block_timestamp_after AND block_number <= %s - ORDER BY (block_number, log_index)`, - nestedBlockNumberQuery(confs, ubig.New(o.chainID))) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log - if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), after); err != nil { + query, sqlArgs, _ := o.db.BindNamed(query, args) + if err := o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { return nil, err } return logs, nil @@ -419,13 +498,25 @@ func (o *orm) SelectLogsCreatedAfter(ctx context.Context, address common.Address // SelectLogsWithSigs finds the logs in the given block range with the given event signatures // emitted from the given address. func (o *orm) SelectLogsWithSigs(ctx context.Context, start, end int64, address common.Address, eventSigs []common.Hash) (logs []Log, err error) { - err = o.db.SelectContext(ctx, &logs, ` - SELECT * FROM evm.logs - WHERE evm_chain_id = $1 - AND address = $2 - AND event_sig = ANY($3) - AND block_number BETWEEN $4 AND $5 - ORDER BY (block_number, log_index)`, ubig.New(o.chainID), address, concatBytes(eventSigs), start, end) + args, err := newQueryArgs(o.chainID). + withAddress(address). + withEventSigArray(eventSigs). + withStartBlock(start). + withEndBlock(end). 
+ toArgs() + if err != nil { + return nil, err + } + + query := `SELECT * FROM evm.logs + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = ANY(:event_sig_array) + AND block_number BETWEEN :start_block AND :end_block + ORDER BY (block_number, log_index)` + + query, sqlArgs, _ := o.db.BindNamed(query, args) + err = o.db.SelectContext(ctx, &logs, query, sqlArgs...) if errors.Is(err, sql.ErrNoRows) { return nil, nil } @@ -433,13 +524,23 @@ func (o *orm) SelectLogsWithSigs(ctx context.Context, start, end int64, address } func (o *orm) GetBlocksRange(ctx context.Context, start int64, end int64) ([]LogPollerBlock, error) { + args, err := newQueryArgs(o.chainID). + withStartBlock(start). + withEndBlock(end). + toArgs() + if err != nil { + return nil, err + } + + query := `SELECT * FROM evm.log_poller_blocks + WHERE block_number >= :start_block + AND block_number <= :end_block + AND evm_chain_id = :evm_chain_id + ORDER BY block_number ASC` + var blocks []LogPollerBlock - err := o.db.SelectContext(ctx, &blocks, ` - SELECT * FROM evm.log_poller_blocks - WHERE block_number >= $1 - AND block_number <= $2 - AND evm_chain_id = $3 - ORDER BY block_number ASC`, start, end, ubig.New(o.chainID)) + query, sqlArgs, _ := o.db.BindNamed(query, args) + err = o.db.SelectContext(ctx, &blocks, query, sqlArgs...) if err != nil { return nil, err } @@ -448,21 +549,31 @@ func (o *orm) GetBlocksRange(ctx context.Context, start int64, end int64) ([]Log // SelectLatestLogEventSigsAddrsWithConfs finds the latest log by (address, event) combination that matches a list of Addresses and list of events func (o *orm) SelectLatestLogEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, addresses []common.Address, eventSigs []common.Hash, confs Confirmations) ([]Log, error) { - // TODO: cant convert byteArray!? + args, err := newQueryArgs(o.chainID). + withAddressArray(addresses). + withEventSigArray(eventSigs). + withStartBlock(fromBlock). + withConfs(confs). 
+ toArgs() + if err != nil { + return nil, err + } + query := fmt.Sprintf(` SELECT * FROM evm.logs WHERE (block_number, address, event_sig) IN ( SELECT MAX(block_number), address, event_sig FROM evm.logs - WHERE evm_chain_id = $1 - AND event_sig = ANY($2) - AND address = ANY($3) - AND block_number > $4 + WHERE evm_chain_id = :evm_chain_id + AND event_sig = ANY(:event_sig_array) + AND address = ANY(:address_array) + AND block_number > :start_block AND block_number <= %s GROUP BY event_sig, address ) - ORDER BY block_number ASC`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) + ORDER BY block_number ASC`, nestedBlockNumberQuery(confs)) var logs []Log - if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), concatBytes(eventSigs), concatBytes(addresses), fromBlock); err != nil { + query, sqlArgs, _ := o.db.BindNamed(query, args) + if err := o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { return nil, errors.Wrap(err, "failed to execute query") } return logs, nil @@ -470,130 +581,192 @@ func (o *orm) SelectLatestLogEventSigsAddrsWithConfs(ctx context.Context, fromBl // SelectLatestBlockByEventSigsAddrsWithConfs finds the latest block number that matches a list of Addresses and list of events. It returns 0 if there is no matching block func (o *orm) SelectLatestBlockByEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) (int64, error) { + args, err := newQueryArgs(o.chainID). + withEventSigArray(eventSigs). + withAddressArray(addresses). + withStartBlock(fromBlock). + withConfs(confs). 
+ toArgs() + if err != nil { + return 0, err + } query := fmt.Sprintf(` SELECT COALESCE(MAX(block_number), 0) FROM evm.logs - WHERE evm_chain_id = $1 - AND event_sig = ANY($2) - AND address = ANY($3) - AND block_number > $4 - AND block_number <= %s`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) + WHERE evm_chain_id = :evm_chain_id + AND event_sig = ANY(:event_sig_array) + AND address = ANY(:address_array) + AND block_number > :start_block + AND block_number <= %s`, nestedBlockNumberQuery(confs)) + var blockNumber int64 - if err := o.db.GetContext(ctx, &blockNumber, query, ubig.New(o.chainID), concatBytes(eventSigs), concatBytes(addresses), fromBlock); err != nil { + query, sqlArgs, _ := o.db.BindNamed(query, args) + if err := o.db.GetContext(ctx, &blockNumber, query, sqlArgs...); err != nil { return 0, err } return blockNumber, nil } func (o *orm) SelectLogsDataWordRange(ctx context.Context, address common.Address, eventSig common.Hash, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations) ([]Log, error) { - query := fmt.Sprintf(`SELECT * FROM evm.logs - WHERE evm_chain_id = $1 - AND address = $2 - AND event_sig = $3 - AND substring(data from 32*$4+1 for 32) >= $5 - AND substring(data from 32*$4+1 for 32) <= $6 - AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) + args, err := newQueryArgsForEvent(o.chainID, address, eventSig). + withWordIndex(wordIndex). + withWordValueMin(wordValueMin). + withWordValueMax(wordValueMax). + withConfs(confs). 
+ toArgs() + if err != nil { + return nil, err + } + + query := fmt.Sprintf(`SELECT * FROM evm.logs + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :event_sig + AND substring(data from 32*:word_index+1 for 32) >= :word_value_min + AND substring(data from 32*:word_index+1 for 32) <= :word_value_max + AND block_number <= %s + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) + var logs []Log - if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), wordIndex, wordValueMin.Bytes(), wordValueMax.Bytes()); err != nil { + query, sqlArgs, _ := o.db.BindNamed(query, args) + if err := o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { return nil, err } return logs, nil } func (o *orm) SelectLogsDataWordGreaterThan(ctx context.Context, address common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, confs Confirmations) ([]Log, error) { + args, err := newQueryArgsForEvent(o.chainID, address, eventSig). + withWordIndex(wordIndex). + withWordValueMin(wordValueMin). + withConfs(confs). 
+ toArgs() + if err != nil { + return nil, err + } + query := fmt.Sprintf(` SELECT * FROM evm.logs - WHERE evm_chain_id = $1 - AND address = $2 - AND event_sig = $3 - AND substring(data from 32*$4+1 for 32) >= $5 + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :event_sig + AND substring(data from 32*:word_index+1 for 32) >= :word_value_min AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) + var logs []Log - if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), wordIndex, wordValueMin.Bytes()); err != nil { + query, sqlArgs, _ := o.db.BindNamed(query, args) + if err := o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { return nil, err } return logs, nil } func (o *orm) SelectLogsDataWordBetween(ctx context.Context, address common.Address, eventSig common.Hash, wordIndexMin int, wordIndexMax int, wordValue common.Hash, confs Confirmations) ([]Log, error) { + args, err := newQueryArgsForEvent(o.chainID, address, eventSig). + withWordIndexMin(wordIndexMin). + withWordIndexMax(wordIndexMax). + withWordValue(wordValue). + withConfs(confs). 
+ toArgs() + if err != nil { + return nil, err + } query := fmt.Sprintf(` SELECT * FROM evm.logs - WHERE evm_chain_id = $1 - AND address = $2 - AND event_sig = $3 - AND substring(data from 32*$4+1 for 32) <= $5 - AND substring(data from 32*$6+1 for 32) >= $5 + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :event_sig + AND substring(data from 32*:word_index_min+1 for 32) <= :word_value + AND substring(data from 32*:word_index_max+1 for 32) >= :word_value AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) + var logs []Log - if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), wordIndexMin, wordValue.Bytes(), wordIndexMax); err != nil { + query, sqlArgs, _ := o.db.BindNamed(query, args) + if err := o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { return nil, err } return logs, nil } func (o *orm) SelectIndexedLogsTopicGreaterThan(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValueMin common.Hash, confs Confirmations) ([]Log, error) { - topicIndex, err := UseTopicIndex(topicIndex) + args, err := newQueryArgsForEvent(o.chainID, address, eventSig). + withTopicIndex(topicIndex). + withTopicValueMin(topicValueMin). + withConfs(confs). 
+ toArgs() if err != nil { return nil, err } query := fmt.Sprintf(` SELECT * FROM evm.logs - WHERE evm_chain_id = $1 - AND address = $2 - AND event_sig = $3 - AND topics[$4] >= $5 + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :event_sig + AND topics[:topic_index] >= :topic_value_min AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) + var logs []Log - if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), topicIndex, topicValueMin.Bytes()); err != nil { + query, sqlArgs, _ := o.db.BindNamed(query, args) + if err := o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { return nil, err } return logs, nil } func (o *orm) SelectIndexedLogsTopicRange(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValueMin, topicValueMax common.Hash, confs Confirmations) ([]Log, error) { - topicIndex, err := UseTopicIndex(topicIndex) + args, err := newQueryArgsForEvent(o.chainID, address, eventSig). + withTopicIndex(topicIndex). + withTopicValueMin(topicValueMin). + withTopicValueMax(topicValueMax). + withConfs(confs). 
+ toArgs() if err != nil { return nil, err } query := fmt.Sprintf(` SELECT * FROM evm.logs - WHERE evm_chain_id = $1 - AND address = $2 - AND event_sig = $3 - AND topics[$4] >= $5 - AND topics[$4] <= $6 + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :event_sig + AND topics[:topic_index] >= :topic_value_min + AND topics[:topic_index] <= :topic_value_max AND block_number <= %s - ORDER BY (evm.logs.block_number, evm.logs.log_index)`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) + ORDER BY (evm.logs.block_number, evm.logs.log_index)`, nestedBlockNumberQuery(confs)) var logs []Log - if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), topicIndex, topicValueMin.Bytes(), topicValueMax.Bytes()); err != nil { + query, sqlArgs, _ := o.db.BindNamed(query, args) + if err := o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { return nil, err } return logs, nil } func (o *orm) SelectIndexedLogs(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, confs Confirmations) ([]Log, error) { - topicIndex, err := UseTopicIndex(topicIndex) + args, err := newQueryArgsForEvent(o.chainID, address, eventSig). + withTopicIndex(topicIndex). + withTopicValues(topicValues). + withConfs(confs). 
+ toArgs() if err != nil { return nil, err } query := fmt.Sprintf(` SELECT * FROM evm.logs - WHERE evm_chain_id = $1 - AND address = $2 - AND event_sig = $3 - AND topics[$4] = ANY($5) + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :event_sig + AND topics[:topic_index] = ANY(:topic_values) AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log - if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), topicIndex, concatBytes(topicValues)); err != nil { + query, sqlArgs, _ := o.db.BindNamed(query, args) + if err := o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { return nil, err } return logs, nil @@ -601,22 +774,28 @@ func (o *orm) SelectIndexedLogs(ctx context.Context, address common.Address, eve // SelectIndexedLogsByBlockRange finds the indexed logs in a given block range. func (o *orm) SelectIndexedLogsByBlockRange(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash) ([]Log, error) { - topicIndex, err := UseTopicIndex(topicIndex) + args, err := newQueryArgsForEvent(o.chainID, address, eventSig). + withTopicIndex(topicIndex). + withTopicValues(topicValues). + withStartBlock(start). + withEndBlock(end). 
+ toArgs() if err != nil { return nil, err } + query := `SELECT * FROM evm.logs + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :event_sig + AND topics[:topic_index] = ANY(:topic_values) + AND block_number >= :start_block + AND block_number <= :end_block + ORDER BY (block_number, log_index)` + var logs []Log - err = o.db.SelectContext(ctx, &logs, ` - SELECT * FROM evm.logs - WHERE evm_chain_id = $1 - AND address = $2 - AND event_sig = $3 - AND topics[$4] = ANY($5) - AND block_number >= $6 - AND block_number <= $7 - ORDER BY (block_number, log_index)`, - ubig.New(o.chainID), address, eventSig.Bytes(), topicIndex, concatBytes(topicValues), start, end) + query, sqlArgs, _ := o.db.BindNamed(query, args) + err = o.db.SelectContext(ctx, &logs, query, sqlArgs...) if err != nil { return nil, err } @@ -624,37 +803,54 @@ func (o *orm) SelectIndexedLogsByBlockRange(ctx context.Context, start, end int6 } func (o *orm) SelectIndexedLogsCreatedAfter(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations) ([]Log, error) { - topicIndex, err := UseTopicIndex(topicIndex) + args, err := newQueryArgsForEvent(o.chainID, address, eventSig). + withBlockTimestampAfter(after). + withConfs(confs). + withTopicIndex(topicIndex). + withTopicValues(topicValues). 
+ toArgs() if err != nil { return nil, err } query := fmt.Sprintf(` SELECT * FROM evm.logs - WHERE evm_chain_id = $1 - AND address = $2 - AND event_sig = $3 - AND topics[$4] = ANY($5) - AND block_timestamp > $6 + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :event_sig + AND topics[:topic_index] = ANY(:topic_values) + AND block_timestamp > :block_timestamp_after AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs, ubig.New(o.chainID))) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log - if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, eventSig.Bytes(), topicIndex, concatBytes(topicValues), after); err != nil { + query, sqlArgs, _ := o.db.BindNamed(query, args) + if err := o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { return nil, err } return logs, nil } func (o *orm) SelectIndexedLogsByTxHash(ctx context.Context, address common.Address, eventSig common.Hash, txHash common.Hash) ([]Log, error) { + args, err := newQueryArgs(o.chainID). + withTxHash(txHash). + withAddress(address). + withEventSig(eventSig). + toArgs() + if err != nil { + return nil, err + } + + query := `SELECT * FROM evm.logs + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :event_sig + AND tx_hash = :tx_hash + ORDER BY (block_number, log_index)` + var logs []Log - err := o.db.SelectContext(ctx, &logs, ` - SELECT * FROM evm.logs - WHERE evm_chain_id = $1 - AND address = $2 - AND event_sig = $3 - AND tx_hash = $4 - ORDER BY (block_number, log_index)`, ubig.New(o.chainID), address, eventSig.Bytes(), txHash.Bytes()) + query, sqlArgs, _ := o.db.BindNamed(query, args) + err = o.db.SelectContext(ctx, &logs, query, sqlArgs...) 
if err != nil { return nil, err } @@ -663,54 +859,62 @@ func (o *orm) SelectIndexedLogsByTxHash(ctx context.Context, address common.Addr // SelectIndexedLogsWithSigsExcluding query's for logs that have signature A and exclude logs that have a corresponding signature B, matching is done based on the topic index both logs should be inside the block range and have the minimum number of confirmations func (o *orm) SelectIndexedLogsWithSigsExcluding(ctx context.Context, sigA, sigB common.Hash, topicIndex int, address common.Address, startBlock, endBlock int64, confs Confirmations) ([]Log, error) { - topicIndex, err := UseTopicIndex(topicIndex) + args, err := newQueryArgs(o.chainID). + withAddress(address). + withTopicIndex(topicIndex). + withStartBlock(startBlock). + withEndBlock(endBlock). + withCustomHashArg("sigA", sigA). + withCustomHashArg("sigB", sigB). + withConfs(confs). + toArgs() if err != nil { return nil, err } - nestedQuery := nestedBlockNumberQuery(confs, ubig.New(o.chainID)) + nestedQuery := nestedBlockNumberQuery(confs) query := fmt.Sprintf(` SELECT * FROM evm.logs - WHERE evm_chain_id = $1 - AND address = $2 - AND event_sig = $3 - AND block_number BETWEEN $5 AND $6 + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :sigA + AND block_number BETWEEN :start_block AND :end_block AND block_number <= %s EXCEPT SELECT a.* FROM evm.logs AS a INNER JOIN evm.logs B ON a.evm_chain_id = b.evm_chain_id AND a.address = b.address - AND a.topics[$7] = b.topics[$7] - AND a.event_sig = $3 - AND b.event_sig = $4 - AND b.block_number BETWEEN $5 AND $6 + AND a.topics[:topic_index] = b.topics[:topic_index] + AND a.event_sig = :sigA + AND b.event_sig = :sigB + AND b.block_number BETWEEN :start_block AND :end_block AND b.block_number <= %s ORDER BY block_number,log_index ASC`, nestedQuery, nestedQuery) var logs []Log - if err := o.db.SelectContext(ctx, &logs, query, ubig.New(o.chainID), address, sigA.Bytes(), sigB.Bytes(), startBlock, endBlock, 
topicIndex); err != nil { + query, sqlArgs, _ := o.db.BindNamed(query, args) + if err := o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { return nil, err } return logs, nil } -func nestedBlockNumberQuery(confs Confirmations, chainID *ubig.Big) string { +func nestedBlockNumberQuery(confs Confirmations) string { if confs == Finalized { - return fmt.Sprintf(` + return ` (SELECT finalized_block_number FROM evm.log_poller_blocks - WHERE evm_chain_id = %v - ORDER BY block_number DESC LIMIT 1) `, chainID) + WHERE evm_chain_id = :evm_chain_id + ORDER BY block_number DESC LIMIT 1) ` } // Intentionally wrap with greatest() function and don't return negative block numbers when :confs > :block_number // It doesn't impact logic of the outer query, because block numbers are never less or equal to 0 (guarded by log_poller_blocks_block_number_check) - return fmt.Sprintf(` - (SELECT greatest(block_number - %d, 0) + return ` + (SELECT greatest(block_number - :confs, 0) FROM evm.log_poller_blocks - WHERE evm_chain_id = %v - ORDER BY block_number DESC LIMIT 1) `, confs, chainID) - + WHERE evm_chain_id = :evm_chain_id + ORDER BY block_number DESC LIMIT 1) ` } func UseTopicIndex(index int) (int, error) { @@ -721,15 +925,3 @@ func UseTopicIndex(index int) (int, error) { // Add 1 since postgresql arrays are 1-indexed. 
return index + 1, nil } - -type bytesProducer interface { - Bytes() []byte -} - -func concatBytes[T bytesProducer](byteSlice []T) [][]byte { - var output [][]byte - for _, b := range byteSlice { - output = append(output, b.Bytes()) - } - return output -} diff --git a/core/chains/evm/logpoller/orm_test.go b/core/chains/evm/logpoller/orm_test.go index d474cf9d53d..a106819ebd9 100644 --- a/core/chains/evm/logpoller/orm_test.go +++ b/core/chains/evm/logpoller/orm_test.go @@ -880,7 +880,7 @@ func TestORM_DeleteBlocksBefore(t *testing.T) { require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1235"), 2, time.Now(), 0)) deleted, err := o1.DeleteBlocksBefore(ctx, 1, 0) require.NoError(t, err) - assert.Equal(t, int64(1), deleted) + require.Equal(t, int64(1), deleted) // 1 should be gone. _, err = o1.SelectBlockByNumber(ctx, 1) require.Equal(t, err, sql.ErrNoRows) diff --git a/core/chains/evm/logpoller/query.go b/core/chains/evm/logpoller/query.go new file mode 100644 index 00000000000..9baa00b626a --- /dev/null +++ b/core/chains/evm/logpoller/query.go @@ -0,0 +1,165 @@ +package logpoller + +import ( + "errors" + "fmt" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" + ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" +) + +type bytesProducer interface { + Bytes() []byte +} + +func concatBytes[T bytesProducer](byteSlice []T) [][]byte { + var output [][]byte + for _, b := range byteSlice { + output = append(output, b.Bytes()) + } + return output +} + +// queryArgs is a helper for building the arguments to a postgres query created by DbORM +// Besides the convenience methods, it also keeps track of arguments validation and sanitization. 
+type queryArgs struct { + args map[string]interface{} + err []error +} + +func newQueryArgs(chainId *big.Int) *queryArgs { + return &queryArgs{ + args: map[string]interface{}{ + "evm_chain_id": ubig.New(chainId), + }, + err: []error{}, + } +} + +func newQueryArgsForEvent(chainId *big.Int, address common.Address, eventSig common.Hash) *queryArgs { + return newQueryArgs(chainId). + withAddress(address). + withEventSig(eventSig) +} + +func (q *queryArgs) withEventSig(eventSig common.Hash) *queryArgs { + return q.withCustomHashArg("event_sig", eventSig) +} + +func (q *queryArgs) withEventSigArray(eventSigs []common.Hash) *queryArgs { + return q.withCustomArg("event_sig_array", concatBytes(eventSigs)) +} + +func (q *queryArgs) withTopicArray(topicValues types.HashArray, topicNum uint64) *queryArgs { + return q.withCustomArg(fmt.Sprintf("topic%d", topicNum), concatBytes(topicValues)) +} + +func (q *queryArgs) withTopicArrays(topic2Vals types.HashArray, topic3Vals types.HashArray, topic4Vals types.HashArray) *queryArgs { + return q.withTopicArray(topic2Vals, 2). + withTopicArray(topic3Vals, 3). 
+ withTopicArray(topic4Vals, 4) +} + +func (q *queryArgs) withAddress(address common.Address) *queryArgs { + return q.withCustomArg("address", address) +} + +func (q *queryArgs) withAddressArray(addresses []common.Address) *queryArgs { + return q.withCustomArg("address_array", concatBytes(addresses)) +} + +func (q *queryArgs) withStartBlock(startBlock int64) *queryArgs { + return q.withCustomArg("start_block", startBlock) +} + +func (q *queryArgs) withEndBlock(endBlock int64) *queryArgs { + return q.withCustomArg("end_block", endBlock) +} + +func (q *queryArgs) withWordIndex(wordIndex int) *queryArgs { + return q.withCustomArg("word_index", wordIndex) +} + +func (q *queryArgs) withWordValueMin(wordValueMin common.Hash) *queryArgs { + return q.withCustomHashArg("word_value_min", wordValueMin) +} + +func (q *queryArgs) withWordValueMax(wordValueMax common.Hash) *queryArgs { + return q.withCustomHashArg("word_value_max", wordValueMax) +} + +func (q *queryArgs) withWordIndexMin(wordIndex int) *queryArgs { + return q.withCustomArg("word_index_min", wordIndex) +} + +func (q *queryArgs) withWordIndexMax(wordIndex int) *queryArgs { + return q.withCustomArg("word_index_max", wordIndex) +} + +func (q *queryArgs) withWordValue(wordValue common.Hash) *queryArgs { + return q.withCustomHashArg("word_value", wordValue) +} + +func (q *queryArgs) withConfs(confs Confirmations) *queryArgs { + return q.withCustomArg("confs", confs) +} + +func (q *queryArgs) withTopicIndex(index int) *queryArgs { + // Only topicIndex 1 through 3 is valid. 0 is the event sig and only 4 total topics are allowed + if !(index == 1 || index == 2 || index == 3) { + q.err = append(q.err, fmt.Errorf("invalid index for topic: %d", index)) + } + // Add 1 since postgresql arrays are 1-indexed. 
+ return q.withCustomArg("topic_index", index+1) +} + +func (q *queryArgs) withTopicValueMin(valueMin common.Hash) *queryArgs { + return q.withCustomHashArg("topic_value_min", valueMin) +} + +func (q *queryArgs) withTopicValueMax(valueMax common.Hash) *queryArgs { + return q.withCustomHashArg("topic_value_max", valueMax) +} + +func (q *queryArgs) withTopicValues(values []common.Hash) *queryArgs { + return q.withCustomArg("topic_values", concatBytes(values)) +} + +func (q *queryArgs) withBlockTimestampAfter(after time.Time) *queryArgs { + return q.withCustomArg("block_timestamp_after", after) +} + +func (q *queryArgs) withTxHash(hash common.Hash) *queryArgs { + return q.withCustomHashArg("tx_hash", hash) +} + +func (q *queryArgs) withRetention(retention time.Duration) *queryArgs { + return q.withCustomArg("retention", retention) +} + +func (q *queryArgs) withLogsPerBlock(logsPerBlock uint64) *queryArgs { + return q.withCustomArg("logs_per_block", logsPerBlock) +} + +func (q *queryArgs) withMaxLogsKept(maxLogsKept uint64) *queryArgs { + return q.withCustomArg("max_logs_kept", maxLogsKept) +} + +func (q *queryArgs) withCustomHashArg(name string, arg common.Hash) *queryArgs { + return q.withCustomArg(name, arg.Bytes()) +} + +func (q *queryArgs) withCustomArg(name string, arg any) *queryArgs { + q.args[name] = arg + return q +} + +func (q *queryArgs) toArgs() (map[string]interface{}, error) { + if len(q.err) > 0 { + return nil, errors.Join(q.err...) 
+ } + return q.args, nil +} diff --git a/core/chains/evm/logpoller/query_test.go b/core/chains/evm/logpoller/query_test.go new file mode 100644 index 00000000000..832cbbfcb00 --- /dev/null +++ b/core/chains/evm/logpoller/query_test.go @@ -0,0 +1,82 @@ +package logpoller + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" +) + +func Test_QueryArgs(t *testing.T) { + tests := []struct { + name string + queryArgs *queryArgs + want map[string]interface{} + wantErr bool + }{ + { + name: "valid arguments", + queryArgs: newQueryArgs(big.NewInt(20)).withAddress(utils.ZeroAddress), + want: map[string]interface{}{ + "evm_chain_id": ubig.NewI(20), + "address": utils.ZeroAddress, + }, + }, + { + name: "invalid topic index", + queryArgs: newQueryArgs(big.NewInt(20)).withTopicIndex(0), + wantErr: true, + }, + { + name: "custom argument", + queryArgs: newEmptyArgs().withCustomArg("arg", "value"), + want: map[string]interface{}{ + "arg": "value", + }, + }, + { + name: "hash converted to bytes", + queryArgs: newEmptyArgs().withCustomHashArg("hash", common.Hash{}), + want: map[string]interface{}{ + "hash": make([]byte, 32), + }, + }, + { + name: "hash array converted to bytes array", + queryArgs: newEmptyArgs().withEventSigArray([]common.Hash{{}, {}}), + want: map[string]interface{}{ + "event_sig_array": [][]byte{make([]byte, 32), make([]byte, 32)}, + }, + }, + { + name: "topic index incremented", + queryArgs: newEmptyArgs().withTopicIndex(2), + want: map[string]interface{}{ + "topic_index": 3, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + args, err := tt.queryArgs.toArgs() + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.want, args) + } + }) + } +} + +func newEmptyArgs() *queryArgs 
{ + return &queryArgs{ + args: map[string]interface{}{}, + err: []error{}, + } +} From 985105fde20d694bb86d1786732e312ced8678e4 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 29 Feb 2024 15:15:49 -0500 Subject: [PATCH 38/65] fix tests --- core/chains/evm/logpoller/log_poller_test.go | 2 +- core/chains/evm/logpoller/orm.go | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/core/chains/evm/logpoller/log_poller_test.go b/core/chains/evm/logpoller/log_poller_test.go index 51d55587787..ca08bbc42b1 100644 --- a/core/chains/evm/logpoller/log_poller_test.go +++ b/core/chains/evm/logpoller/log_poller_test.go @@ -1405,7 +1405,7 @@ func TestLogPoller_DBErrorHandling(t *testing.T) { time.Sleep(100 * time.Millisecond) require.NoError(t, lp.Start(ctx)) require.Eventually(t, func() bool { - return observedLogs.Len() >= 3 + return observedLogs.Len() >= 2 }, 2*time.Second, 20*time.Millisecond) err = lp.Close() require.NoError(t, err) diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index 5917b3eef4e..95c9efec281 100644 --- a/core/chains/evm/logpoller/orm.go +++ b/core/chains/evm/logpoller/orm.go @@ -243,8 +243,9 @@ func (o *orm) SelectLatestLogByEventSigWithConfs(ctx context.Context, eventSig c } // DeleteBlocksBefore delete all blocks before and including end. 
-func (o *orm) DeleteBlocksBefore(ctx context.Context, limit int64, end int64) (int64, error) { +func (o *orm) DeleteBlocksBefore(ctx context.Context, end int64, limit int64) (int64, error) { if limit > 0 { + fmt.Println("Deleting all blocks before with limit", end, limit) result, err := o.db.ExecContext(ctx, `DELETE FROM evm.log_poller_blocks WHERE block_number IN ( From c5dd049d603b4ee0858a6cf34b8b7a6c92e34d3c Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 29 Feb 2024 15:28:41 -0500 Subject: [PATCH 39/65] fix imports --- core/chains/evm/logpoller/orm.go | 2 +- core/chains/evm/logpoller/query.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index 95c9efec281..31717db5f00 100644 --- a/core/chains/evm/logpoller/orm.go +++ b/core/chains/evm/logpoller/orm.go @@ -4,7 +4,6 @@ import ( "context" "database/sql" "fmt" - "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" "math/big" "strings" "time" @@ -16,6 +15,7 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" ) diff --git a/core/chains/evm/logpoller/query.go b/core/chains/evm/logpoller/query.go index 9baa00b626a..6aabe59045d 100644 --- a/core/chains/evm/logpoller/query.go +++ b/core/chains/evm/logpoller/query.go @@ -7,6 +7,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" ) From 8f581262278db9428b9a1e6f45cd034a99c81d67 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Mon, 4 Mar 2024 10:21:19 -0500 Subject: [PATCH 40/65] Use pkgerrors --- core/chains/evm/forwarders/orm.go | 10 ++--- core/chains/evm/headtracker/orm.go | 12 ++--- 
core/chains/evm/logpoller/disabled.go | 4 +- core/chains/evm/logpoller/helper_test.go | 4 +- core/chains/evm/logpoller/log_poller.go | 56 ++++++++++++------------ core/chains/evm/logpoller/orm.go | 10 ++--- core/chains/evm/logpoller/orm_test.go | 14 +++--- 7 files changed, 55 insertions(+), 55 deletions(-) diff --git a/core/chains/evm/forwarders/orm.go b/core/chains/evm/forwarders/orm.go index 0a8531fc562..6c860e3e62f 100644 --- a/core/chains/evm/forwarders/orm.go +++ b/core/chains/evm/forwarders/orm.go @@ -8,7 +8,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/jmoiron/sqlx" - "github.com/pkg/errors" + pkgerrors "github.com/pkg/errors" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" ) @@ -70,7 +70,7 @@ func (o *orm) DeleteForwarder(ctx context.Context, id int64, cleanup func(tx sql // If the forwarder wasn't found, we still want to delete the filter. // In that case, the transaction must return nil, even though DeleteForwarder // will return sql.ErrNoRows - if err != nil && !errors.Is(err, sql.ErrNoRows) { + if err != nil && !pkgerrors.Is(err, sql.ErrNoRows) { return err } rowsAffected, err := result.RowsAffected() @@ -119,19 +119,19 @@ func (o *orm) FindForwardersInListByChain(ctx context.Context, evmChainId big.Bi ) if err != nil { - return nil, errors.Wrap(err, "Failed to format query") + return nil, pkgerrors.Wrap(err, "Failed to format query") } query, args, err = sqlx.In(query, args...) if err != nil { - return nil, errors.Wrap(err, "Failed to run sqlx.IN on query") + return nil, pkgerrors.Wrap(err, "Failed to run sqlx.IN on query") } query = o.db.Rebind(query) err = o.db.SelectContext(ctx, &fwdrs, query, args...) 
if err != nil { - return nil, errors.Wrap(err, "Failed to execute query") + return nil, pkgerrors.Wrap(err, "Failed to execute query") } return fwdrs, nil diff --git a/core/chains/evm/headtracker/orm.go b/core/chains/evm/headtracker/orm.go index 7bc630f9d28..6788864b51f 100644 --- a/core/chains/evm/headtracker/orm.go +++ b/core/chains/evm/headtracker/orm.go @@ -6,7 +6,7 @@ import ( "math/big" "github.com/ethereum/go-ethereum/common" - "github.com/pkg/errors" + pkgerrors "github.com/pkg/errors" "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" @@ -49,7 +49,7 @@ func (orm *orm) IdempotentInsertHead(ctx context.Context, head *evmtypes.Head) e $1, $2, $3, $4, $5, $6, $7, $8) ON CONFLICT (evm_chain_id, hash) DO NOTHING` _, err := orm.db.ExecContext(ctx, query, head.Hash, head.Number, head.ParentHash, head.CreatedAt, head.Timestamp, head.L1BlockNumber, orm.chainID, head.BaseFeePerGas) - return errors.Wrap(err, "IdempotentInsertHead failed to insert head") + return pkgerrors.Wrap(err, "IdempotentInsertHead failed to insert head") } func (orm *orm) TrimOldHeads(ctx context.Context, n uint) (err error) { @@ -71,23 +71,23 @@ func (orm *orm) TrimOldHeads(ctx context.Context, n uint) (err error) { func (orm *orm) LatestHead(ctx context.Context) (head *evmtypes.Head, err error) { head = new(evmtypes.Head) err = orm.db.GetContext(ctx, head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC, created_at DESC, id DESC LIMIT 1`, orm.chainID) - if errors.Is(err, sql.ErrNoRows) { + if pkgerrors.Is(err, sql.ErrNoRows) { return nil, nil } - err = errors.Wrap(err, "LatestHead failed") + err = pkgerrors.Wrap(err, "LatestHead failed") return } func (orm *orm) LatestHeads(ctx context.Context, limit uint) (heads []*evmtypes.Head, err error) { err = orm.db.SelectContext(ctx, &heads, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC, created_at DESC, id DESC LIMIT $2`, 
orm.chainID, limit) - err = errors.Wrap(err, "LatestHeads failed") + err = pkgerrors.Wrap(err, "LatestHeads failed") return } func (orm *orm) HeadByHash(ctx context.Context, hash common.Hash) (head *evmtypes.Head, err error) { head = new(evmtypes.Head) err = orm.db.GetContext(ctx, head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 AND hash = $2`, orm.chainID, hash) - if errors.Is(err, sql.ErrNoRows) { + if pkgerrors.Is(err, sql.ErrNoRows) { return nil, nil } return head, err diff --git a/core/chains/evm/logpoller/disabled.go b/core/chains/evm/logpoller/disabled.go index b946d85f6dc..8287aed22a4 100644 --- a/core/chains/evm/logpoller/disabled.go +++ b/core/chains/evm/logpoller/disabled.go @@ -5,11 +5,11 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/pkg/errors" + pkgerrors "github.com/pkg/errors" ) var ( - ErrDisabled = errors.New("log poller disabled") + ErrDisabled = pkgerrors.New("log poller disabled") LogPollerDisabled LogPoller = disabled{} ) diff --git a/core/chains/evm/logpoller/helper_test.go b/core/chains/evm/logpoller/helper_test.go index 68a646d644a..3b2a10df6c8 100644 --- a/core/chains/evm/logpoller/helper_test.go +++ b/core/chains/evm/logpoller/helper_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/pkg/errors" + pkgerrors "github.com/pkg/errors" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -103,7 +103,7 @@ func (th *TestHarness) PollAndSaveLogs(ctx context.Context, currentBlockNumber i func (th *TestHarness) assertDontHave(t *testing.T, start, end int) { for i := start; i < end; i++ { _, err := th.ORM.SelectBlockByNumber(testutils.Context(t), int64(i)) - assert.True(t, errors.Is(err, sql.ErrNoRows)) + assert.True(t, pkgerrors.Is(err, sql.ErrNoRows)) } } diff --git a/core/chains/evm/logpoller/log_poller.go b/core/chains/evm/logpoller/log_poller.go index a8f4cbd4bd4..f4a235b3c70 100644 --- a/core/chains/evm/logpoller/log_poller.go +++ 
b/core/chains/evm/logpoller/log_poller.go @@ -17,7 +17,7 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rpc" - "github.com/pkg/errors" + pkgerrors "github.com/pkg/errors" "golang.org/x/exp/maps" "github.com/smartcontractkit/chainlink-common/pkg/logger" @@ -88,9 +88,9 @@ type Client interface { var ( _ LogPollerTest = &logPoller{} - ErrReplayRequestAborted = errors.New("aborted, replay request cancelled") - ErrReplayInProgress = errors.New("replay request cancelled, but replay is already in progress") - ErrLogPollerShutdown = errors.New("replay aborted due to log poller shutdown") + ErrReplayRequestAborted = pkgerrors.New("aborted, replay request cancelled") + ErrReplayInProgress = pkgerrors.New("replay request cancelled, but replay is already in progress") + ErrLogPollerShutdown = pkgerrors.New("replay aborted due to log poller shutdown") ) type logPoller struct { @@ -238,20 +238,20 @@ func (filter *Filter) Contains(other *Filter) bool { // Warnings/debug information is keyed by filter name. 
func (lp *logPoller) RegisterFilter(ctx context.Context, filter Filter) error { if len(filter.Addresses) == 0 { - return errors.Errorf("at least one address must be specified") + return pkgerrors.Errorf("at least one address must be specified") } if len(filter.EventSigs) == 0 { - return errors.Errorf("at least one event must be specified") + return pkgerrors.Errorf("at least one event must be specified") } for _, eventSig := range filter.EventSigs { if eventSig == [common.HashLength]byte{} { - return errors.Errorf("empty event sig") + return pkgerrors.Errorf("empty event sig") } } for _, addr := range filter.Addresses { if addr == [common.AddressLength]byte{} { - return errors.Errorf("empty address") + return pkgerrors.Errorf("empty address") } } @@ -268,7 +268,7 @@ func (lp *logPoller) RegisterFilter(ctx context.Context, filter Filter) error { } if err := lp.orm.InsertFilter(ctx, filter); err != nil { - return errors.Wrap(err, "error inserting filter") + return pkgerrors.Wrap(err, "error inserting filter") } lp.filters[filter.Name] = filter lp.filterDirty = true @@ -289,7 +289,7 @@ func (lp *logPoller) UnregisterFilter(ctx context.Context, name string) error { } if err := lp.orm.DeleteFilter(ctx, name); err != nil { - return errors.Wrap(err, "error deleting filter") + return pkgerrors.Wrap(err, "error deleting filter") } delete(lp.filters, name) lp.filterDirty = true @@ -364,13 +364,13 @@ func (lp *logPoller) Replay(ctx context.Context, fromBlock int64) error { return err } if fromBlock < 1 || fromBlock > latest.Number { - return errors.Errorf("Invalid replay block number %v, acceptable range [1, %v]", fromBlock, latest.Number) + return pkgerrors.Errorf("Invalid replay block number %v, acceptable range [1, %v]", fromBlock, latest.Number) } // Block until replay notification accepted or cancelled. 
select { case lp.replayStart <- fromBlock: case <-ctx.Done(): - return errors.Wrap(ErrReplayRequestAborted, ctx.Err().Error()) + return pkgerrors.Wrap(ErrReplayRequestAborted, ctx.Err().Error()) } // Block until replay complete or cancelled. select { @@ -435,7 +435,7 @@ func (lp *logPoller) HealthReport() map[string]error { func (lp *logPoller) GetReplayFromBlock(ctx context.Context, requested int64) (int64, error) { lastProcessed, err := lp.orm.SelectLatestBlock(ctx) if err != nil { - if !errors.Is(err, sql.ErrNoRows) { + if !pkgerrors.Is(err, sql.ErrNoRows) { // Real DB error return 0, err } @@ -454,7 +454,7 @@ func (lp *logPoller) loadFilters() error { filters, err := lp.orm.LoadFilters(lp.ctx) if err != nil { - return errors.Wrapf(err, "Failed to load initial filters from db, retrying") + return pkgerrors.Wrapf(err, "Failed to load initial filters from db, retrying") } lp.filters = filters @@ -489,7 +489,7 @@ func (lp *logPoller) run() { var start int64 lastProcessed, err := lp.orm.SelectLatestBlock(lp.ctx) if err != nil { - if !errors.Is(err, sql.ErrNoRows) { + if !pkgerrors.Is(err, sql.ErrNoRows) { // Assume transient db reading issue, retry forever. 
lp.lggr.Errorw("unable to get starting block", "err", err) continue @@ -603,7 +603,7 @@ func (lp *logPoller) BackupPollAndSaveLogs(ctx context.Context) { if lp.backupPollerNextBlock == 0 { lastProcessed, err := lp.orm.SelectLatestBlock(ctx) if err != nil { - if errors.Is(err, sql.ErrNoRows) { + if pkgerrors.Is(err, sql.ErrNoRows) { lp.lggr.Warnw("Backup log poller ran before first successful log poller run, skipping") } else { lp.lggr.Errorw("Backup log poller unable to get starting block", "err", err) @@ -703,7 +703,7 @@ func (lp *logPoller) backfill(ctx context.Context, start, end int64) error { gethLogs, err := lp.ec.FilterLogs(ctx, lp.Filter(big.NewInt(from), big.NewInt(to), nil)) if err != nil { var rpcErr client.JsonError - if errors.As(err, &rpcErr) { + if pkgerrors.As(err, &rpcErr) { if rpcErr.Code != jsonRpcLimitExceeded { lp.lggr.Errorw("Unable to query for logs", "err", err, "from", from, "to", to) return err @@ -756,20 +756,20 @@ func (lp *logPoller) getCurrentBlockMaybeHandleReorg(ctx context.Context, curren // Additional sanity checks, don't necessarily trust the RPC. if currentBlock == nil { lp.lggr.Errorf("Unexpected nil block from RPC", "currentBlockNumber", currentBlockNumber) - return nil, errors.Errorf("Got nil block for %d", currentBlockNumber) + return nil, pkgerrors.Errorf("Got nil block for %d", currentBlockNumber) } if currentBlock.Number != currentBlockNumber { lp.lggr.Warnw("Unable to get currentBlock, rpc returned incorrect block", "currentBlockNumber", currentBlockNumber, "got", currentBlock.Number) - return nil, errors.Errorf("Block mismatch have %d want %d", currentBlock.Number, currentBlockNumber) + return nil, pkgerrors.Errorf("Block mismatch have %d want %d", currentBlock.Number, currentBlockNumber) } } // Does this currentBlock point to the same parent that we have saved? // If not, there was a reorg, so we need to rewind. 
expectedParent, err1 := lp.orm.SelectBlockByNumber(ctx, currentBlockNumber-1) - if err1 != nil && !errors.Is(err1, sql.ErrNoRows) { + if err1 != nil && !pkgerrors.Is(err1, sql.ErrNoRows) { // If err is not a 'no rows' error, assume transient db issue and retry lp.lggr.Warnw("Unable to read latestBlockNumber currentBlock saved", "err", err1, "currentBlockNumber", currentBlockNumber) - return nil, errors.New("Unable to read latestBlockNumber currentBlock saved") + return nil, pkgerrors.New("Unable to read latestBlockNumber currentBlock saved") } // We will not have the previous currentBlock on initial poll. havePreviousBlock := err1 == nil @@ -785,7 +785,7 @@ func (lp *logPoller) getCurrentBlockMaybeHandleReorg(ctx context.Context, curren blockAfterLCA, err2 := lp.findBlockAfterLCA(ctx, currentBlock, expectedParent.FinalizedBlockNumber) if err2 != nil { lp.lggr.Warnw("Unable to find LCA after reorg, retrying", "err", err2) - return nil, errors.New("Unable to find LCA after reorg, retrying") + return nil, pkgerrors.New("Unable to find LCA after reorg, retrying") } lp.lggr.Infow("Reorg detected", "blockAfterLCA", blockAfterLCA.Number, "currentBlockNumber", currentBlockNumber) @@ -968,7 +968,7 @@ func (lp *logPoller) findBlockAfterLCA(ctx context.Context, current *evmtypes.He } } lp.lggr.Criticalw("Reorg greater than finality depth detected", "finalityTag", lp.useFinalityTag, "current", current.Number, "latestFinalized", latestFinalizedBlockNumber) - rerr := errors.New("Reorg greater than finality depth") + rerr := pkgerrors.New("Reorg greater than finality depth") lp.SvcErrBuffer.Append(rerr) return nil, rerr } @@ -1150,7 +1150,7 @@ func (lp *logPoller) GetBlocksRange(ctx context.Context, numbers []uint64) ([]Lo } if len(blocksNotFound) > 0 { - return nil, errors.Errorf("blocks were not found in db or RPC call: %v", blocksNotFound) + return nil, pkgerrors.Errorf("blocks were not found in db or RPC call: %v", blocksNotFound) } return blocks, nil @@ -1222,16 +1222,16 @@ 
func (lp *logPoller) batchFetchBlocks(ctx context.Context, blocksRequested []str block, is := r.Result.(*evmtypes.Head) if !is { - return nil, errors.Errorf("expected result to be a %T, got %T", &evmtypes.Head{}, r.Result) + return nil, pkgerrors.Errorf("expected result to be a %T, got %T", &evmtypes.Head{}, r.Result) } if block == nil { - return nil, errors.New("invariant violation: got nil block") + return nil, pkgerrors.New("invariant violation: got nil block") } if block.Hash == (common.Hash{}) { - return nil, errors.Errorf("missing block hash for block number: %d", block.Number) + return nil, pkgerrors.Errorf("missing block hash for block number: %d", block.Number) } if block.Number < 0 { - return nil, errors.Errorf("expected block number to be >= to 0, got %d", block.Number) + return nil, pkgerrors.Errorf("expected block number to be >= to 0, got %d", block.Number) } blocks = append(blocks, block) } diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index 31717db5f00..8eccab9d886 100644 --- a/core/chains/evm/logpoller/orm.go +++ b/core/chains/evm/logpoller/orm.go @@ -10,7 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/jmoiron/sqlx" - "github.com/pkg/errors" + pkgerrors "github.com/pkg/errors" "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" @@ -397,7 +397,7 @@ func (o *orm) insertLogsWithinTx(ctx context.Context, logs []Log, tx *sqlx.Tx) e ) if err != nil { - if errors.Is(err, context.DeadlineExceeded) && batchInsertSize > 500 { + if pkgerrors.Is(err, context.DeadlineExceeded) && batchInsertSize > 500 { // In case of DB timeouts, try to insert again with a smaller batch upto a limit batchInsertSize /= 2 i -= batchInsertSize // counteract +=batchInsertSize on next loop iteration @@ -412,7 +412,7 @@ func (o *orm) insertLogsWithinTx(ctx context.Context, logs []Log, tx *sqlx.Tx) e func (o *orm) validateLogs(logs []Log) error { for _, log := 
range logs { if o.chainID.Cmp(log.EvmChainId.ToInt()) != 0 { - return errors.Errorf("invalid chainID in log got %v want %v", log.EvmChainId.ToInt(), o.chainID) + return pkgerrors.Errorf("invalid chainID in log got %v want %v", log.EvmChainId.ToInt(), o.chainID) } } return nil @@ -518,7 +518,7 @@ func (o *orm) SelectLogsWithSigs(ctx context.Context, start, end int64, address query, sqlArgs, _ := o.db.BindNamed(query, args) err = o.db.SelectContext(ctx, &logs, query, sqlArgs...) - if errors.Is(err, sql.ErrNoRows) { + if pkgerrors.Is(err, sql.ErrNoRows) { return nil, nil } return logs, err @@ -575,7 +575,7 @@ func (o *orm) SelectLatestLogEventSigsAddrsWithConfs(ctx context.Context, fromBl var logs []Log query, sqlArgs, _ := o.db.BindNamed(query, args) if err := o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { - return nil, errors.Wrap(err, "failed to execute query") + return nil, pkgerrors.Wrap(err, "failed to execute query") } return logs, nil } diff --git a/core/chains/evm/logpoller/orm_test.go b/core/chains/evm/logpoller/orm_test.go index a106819ebd9..8a45ff2f1c5 100644 --- a/core/chains/evm/logpoller/orm_test.go +++ b/core/chains/evm/logpoller/orm_test.go @@ -14,7 +14,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/jackc/pgx/v4" - "github.com/pkg/errors" + pkgerrors "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -216,17 +216,17 @@ func TestORM(t *testing.T) { require.NoError(t, o1.DeleteLogsAndBlocksAfter(ctx, 10)) _, err = o1.SelectBlockByHash(ctx, common.HexToHash("0x1234")) require.Error(t, err) - assert.True(t, errors.Is(err, sql.ErrNoRows)) + assert.True(t, pkgerrors.Is(err, sql.ErrNoRows)) // Delete blocks from another chain. 
require.NoError(t, o2.DeleteLogsAndBlocksAfter(ctx, 11)) _, err = o2.SelectBlockByHash(ctx, common.HexToHash("0x1234")) require.Error(t, err) - assert.True(t, errors.Is(err, sql.ErrNoRows)) + assert.True(t, pkgerrors.Is(err, sql.ErrNoRows)) // Delete blocks after should also delete block 12. _, err = o2.SelectBlockByHash(ctx, common.HexToHash("0x1235")) require.Error(t, err) - assert.True(t, errors.Is(err, sql.ErrNoRows)) + assert.True(t, pkgerrors.Is(err, sql.ErrNoRows)) // Should be able to insert and read back a log. topic := common.HexToHash("0x1599") @@ -350,7 +350,7 @@ func TestORM(t *testing.T) { // With no blocks, should be an error _, err = o1.SelectLatestLogByEventSigWithConfs(ctx, topic, common.HexToAddress("0x1234"), 0) require.Error(t, err) - require.True(t, errors.Is(err, sql.ErrNoRows)) + require.True(t, pkgerrors.Is(err, sql.ErrNoRows)) // With block 10, only 0 confs should work require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1234"), 10, time.Now(), 0)) log, err := o1.SelectLatestLogByEventSigWithConfs(ctx, topic, common.HexToAddress("0x1234"), 0) @@ -358,7 +358,7 @@ func TestORM(t *testing.T) { assert.Equal(t, int64(10), log.BlockNumber) _, err = o1.SelectLatestLogByEventSigWithConfs(ctx, topic, common.HexToAddress("0x1234"), 1) require.Error(t, err) - assert.True(t, errors.Is(err, sql.ErrNoRows)) + assert.True(t, pkgerrors.Is(err, sql.ErrNoRows)) // With block 12, anything <=2 should work require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1234"), 11, time.Now(), 0)) require.NoError(t, o1.InsertBlock(ctx, common.HexToHash("0x1235"), 12, time.Now(), 0)) @@ -370,7 +370,7 @@ func TestORM(t *testing.T) { require.NoError(t, err) _, err = o1.SelectLatestLogByEventSigWithConfs(ctx, topic, common.HexToAddress("0x1234"), 3) require.Error(t, err) - assert.True(t, errors.Is(err, sql.ErrNoRows)) + assert.True(t, pkgerrors.Is(err, sql.ErrNoRows)) // Required for confirmations to work require.NoError(t, o1.InsertBlock(ctx, 
common.HexToHash("0x1234"), 13, time.Now(), 0)) From 9f7653f8097925516a792fefe4dfcd9460d9bf7a Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Mon, 4 Mar 2024 10:26:44 -0500 Subject: [PATCH 41/65] Use registry ctx --- .../plugins/ocr2keeper/evmregistry/v21/registry.go | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go index 100acdc5dd3..d6f1fa1e4af 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go @@ -338,13 +338,11 @@ func (r *EvmRegistry) refreshLogTriggerUpkeepsBatch(logTriggerIDs []*big.Int) er logTriggerHashes = append(logTriggerHashes, common.BigToHash(id)) } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - unpausedLogs, err := r.poller.IndexedLogs(ctx, iregistry21.IKeeperRegistryMasterUpkeepUnpaused{}.Topic(), r.addr, 1, logTriggerHashes, logpoller.Confirmations(r.finalityDepth)) + unpausedLogs, err := r.poller.IndexedLogs(r.ctx, iregistry21.IKeeperRegistryMasterUpkeepUnpaused{}.Topic(), r.addr, 1, logTriggerHashes, logpoller.Confirmations(r.finalityDepth)) if err != nil { return err } - configSetLogs, err := r.poller.IndexedLogs(ctx, iregistry21.IKeeperRegistryMasterUpkeepTriggerConfigSet{}.Topic(), r.addr, 1, logTriggerHashes, logpoller.Confirmations(r.finalityDepth)) + configSetLogs, err := r.poller.IndexedLogs(r.ctx, iregistry21.IKeeperRegistryMasterUpkeepTriggerConfigSet{}.Topic(), r.addr, 1, logTriggerHashes, logpoller.Confirmations(r.finalityDepth)) if err != nil { return err } @@ -422,10 +420,8 @@ func (r *EvmRegistry) pollUpkeepStateLogs() error { } var logs []logpoller.Log - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() if logs, err = r.poller.LogsWithSigs( - ctx, + r.ctx, end.BlockNumber-logEventLookback, end.BlockNumber, 
upkeepStateEvents, @@ -508,9 +504,7 @@ func RegistryUpkeepFilterName(addr common.Address) string { // registerEvents registers upkeep state events from keeper registry on log poller func (r *EvmRegistry) registerEvents(_ uint64, addr common.Address) error { // Add log filters for the log poller so that it can poll and find the logs that we need - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - return r.poller.RegisterFilter(ctx, logpoller.Filter{ + return r.poller.RegisterFilter(r.ctx, logpoller.Filter{ Name: RegistryUpkeepFilterName(addr), EventSigs: upkeepStateEvents, Addresses: []common.Address{addr}, From e70065d5b07d5ba3ceed3af5bfd8e4d1f35194f5 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Mon, 4 Mar 2024 10:40:08 -0500 Subject: [PATCH 42/65] Use context --- core/services/functions/listener.go | 8 +++--- core/services/ocr2/delegate.go | 4 ++- .../ocr2vrf/coordinator/coordinator.go | 3 +-- .../relay/evm/functions/logpoller_wrapper.go | 25 ++++++++----------- .../evm/types/mocks/log_poller_wrapper.go | 22 ++++++++-------- core/services/relay/evm/types/types.go | 2 +- 6 files changed, 30 insertions(+), 34 deletions(-) diff --git a/core/services/functions/listener.go b/core/services/functions/listener.go index f9d74f1bae9..d959e9685c8 100644 --- a/core/services/functions/listener.go +++ b/core/services/functions/listener.go @@ -185,14 +185,14 @@ func NewFunctionsListener( } // Start complies with job.Service -func (l *functionsListener) Start(context.Context) error { +func (l *functionsListener) Start(ctx context.Context) error { return l.StartOnce("FunctionsListener", func() error { l.serviceContext, l.serviceCancel = context.WithCancel(context.Background()) switch l.pluginConfig.ContractVersion { case 1: l.shutdownWaitGroup.Add(1) - go l.processOracleEventsV1() + go l.processOracleEventsV1(ctx) default: return fmt.Errorf("unsupported contract version: %d", l.pluginConfig.ContractVersion) } @@ -221,7 +221,7 @@ func (l 
*functionsListener) Close() error { }) } -func (l *functionsListener) processOracleEventsV1() { +func (l *functionsListener) processOracleEventsV1(ctx context.Context) { defer l.shutdownWaitGroup.Done() freqMillis := l.pluginConfig.ListenerEventsCheckFrequencyMillis if freqMillis == 0 { @@ -235,7 +235,7 @@ func (l *functionsListener) processOracleEventsV1() { case <-l.chStop: return case <-ticker.C: - requests, responses, err := l.logPollerWrapper.LatestEvents() + requests, responses, err := l.logPollerWrapper.LatestEvents(ctx) if err != nil { l.logger.Errorw("error when calling LatestEvents()", "err", err) break diff --git a/core/services/ocr2/delegate.go b/core/services/ocr2/delegate.go index d8bd9f79116..895f7cc2212 100644 --- a/core/services/ocr2/delegate.go +++ b/core/services/ocr2/delegate.go @@ -455,7 +455,7 @@ func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) ([]job.Servi return d.newServicesDKG(lggr, jb, bootstrapPeers, kb, ocrDB, lc, ocrLogger) case types.OCR2VRF: - return d.newServicesOCR2VRF(lggr, jb, bootstrapPeers, kb, ocrDB, lc) + return d.newServicesOCR2VRF(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc) case types.OCR2Keeper: return d.newServicesOCR2Keepers(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc, ocrLogger) @@ -1045,6 +1045,7 @@ func (d *Delegate) newServicesDKG( } func (d *Delegate) newServicesOCR2VRF( + ctx context.Context, lggr logger.SugaredLogger, jb job.Job, bootstrapPeers []commontypes.BootstrapperLocator, @@ -1147,6 +1148,7 @@ func (d *Delegate) newServicesOCR2VRF( } coordinator, err2 := ocr2coordinator.New( + ctx, lggr.Named("OCR2VRFCoordinator"), common.HexToAddress(spec.ContractID), common.HexToAddress(cfg.VRFCoordinatorAddress), diff --git a/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator.go b/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator.go index 8c62872c6c8..e7dd3174413 100644 --- a/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator.go +++ 
b/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator.go @@ -165,6 +165,7 @@ type coordinator struct { // New creates a new CoordinatorInterface implementor. func New( + ctx context.Context, lggr logger.Logger, beaconAddress common.Address, coordinatorAddress common.Address, @@ -182,8 +183,6 @@ func New( // Add log filters for the log poller so that it can poll and find the logs that // we need. - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() err = logPoller.RegisterFilter(ctx, logpoller.Filter{ Name: filterName(beaconAddress, coordinatorAddress, dkgAddress), EventSigs: []common.Hash{ diff --git a/core/services/relay/evm/functions/logpoller_wrapper.go b/core/services/relay/evm/functions/logpoller_wrapper.go index 687d44c3578..98a44754957 100644 --- a/core/services/relay/evm/functions/logpoller_wrapper.go +++ b/core/services/relay/evm/functions/logpoller_wrapper.go @@ -112,7 +112,7 @@ func NewLogPollerWrapper(routerContractAddress common.Address, pluginConfig conf }, nil } -func (l *logPollerWrapper) Start(context.Context) error { +func (l *logPollerWrapper) Start(ctx context.Context) error { return l.StartOnce("LogPollerWrapper", func() error { l.lggr.Infow("starting LogPollerWrapper", "routerContract", l.routerContract.Address().Hex(), "contractVersion", l.pluginConfig.ContractVersion) l.mu.Lock() @@ -121,7 +121,7 @@ func (l *logPollerWrapper) Start(context.Context) error { return errors.New("only contract version 1 is supported") } l.closeWait.Add(1) - go l.checkForRouteUpdates() + go l.checkForRouteUpdates(ctx) return nil }) } @@ -142,7 +142,7 @@ func (l *logPollerWrapper) HealthReport() map[string]error { func (l *logPollerWrapper) Name() string { return l.lggr.Name() } // methods of LogPollerWrapper -func (l *logPollerWrapper) LatestEvents() ([]evmRelayTypes.OracleRequest, []evmRelayTypes.OracleResponse, error) { +func (l *logPollerWrapper) LatestEvents(ctx context.Context) ([]evmRelayTypes.OracleRequest, 
[]evmRelayTypes.OracleResponse, error) { l.mu.Lock() coordinators := []common.Address{} if l.activeCoordinator != (common.Address{}) { @@ -151,7 +151,7 @@ func (l *logPollerWrapper) LatestEvents() ([]evmRelayTypes.OracleRequest, []evmR if l.proposedCoordinator != (common.Address{}) && l.activeCoordinator != l.proposedCoordinator { coordinators = append(coordinators, l.proposedCoordinator) } - latest, err := l.logPoller.LatestBlock(context.Background()) + latest, err := l.logPoller.LatestBlock(ctx) if err != nil { l.mu.Unlock() return nil, nil, err @@ -171,9 +171,6 @@ func (l *logPollerWrapper) LatestEvents() ([]evmRelayTypes.OracleRequest, []evmR return resultsReq, resultsResp, errors.New("no non-zero coordinators to check") } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - for _, coordinator := range coordinators { requestEndBlock := latestBlockNum - l.requestBlockOffset requestLogs, err := l.logPoller.Logs(ctx, startBlockNum, requestEndBlock, functions_coordinator.FunctionsCoordinatorOracleRequest{}.Topic(), coordinator) @@ -332,7 +329,7 @@ func (l *logPollerWrapper) SubscribeToUpdates(subscriberName string, subscriber } } -func (l *logPollerWrapper) checkForRouteUpdates() { +func (l *logPollerWrapper) checkForRouteUpdates(ctx context.Context) { defer l.closeWait.Done() freqSec := l.pluginConfig.ContractUpdateCheckFrequencySec if freqSec == 0 { @@ -349,7 +346,7 @@ func (l *logPollerWrapper) checkForRouteUpdates() { l.lggr.Errorw("LogPollerWrapper: error calling getCurrentCoordinators", "err", err) return } - l.handleRouteUpdate(active, proposed) + l.handleRouteUpdate(ctx, active, proposed) } updateOnce() // update once right away @@ -391,7 +388,7 @@ func (l *logPollerWrapper) getCurrentCoordinators(ctx context.Context) (common.A return activeCoordinator, proposedCoordinator, nil } -func (l *logPollerWrapper) handleRouteUpdate(activeCoordinator common.Address, proposedCoordinator common.Address) { +func (l *logPollerWrapper) 
handleRouteUpdate(ctx context.Context, activeCoordinator common.Address, proposedCoordinator common.Address) { l.mu.Lock() defer l.mu.Unlock() @@ -404,8 +401,8 @@ func (l *logPollerWrapper) handleRouteUpdate(activeCoordinator common.Address, p l.lggr.Debug("LogPollerWrapper: no changes to routes") return } - errActive := l.registerFilters(activeCoordinator) - errProposed := l.registerFilters(proposedCoordinator) + errActive := l.registerFilters(ctx, activeCoordinator) + errProposed := l.registerFilters(ctx, proposedCoordinator) if errActive != nil || errProposed != nil { l.lggr.Errorw("LogPollerWrapper: Failed to register filters", "errorActive", errActive, "errorProposed", errProposed) return @@ -427,12 +424,10 @@ func filterName(addr common.Address) string { return logpoller.FilterName("FunctionsLogPollerWrapper", addr.String()) } -func (l *logPollerWrapper) registerFilters(coordinatorAddress common.Address) error { +func (l *logPollerWrapper) registerFilters(ctx context.Context, coordinatorAddress common.Address) error { if (coordinatorAddress == common.Address{}) { return nil } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() return l.logPoller.RegisterFilter( ctx, logpoller.Filter{ diff --git a/core/services/relay/evm/types/mocks/log_poller_wrapper.go b/core/services/relay/evm/types/mocks/log_poller_wrapper.go index 675cf317b14..fea333ca9f5 100644 --- a/core/services/relay/evm/types/mocks/log_poller_wrapper.go +++ b/core/services/relay/evm/types/mocks/log_poller_wrapper.go @@ -52,9 +52,9 @@ func (_m *LogPollerWrapper) HealthReport() map[string]error { return r0 } -// LatestEvents provides a mock function with given fields: -func (_m *LogPollerWrapper) LatestEvents() ([]types.OracleRequest, []types.OracleResponse, error) { - ret := _m.Called() +// LatestEvents provides a mock function with given fields: ctx +func (_m *LogPollerWrapper) LatestEvents(ctx context.Context) ([]types.OracleRequest, []types.OracleResponse, error) { + ret := 
_m.Called(ctx) if len(ret) == 0 { panic("no return value specified for LatestEvents") @@ -63,27 +63,27 @@ func (_m *LogPollerWrapper) LatestEvents() ([]types.OracleRequest, []types.Oracl var r0 []types.OracleRequest var r1 []types.OracleResponse var r2 error - if rf, ok := ret.Get(0).(func() ([]types.OracleRequest, []types.OracleResponse, error)); ok { - return rf() + if rf, ok := ret.Get(0).(func(context.Context) ([]types.OracleRequest, []types.OracleResponse, error)); ok { + return rf(ctx) } - if rf, ok := ret.Get(0).(func() []types.OracleRequest); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) []types.OracleRequest); ok { + r0 = rf(ctx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]types.OracleRequest) } } - if rf, ok := ret.Get(1).(func() []types.OracleResponse); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) []types.OracleResponse); ok { + r1 = rf(ctx) } else { if ret.Get(1) != nil { r1 = ret.Get(1).([]types.OracleResponse) } } - if rf, ok := ret.Get(2).(func() error); ok { - r2 = rf() + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) } else { r2 = ret.Error(2) } diff --git a/core/services/relay/evm/types/types.go b/core/services/relay/evm/types/types.go index 0697605edab..f794f4a0016 100644 --- a/core/services/relay/evm/types/types.go +++ b/core/services/relay/evm/types/types.go @@ -191,7 +191,7 @@ type RouteUpdateSubscriber interface { //go:generate mockery --quiet --name LogPollerWrapper --output ./mocks/ --case=underscore type LogPollerWrapper interface { services.Service - LatestEvents() ([]OracleRequest, []OracleResponse, error) + LatestEvents(ctx context.Context) ([]OracleRequest, []OracleResponse, error) // TODO (FUN-668): Remove from the LOOP interface and only use internally within the EVM relayer SubscribeToUpdates(name string, subscriber RouteUpdateSubscriber) From d3e3c4fed68ebdb5c53f1a34139665b04b337e5e Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Mon, 4 Mar 2024 
10:50:33 -0500 Subject: [PATCH 43/65] Use ctx --- .../evm/logpoller/log_poller_internal_test.go | 24 ++++++++++--------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/core/chains/evm/logpoller/log_poller_internal_test.go b/core/chains/evm/logpoller/log_poller_internal_test.go index 3e6b07cb88e..7ad48b6a349 100644 --- a/core/chains/evm/logpoller/log_poller_internal_test.go +++ b/core/chains/evm/logpoller/log_poller_internal_test.go @@ -107,7 +107,7 @@ func TestLogPoller_RegisterFilter(t *testing.T) { validateFiltersTable(t, lp, orm) // Removing non-existence Filter should log error but return nil - err = lp.UnregisterFilter(testutils.Context(t), "Filter doesn't exist") + err = lp.UnregisterFilter(ctx, "Filter doesn't exist") require.NoError(t, err) require.Equal(t, observedLogs.Len(), 1) require.Contains(t, observedLogs.TakeAll()[0].Entry.Message, "not found") @@ -121,19 +121,19 @@ func TestLogPoller_RegisterFilter(t *testing.T) { require.True(t, ok, "'Emitter Log 1 + 2 dupe' Filter missing") // Removing an existing Filter should remove it from both memory and db - err = lp.UnregisterFilter(testutils.Context(t), "Emitter Log 1 + 2") + err = lp.UnregisterFilter(ctx, "Emitter Log 1 + 2") require.NoError(t, err) _, ok = lp.filters["Emitter Log 1 + 2"] require.False(t, ok, "'Emitter Log 1 Filter' should have been removed by UnregisterFilter()") require.Len(t, lp.filters, 2) validateFiltersTable(t, lp, orm) - err = lp.UnregisterFilter(testutils.Context(t), "Emitter Log 1 + 2 dupe") + err = lp.UnregisterFilter(ctx, "Emitter Log 1 + 2 dupe") require.NoError(t, err) - err = lp.UnregisterFilter(testutils.Context(t), "Emitter Log 1") + err = lp.UnregisterFilter(ctx, "Emitter Log 1") require.NoError(t, err) assert.Len(t, lp.filters, 0) - filters, err := lp.orm.LoadFilters(testutils.Context(t)) + filters, err := lp.orm.LoadFilters(ctx) require.NoError(t, err) assert.Len(t, filters, 0) @@ -254,6 +254,7 @@ func TestLogPoller_Replay(t *testing.T) { chainID := 
testutils.FixtureChainID db := pgtest.NewSqlxDB(t) orm := NewORM(chainID, db, lggr) + ctx := testutils.Context(t) head := evmtypes.Head{Number: 4} events := []common.Hash{EmitterABI.Events["Log1"].ID} @@ -282,8 +283,8 @@ func TestLogPoller_Replay(t *testing.T) { lp := NewLogPoller(orm, ec, lggr, lpOpts) // process 1 log in block 3 - lp.PollAndSaveLogs(testutils.Context(t), 4) - latest, err := lp.LatestBlock(testutils.Context(t)) + lp.PollAndSaveLogs(ctx, 4) + latest, err := lp.LatestBlock(ctx) require.NoError(t, err) require.Equal(t, int64(4), latest.BlockNumber) @@ -460,6 +461,7 @@ func Test_latestBlockAndFinalityDepth(t *testing.T) { chainID := testutils.FixtureChainID db := pgtest.NewSqlxDB(t) orm := NewORM(chainID, db, lggr) + ctx := testutils.Context(t) lpOpts := Opts{ PollPeriod: time.Hour, @@ -477,7 +479,7 @@ func Test_latestBlockAndFinalityDepth(t *testing.T) { ec.On("HeadByNumber", mock.Anything, mock.Anything).Return(&head, nil) lp := NewLogPoller(orm, ec, lggr, lpOpts) - latestBlock, lastFinalizedBlockNumber, err := lp.latestBlocks(testutils.Context(t)) + latestBlock, lastFinalizedBlockNumber, err := lp.latestBlocks(ctx) require.NoError(t, err) require.Equal(t, latestBlock.Number, head.Number) require.Equal(t, lpOpts.FinalityDepth, latestBlock.Number-lastFinalizedBlockNumber) @@ -503,7 +505,7 @@ func Test_latestBlockAndFinalityDepth(t *testing.T) { lpOpts.UseFinalityTag = true lp := NewLogPoller(orm, ec, lggr, lpOpts) - latestBlock, lastFinalizedBlockNumber, err := lp.latestBlocks(testutils.Context(t)) + latestBlock, lastFinalizedBlockNumber, err := lp.latestBlocks(ctx) require.NoError(t, err) require.Equal(t, expectedLatestBlockNumber, latestBlock.Number) require.Equal(t, expectedLastFinalizedBlockNumber, lastFinalizedBlockNumber) @@ -521,7 +523,7 @@ func Test_latestBlockAndFinalityDepth(t *testing.T) { lpOpts.UseFinalityTag = true lp := NewLogPoller(orm, ec, lggr, lpOpts) - _, _, err := lp.latestBlocks(testutils.Context(t)) + _, _, err := 
lp.latestBlocks(ctx) require.Error(t, err) }) @@ -530,7 +532,7 @@ func Test_latestBlockAndFinalityDepth(t *testing.T) { ec.On("BatchCallContext", mock.Anything, mock.Anything).Return(fmt.Errorf("some error")) lpOpts.UseFinalityTag = true lp := NewLogPoller(orm, ec, lggr, lpOpts) - _, _, err := lp.latestBlocks(testutils.Context(t)) + _, _, err := lp.latestBlocks(ctx) require.Error(t, err) }) }) From b3f02dd343d0018da76a689bc670e7be5c29a184 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Mon, 4 Mar 2024 10:52:22 -0500 Subject: [PATCH 44/65] Use ctx --- core/chains/evm/logpoller/log_poller_test.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/core/chains/evm/logpoller/log_poller_test.go b/core/chains/evm/logpoller/log_poller_test.go index ca08bbc42b1..9ee7cfa85cd 100644 --- a/core/chains/evm/logpoller/log_poller_test.go +++ b/core/chains/evm/logpoller/log_poller_test.go @@ -1482,7 +1482,7 @@ func TestTooManyLogResults(t *testing.T) { }) require.NoError(t, err) lp.PollAndSaveLogs(ctx, 5) - block, err2 := o.SelectLatestBlock(testutils.Context(t)) + block, err2 := o.SelectLatestBlock(ctx) require.NoError(t, err2) assert.Equal(t, int64(298), block.BlockNumber) @@ -1513,7 +1513,7 @@ func TestTooManyLogResults(t *testing.T) { }) lp.PollAndSaveLogs(ctx, 298) - block, err2 = o.SelectLatestBlock(testutils.Context(t)) + block, err2 = o.SelectLatestBlock(ctx) require.NoError(t, err2) assert.Equal(t, int64(298), block.BlockNumber) warns := obs.FilterMessageSnippet("halving block range").FilterLevelExact(zapcore.WarnLevel).All() @@ -1542,7 +1542,7 @@ func Test_PollAndQueryFinalizedBlocks(t *testing.T) { th := SetupTH(t, lpOpts) eventSig := EmitterABI.Events["Log1"].ID - err := th.LogPoller.RegisterFilter(testutils.Context(t), logpoller.Filter{ + err := th.LogPoller.RegisterFilter(ctx, logpoller.Filter{ Name: "GetBlocks Test", EventSigs: []common.Hash{eventSig}, Addresses: []common.Address{th.EmitterAddress1}}, @@ -1571,7 +1571,7 @@ func 
Test_PollAndQueryFinalizedBlocks(t *testing.T) { require.Equal(t, int(currentBlock), firstBatchLen+secondBatchLen+2) finalizedLogs, err := th.LogPoller.LogsDataWordGreaterThan( - testutils.Context(t), + ctx, eventSig, th.EmitterAddress1, 0, @@ -1583,7 +1583,7 @@ func Test_PollAndQueryFinalizedBlocks(t *testing.T) { numberOfConfirmations := 1 logsByConfs, err := th.LogPoller.LogsDataWordGreaterThan( - testutils.Context(t), + ctx, eventSig, th.EmitterAddress1, 0, @@ -1634,7 +1634,7 @@ func Test_PollAndSavePersistsFinalityInBlocks(t *testing.T) { } th := SetupTH(t, lpOpts) // Should return error before the first poll and save - _, err := th.LogPoller.LatestBlock(testutils.Context(t)) + _, err := th.LogPoller.LatestBlock(ctx) require.Error(t, err) // Mark first block as finalized @@ -1648,7 +1648,7 @@ func Test_PollAndSavePersistsFinalityInBlocks(t *testing.T) { th.PollAndSaveLogs(ctx, 1) - latestBlock, err := th.LogPoller.LatestBlock(testutils.Context(t)) + latestBlock, err := th.LogPoller.LatestBlock(ctx) require.NoError(t, err) require.Equal(t, int64(numberOfBlocks), latestBlock.BlockNumber) require.Equal(t, tt.expectedFinalizedBlock, latestBlock.FinalizedBlockNumber) @@ -1787,7 +1787,7 @@ func Test_PruneOldBlocks(t *testing.T) { th := SetupTH(t, lpOpts) for i := 1; i <= tt.blockToCreate; i++ { - err := th.ORM.InsertBlock(testutils.Context(t), utils.RandomBytes32(), int64(i+10), time.Now(), int64(i)) + err := th.ORM.InsertBlock(ctx, utils.RandomBytes32(), int64(i+10), time.Now(), int64(i)) require.NoError(t, err) } From 596f9b4fd3011314cdcbb59eb634c82781ba9cee Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Mon, 4 Mar 2024 10:58:11 -0500 Subject: [PATCH 45/65] Update orm.go --- core/chains/evm/logpoller/orm.go | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index 8eccab9d886..0938d78e4cc 100644 --- a/core/chains/evm/logpoller/orm.go +++ 
b/core/chains/evm/logpoller/orm.go @@ -111,18 +111,6 @@ func (o *orm) InsertBlock(ctx context.Context, blockHash common.Hash, blockNumbe func (o *orm) InsertFilter(ctx context.Context, filter Filter) (err error) { // '::' has to be escaped in the query string // https://github.com/jmoiron/sqlx/issues/91, https://github.com/jmoiron/sqlx/issues/428 - /* - query := ` - INSERT INTO evm.log_poller_filters - (name, evm_chain_id, retention, created_at, address, event) - SELECT * FROM - (SELECT $1, $2 ::NUMERIC, $3 ::BIGINT, NOW()) x, - (SELECT unnest($4 ::BYTEA[]) addr) a, - (SELECT unnest($5 ::BYTEA[]) ev) e - ON CONFLICT (evm.f_log_poller_filter_hash(name, evm_chain_id, address, event, topic2, topic3, topic4)) - DO UPDATE SET retention=$3 ::BIGINT, max_logs_kept=$6 ::NUMERIC, logs_per_block=$7 ::NUMERIC` - */ - topicArrays := []types.HashArray{filter.Topic2, filter.Topic3, filter.Topic4} args, err := newQueryArgs(o.chainID). withCustomArg("name", filter.Name). @@ -334,7 +322,7 @@ func (o *orm) DeleteExpiredLogs(ctx context.Context, limit int64) (int64, error) GROUP BY evm_chain_id,address, event HAVING NOT 0 = ANY(ARRAY_AGG(retention)) ) DELETE FROM evm.logs l USING r WHERE l.evm_chain_id = $1 AND l.address=r.address AND l.event_sig=r.event - AND l.created_at <= STATEMENT_TIMESTAMP() - (r.retention / 10^9 * interval '1 second')`, // retention is in nanoseconds (time.Duration aka BIGINT) + AND l.block_timestamp <= STATEMENT_TIMESTAMP() - (r.retention / 10^9 * interval '1 second')`, // retention is in nanoseconds (time.Duration aka BIGINT) ubig.New(o.chainID)) } @@ -371,7 +359,7 @@ func (o *orm) InsertLogsWithBlock(ctx context.Context, logs []Log, block LogPoll }) } -func (o *orm) insertBlockWithinTx(ctx context.Context, tx *sqlx.Tx, blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, finalizedBlock int64) error { +func (o *orm) insertBlockWithinTx(ctx context.Context, tx sqlutil.Queryer, blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, 
finalizedBlock int64) error { query := `INSERT INTO evm.log_poller_blocks (evm_chain_id, block_hash, block_number, block_timestamp, finalized_block_number, created_at) VALUES ($1, $2, $3, $4, $5, NOW()) ON CONFLICT DO NOTHING` From 271b8920e1c2440c0ce3966b27e7ca55aab8e9c5 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Mon, 4 Mar 2024 11:12:07 -0500 Subject: [PATCH 46/65] Use context --- core/chains/evm/logpoller/orm.go | 18 ++++++++---------- integration-tests/smoke/log_poller_test.go | 13 +++++++++---- .../universal/log_poller/helpers.go | 10 +++++----- 3 files changed, 22 insertions(+), 19 deletions(-) diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index 0938d78e4cc..62a002cae18 100644 --- a/core/chains/evm/logpoller/orm.go +++ b/core/chains/evm/logpoller/orm.go @@ -109,8 +109,6 @@ func (o *orm) InsertBlock(ctx context.Context, blockHash common.Hash, blockNumbe // Each address/event pair must have a unique job id, so it may be removed when the job is deleted. // If a second job tries to overwrite the same pair, this should fail. func (o *orm) InsertFilter(ctx context.Context, filter Filter) (err error) { - // '::' has to be escaped in the query string - // https://github.com/jmoiron/sqlx/issues/91, https://github.com/jmoiron/sqlx/issues/428 topicArrays := []types.HashArray{filter.Topic2, filter.Topic3, filter.Topic4} args, err := newQueryArgs(o.chainID). withCustomArg("name", filter.Name). 
@@ -124,8 +122,6 @@ func (o *orm) InsertFilter(ctx context.Context, filter Filter) (err error) { if err != nil { return err } - // '::' has to be escaped in the query string - // https://github.com/jmoiron/sqlx/issues/91, https://github.com/jmoiron/sqlx/issues/428 var topicsColumns, topicsSql strings.Builder for n, topicValues := range topicArrays { if len(topicValues) != 0 { @@ -134,6 +130,8 @@ func (o *orm) InsertFilter(ctx context.Context, filter Filter) (err error) { fmt.Fprintf(&topicsSql, ",\n(SELECT unnest(:%s ::::BYTEA[]) %s) t%d", topicCol, topicCol, n+2) } } + // '::' has to be escaped in the query string + // https://github.com/jmoiron/sqlx/issues/91, https://github.com/jmoiron/sqlx/issues/428 query := fmt.Sprintf(` INSERT INTO evm.log_poller_filters (name, evm_chain_id, retention, max_logs_kept, logs_per_block, created_at, address, event %s) @@ -367,7 +365,7 @@ func (o *orm) insertBlockWithinTx(ctx context.Context, tx sqlutil.Queryer, block return err } -func (o *orm) insertLogsWithinTx(ctx context.Context, logs []Log, tx *sqlx.Tx) error { +func (o *orm) insertLogsWithinTx(ctx context.Context, logs []Log, tx sqlutil.Queryer) error { batchInsertSize := 4000 for i := 0; i < len(logs); i += batchInsertSize { start, end := i, i+batchInsertSize @@ -375,14 +373,14 @@ func (o *orm) insertLogsWithinTx(ctx context.Context, logs []Log, tx *sqlx.Tx) e end = len(logs) } - _, err := tx.NamedExecContext(ctx, ` - INSERT INTO evm.logs + query := `INSERT INTO evm.logs (evm_chain_id, log_index, block_hash, block_number, block_timestamp, address, event_sig, topics, tx_hash, data, created_at) VALUES (:evm_chain_id, :log_index, :block_hash, :block_number, :block_timestamp, :address, :event_sig, :topics, :tx_hash, :data, NOW()) - ON CONFLICT DO NOTHING`, - logs[start:end], - ) + ON CONFLICT DO NOTHING` + + query, sqlArgs, _ := o.db.BindNamed(query, logs[start:end]) + _, err := tx.ExecContext(ctx, query, sqlArgs...) 
if err != nil { if pkgerrors.Is(err, context.DeadlineExceeded) && batchInsertSize > 500 { diff --git a/integration-tests/smoke/log_poller_test.go b/integration-tests/smoke/log_poller_test.go index 4b4533d3a37..3b659ba0c36 100644 --- a/integration-tests/smoke/log_poller_test.go +++ b/integration-tests/smoke/log_poller_test.go @@ -1,6 +1,7 @@ package smoke import ( + "context" "fmt" "math/big" "testing" @@ -97,6 +98,8 @@ func executeBasicLogPollerTest(t *testing.T) { lpTestEnv := prepareEnvironment(l, t, &testConfig) testEnv := lpTestEnv.testEnv + ctx := context.Background() + // Register log triggered upkeep for each combination of log emitter contract and event signature (topic) // We need to register a separate upkeep for each event signature, because log trigger doesn't support multiple topics (even if log poller does) err = logpoller.RegisterFiltersAndAssertUniquness(l, lpTestEnv.registry, lpTestEnv.upkeepIDs, lpTestEnv.logEmitters, cfg, lpTestEnv.upKeepsNeeded) @@ -108,7 +111,7 @@ func executeBasicLogPollerTest(t *testing.T) { require.NoError(t, err, "Error encountered when waiting for setting trigger config for upkeeps") expectedFilters := logpoller.GetExpectedFilters(lpTestEnv.logEmitters, cfg) - waitForAllNodesToHaveExpectedFiltersRegisteredOrFail(l, coreLogger, t, testEnv, expectedFilters) + waitForAllNodesToHaveExpectedFiltersRegisteredOrFail(ctx, l, coreLogger, t, testEnv, expectedFilters) // Save block number before starting to emit events, so that we can later use it when querying logs sb, err := testEnv.EVMClient.LatestBlockNumber(testcontext.Get(t)) @@ -176,6 +179,8 @@ func executeLogPollerReplay(t *testing.T, consistencyTimeout string) { lpTestEnv := prepareEnvironment(l, t, &testConfig) testEnv := lpTestEnv.testEnv + ctx := context.Background() + // Save block number before starting to emit events, so that we can later use it when querying logs sb, err := testEnv.EVMClient.LatestBlockNumber(testcontext.Get(t)) require.NoError(t, err, "Error getting 
latest block number") @@ -213,7 +218,7 @@ func executeLogPollerReplay(t *testing.T, consistencyTimeout string) { err = testEnv.EVMClient.WaitForEvents() require.NoError(t, err, "Error encountered when waiting for setting trigger config for upkeeps") - waitForAllNodesToHaveExpectedFiltersRegisteredOrFail(l, coreLogger, t, testEnv, expectedFilters) + waitForAllNodesToHaveExpectedFiltersRegisteredOrFail(ctx, l, coreLogger, t, testEnv, expectedFilters) blockFinalisationWaitDuration := "5m" l.Warn().Str("Duration", blockFinalisationWaitDuration).Msg("Waiting for all CL nodes to have end block finalised") @@ -317,7 +322,7 @@ func prepareEnvironment(l zerolog.Logger, t *testing.T, testConfig *tc.TestConfi } // waitForAllNodesToHaveExpectedFiltersRegisteredOrFail waits until all nodes have expected filters registered until timeout -func waitForAllNodesToHaveExpectedFiltersRegisteredOrFail(l zerolog.Logger, coreLogger core_logger.SugaredLogger, t *testing.T, testEnv *test_env.CLClusterTestEnv, expectedFilters []logpoller.ExpectedFilter) { +func waitForAllNodesToHaveExpectedFiltersRegisteredOrFail(ctx context.Context, l zerolog.Logger, coreLogger core_logger.SugaredLogger, t *testing.T, testEnv *test_env.CLClusterTestEnv, expectedFilters []logpoller.ExpectedFilter) { // Make sure that all nodes have expected filters registered before starting to emit events gom := gomega.NewGomegaWithT(t) gom.Eventually(func(g gomega.Gomega) { @@ -330,7 +335,7 @@ func waitForAllNodesToHaveExpectedFiltersRegisteredOrFail(l zerolog.Logger, core var message string var err error - hasFilters, message, err = logpoller.NodeHasExpectedFilters(expectedFilters, coreLogger, testEnv.EVMClient.GetChainID(), testEnv.ClCluster.Nodes[i].PostgresDb) + hasFilters, message, err = logpoller.NodeHasExpectedFilters(ctx, expectedFilters, coreLogger, testEnv.EVMClient.GetChainID(), testEnv.ClCluster.Nodes[i].PostgresDb) if !hasFilters || err != nil { l.Warn(). Str("Details", message). 
diff --git a/integration-tests/universal/log_poller/helpers.go b/integration-tests/universal/log_poller/helpers.go index 99905541f99..8752e344330 100644 --- a/integration-tests/universal/log_poller/helpers.go +++ b/integration-tests/universal/log_poller/helpers.go @@ -170,14 +170,14 @@ func GetExpectedFilters(logEmitters []*contracts.LogEmitter, cfg *lp_config.Conf } // NodeHasExpectedFilters returns true if the provided node has all the expected filters registered -func NodeHasExpectedFilters(expectedFilters []ExpectedFilter, logger core_logger.SugaredLogger, chainID *big.Int, postgresDb *ctf_test_env.PostgresDb) (bool, string, error) { +func NodeHasExpectedFilters(ctx context.Context, expectedFilters []ExpectedFilter, logger core_logger.SugaredLogger, chainID *big.Int, postgresDb *ctf_test_env.PostgresDb) (bool, string, error) { orm, db, err := NewOrm(logger, chainID, postgresDb) if err != nil { return false, "", err } defer db.Close() - knownFilters, err := orm.LoadFilters(context.Background()) + knownFilters, err := orm.LoadFilters(ctx) if err != nil { return false, "", err } @@ -594,7 +594,7 @@ func GetMissingLogs(startBlock, endBlock int64, logEmitters []*contracts.LogEmit return nil, dbError } - allLogsInEVMNode, err := getEVMLogs(startBlock, endBlock, logEmitters, evmClient, l, cfg) + allLogsInEVMNode, err := getEVMLogs(ctx, startBlock, endBlock, logEmitters, evmClient, l, cfg) if err != nil { return nil, err } @@ -722,13 +722,13 @@ func PrintMissingLogsInfo(missingLogs map[string][]geth_types.Log, l zerolog.Log // getEVMLogs returns a slice of all logs emitted by the provided log emitters in the provided block range, // which are present in the EVM node to which the provided evm client is connected -func getEVMLogs(startBlock, endBlock int64, logEmitters []*contracts.LogEmitter, evmClient blockchain.EVMClient, l zerolog.Logger, cfg *lp_config.Config) ([]geth_types.Log, error) { +func getEVMLogs(ctx context.Context, startBlock, endBlock int64, logEmitters 
[]*contracts.LogEmitter, evmClient blockchain.EVMClient, l zerolog.Logger, cfg *lp_config.Config) ([]geth_types.Log, error) { allLogsInEVMNode := make([]geth_types.Log, 0) for j := 0; j < len(logEmitters); j++ { address := (*logEmitters[j]).Address() for _, event := range cfg.General.EventsToEmit { l.Debug().Str("Event name", event.Name).Str("Emitter address", address.String()).Msg("Fetching logs from EVM node") - logsInEVMNode, err := evmClient.FilterLogs(context.Background(), geth.FilterQuery{ + logsInEVMNode, err := evmClient.FilterLogs(ctx, geth.FilterQuery{ Addresses: []common.Address{(address)}, Topics: [][]common.Hash{{event.ID}}, FromBlock: big.NewInt(startBlock), From 2606a0f06ba3be1603c42f8627fd53c58e0a6e4d Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Mon, 4 Mar 2024 11:21:07 -0500 Subject: [PATCH 47/65] Use context --- core/services/relay/evm/binding.go | 6 +++--- core/services/relay/evm/event_binding.go | 14 +++++--------- .../relay/evm/functions/contract_transmitter.go | 4 +--- .../evm/functions/contract_transmitter_test.go | 9 ++++++--- .../relay/evm/functions/logpoller_wrapper.go | 6 +++--- .../relay/evm/types/mocks/log_poller_wrapper.go | 6 +++--- core/services/relay/evm/types/types.go | 4 ++-- 7 files changed, 23 insertions(+), 26 deletions(-) diff --git a/core/services/relay/evm/binding.go b/core/services/relay/evm/binding.go index e78d9f0a770..976ba05b1e8 100644 --- a/core/services/relay/evm/binding.go +++ b/core/services/relay/evm/binding.go @@ -8,8 +8,8 @@ import ( type readBinding interface { GetLatestValue(ctx context.Context, params, returnVal any) error - Bind(binding commontypes.BoundContract) error + Bind(ctx context.Context, binding commontypes.BoundContract) error SetCodec(codec commontypes.RemoteCodec) - Register() error - Unregister() error + Register(ctx context.Context) error + Unregister(ctx context.Context) error } diff --git a/core/services/relay/evm/event_binding.go b/core/services/relay/evm/event_binding.go index 
6124df55475..bded6ba476d 100644 --- a/core/services/relay/evm/event_binding.go +++ b/core/services/relay/evm/event_binding.go @@ -43,7 +43,7 @@ func (e *eventBinding) SetCodec(codec commontypes.RemoteCodec) { e.codec = codec } -func (e *eventBinding) Register() error { +func (e *eventBinding) Register(ctx context.Context) error { e.lock.Lock() defer e.lock.Unlock() @@ -52,8 +52,6 @@ func (e *eventBinding) Register() error { return nil } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() if err := e.lp.RegisterFilter(ctx, logpoller.Filter{ Name: e.id, EventSigs: evmtypes.HashArray{e.hash}, @@ -64,7 +62,7 @@ func (e *eventBinding) Register() error { return nil } -func (e *eventBinding) Unregister() error { +func (e *eventBinding) Unregister(ctx context.Context) error { e.lock.Lock() defer e.lock.Unlock() @@ -72,8 +70,6 @@ func (e *eventBinding) Unregister() error { return nil } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() if err := e.lp.UnregisterFilter(ctx, e.id); err != nil { return fmt.Errorf("%w: %w", commontypes.ErrInternal, err) } @@ -97,8 +93,8 @@ func (e *eventBinding) GetLatestValue(ctx context.Context, params, into any) err return e.getLatestValueWithFilters(ctx, confs, params, into) } -func (e *eventBinding) Bind(binding commontypes.BoundContract) error { - if err := e.Unregister(); err != nil { +func (e *eventBinding) Bind(ctx context.Context, binding commontypes.BoundContract) error { + if err := e.Unregister(ctx); err != nil { return err } @@ -107,7 +103,7 @@ func (e *eventBinding) Bind(binding commontypes.BoundContract) error { e.bound = true if e.registerCalled { - return e.Register() + return e.Register(ctx) } return nil } diff --git a/core/services/relay/evm/functions/contract_transmitter.go b/core/services/relay/evm/functions/contract_transmitter.go index 352240233b8..051b1f0bef9 100644 --- a/core/services/relay/evm/functions/contract_transmitter.go +++ 
b/core/services/relay/evm/functions/contract_transmitter.go @@ -253,15 +253,13 @@ func (oc *contractTransmitter) HealthReport() map[string]error { } func (oc *contractTransmitter) Name() string { return oc.lggr.Name() } -func (oc *contractTransmitter) UpdateRoutes(activeCoordinator common.Address, proposedCoordinator common.Address) error { +func (oc *contractTransmitter) UpdateRoutes(ctx context.Context, activeCoordinator common.Address, proposedCoordinator common.Address) error { // transmitter only cares about the active coordinator previousContract := oc.contractAddress.Swap(&activeCoordinator) if previousContract != nil && *previousContract == activeCoordinator { return nil } oc.lggr.Debugw("FunctionsContractTransmitter: updating routes", "previousContract", previousContract, "activeCoordinator", activeCoordinator) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() err := oc.lp.RegisterFilter(ctx, logpoller.Filter{Name: transmitterFilterName(activeCoordinator), EventSigs: []common.Hash{oc.transmittedEventSig}, Addresses: []common.Address{activeCoordinator}}) if err != nil { return err diff --git a/core/services/relay/evm/functions/contract_transmitter_test.go b/core/services/relay/evm/functions/contract_transmitter_test.go index aaf4a5715d2..e9712a3687c 100644 --- a/core/services/relay/evm/functions/contract_transmitter_test.go +++ b/core/services/relay/evm/functions/contract_transmitter_test.go @@ -35,6 +35,7 @@ func (mockTransmitter) FromAddress() gethcommon.Address { return testutils.NewAd func TestContractTransmitter_LatestConfigDigestAndEpoch(t *testing.T) { t.Parallel() + ctx := testutils.Context(t) digestStr := "000130da6b9315bd59af6b0a3f5463c0d0a39e92eaa34cbcbdbace7b3bfcc776" lggr := logger.TestLogger(t) @@ -54,7 +55,7 @@ func TestContractTransmitter_LatestConfigDigestAndEpoch(t *testing.T) { return &txmgr.TxMeta{}, nil }, 1) require.NoError(t, err) - require.NoError(t, functionsTransmitter.UpdateRoutes(gethcommon.Address{}, 
gethcommon.Address{})) + require.NoError(t, functionsTransmitter.UpdateRoutes(ctx, gethcommon.Address{}, gethcommon.Address{})) digest, epoch, err := functionsTransmitter.LatestConfigDigestAndEpoch(testutils.Context(t)) require.NoError(t, err) @@ -64,6 +65,7 @@ func TestContractTransmitter_LatestConfigDigestAndEpoch(t *testing.T) { func TestContractTransmitter_Transmit_V1(t *testing.T) { t.Parallel() + ctx := testutils.Context(t) contractVersion := uint32(1) configuredDestAddress, coordinatorAddress := testutils.NewAddress(), testutils.NewAddress() @@ -78,7 +80,7 @@ func TestContractTransmitter_Transmit_V1(t *testing.T) { return &txmgr.TxMeta{}, nil }, contractVersion) require.NoError(t, err) - require.NoError(t, ot.UpdateRoutes(configuredDestAddress, configuredDestAddress)) + require.NoError(t, ot.UpdateRoutes(ctx, configuredDestAddress, configuredDestAddress)) reqId, err := hex.DecodeString("000102030405060708090a0b0c0d0e0f000102030405060708090a0b0c0d0e0f") require.NoError(t, err) @@ -107,6 +109,7 @@ func TestContractTransmitter_Transmit_V1(t *testing.T) { func TestContractTransmitter_Transmit_V1_CoordinatorMismatch(t *testing.T) { t.Parallel() + ctx := testutils.Context(t) contractVersion := uint32(1) configuredDestAddress, coordinatorAddress1, coordinatorAddress2 := testutils.NewAddress(), testutils.NewAddress(), testutils.NewAddress() @@ -121,7 +124,7 @@ func TestContractTransmitter_Transmit_V1_CoordinatorMismatch(t *testing.T) { return &txmgr.TxMeta{}, nil }, contractVersion) require.NoError(t, err) - require.NoError(t, ot.UpdateRoutes(configuredDestAddress, configuredDestAddress)) + require.NoError(t, ot.UpdateRoutes(ctx, configuredDestAddress, configuredDestAddress)) reqId1, err := hex.DecodeString("110102030405060708090a0b0c0d0e0f000102030405060708090a0b0c0d0e0f") require.NoError(t, err) diff --git a/core/services/relay/evm/functions/logpoller_wrapper.go b/core/services/relay/evm/functions/logpoller_wrapper.go index 98a44754957..b9f9a08b6ca 100644 --- 
a/core/services/relay/evm/functions/logpoller_wrapper.go +++ b/core/services/relay/evm/functions/logpoller_wrapper.go @@ -316,10 +316,10 @@ func (l *logPollerWrapper) filterPreviouslyDetectedEvents(logs []logpoller.Log, } // "internal" method called only by EVM relayer components -func (l *logPollerWrapper) SubscribeToUpdates(subscriberName string, subscriber evmRelayTypes.RouteUpdateSubscriber) { +func (l *logPollerWrapper) SubscribeToUpdates(ctx context.Context, subscriberName string, subscriber evmRelayTypes.RouteUpdateSubscriber) { if l.pluginConfig.ContractVersion == 0 { // in V0, immediately set contract address to Oracle contract and never update again - if err := subscriber.UpdateRoutes(l.routerContract.Address(), l.routerContract.Address()); err != nil { + if err := subscriber.UpdateRoutes(ctx, l.routerContract.Address(), l.routerContract.Address()); err != nil { l.lggr.Errorw("LogPollerWrapper: Failed to update routes", "subscriberName", subscriberName, "err", err) } } else if l.pluginConfig.ContractVersion == 1 { @@ -413,7 +413,7 @@ func (l *logPollerWrapper) handleRouteUpdate(ctx context.Context, activeCoordina l.proposedCoordinator = proposedCoordinator for _, subscriber := range l.subscribers { - err := subscriber.UpdateRoutes(activeCoordinator, proposedCoordinator) + err := subscriber.UpdateRoutes(ctx, activeCoordinator, proposedCoordinator) if err != nil { l.lggr.Errorw("LogPollerWrapper: Failed to update routes", "err", err) } diff --git a/core/services/relay/evm/types/mocks/log_poller_wrapper.go b/core/services/relay/evm/types/mocks/log_poller_wrapper.go index fea333ca9f5..8017e983e53 100644 --- a/core/services/relay/evm/types/mocks/log_poller_wrapper.go +++ b/core/services/relay/evm/types/mocks/log_poller_wrapper.go @@ -145,9 +145,9 @@ func (_m *LogPollerWrapper) Start(_a0 context.Context) error { return r0 } -// SubscribeToUpdates provides a mock function with given fields: name, subscriber -func (_m *LogPollerWrapper) SubscribeToUpdates(name 
string, subscriber types.RouteUpdateSubscriber) { - _m.Called(name, subscriber) +// SubscribeToUpdates provides a mock function with given fields: ctx, name, subscriber +func (_m *LogPollerWrapper) SubscribeToUpdates(ctx context.Context, name string, subscriber types.RouteUpdateSubscriber) { + _m.Called(ctx, name, subscriber) } // NewLogPollerWrapper creates a new instance of LogPollerWrapper. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. diff --git a/core/services/relay/evm/types/types.go b/core/services/relay/evm/types/types.go index f794f4a0016..f431ae07110 100644 --- a/core/services/relay/evm/types/types.go +++ b/core/services/relay/evm/types/types.go @@ -183,7 +183,7 @@ type OracleResponse struct { } type RouteUpdateSubscriber interface { - UpdateRoutes(activeCoordinator common.Address, proposedCoordinator common.Address) error + UpdateRoutes(ctx context.Context, activeCoordinator common.Address, proposedCoordinator common.Address) error } // A LogPoller wrapper that understands router proxy contracts @@ -194,5 +194,5 @@ type LogPollerWrapper interface { LatestEvents(ctx context.Context) ([]OracleRequest, []OracleResponse, error) // TODO (FUN-668): Remove from the LOOP interface and only use internally within the EVM relayer - SubscribeToUpdates(name string, subscriber RouteUpdateSubscriber) + SubscribeToUpdates(ctx context.Context, name string, subscriber RouteUpdateSubscriber) } From aecda9cd98f6849d0cfb19ccb7dc18f2cae7674d Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Mon, 4 Mar 2024 11:28:31 -0500 Subject: [PATCH 48/65] Use context --- core/services/blockhashstore/coordinators.go | 12 ++++++------ core/services/blockhashstore/delegate.go | 6 +++--- core/services/blockheaderfeeder/delegate.go | 6 +++--- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/core/services/blockhashstore/coordinators.go b/core/services/blockhashstore/coordinators.go index 7e4a0c5dc02..9a8c34a434e 
100644 --- a/core/services/blockhashstore/coordinators.go +++ b/core/services/blockhashstore/coordinators.go @@ -70,8 +70,8 @@ type V1Coordinator struct { } // NewV1Coordinator creates a new V1Coordinator from the given contract. -func NewV1Coordinator(c v1.VRFCoordinatorInterface, lp logpoller.LogPoller) (*V1Coordinator, error) { - err := lp.RegisterFilter(context.Background(), logpoller.Filter{ +func NewV1Coordinator(ctx context.Context, c v1.VRFCoordinatorInterface, lp logpoller.LogPoller) (*V1Coordinator, error) { + err := lp.RegisterFilter(ctx, logpoller.Filter{ Name: logpoller.FilterName("VRFv1CoordinatorFeeder", c.Address()), EventSigs: []common.Hash{ v1.VRFCoordinatorRandomnessRequest{}.Topic(), @@ -159,8 +159,8 @@ type V2Coordinator struct { } // NewV2Coordinator creates a new V2Coordinator from the given contract. -func NewV2Coordinator(c v2.VRFCoordinatorV2Interface, lp logpoller.LogPoller) (*V2Coordinator, error) { - err := lp.RegisterFilter(context.Background(), logpoller.Filter{ +func NewV2Coordinator(ctx context.Context, c v2.VRFCoordinatorV2Interface, lp logpoller.LogPoller) (*V2Coordinator, error) { + err := lp.RegisterFilter(ctx, logpoller.Filter{ Name: logpoller.FilterName("VRFv2CoordinatorFeeder", c.Address()), EventSigs: []common.Hash{ v2.VRFCoordinatorV2RandomWordsRequested{}.Topic(), @@ -250,8 +250,8 @@ type V2PlusCoordinator struct { } // NewV2Coordinator creates a new V2Coordinator from the given contract. 
-func NewV2PlusCoordinator(c v2plus.IVRFCoordinatorV2PlusInternalInterface, lp logpoller.LogPoller) (*V2PlusCoordinator, error) { - err := lp.RegisterFilter(context.Background(), logpoller.Filter{ +func NewV2PlusCoordinator(ctx context.Context, c v2plus.IVRFCoordinatorV2PlusInternalInterface, lp logpoller.LogPoller) (*V2PlusCoordinator, error) { + err := lp.RegisterFilter(ctx, logpoller.Filter{ Name: logpoller.FilterName("VRFv2PlusCoordinatorFeeder", c.Address()), EventSigs: []common.Hash{ v2plus.IVRFCoordinatorV2PlusInternalRandomWordsRequested{}.Topic(), diff --git a/core/services/blockhashstore/delegate.go b/core/services/blockhashstore/delegate.go index 751cf5dd90a..357f166ff72 100644 --- a/core/services/blockhashstore/delegate.go +++ b/core/services/blockhashstore/delegate.go @@ -107,7 +107,7 @@ func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) ([]job.Servi } var coord *V1Coordinator - coord, err = NewV1Coordinator(c, lp) + coord, err = NewV1Coordinator(ctx, c, lp) if err != nil { return nil, errors.Wrap(err, "building V1 coordinator") } @@ -122,7 +122,7 @@ func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) ([]job.Servi } var coord *V2Coordinator - coord, err = NewV2Coordinator(c, lp) + coord, err = NewV2Coordinator(ctx, c, lp) if err != nil { return nil, errors.Wrap(err, "building V2 coordinator") } @@ -137,7 +137,7 @@ func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) ([]job.Servi } var coord *V2PlusCoordinator - coord, err = NewV2PlusCoordinator(c, lp) + coord, err = NewV2PlusCoordinator(ctx, c, lp) if err != nil { return nil, errors.Wrap(err, "building V2Plus coordinator") } diff --git a/core/services/blockheaderfeeder/delegate.go b/core/services/blockheaderfeeder/delegate.go index d78782f6592..986bfa2bf29 100644 --- a/core/services/blockheaderfeeder/delegate.go +++ b/core/services/blockheaderfeeder/delegate.go @@ -104,7 +104,7 @@ func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) 
([]job.Servi return nil, errors.Wrap(err, "building V1 coordinator") } var coord *blockhashstore.V1Coordinator - coord, err = blockhashstore.NewV1Coordinator(c, lp) + coord, err = blockhashstore.NewV1Coordinator(ctx, c, lp) if err != nil { return nil, errors.Wrap(err, "building V1 coordinator") } @@ -118,7 +118,7 @@ func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) ([]job.Servi return nil, errors.Wrap(err, "building V2 coordinator") } var coord *blockhashstore.V2Coordinator - coord, err = blockhashstore.NewV2Coordinator(c, lp) + coord, err = blockhashstore.NewV2Coordinator(ctx, c, lp) if err != nil { return nil, errors.Wrap(err, "building V2 coordinator") } @@ -132,7 +132,7 @@ func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) ([]job.Servi return nil, errors.Wrap(err, "building V2 plus coordinator") } var coord *blockhashstore.V2PlusCoordinator - coord, err = blockhashstore.NewV2PlusCoordinator(c, lp) + coord, err = blockhashstore.NewV2PlusCoordinator(ctx, c, lp) if err != nil { return nil, errors.Wrap(err, "building V2 plus coordinator") } From 5e0d3fc3b7461d6429404cbf53e93204d58f83c6 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Mon, 4 Mar 2024 11:38:55 -0500 Subject: [PATCH 49/65] Propagate context --- core/services/relay/evm/evm.go | 9 +++++++-- core/services/relay/evm/mercury/config_poller.go | 4 +--- core/services/relay/evm/mercury/helpers_test.go | 2 +- core/services/relay/evm/mercury_config_provider.go | 4 +++- 4 files changed, 12 insertions(+), 7 deletions(-) diff --git a/core/services/relay/evm/evm.go b/core/services/relay/evm/evm.go index a02885cb556..a919ba7209e 100644 --- a/core/services/relay/evm/evm.go +++ b/core/services/relay/evm/evm.go @@ -185,6 +185,8 @@ func (r *Relayer) NewPluginProvider(rargs commontypes.RelayArgs, pargs commontyp } func (r *Relayer) NewMercuryProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.MercuryProvider, error) { + // TODO 
https://smartcontract-it.atlassian.net/browse/BCF-2887 + ctx := context.Background() lggr := r.lggr.Named("MercuryProvider").Named(rargs.ExternalJobID.String()) relayOpts := types.NewRelayOpts(rargs) relayConfig, err := relayOpts.RelayConfig() @@ -205,7 +207,7 @@ func (r *Relayer) NewMercuryProvider(rargs commontypes.RelayArgs, pargs commonty if relayConfig.ChainID.String() != r.chain.ID().String() { return nil, fmt.Errorf("internal error: chain id in spec does not match this relayer's chain: have %s expected %s", relayConfig.ChainID.String(), r.chain.ID().String()) } - cp, err := newMercuryConfigProvider(lggr, r.chain, relayOpts) + cp, err := newMercuryConfigProvider(ctx, lggr, r.chain, relayOpts) if err != nil { return nil, pkgerrors.WithStack(err) } @@ -315,6 +317,9 @@ func (r *Relayer) NewFunctionsProvider(rargs commontypes.RelayArgs, pargs common // NewConfigProvider is called by bootstrap jobs func (r *Relayer) NewConfigProvider(args commontypes.RelayArgs) (configProvider commontypes.ConfigProvider, err error) { + // TODO https://smartcontract-it.atlassian.net/browse/BCF-2887 + ctx := context.Background() + lggr := r.lggr.Named("ConfigProvider").Named(args.ExternalJobID.String()) relayOpts := types.NewRelayOpts(args) relayConfig, err := relayOpts.RelayConfig() @@ -340,7 +345,7 @@ func (r *Relayer) NewConfigProvider(args commontypes.RelayArgs) (configProvider case "median": configProvider, err = newStandardConfigProvider(lggr, r.chain, relayOpts) case "mercury": - configProvider, err = newMercuryConfigProvider(lggr, r.chain, relayOpts) + configProvider, err = newMercuryConfigProvider(ctx, lggr, r.chain, relayOpts) case "llo": configProvider, err = newLLOConfigProvider(lggr, r.chain, relayOpts) default: diff --git a/core/services/relay/evm/mercury/config_poller.go b/core/services/relay/evm/mercury/config_poller.go index 1501db1337a..2da541a8e42 100644 --- a/core/services/relay/evm/mercury/config_poller.go +++ b/core/services/relay/evm/mercury/config_poller.go 
@@ -97,9 +97,7 @@ func FilterName(addr common.Address, feedID common.Hash) string { } // NewConfigPoller creates a new Mercury ConfigPoller -func NewConfigPoller(lggr logger.Logger, destChainPoller logpoller.LogPoller, addr common.Address, feedId common.Hash) (*ConfigPoller, error) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() +func NewConfigPoller(ctx context.Context, lggr logger.Logger, destChainPoller logpoller.LogPoller, addr common.Address, feedId common.Hash) (*ConfigPoller, error) { err := destChainPoller.RegisterFilter(ctx, logpoller.Filter{Name: FilterName(addr, feedId), EventSigs: []common.Hash{FeedScopedConfigSet}, Addresses: []common.Address{addr}}) if err != nil { return nil, err diff --git a/core/services/relay/evm/mercury/helpers_test.go b/core/services/relay/evm/mercury/helpers_test.go index 196bf0b963c..f2923696bfc 100644 --- a/core/services/relay/evm/mercury/helpers_test.go +++ b/core/services/relay/evm/mercury/helpers_test.go @@ -177,7 +177,7 @@ func SetupTH(t *testing.T, feedID common.Hash) TestHarness { lp := logpoller.NewLogPoller(lorm, ethClient, lggr, lpOpts) servicetest.Run(t, lp) - configPoller, err := NewConfigPoller(lggr, lp, verifierAddress, feedID) + configPoller, err := NewConfigPoller(testutils.Context(t), lggr, lp, verifierAddress, feedID) require.NoError(t, err) configPoller.Start() diff --git a/core/services/relay/evm/mercury_config_provider.go b/core/services/relay/evm/mercury_config_provider.go index 027a3cfb27c..bd0749e5ae2 100644 --- a/core/services/relay/evm/mercury_config_provider.go +++ b/core/services/relay/evm/mercury_config_provider.go @@ -1,6 +1,7 @@ package evm import ( + "context" "errors" "fmt" @@ -14,7 +15,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types" ) -func newMercuryConfigProvider(lggr logger.Logger, chain legacyevm.Chain, opts *types.RelayOpts) (commontypes.ConfigProvider, error) { +func newMercuryConfigProvider(ctx context.Context, lggr 
logger.Logger, chain legacyevm.Chain, opts *types.RelayOpts) (commontypes.ConfigProvider, error) { if !common.IsHexAddress(opts.ContractID) { return nil, errors.New("invalid contractID, expected hex address") } @@ -29,6 +30,7 @@ func newMercuryConfigProvider(lggr logger.Logger, chain legacyevm.Chain, opts *t return nil, errors.New("feed ID is required for tracking config on mercury contracts") } cp, err := mercury.NewConfigPoller( + ctx, lggr.Named(relayConfig.FeedID.String()), chain.LogPoller(), aggregatorAddress, From 1e1be7d4bf01135b619ef5f673175c3f48d5f1f0 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Mon, 4 Mar 2024 12:57:57 -0500 Subject: [PATCH 50/65] Propagate context --- core/services/blockhashstore/delegate.go | 2 +- core/services/blockheaderfeeder/delegate.go | 2 +- core/services/cron/delegate.go | 8 +++---- core/services/directrequest/delegate.go | 8 +++---- core/services/fluxmonitorv2/delegate.go | 8 +++---- core/services/gateway/delegate.go | 8 +++---- core/services/job/spawner.go | 12 +++++----- core/services/keeper/delegate.go | 8 +++---- core/services/ocr/delegate.go | 8 +++---- core/services/ocr2/delegate.go | 8 +++---- core/services/ocrbootstrap/delegate.go | 2 +- core/services/relay/evm/bindings.go | 9 ++++---- core/services/relay/evm/chain_reader.go | 17 ++++++++------ core/services/relay/evm/chain_reader_test.go | 2 +- core/services/relay/evm/evm.go | 2 +- core/services/relay/evm/functions.go | 10 ++++----- .../relay/evm/functions/config_poller.go | 4 +--- .../relay/evm/functions/config_poller_test.go | 2 +- .../evm/functions/logpoller_wrapper_test.go | 22 ++++++++++++------- .../evm/functions/offchain_config_digester.go | 3 ++- core/services/relay/evm/method_binding.go | 6 ++--- core/services/streams/delegate.go | 8 +++---- core/services/vrf/delegate.go | 8 +++---- core/services/webhook/delegate.go | 2 +- core/services/workflows/delegate.go | 2 +- 25 files changed, 89 insertions(+), 82 deletions(-) diff --git 
a/core/services/blockhashstore/delegate.go b/core/services/blockhashstore/delegate.go index 357f166ff72..c8954ad1c2b 100644 --- a/core/services/blockhashstore/delegate.go +++ b/core/services/blockhashstore/delegate.go @@ -194,7 +194,7 @@ func (d *Delegate) BeforeJobCreated(spec job.Job) {} func (d *Delegate) BeforeJobDeleted(spec job.Job) {} // OnDeleteJob satisfies the job.Delegate interface. -func (d *Delegate) OnDeleteJob(spec job.Job, q pg.Queryer) error { return nil } +func (d *Delegate) OnDeleteJob(ctx context.Context, spec job.Job, q pg.Queryer) error { return nil } // service is a job.Service that runs the BHS feeder every pollPeriod. type service struct { diff --git a/core/services/blockheaderfeeder/delegate.go b/core/services/blockheaderfeeder/delegate.go index 986bfa2bf29..19edb43bc23 100644 --- a/core/services/blockheaderfeeder/delegate.go +++ b/core/services/blockheaderfeeder/delegate.go @@ -208,7 +208,7 @@ func (d *Delegate) BeforeJobCreated(spec job.Job) {} func (d *Delegate) BeforeJobDeleted(spec job.Job) {} // OnDeleteJob satisfies the job.Delegate interface. -func (d *Delegate) OnDeleteJob(spec job.Job, q pg.Queryer) error { return nil } +func (d *Delegate) OnDeleteJob(ctx context.Context, spec job.Job, q pg.Queryer) error { return nil } // service is a job.Service that runs the BHS feeder every pollPeriod. 
type service struct { diff --git a/core/services/cron/delegate.go b/core/services/cron/delegate.go index 4a08fec5a40..05b5b36c00f 100644 --- a/core/services/cron/delegate.go +++ b/core/services/cron/delegate.go @@ -29,10 +29,10 @@ func (d *Delegate) JobType() job.Type { return job.Cron } -func (d *Delegate) BeforeJobCreated(spec job.Job) {} -func (d *Delegate) AfterJobCreated(spec job.Job) {} -func (d *Delegate) BeforeJobDeleted(spec job.Job) {} -func (d *Delegate) OnDeleteJob(spec job.Job, q pg.Queryer) error { return nil } +func (d *Delegate) BeforeJobCreated(spec job.Job) {} +func (d *Delegate) AfterJobCreated(spec job.Job) {} +func (d *Delegate) BeforeJobDeleted(spec job.Job) {} +func (d *Delegate) OnDeleteJob(ctx context.Context, spec job.Job, q pg.Queryer) error { return nil } // ServicesForSpec returns the scheduler to be used for running cron jobs func (d *Delegate) ServicesForSpec(ctx context.Context, spec job.Job) (services []job.ServiceCtx, err error) { diff --git a/core/services/directrequest/delegate.go b/core/services/directrequest/delegate.go index 083e6f02266..aa0f8cd4de0 100644 --- a/core/services/directrequest/delegate.go +++ b/core/services/directrequest/delegate.go @@ -63,10 +63,10 @@ func (d *Delegate) JobType() job.Type { return job.DirectRequest } -func (d *Delegate) BeforeJobCreated(spec job.Job) {} -func (d *Delegate) AfterJobCreated(spec job.Job) {} -func (d *Delegate) BeforeJobDeleted(spec job.Job) {} -func (d *Delegate) OnDeleteJob(spec job.Job, q pg.Queryer) error { return nil } +func (d *Delegate) BeforeJobCreated(spec job.Job) {} +func (d *Delegate) AfterJobCreated(spec job.Job) {} +func (d *Delegate) BeforeJobDeleted(spec job.Job) {} +func (d *Delegate) OnDeleteJob(ctx context.Context, spec job.Job, q pg.Queryer) error { return nil } // ServicesForSpec returns the log listener service for a direct request job func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) ([]job.ServiceCtx, error) { diff --git 
a/core/services/fluxmonitorv2/delegate.go b/core/services/fluxmonitorv2/delegate.go index 5de59432d11..1e2eba8d000 100644 --- a/core/services/fluxmonitorv2/delegate.go +++ b/core/services/fluxmonitorv2/delegate.go @@ -56,10 +56,10 @@ func (d *Delegate) JobType() job.Type { return job.FluxMonitor } -func (d *Delegate) BeforeJobCreated(spec job.Job) {} -func (d *Delegate) AfterJobCreated(spec job.Job) {} -func (d *Delegate) BeforeJobDeleted(spec job.Job) {} -func (d *Delegate) OnDeleteJob(spec job.Job, q pg.Queryer) error { return nil } +func (d *Delegate) BeforeJobCreated(spec job.Job) {} +func (d *Delegate) AfterJobCreated(spec job.Job) {} +func (d *Delegate) BeforeJobDeleted(spec job.Job) {} +func (d *Delegate) OnDeleteJob(ctx context.Context, spec job.Job, q pg.Queryer) error { return nil } // ServicesForSpec returns the flux monitor service for the job spec func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) (services []job.ServiceCtx, err error) { diff --git a/core/services/gateway/delegate.go b/core/services/gateway/delegate.go index 3100877e96a..ba34f2894de 100644 --- a/core/services/gateway/delegate.go +++ b/core/services/gateway/delegate.go @@ -41,10 +41,10 @@ func (d *Delegate) JobType() job.Type { return job.Gateway } -func (d *Delegate) BeforeJobCreated(spec job.Job) {} -func (d *Delegate) AfterJobCreated(spec job.Job) {} -func (d *Delegate) BeforeJobDeleted(spec job.Job) {} -func (d *Delegate) OnDeleteJob(spec job.Job, q pg.Queryer) error { return nil } +func (d *Delegate) BeforeJobCreated(spec job.Job) {} +func (d *Delegate) AfterJobCreated(spec job.Job) {} +func (d *Delegate) BeforeJobDeleted(spec job.Job) {} +func (d *Delegate) OnDeleteJob(ctx context.Context, spec job.Job, q pg.Queryer) error { return nil } // ServicesForSpec returns the scheduler to be used for running observer jobs func (d *Delegate) ServicesForSpec(ctx context.Context, spec job.Job) (services []job.ServiceCtx, err error) { diff --git a/core/services/job/spawner.go 
b/core/services/job/spawner.go index f0486df1c25..3d30a3190b3 100644 --- a/core/services/job/spawner.go +++ b/core/services/job/spawner.go @@ -78,7 +78,7 @@ type ( // non-db side effects. This is required in order to guarantee mutual atomicity between // all tasks intended to happen during job deletion. For the same reason, the job will // not show up in the db within OnDeleteJob(), even though it is still actively running. - OnDeleteJob(jb Job, q pg.Queryer) error + OnDeleteJob(ctx context.Context, jb Job, q pg.Queryer) error } activeJob struct { @@ -340,7 +340,7 @@ func (js *spawner) DeleteJob(jobID int32, qopts ...pg.QOpt) error { // we know the DELETE will succeed. The DELETE will be finalized only if all db transactions in OnDeleteJob() // succeed. If either of those fails, the job will not be stopped and everything will be rolled back. lggr.Debugw("Callback: OnDeleteJob") - err = aj.delegate.OnDeleteJob(aj.spec, tx) + err = aj.delegate.OnDeleteJob(ctx, aj.spec, tx) if err != nil { return err } @@ -395,7 +395,7 @@ func (n *NullDelegate) ServicesForSpec(ctx context.Context, spec Job) (s []Servi return } -func (n *NullDelegate) BeforeJobCreated(spec Job) {} -func (n *NullDelegate) AfterJobCreated(spec Job) {} -func (n *NullDelegate) BeforeJobDeleted(spec Job) {} -func (n *NullDelegate) OnDeleteJob(spec Job, q pg.Queryer) error { return nil } +func (n *NullDelegate) BeforeJobCreated(spec Job) {} +func (n *NullDelegate) AfterJobCreated(spec Job) {} +func (n *NullDelegate) BeforeJobDeleted(spec Job) {} +func (n *NullDelegate) OnDeleteJob(ctx context.Context, spec Job, q pg.Queryer) error { return nil } diff --git a/core/services/keeper/delegate.go b/core/services/keeper/delegate.go index c2c546fcd33..8cadb8cd77f 100644 --- a/core/services/keeper/delegate.go +++ b/core/services/keeper/delegate.go @@ -51,10 +51,10 @@ func (d *Delegate) JobType() job.Type { return job.Keeper } -func (d *Delegate) BeforeJobCreated(spec job.Job) {} -func (d *Delegate) 
AfterJobCreated(spec job.Job) {} -func (d *Delegate) BeforeJobDeleted(spec job.Job) {} -func (d *Delegate) OnDeleteJob(spec job.Job, q pg.Queryer) error { return nil } +func (d *Delegate) BeforeJobCreated(spec job.Job) {} +func (d *Delegate) AfterJobCreated(spec job.Job) {} +func (d *Delegate) BeforeJobDeleted(spec job.Job) {} +func (d *Delegate) OnDeleteJob(ctx context.Context, spec job.Job, q pg.Queryer) error { return nil } // ServicesForSpec satisfies the job.Delegate interface. func (d *Delegate) ServicesForSpec(ctx context.Context, spec job.Job) (services []job.ServiceCtx, err error) { diff --git a/core/services/ocr/delegate.go b/core/services/ocr/delegate.go index 6d7757ea528..0411aea6923 100644 --- a/core/services/ocr/delegate.go +++ b/core/services/ocr/delegate.go @@ -82,10 +82,10 @@ func (d *Delegate) JobType() job.Type { return job.OffchainReporting } -func (d *Delegate) BeforeJobCreated(spec job.Job) {} -func (d *Delegate) AfterJobCreated(spec job.Job) {} -func (d *Delegate) BeforeJobDeleted(spec job.Job) {} -func (d *Delegate) OnDeleteJob(spec job.Job, q pg.Queryer) error { return nil } +func (d *Delegate) BeforeJobCreated(spec job.Job) {} +func (d *Delegate) AfterJobCreated(spec job.Job) {} +func (d *Delegate) BeforeJobDeleted(spec job.Job) {} +func (d *Delegate) OnDeleteJob(ctx context.Context, spec job.Job, q pg.Queryer) error { return nil } // ServicesForSpec returns the OCR services that need to run for this job func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) (services []job.ServiceCtx, err error) { diff --git a/core/services/ocr2/delegate.go b/core/services/ocr2/delegate.go index 895f7cc2212..c640f3d9f22 100644 --- a/core/services/ocr2/delegate.go +++ b/core/services/ocr2/delegate.go @@ -273,7 +273,7 @@ func (d *Delegate) BeforeJobCreated(spec job.Job) { } func (d *Delegate) AfterJobCreated(spec job.Job) {} func (d *Delegate) BeforeJobDeleted(spec job.Job) {} -func (d *Delegate) OnDeleteJob(jb job.Job, q pg.Queryer) error { 
+func (d *Delegate) OnDeleteJob(ctx context.Context, jb job.Job, q pg.Queryer) error { // If the job spec is malformed in any way, we report the error but return nil so that // the job deletion itself isn't blocked. @@ -290,13 +290,13 @@ func (d *Delegate) OnDeleteJob(jb job.Job, q pg.Queryer) error { } // we only have clean to do for the EVM if rid.Network == relay.EVM { - return d.cleanupEVM(jb, q, rid) + return d.cleanupEVM(ctx, jb, q, rid) } return nil } // cleanupEVM is a helper for clean up EVM specific state when a job is deleted -func (d *Delegate) cleanupEVM(jb job.Job, q pg.Queryer, relayID relay.ID) error { +func (d *Delegate) cleanupEVM(ctx context.Context, jb job.Job, q pg.Queryer, relayID relay.ID) error { // If UnregisterFilter returns an // error, that means it failed to remove a valid active filter from the db. We do abort the job deletion // in that case, since it should be easy for the user to retry and will avoid leaving the db in @@ -341,8 +341,6 @@ func (d *Delegate) cleanupEVM(jb job.Job, q pg.Queryer, relayID relay.ID) error } filters = append(filters, relayFilters...) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() for _, filter := range filters { d.lggr.Debugf("Unregistering %s filter", filter) err = lp.UnregisterFilter(ctx, filter) diff --git a/core/services/ocrbootstrap/delegate.go b/core/services/ocrbootstrap/delegate.go index 46c664007bc..9ed7cbea477 100644 --- a/core/services/ocrbootstrap/delegate.go +++ b/core/services/ocrbootstrap/delegate.go @@ -190,6 +190,6 @@ func (d *Delegate) AfterJobCreated(spec job.Job) { func (d *Delegate) BeforeJobDeleted(spec job.Job) {} // OnDeleteJob satisfies the job.Delegate interface. 
-func (d *Delegate) OnDeleteJob(spec job.Job, q pg.Queryer) error { +func (d *Delegate) OnDeleteJob(ctx context.Context, spec job.Job, q pg.Queryer) error { return nil } diff --git a/core/services/relay/evm/bindings.go b/core/services/relay/evm/bindings.go index 1a23128d19f..e13fcbc02d5 100644 --- a/core/services/relay/evm/bindings.go +++ b/core/services/relay/evm/bindings.go @@ -1,6 +1,7 @@ package evm import ( + "context" "fmt" commontypes "github.com/smartcontractkit/chainlink-common/pkg/types" @@ -34,14 +35,14 @@ func (b contractBindings) AddReadBinding(contractName, readName string, reader r rbs[readName] = reader } -func (b contractBindings) Bind(boundContracts []commontypes.BoundContract) error { +func (b contractBindings) Bind(ctx context.Context, boundContracts []commontypes.BoundContract) error { for _, bc := range boundContracts { rbs, rbsExist := b[bc.Name] if !rbsExist { return fmt.Errorf("%w: no contract named %s", commontypes.ErrInvalidConfig, bc.Name) } for _, r := range rbs { - if err := r.Bind(bc); err != nil { + if err := r.Bind(ctx, bc); err != nil { return err } } @@ -49,10 +50,10 @@ func (b contractBindings) Bind(boundContracts []commontypes.BoundContract) error return nil } -func (b contractBindings) ForEach(fn func(readBinding) error) error { +func (b contractBindings) ForEach(ctx context.Context, fn func(readBinding, context.Context) error) error { for _, rbs := range b { for _, rb := range rbs { - if err := fn(rb); err != nil { + if err := fn(rb, ctx); err != nil { return err } } diff --git a/core/services/relay/evm/chain_reader.go b/core/services/relay/evm/chain_reader.go index dba05af7e3c..ed8c34b4831 100644 --- a/core/services/relay/evm/chain_reader.go +++ b/core/services/relay/evm/chain_reader.go @@ -39,7 +39,7 @@ type chainReader struct { } // NewChainReaderService is a constructor for ChainReader, returns nil if there is any error -func NewChainReaderService(lggr logger.Logger, lp logpoller.LogPoller, chain legacyevm.Chain, config 
types.ChainReaderConfig) (ChainReaderService, error) { +func NewChainReaderService(ctx context.Context, lggr logger.Logger, lp logpoller.LogPoller, chain legacyevm.Chain, config types.ChainReaderConfig) (ChainReaderService, error) { cr := &chainReader{ lggr: lggr.Named("ChainReader"), lp: lp, @@ -57,7 +57,7 @@ func NewChainReaderService(lggr logger.Logger, lp logpoller.LogPoller, chain leg return nil, err } - err = cr.contractBindings.ForEach(func(b readBinding) error { + err = cr.contractBindings.ForEach(ctx, func(b readBinding, c context.Context) error { b.SetCodec(cr.codec) return nil }) @@ -78,8 +78,8 @@ func (cr *chainReader) GetLatestValue(ctx context.Context, contractName, method return b.GetLatestValue(ctx, params, returnVal) } -func (cr *chainReader) Bind(_ context.Context, bindings []commontypes.BoundContract) error { - return cr.contractBindings.Bind(bindings) +func (cr *chainReader) Bind(ctx context.Context, bindings []commontypes.BoundContract) error { + return cr.contractBindings.Bind(ctx, bindings) } func (cr *chainReader) init(chainContractReaders map[string]types.ChainContractReader) error { @@ -110,15 +110,18 @@ func (cr *chainReader) init(chainContractReaders map[string]types.ChainContractR return nil } -func (cr *chainReader) Start(_ context.Context) error { +func (cr *chainReader) Start(ctx context.Context) error { return cr.StartOnce("ChainReader", func() error { - return cr.contractBindings.ForEach(readBinding.Register) + return cr.contractBindings.ForEach(ctx, readBinding.Register) }) } func (cr *chainReader) Close() error { return cr.StopOnce("ChainReader", func() error { - return cr.contractBindings.ForEach(readBinding.Unregister) + // TODO: Propagate context + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + return cr.contractBindings.ForEach(ctx, readBinding.Unregister) }) } diff --git a/core/services/relay/evm/chain_reader_test.go b/core/services/relay/evm/chain_reader_test.go index 71c6487a6c5..edca5c19b60 
100644 --- a/core/services/relay/evm/chain_reader_test.go +++ b/core/services/relay/evm/chain_reader_test.go @@ -273,7 +273,7 @@ func (it *chainReaderInterfaceTester) GetChainReader(t *testing.T) clcommontypes lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.SimulatedChainID, db, lggr), it.chain.Client(), lggr, lpOpts) require.NoError(t, lp.Start(ctx)) it.chain.On("LogPoller").Return(lp) - cr, err := evm.NewChainReaderService(lggr, lp, it.chain, it.chainConfig) + cr, err := evm.NewChainReaderService(ctx, lggr, lp, it.chain, it.chainConfig) require.NoError(t, err) require.NoError(t, cr.Start(ctx)) it.cr = cr diff --git a/core/services/relay/evm/evm.go b/core/services/relay/evm/evm.go index a919ba7209e..db504872925 100644 --- a/core/services/relay/evm/evm.go +++ b/core/services/relay/evm/evm.go @@ -582,7 +582,7 @@ func (r *Relayer) NewMedianProvider(rargs commontypes.RelayArgs, pargs commontyp // allow fallback until chain reader is default and median contract is removed, but still log just in case var chainReaderService ChainReaderService if relayConfig.ChainReader != nil { - if chainReaderService, err = NewChainReaderService(lggr, r.chain.LogPoller(), r.chain, *relayConfig.ChainReader); err != nil { + if chainReaderService, err = NewChainReaderService(ctx, lggr, r.chain.LogPoller(), r.chain, *relayConfig.ChainReader); err != nil { return nil, err } diff --git a/core/services/relay/evm/functions.go b/core/services/relay/evm/functions.go index c10134f3acc..57df346136a 100644 --- a/core/services/relay/evm/functions.go +++ b/core/services/relay/evm/functions.go @@ -115,7 +115,7 @@ func NewFunctionsProvider(ctx context.Context, chain legacyevm.Chain, rargs comm if err != nil { return nil, err } - configWatcher, err := newFunctionsConfigProvider(pluginType, chain, rargs, relayConfig.FromBlock, logPollerWrapper, lggr) + configWatcher, err := newFunctionsConfigProvider(ctx, pluginType, chain, rargs, relayConfig.FromBlock, logPollerWrapper, lggr) if err != nil { 
return nil, err } @@ -135,7 +135,7 @@ func NewFunctionsProvider(ctx context.Context, chain legacyevm.Chain, rargs comm }, nil } -func newFunctionsConfigProvider(pluginType functionsRelay.FunctionsPluginType, chain legacyevm.Chain, args commontypes.RelayArgs, fromBlock uint64, logPollerWrapper evmRelayTypes.LogPollerWrapper, lggr logger.Logger) (*configWatcher, error) { +func newFunctionsConfigProvider(ctx context.Context, pluginType functionsRelay.FunctionsPluginType, chain legacyevm.Chain, args commontypes.RelayArgs, fromBlock uint64, logPollerWrapper evmRelayTypes.LogPollerWrapper, lggr logger.Logger) (*configWatcher, error) { if !common.IsHexAddress(args.ContractID) { return nil, errors.Errorf("invalid contractID, expected hex address") } @@ -146,10 +146,10 @@ func newFunctionsConfigProvider(pluginType functionsRelay.FunctionsPluginType, c if err != nil { return nil, err } - logPollerWrapper.SubscribeToUpdates("FunctionsConfigPoller", cp) + logPollerWrapper.SubscribeToUpdates(ctx, "FunctionsConfigPoller", cp) offchainConfigDigester := functionsRelay.NewFunctionsOffchainConfigDigester(pluginType, chain.ID().Uint64()) - logPollerWrapper.SubscribeToUpdates("FunctionsOffchainConfigDigester", offchainConfigDigester) + logPollerWrapper.SubscribeToUpdates(ctx, "FunctionsOffchainConfigDigester", offchainConfigDigester) return newConfigWatcher(lggr, routerContractAddress, offchainConfigDigester, cp, chain, fromBlock, args.New), nil } @@ -224,6 +224,6 @@ func newFunctionsContractTransmitter(ctx context.Context, contractVersion uint32 if err != nil { return nil, err } - logPollerWrapper.SubscribeToUpdates("FunctionsConfigTransmitter", functionsTransmitter) + logPollerWrapper.SubscribeToUpdates(ctx, "FunctionsConfigTransmitter", functionsTransmitter) return functionsTransmitter, err } diff --git a/core/services/relay/evm/functions/config_poller.go b/core/services/relay/evm/functions/config_poller.go index 8d2d7257db3..71616f2e840 100644 --- 
a/core/services/relay/evm/functions/config_poller.go +++ b/core/services/relay/evm/functions/config_poller.go @@ -184,11 +184,9 @@ func (cp *configPoller) LatestBlockHeight(ctx context.Context) (blockHeight uint } // called from LogPollerWrapper in a separate goroutine -func (cp *configPoller) UpdateRoutes(activeCoordinator common.Address, proposedCoordinator common.Address) error { +func (cp *configPoller) UpdateRoutes(ctx context.Context, activeCoordinator common.Address, proposedCoordinator common.Address) error { cp.targetContract.Store(&activeCoordinator) // Register filters for both active and proposed - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() err := cp.destChainLogPoller.RegisterFilter(ctx, logpoller.Filter{Name: configPollerFilterName(activeCoordinator), EventSigs: []common.Hash{ConfigSet}, Addresses: []common.Address{activeCoordinator}}) if err != nil { return err diff --git a/core/services/relay/evm/functions/config_poller_test.go b/core/services/relay/evm/functions/config_poller_test.go index f8a2196b0e0..2d96b2fd15d 100644 --- a/core/services/relay/evm/functions/config_poller_test.go +++ b/core/services/relay/evm/functions/config_poller_test.go @@ -92,7 +92,7 @@ func runTest(t *testing.T, pluginType functions.FunctionsPluginType, expectedDig servicetest.Run(t, lp) configPoller, err := functions.NewFunctionsConfigPoller(pluginType, lp, lggr) require.NoError(t, err) - require.NoError(t, configPoller.UpdateRoutes(ocrAddress, ocrAddress)) + require.NoError(t, configPoller.UpdateRoutes(testutils.Context(t), ocrAddress, ocrAddress)) // Should have no config to begin with. 
_, config, err := configPoller.LatestConfigDetails(testutils.Context(t)) require.NoError(t, err) diff --git a/core/services/relay/evm/functions/logpoller_wrapper_test.go b/core/services/relay/evm/functions/logpoller_wrapper_test.go index 8e7d08410e4..b9a1684050d 100644 --- a/core/services/relay/evm/functions/logpoller_wrapper_test.go +++ b/core/services/relay/evm/functions/logpoller_wrapper_test.go @@ -1,12 +1,15 @@ package functions import ( + "context" "crypto/rand" "encoding/hex" "sync" "testing" "time" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -27,7 +30,7 @@ type subscriber struct { expectedCalls int } -func (s *subscriber) UpdateRoutes(activeCoordinator common.Address, proposedCoordinator common.Address) error { +func (s *subscriber) UpdateRoutes(ctx context.Context, activeCoordinator common.Address, proposedCoordinator common.Address) error { if s.expectedCalls == 0 { panic("unexpected call to UpdateRoutes") } @@ -85,6 +88,7 @@ func getMockedRequestLog(t *testing.T) logpoller.Log { func TestLogPollerWrapper_SingleSubscriberEmptyEvents(t *testing.T) { t.Parallel() + ctx := testutils.Context(t) lp, lpWrapper, client := setUp(t, 100_000) // check only once lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{BlockNumber: int64(100)}, nil) @@ -93,11 +97,11 @@ func TestLogPollerWrapper_SingleSubscriberEmptyEvents(t *testing.T) { lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) subscriber := newSubscriber(1) - lpWrapper.SubscribeToUpdates("mock_subscriber", subscriber) + lpWrapper.SubscribeToUpdates(ctx, "mock_subscriber", subscriber) servicetest.Run(t, lpWrapper) subscriber.updates.Wait() - reqs, resps, err := lpWrapper.LatestEvents() + reqs, resps, err := lpWrapper.LatestEvents(ctx) require.NoError(t, err) require.Equal(t, 0, len(reqs)) require.Equal(t, 0, len(resps)) @@ -105,24 +109,26 @@ 
func TestLogPollerWrapper_SingleSubscriberEmptyEvents(t *testing.T) { func TestLogPollerWrapper_ErrorOnZeroAddresses(t *testing.T) { t.Parallel() + ctx := testutils.Context(t) lp, lpWrapper, client := setUp(t, 100_000) // check only once lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{BlockNumber: int64(100)}, nil) client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(addr(t, "00"), nil) servicetest.Run(t, lpWrapper) - _, _, err := lpWrapper.LatestEvents() + _, _, err := lpWrapper.LatestEvents(ctx) require.Error(t, err) } func TestLogPollerWrapper_LatestEvents_ReorgHandling(t *testing.T) { t.Parallel() + ctx := testutils.Context(t) lp, lpWrapper, client := setUp(t, 100_000) lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{BlockNumber: int64(100)}, nil) client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(addr(t, "01"), nil) lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) subscriber := newSubscriber(1) - lpWrapper.SubscribeToUpdates("mock_subscriber", subscriber) + lpWrapper.SubscribeToUpdates(ctx, "mock_subscriber", subscriber) mockedLog := getMockedRequestLog(t) // All logPoller queries for responses return none lp.On("Logs", mock.Anything, mock.Anything, mock.Anything, functions_coordinator.FunctionsCoordinatorOracleResponse{}.Topic(), mock.Anything).Return([]logpoller.Log{}, nil) @@ -136,14 +142,14 @@ func TestLogPollerWrapper_LatestEvents_ReorgHandling(t *testing.T) { servicetest.Run(t, lpWrapper) subscriber.updates.Wait() - oracleRequests, _, err := lpWrapper.LatestEvents() + oracleRequests, _, err := lpWrapper.LatestEvents(ctx) require.NoError(t, err) assert.Equal(t, 1, len(oracleRequests)) - oracleRequests, _, err = lpWrapper.LatestEvents() + oracleRequests, _, err = lpWrapper.LatestEvents(ctx) require.NoError(t, err) assert.Equal(t, 0, len(oracleRequests)) require.NoError(t, err) - oracleRequests, _, err = lpWrapper.LatestEvents() + oracleRequests, _, 
err = lpWrapper.LatestEvents(ctx) require.NoError(t, err) assert.Equal(t, 0, len(oracleRequests)) } diff --git a/core/services/relay/evm/functions/offchain_config_digester.go b/core/services/relay/evm/functions/offchain_config_digester.go index 29547e794ce..c53d07e77ca 100644 --- a/core/services/relay/evm/functions/offchain_config_digester.go +++ b/core/services/relay/evm/functions/offchain_config_digester.go @@ -1,6 +1,7 @@ package functions import ( + "context" "encoding/binary" "errors" "fmt" @@ -82,7 +83,7 @@ func (d *functionsOffchainConfigDigester) ConfigDigestPrefix() (types.ConfigDige } // called from LogPollerWrapper in a separate goroutine -func (d *functionsOffchainConfigDigester) UpdateRoutes(activeCoordinator common.Address, proposedCoordinator common.Address) error { +func (d *functionsOffchainConfigDigester) UpdateRoutes(ctx context.Context, activeCoordinator common.Address, proposedCoordinator common.Address) error { d.contractAddress.Store(&activeCoordinator) return nil } diff --git a/core/services/relay/evm/method_binding.go b/core/services/relay/evm/method_binding.go index c5e10cce1c1..154c5b16a18 100644 --- a/core/services/relay/evm/method_binding.go +++ b/core/services/relay/evm/method_binding.go @@ -27,11 +27,11 @@ func (m *methodBinding) SetCodec(codec commontypes.RemoteCodec) { m.codec = codec } -func (m *methodBinding) Register() error { +func (m *methodBinding) Register(ctx context.Context) error { return nil } -func (m *methodBinding) Unregister() error { +func (m *methodBinding) Unregister(ctx context.Context) error { return nil } @@ -59,7 +59,7 @@ func (m *methodBinding) GetLatestValue(ctx context.Context, params, returnValue return m.codec.Decode(ctx, bytes, returnValue, wrapItemType(m.contractName, m.method, false)) } -func (m *methodBinding) Bind(binding commontypes.BoundContract) error { +func (m *methodBinding) Bind(ctx context.Context, binding commontypes.BoundContract) error { m.address = common.HexToAddress(binding.Address) 
m.bound = true return nil diff --git a/core/services/streams/delegate.go b/core/services/streams/delegate.go index 5ea0d475d2b..f9e2a64c4a3 100644 --- a/core/services/streams/delegate.go +++ b/core/services/streams/delegate.go @@ -38,10 +38,10 @@ func (d *Delegate) JobType() job.Type { return job.Stream } -func (d *Delegate) BeforeJobCreated(jb job.Job) {} -func (d *Delegate) AfterJobCreated(jb job.Job) {} -func (d *Delegate) BeforeJobDeleted(jb job.Job) {} -func (d *Delegate) OnDeleteJob(jb job.Job, q pg.Queryer) error { return nil } +func (d *Delegate) BeforeJobCreated(jb job.Job) {} +func (d *Delegate) AfterJobCreated(jb job.Job) {} +func (d *Delegate) BeforeJobDeleted(jb job.Job) {} +func (d *Delegate) OnDeleteJob(ctx context.Context, jb job.Job, q pg.Queryer) error { return nil } func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) (services []job.ServiceCtx, err error) { if jb.StreamID == nil { diff --git a/core/services/vrf/delegate.go b/core/services/vrf/delegate.go index 14ba341b1b6..617a28ac4d5 100644 --- a/core/services/vrf/delegate.go +++ b/core/services/vrf/delegate.go @@ -67,10 +67,10 @@ func (d *Delegate) JobType() job.Type { return job.VRF } -func (d *Delegate) BeforeJobCreated(job.Job) {} -func (d *Delegate) AfterJobCreated(job.Job) {} -func (d *Delegate) BeforeJobDeleted(job.Job) {} -func (d *Delegate) OnDeleteJob(job.Job, pg.Queryer) error { return nil } +func (d *Delegate) BeforeJobCreated(job.Job) {} +func (d *Delegate) AfterJobCreated(job.Job) {} +func (d *Delegate) BeforeJobDeleted(job.Job) {} +func (d *Delegate) OnDeleteJob(context.Context, job.Job, pg.Queryer) error { return nil } // ServicesForSpec satisfies the job.Delegate interface. 
func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) ([]job.ServiceCtx, error) { diff --git a/core/services/webhook/delegate.go b/core/services/webhook/delegate.go index 3211018d48d..999b041f308 100644 --- a/core/services/webhook/delegate.go +++ b/core/services/webhook/delegate.go @@ -73,7 +73,7 @@ func (d *Delegate) BeforeJobDeleted(spec job.Job) { ) } } -func (d *Delegate) OnDeleteJob(jb job.Job, q pg.Queryer) error { return nil } +func (d *Delegate) OnDeleteJob(ctx context.Context, jb job.Job, q pg.Queryer) error { return nil } // ServicesForSpec satisfies the job.Delegate interface. func (d *Delegate) ServicesForSpec(ctx context.Context, spec job.Job) ([]job.ServiceCtx, error) { diff --git a/core/services/workflows/delegate.go b/core/services/workflows/delegate.go index 2951c2b4aa3..a54a33e9f0d 100644 --- a/core/services/workflows/delegate.go +++ b/core/services/workflows/delegate.go @@ -32,7 +32,7 @@ func (d *Delegate) AfterJobCreated(jb job.Job) {} func (d *Delegate) BeforeJobDeleted(spec job.Job) {} -func (d *Delegate) OnDeleteJob(jb job.Job, q pg.Queryer) error { return nil } +func (d *Delegate) OnDeleteJob(ctx context.Context, jb job.Job, q pg.Queryer) error { return nil } // ServicesForSpec satisfies the job.Delegate interface. 
func (d *Delegate) ServicesForSpec(ctx context.Context, spec job.Job) ([]job.ServiceCtx, error) { From 0340a8ca870eb8b9f5cd293a4c339261464e1bd6 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Mon, 4 Mar 2024 15:15:20 -0500 Subject: [PATCH 51/65] Update listener_test.go --- core/services/functions/listener_test.go | 28 ++++++++++++------------ 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/core/services/functions/listener_test.go b/core/services/functions/listener_test.go index 75161d3410b..24d95cdcd6b 100644 --- a/core/services/functions/listener_test.go +++ b/core/services/functions/listener_test.go @@ -167,8 +167,8 @@ func TestFunctionsListener_HandleOracleRequestV1_Success(t *testing.T) { Data: make([]byte, 12), } - uni.logPollerWrapper.On("LatestEvents").Return([]types.OracleRequest{request}, nil, nil).Once() - uni.logPollerWrapper.On("LatestEvents").Return(nil, nil, nil) + uni.logPollerWrapper.On("LatestEvents", mock.Anything).Return([]types.OracleRequest{request}, nil, nil).Once() + uni.logPollerWrapper.On("LatestEvents", mock.Anything).Return(nil, nil, nil) uni.pluginORM.On("CreateRequest", mock.Anything, mock.Anything).Return(nil) uni.bridgeAccessor.On("NewExternalAdapterClient").Return(uni.eaClient, nil) uni.eaClient.On("RunComputation", mock.Anything, RequestIDStr, mock.Anything, SubscriptionOwner.Hex(), SubscriptionID, mock.Anything, mock.Anything, mock.Anything).Return(ResultBytes, nil, nil, nil) @@ -261,8 +261,8 @@ func TestFunctionsListener_HandleOracleRequestV1_ComputationError(t *testing.T) Data: make([]byte, 12), } - uni.logPollerWrapper.On("LatestEvents").Return([]types.OracleRequest{request}, nil, nil).Once() - uni.logPollerWrapper.On("LatestEvents").Return(nil, nil, nil) + uni.logPollerWrapper.On("LatestEvents", mock.Anything).Return([]types.OracleRequest{request}, nil, nil).Once() + uni.logPollerWrapper.On("LatestEvents", mock.Anything).Return(nil, nil, nil) uni.pluginORM.On("CreateRequest", mock.Anything, 
mock.Anything).Return(nil) uni.bridgeAccessor.On("NewExternalAdapterClient").Return(uni.eaClient, nil) uni.eaClient.On("RunComputation", mock.Anything, RequestIDStr, mock.Anything, SubscriptionOwner.Hex(), SubscriptionID, mock.Anything, mock.Anything, mock.Anything).Return(nil, ErrorBytes, nil, nil) @@ -300,8 +300,8 @@ func TestFunctionsListener_HandleOracleRequestV1_ThresholdDecryptedSecrets(t *te uni := NewFunctionsListenerUniverse(t, 0, 1_000_000) doneCh := make(chan struct{}) - uni.logPollerWrapper.On("LatestEvents").Return([]types.OracleRequest{request}, nil, nil).Once() - uni.logPollerWrapper.On("LatestEvents").Return(nil, nil, nil) + uni.logPollerWrapper.On("LatestEvents", mock.Anything).Return([]types.OracleRequest{request}, nil, nil).Once() + uni.logPollerWrapper.On("LatestEvents", mock.Anything).Return(nil, nil, nil) uni.pluginORM.On("CreateRequest", mock.Anything, mock.Anything).Return(nil) uni.bridgeAccessor.On("NewExternalAdapterClient").Return(uni.eaClient, nil) uni.eaClient.On("FetchEncryptedSecrets", mock.Anything, mock.Anything, RequestIDStr, mock.Anything, mock.Anything).Return(EncryptedSecrets, nil, nil) @@ -330,8 +330,8 @@ func TestFunctionsListener_HandleOracleRequestV1_CBORTooBig(t *testing.T) { Data: make([]byte, 20), } - uni.logPollerWrapper.On("LatestEvents").Return([]types.OracleRequest{request}, nil, nil).Once() - uni.logPollerWrapper.On("LatestEvents").Return(nil, nil, nil) + uni.logPollerWrapper.On("LatestEvents", mock.Anything).Return([]types.OracleRequest{request}, nil, nil).Once() + uni.logPollerWrapper.On("LatestEvents", mock.Anything).Return(nil, nil, nil) uni.pluginORM.On("CreateRequest", mock.Anything, mock.Anything).Return(nil) uni.pluginORM.On("SetError", RequestID, functions_service.USER_ERROR, []byte("request too big (max 10 bytes)"), mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { close(doneCh) @@ -356,8 +356,8 @@ func TestFunctionsListener_ReportSourceCodeDomains(t *testing.T) { Data: 
make([]byte, 12), } - uni.logPollerWrapper.On("LatestEvents").Return([]types.OracleRequest{request}, nil, nil).Once() - uni.logPollerWrapper.On("LatestEvents").Return(nil, nil, nil) + uni.logPollerWrapper.On("LatestEvents", mock.Anything).Return([]types.OracleRequest{request}, nil, nil).Once() + uni.logPollerWrapper.On("LatestEvents", mock.Anything).Return(nil, nil, nil) uni.pluginORM.On("CreateRequest", mock.Anything, mock.Anything).Return(nil) uni.bridgeAccessor.On("NewExternalAdapterClient").Return(uni.eaClient, nil) uni.eaClient.On("RunComputation", mock.Anything, RequestIDStr, mock.Anything, SubscriptionOwner.Hex(), SubscriptionID, mock.Anything, mock.Anything, mock.Anything).Return(ResultBytes, nil, Domains, nil) @@ -387,7 +387,7 @@ func TestFunctionsListener_PruneRequests(t *testing.T) { uni := NewFunctionsListenerUniverse(t, 0, 1) doneCh := make(chan bool) - uni.logPollerWrapper.On("LatestEvents").Return(nil, nil, nil) + uni.logPollerWrapper.On("LatestEvents", mock.Anything).Return(nil, nil, nil) uni.pluginORM.On("PruneOldestRequests", functions_service.DefaultPruneMaxStoredRequests, functions_service.DefaultPruneBatchSize, mock.Anything).Return(uint32(0), uint32(0), nil).Run(func(args mock.Arguments) { doneCh <- true }) @@ -402,7 +402,7 @@ func TestFunctionsListener_TimeoutRequests(t *testing.T) { uni := NewFunctionsListenerUniverse(t, 1, 0) doneCh := make(chan bool) - uni.logPollerWrapper.On("LatestEvents").Return(nil, nil, nil) + uni.logPollerWrapper.On("LatestEvents", mock.Anything).Return(nil, nil, nil) uni.pluginORM.On("TimeoutExpiredResults", mock.Anything, uint32(1), mock.Anything).Return([]functions_service.RequestID{}, nil).Run(func(args mock.Arguments) { doneCh <- true }) @@ -420,8 +420,8 @@ func TestFunctionsListener_ORMDoesNotFreezeHandlersForever(t *testing.T) { uni := NewFunctionsListenerUniverse(t, 0, 0) request := types.OracleRequest{} - uni.logPollerWrapper.On("LatestEvents").Return([]types.OracleRequest{request}, nil, nil).Once() - 
uni.logPollerWrapper.On("LatestEvents").Return(nil, nil, nil) + uni.logPollerWrapper.On("LatestEvents", mock.Anything).Return([]types.OracleRequest{request}, nil, nil).Once() + uni.logPollerWrapper.On("LatestEvents", mock.Anything).Return(nil, nil, nil) uni.pluginORM.On("CreateRequest", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { var queryerWrapper pg.Q args.Get(1).(pg.QOpt)(&queryerWrapper) From f564a4813ffe6386ee8a1ab6fcd8d7864337f7fd Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Mon, 4 Mar 2024 16:05:33 -0500 Subject: [PATCH 52/65] Fix context --- core/services/functions/listener.go | 4 ++-- .../services/relay/evm/functions/logpoller_wrapper.go | 11 +++++++---- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/core/services/functions/listener.go b/core/services/functions/listener.go index d959e9685c8..12516005c3d 100644 --- a/core/services/functions/listener.go +++ b/core/services/functions/listener.go @@ -185,14 +185,14 @@ func NewFunctionsListener( } // Start complies with job.Service -func (l *functionsListener) Start(ctx context.Context) error { +func (l *functionsListener) Start(context.Context) error { return l.StartOnce("FunctionsListener", func() error { l.serviceContext, l.serviceCancel = context.WithCancel(context.Background()) switch l.pluginConfig.ContractVersion { case 1: l.shutdownWaitGroup.Add(1) - go l.processOracleEventsV1(ctx) + go l.processOracleEventsV1(l.serviceContext) default: return fmt.Errorf("unsupported contract version: %d", l.pluginConfig.ContractVersion) } diff --git a/core/services/relay/evm/functions/logpoller_wrapper.go b/core/services/relay/evm/functions/logpoller_wrapper.go index b9f9a08b6ca..02d91373660 100644 --- a/core/services/relay/evm/functions/logpoller_wrapper.go +++ b/core/services/relay/evm/functions/logpoller_wrapper.go @@ -112,7 +112,7 @@ func NewLogPollerWrapper(routerContractAddress common.Address, pluginConfig conf }, nil } -func (l *logPollerWrapper) Start(ctx context.Context) 
error { +func (l *logPollerWrapper) Start(context.Context) error { return l.StartOnce("LogPollerWrapper", func() error { l.lggr.Infow("starting LogPollerWrapper", "routerContract", l.routerContract.Address().Hex(), "contractVersion", l.pluginConfig.ContractVersion) l.mu.Lock() @@ -121,7 +121,7 @@ func (l *logPollerWrapper) Start(ctx context.Context) error { return errors.New("only contract version 1 is supported") } l.closeWait.Add(1) - go l.checkForRouteUpdates(ctx) + go l.checkForRouteUpdates() return nil }) } @@ -329,7 +329,7 @@ func (l *logPollerWrapper) SubscribeToUpdates(ctx context.Context, subscriberNam } } -func (l *logPollerWrapper) checkForRouteUpdates(ctx context.Context) { +func (l *logPollerWrapper) checkForRouteUpdates() { defer l.closeWait.Done() freqSec := l.pluginConfig.ContractUpdateCheckFrequencySec if freqSec == 0 { @@ -346,7 +346,10 @@ func (l *logPollerWrapper) checkForRouteUpdates(ctx context.Context) { l.lggr.Errorw("LogPollerWrapper: error calling getCurrentCoordinators", "err", err) return } - l.handleRouteUpdate(ctx, active, proposed) + + handleRouteCtx, handleRouteCancel := utils.ContextFromChan(l.stopCh) + defer handleRouteCancel() + l.handleRouteUpdate(handleRouteCtx, active, proposed) } updateOnce() // update once right away From 3c0da11a890bd10714fab291a926f64f8d3c72b6 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Tue, 5 Mar 2024 10:15:44 -0500 Subject: [PATCH 53/65] Export DbORM struct --- core/chains/evm/forwarders/orm.go | 24 ++++---- core/chains/evm/headtracker/orm.go | 20 +++---- core/chains/evm/logpoller/orm.go | 91 +++++++++++++++--------------- 3 files changed, 67 insertions(+), 68 deletions(-) diff --git a/core/chains/evm/forwarders/orm.go b/core/chains/evm/forwarders/orm.go index 6c860e3e62f..8f40dd4e396 100644 --- a/core/chains/evm/forwarders/orm.go +++ b/core/chains/evm/forwarders/orm.go @@ -23,25 +23,25 @@ type ORM interface { FindForwardersInListByChain(ctx context.Context, evmChainId big.Big, addrs 
[]common.Address) ([]Forwarder, error) } -type orm struct { +type DbORM struct { db sqlutil.Queryer } -var _ ORM = &orm{} +var _ ORM = &DbORM{} -func NewORM(db sqlutil.Queryer) *orm { - return &orm{db: db} +func NewORM(db sqlutil.Queryer) *DbORM { + return &DbORM{db: db} } -func (o *orm) Transaction(ctx context.Context, fn func(*orm) error) (err error) { +func (o *DbORM) Transaction(ctx context.Context, fn func(*DbORM) error) (err error) { return sqlutil.Transact(ctx, o.new, o.db, nil, fn) } // new returns a NewORM like o, but backed by q. -func (o *orm) new(q sqlutil.Queryer) *orm { return NewORM(q) } +func (o *DbORM) new(q sqlutil.Queryer) *DbORM { return NewORM(q) } // CreateForwarder creates the Forwarder address associated with the current EVM chain id. -func (o *orm) CreateForwarder(ctx context.Context, addr common.Address, evmChainId big.Big) (fwd Forwarder, err error) { +func (o *DbORM) CreateForwarder(ctx context.Context, addr common.Address, evmChainId big.Big) (fwd Forwarder, err error) { sql := `INSERT INTO evm.forwarders (address, evm_chain_id, created_at, updated_at) VALUES ($1, $2, now(), now()) RETURNING *` err = o.db.GetContext(ctx, &fwd, sql, addr, evmChainId) return fwd, err @@ -50,8 +50,8 @@ func (o *orm) CreateForwarder(ctx context.Context, addr common.Address, evmChain // DeleteForwarder removes a forwarder address. // If cleanup is non-nil, it can be used to perform any chain- or contract-specific cleanup that need to happen atomically // on forwarder deletion. If cleanup returns an error, forwarder deletion will be aborted. 
-func (o *orm) DeleteForwarder(ctx context.Context, id int64, cleanup func(tx sqlutil.Queryer, evmChainID int64, addr common.Address) error) (err error) { - return o.Transaction(ctx, func(orm *orm) error { +func (o *DbORM) DeleteForwarder(ctx context.Context, id int64, cleanup func(tx sqlutil.Queryer, evmChainID int64, addr common.Address) error) (err error) { + return o.Transaction(ctx, func(orm *DbORM) error { var dest struct { EvmChainId int64 Address common.Address @@ -82,7 +82,7 @@ func (o *orm) DeleteForwarder(ctx context.Context, id int64, cleanup func(tx sql } // FindForwarders returns all forwarder addresses from offset up until limit. -func (o *orm) FindForwarders(ctx context.Context, offset, limit int) (fwds []Forwarder, count int, err error) { +func (o *DbORM) FindForwarders(ctx context.Context, offset, limit int) (fwds []Forwarder, count int, err error) { sql := `SELECT count(*) FROM evm.forwarders` if err = o.db.GetContext(ctx, &count, sql); err != nil { return @@ -96,13 +96,13 @@ func (o *orm) FindForwarders(ctx context.Context, offset, limit int) (fwds []For } // FindForwardersByChain returns all forwarder addresses for a chain. 
-func (o *orm) FindForwardersByChain(ctx context.Context, evmChainId big.Big) (fwds []Forwarder, err error) { +func (o *DbORM) FindForwardersByChain(ctx context.Context, evmChainId big.Big) (fwds []Forwarder, err error) { sql := `SELECT * FROM evm.forwarders where evm_chain_id = $1 ORDER BY created_at DESC, id DESC` err = o.db.SelectContext(ctx, &fwds, sql, evmChainId) return } -func (o *orm) FindForwardersInListByChain(ctx context.Context, evmChainId big.Big, addrs []common.Address) ([]Forwarder, error) { +func (o *DbORM) FindForwardersInListByChain(ctx context.Context, evmChainId big.Big, addrs []common.Address) ([]Forwarder, error) { var fwdrs []Forwarder arg := map[string]interface{}{ diff --git a/core/chains/evm/headtracker/orm.go b/core/chains/evm/headtracker/orm.go index 6788864b51f..d3834b932ca 100644 --- a/core/chains/evm/headtracker/orm.go +++ b/core/chains/evm/headtracker/orm.go @@ -27,23 +27,23 @@ type ORM interface { HeadByHash(ctx context.Context, hash common.Hash) (head *evmtypes.Head, err error) } -var _ ORM = &orm{} +var _ ORM = &DbORM{} -type orm struct { +type DbORM struct { chainID ubig.Big db sqlutil.Queryer } // NewORM creates an ORM scoped to chainID. 
-func NewORM(chainID big.Int, db sqlutil.Queryer) ORM { - return &orm{ +func NewORM(chainID big.Int, db sqlutil.Queryer) *DbORM { + return &DbORM{ chainID: ubig.Big(chainID), db: db, } } -func (orm *orm) IdempotentInsertHead(ctx context.Context, head *evmtypes.Head) error { - // listener guarantees head.EVMChainID to be equal to orm.chainID +func (orm *DbORM) IdempotentInsertHead(ctx context.Context, head *evmtypes.Head) error { + // listener guarantees head.EVMChainID to be equal to DbORM.chainID query := ` INSERT INTO evm.heads (hash, number, parent_hash, created_at, timestamp, l1_block_number, evm_chain_id, base_fee_per_gas) VALUES ( $1, $2, $3, $4, $5, $6, $7, $8) @@ -52,7 +52,7 @@ func (orm *orm) IdempotentInsertHead(ctx context.Context, head *evmtypes.Head) e return pkgerrors.Wrap(err, "IdempotentInsertHead failed to insert head") } -func (orm *orm) TrimOldHeads(ctx context.Context, n uint) (err error) { +func (orm *DbORM) TrimOldHeads(ctx context.Context, n uint) (err error) { _, err = orm.db.ExecContext(ctx, ` DELETE FROM evm.heads WHERE evm_chain_id = $1 AND number < ( @@ -68,7 +68,7 @@ func (orm *orm) TrimOldHeads(ctx context.Context, n uint) (err error) { return err } -func (orm *orm) LatestHead(ctx context.Context) (head *evmtypes.Head, err error) { +func (orm *DbORM) LatestHead(ctx context.Context) (head *evmtypes.Head, err error) { head = new(evmtypes.Head) err = orm.db.GetContext(ctx, head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC, created_at DESC, id DESC LIMIT 1`, orm.chainID) if pkgerrors.Is(err, sql.ErrNoRows) { @@ -78,13 +78,13 @@ func (orm *orm) LatestHead(ctx context.Context) (head *evmtypes.Head, err error) return } -func (orm *orm) LatestHeads(ctx context.Context, limit uint) (heads []*evmtypes.Head, err error) { +func (orm *DbORM) LatestHeads(ctx context.Context, limit uint) (heads []*evmtypes.Head, err error) { err = orm.db.SelectContext(ctx, &heads, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY 
number DESC, created_at DESC, id DESC LIMIT $2`, orm.chainID, limit) err = pkgerrors.Wrap(err, "LatestHeads failed") return } -func (orm *orm) HeadByHash(ctx context.Context, hash common.Hash) (head *evmtypes.Head, err error) { +func (orm *DbORM) HeadByHash(ctx context.Context, hash common.Hash) (head *evmtypes.Head, err error) { head = new(evmtypes.Head) err = orm.db.GetContext(ctx, head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 AND hash = $2`, orm.chainID, hash) if pkgerrors.Is(err, sql.ErrNoRows) { diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index 62a002cae18..53a55046394 100644 --- a/core/chains/evm/logpoller/orm.go +++ b/core/chains/evm/logpoller/orm.go @@ -60,32 +60,32 @@ type ORM interface { SelectLogsDataWordBetween(ctx context.Context, address common.Address, eventSig common.Hash, wordIndexMin int, wordIndexMax int, wordValue common.Hash, confs Confirmations) ([]Log, error) } -type orm struct { +type DbORM struct { chainID *big.Int db sqlutil.Queryer lggr logger.Logger } -var _ ORM = &orm{} +var _ ORM = &DbORM{} -// NewORM creates an orm scoped to chainID. -func NewORM(chainID *big.Int, db sqlutil.Queryer, lggr logger.Logger) ORM { - return &orm{ +// NewORM creates an DbORM scoped to chainID. +func NewORM(chainID *big.Int, db sqlutil.Queryer, lggr logger.Logger) *DbORM { + return &DbORM{ chainID: chainID, db: db, lggr: lggr, } } -func (o *orm) Transaction(ctx context.Context, fn func(*orm) error) (err error) { +func (o *DbORM) Transaction(ctx context.Context, fn func(*DbORM) error) (err error) { return sqlutil.Transact(ctx, o.new, o.db, nil, fn) } // new returns a NewORM like o, but backed by q. -func (o *orm) new(q sqlutil.Queryer) *orm { return NewORM(o.chainID, q, o.lggr).(*orm) } +func (o *DbORM) new(q sqlutil.Queryer) *DbORM { return NewORM(o.chainID, q, o.lggr) } // InsertBlock is idempotent to support replays. 
-func (o *orm) InsertBlock(ctx context.Context, blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, finalizedBlock int64) error { +func (o *DbORM) InsertBlock(ctx context.Context, blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, finalizedBlock int64) error { args, err := newQueryArgs(o.chainID). withCustomHashArg("block_hash", blockHash). withCustomArg("block_number", blockNumber). @@ -108,7 +108,7 @@ func (o *orm) InsertBlock(ctx context.Context, blockHash common.Hash, blockNumbe // // Each address/event pair must have a unique job id, so it may be removed when the job is deleted. // If a second job tries to overwrite the same pair, this should fail. -func (o *orm) InsertFilter(ctx context.Context, filter Filter) (err error) { +func (o *DbORM) InsertFilter(ctx context.Context, filter Filter) (err error) { topicArrays := []types.HashArray{filter.Topic2, filter.Topic3, filter.Topic4} args, err := newQueryArgs(o.chainID). withCustomArg("name", filter.Name). 
@@ -151,7 +151,7 @@ func (o *orm) InsertFilter(ctx context.Context, filter Filter) (err error) { } // DeleteFilter removes all events,address pairs associated with the Filter -func (o *orm) DeleteFilter(ctx context.Context, name string) error { +func (o *DbORM) DeleteFilter(ctx context.Context, name string) error { _, err := o.db.ExecContext(ctx, `DELETE FROM evm.log_poller_filters WHERE name = $1 AND evm_chain_id = $2`, name, ubig.New(o.chainID)) @@ -160,7 +160,7 @@ func (o *orm) DeleteFilter(ctx context.Context, name string) error { } // LoadFilters returns all filters for this chain -func (o *orm) LoadFilters(ctx context.Context) (map[string]Filter, error) { +func (o *DbORM) LoadFilters(ctx context.Context) (map[string]Filter, error) { query := `SELECT name, ARRAY_AGG(DISTINCT address)::BYTEA[] AS addresses, ARRAY_AGG(DISTINCT event)::BYTEA[] AS event_sigs, @@ -181,7 +181,7 @@ func (o *orm) LoadFilters(ctx context.Context) (map[string]Filter, error) { return filters, err } -func (o *orm) SelectBlockByHash(ctx context.Context, hash common.Hash) (*LogPollerBlock, error) { +func (o *DbORM) SelectBlockByHash(ctx context.Context, hash common.Hash) (*LogPollerBlock, error) { var b LogPollerBlock if err := o.db.GetContext(ctx, &b, `SELECT * FROM evm.log_poller_blocks WHERE block_hash = $1 AND evm_chain_id = $2`, hash.Bytes(), ubig.New(o.chainID)); err != nil { return nil, err @@ -189,7 +189,7 @@ func (o *orm) SelectBlockByHash(ctx context.Context, hash common.Hash) (*LogPoll return &b, nil } -func (o *orm) SelectBlockByNumber(ctx context.Context, n int64) (*LogPollerBlock, error) { +func (o *DbORM) SelectBlockByNumber(ctx context.Context, n int64) (*LogPollerBlock, error) { var b LogPollerBlock if err := o.db.GetContext(ctx, &b, `SELECT * FROM evm.log_poller_blocks WHERE block_number = $1 AND evm_chain_id = $2`, n, ubig.New(o.chainID)); err != nil { return nil, err @@ -197,7 +197,7 @@ func (o *orm) SelectBlockByNumber(ctx context.Context, n int64) (*LogPollerBlock 
return &b, nil } -func (o *orm) SelectLatestBlock(ctx context.Context) (*LogPollerBlock, error) { +func (o *DbORM) SelectLatestBlock(ctx context.Context) (*LogPollerBlock, error) { var b LogPollerBlock if err := o.db.GetContext(ctx, &b, `SELECT * FROM evm.log_poller_blocks WHERE evm_chain_id = $1 ORDER BY block_number DESC LIMIT 1`, ubig.New(o.chainID)); err != nil { return nil, err @@ -205,7 +205,7 @@ func (o *orm) SelectLatestBlock(ctx context.Context) (*LogPollerBlock, error) { return &b, nil } -func (o *orm) SelectLatestLogByEventSigWithConfs(ctx context.Context, eventSig common.Hash, address common.Address, confs Confirmations) (*Log, error) { +func (o *DbORM) SelectLatestLogByEventSigWithConfs(ctx context.Context, eventSig common.Hash, address common.Address, confs Confirmations) (*Log, error) { args, err := newQueryArgsForEvent(o.chainID, address, eventSig). withConfs(confs). toArgs() @@ -228,10 +228,10 @@ func (o *orm) SelectLatestLogByEventSigWithConfs(ctx context.Context, eventSig c return &l, nil } -// DeleteBlocksBefore delete all blocks before and including end. -func (o *orm) DeleteBlocksBefore(ctx context.Context, end int64, limit int64) (int64, error) { +// DeleteBlocksBefore delete blocks before and including end. When limit is set, it will delete at most limit blocks. +// Otherwise, it will delete all blocks at once. 
+func (o *DbORM) DeleteBlocksBefore(ctx context.Context, end int64, limit int64) (int64, error) { if limit > 0 { - fmt.Println("Deleting all blocks before with limit", end, limit) result, err := o.db.ExecContext(ctx, `DELETE FROM evm.log_poller_blocks WHERE block_number IN ( @@ -245,17 +245,16 @@ func (o *orm) DeleteBlocksBefore(ctx context.Context, end int64, limit int64) (i rowsAffected, _ := result.RowsAffected() return rowsAffected, err } - fmt.Println("Deleting all blocks before", end) result, err := o.db.ExecContext(ctx, `DELETE FROM evm.log_poller_blocks WHERE block_number <= $1 AND evm_chain_id = $2`, end, ubig.New(o.chainID)) rowsAffected, _ := result.RowsAffected() return rowsAffected, err } -func (o *orm) DeleteLogsAndBlocksAfter(ctx context.Context, start int64) error { +func (o *DbORM) DeleteLogsAndBlocksAfter(ctx context.Context, start int64) error { // These deletes are bounded by reorg depth, so they are // fast and should not slow down the log readers. - return o.Transaction(ctx, func(orm *orm) error { + return o.Transaction(ctx, func(orm *DbORM) error { // Applying upper bound filter is critical for Postgres performance (especially for evm.logs table) // because it allows the planner to properly estimate the number of rows to be scanned. // If not applied, these queries can become very slow. After some critical number @@ -294,7 +293,7 @@ type Exp struct { ShouldDelete bool } -func (o *orm) DeleteExpiredLogs(ctx context.Context, limit int64) (int64, error) { +func (o *DbORM) DeleteExpiredLogs(ctx context.Context, limit int64) (int64, error) { var err error var result sql.Result if limit > 0 { @@ -329,16 +328,16 @@ func (o *orm) DeleteExpiredLogs(ctx context.Context, limit int64) (int64, error) } // InsertLogs is idempotent to support replays. 
-func (o *orm) InsertLogs(ctx context.Context, logs []Log) error { +func (o *DbORM) InsertLogs(ctx context.Context, logs []Log) error { if err := o.validateLogs(logs); err != nil { return err } - return o.Transaction(ctx, func(orm *orm) error { + return o.Transaction(ctx, func(orm *DbORM) error { return o.insertLogsWithinTx(ctx, logs, orm.db.(*sqlx.Tx)) }) } -func (o *orm) InsertLogsWithBlock(ctx context.Context, logs []Log, block LogPollerBlock) error { +func (o *DbORM) InsertLogsWithBlock(ctx context.Context, logs []Log, block LogPollerBlock) error { // Optimization, don't open TX when there is only a block to be persisted if len(logs) == 0 { return o.InsertBlock(ctx, block.BlockHash, block.BlockNumber, block.BlockTimestamp, block.FinalizedBlockNumber) @@ -349,7 +348,7 @@ func (o *orm) InsertLogsWithBlock(ctx context.Context, logs []Log, block LogPoll } // Block and logs goes with the same TX to ensure atomicity - return o.Transaction(ctx, func(orm *orm) error { + return o.Transaction(ctx, func(orm *DbORM) error { if err := o.insertBlockWithinTx(ctx, orm.db.(*sqlx.Tx), block.BlockHash, block.BlockNumber, block.BlockTimestamp, block.FinalizedBlockNumber); err != nil { return err } @@ -357,7 +356,7 @@ func (o *orm) InsertLogsWithBlock(ctx context.Context, logs []Log, block LogPoll }) } -func (o *orm) insertBlockWithinTx(ctx context.Context, tx sqlutil.Queryer, blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, finalizedBlock int64) error { +func (o *DbORM) insertBlockWithinTx(ctx context.Context, tx sqlutil.Queryer, blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, finalizedBlock int64) error { query := `INSERT INTO evm.log_poller_blocks (evm_chain_id, block_hash, block_number, block_timestamp, finalized_block_number, created_at) VALUES ($1, $2, $3, $4, $5, NOW()) ON CONFLICT DO NOTHING` @@ -365,7 +364,7 @@ func (o *orm) insertBlockWithinTx(ctx context.Context, tx sqlutil.Queryer, block return err } -func (o *orm) 
insertLogsWithinTx(ctx context.Context, logs []Log, tx sqlutil.Queryer) error { +func (o *DbORM) insertLogsWithinTx(ctx context.Context, logs []Log, tx sqlutil.Queryer) error { batchInsertSize := 4000 for i := 0; i < len(logs); i += batchInsertSize { start, end := i, i+batchInsertSize @@ -395,7 +394,7 @@ func (o *orm) insertLogsWithinTx(ctx context.Context, logs []Log, tx sqlutil.Que return nil } -func (o *orm) validateLogs(logs []Log) error { +func (o *DbORM) validateLogs(logs []Log) error { for _, log := range logs { if o.chainID.Cmp(log.EvmChainId.ToInt()) != 0 { return pkgerrors.Errorf("invalid chainID in log got %v want %v", log.EvmChainId.ToInt(), o.chainID) @@ -404,7 +403,7 @@ func (o *orm) validateLogs(logs []Log) error { return nil } -func (o *orm) SelectLogsByBlockRange(ctx context.Context, start, end int64) ([]Log, error) { +func (o *DbORM) SelectLogsByBlockRange(ctx context.Context, start, end int64) ([]Log, error) { args, err := newQueryArgs(o.chainID). withStartBlock(start). withEndBlock(end). @@ -429,7 +428,7 @@ func (o *orm) SelectLogsByBlockRange(ctx context.Context, start, end int64) ([]L } // SelectLogs finds the logs in a given block range. -func (o *orm) SelectLogs(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash) ([]Log, error) { +func (o *DbORM) SelectLogs(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash) ([]Log, error) { args, err := newQueryArgsForEvent(o.chainID, address, eventSig). withStartBlock(start). withEndBlock(end). @@ -456,7 +455,7 @@ func (o *orm) SelectLogs(ctx context.Context, start, end int64, address common.A } // SelectLogsCreatedAfter finds logs created after some timestamp. 
-func (o *orm) SelectLogsCreatedAfter(ctx context.Context, address common.Address, eventSig common.Hash, after time.Time, confs Confirmations) ([]Log, error) { +func (o *DbORM) SelectLogsCreatedAfter(ctx context.Context, address common.Address, eventSig common.Hash, after time.Time, confs Confirmations) ([]Log, error) { args, err := newQueryArgsForEvent(o.chainID, address, eventSig). withBlockTimestampAfter(after). withConfs(confs). @@ -484,7 +483,7 @@ func (o *orm) SelectLogsCreatedAfter(ctx context.Context, address common.Address // SelectLogsWithSigs finds the logs in the given block range with the given event signatures // emitted from the given address. -func (o *orm) SelectLogsWithSigs(ctx context.Context, start, end int64, address common.Address, eventSigs []common.Hash) (logs []Log, err error) { +func (o *DbORM) SelectLogsWithSigs(ctx context.Context, start, end int64, address common.Address, eventSigs []common.Hash) (logs []Log, err error) { args, err := newQueryArgs(o.chainID). withAddress(address). withEventSigArray(eventSigs). @@ -510,7 +509,7 @@ func (o *orm) SelectLogsWithSigs(ctx context.Context, start, end int64, address return logs, err } -func (o *orm) GetBlocksRange(ctx context.Context, start int64, end int64) ([]LogPollerBlock, error) { +func (o *DbORM) GetBlocksRange(ctx context.Context, start int64, end int64) ([]LogPollerBlock, error) { args, err := newQueryArgs(o.chainID). withStartBlock(start). withEndBlock(end). 
@@ -535,7 +534,7 @@ func (o *orm) GetBlocksRange(ctx context.Context, start int64, end int64) ([]Log } // SelectLatestLogEventSigsAddrsWithConfs finds the latest log by (address, event) combination that matches a list of Addresses and list of events -func (o *orm) SelectLatestLogEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, addresses []common.Address, eventSigs []common.Hash, confs Confirmations) ([]Log, error) { +func (o *DbORM) SelectLatestLogEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, addresses []common.Address, eventSigs []common.Hash, confs Confirmations) ([]Log, error) { args, err := newQueryArgs(o.chainID). withAddressArray(addresses). withEventSigArray(eventSigs). @@ -567,7 +566,7 @@ func (o *orm) SelectLatestLogEventSigsAddrsWithConfs(ctx context.Context, fromBl } // SelectLatestBlockByEventSigsAddrsWithConfs finds the latest block number that matches a list of Addresses and list of events. It returns 0 if there is no matching block -func (o *orm) SelectLatestBlockByEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) (int64, error) { +func (o *DbORM) SelectLatestBlockByEventSigsAddrsWithConfs(ctx context.Context, fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations) (int64, error) { args, err := newQueryArgs(o.chainID). withEventSigArray(eventSigs). withAddressArray(addresses). 
@@ -593,7 +592,7 @@ func (o *orm) SelectLatestBlockByEventSigsAddrsWithConfs(ctx context.Context, fr return blockNumber, nil } -func (o *orm) SelectLogsDataWordRange(ctx context.Context, address common.Address, eventSig common.Hash, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations) ([]Log, error) { +func (o *DbORM) SelectLogsDataWordRange(ctx context.Context, address common.Address, eventSig common.Hash, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations) ([]Log, error) { args, err := newQueryArgsForEvent(o.chainID, address, eventSig). withWordIndex(wordIndex). withWordValueMin(wordValueMin). @@ -621,7 +620,7 @@ func (o *orm) SelectLogsDataWordRange(ctx context.Context, address common.Addres return logs, nil } -func (o *orm) SelectLogsDataWordGreaterThan(ctx context.Context, address common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, confs Confirmations) ([]Log, error) { +func (o *DbORM) SelectLogsDataWordGreaterThan(ctx context.Context, address common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, confs Confirmations) ([]Log, error) { args, err := newQueryArgsForEvent(o.chainID, address, eventSig). withWordIndex(wordIndex). withWordValueMin(wordValueMin). @@ -648,7 +647,7 @@ func (o *orm) SelectLogsDataWordGreaterThan(ctx context.Context, address common. return logs, nil } -func (o *orm) SelectLogsDataWordBetween(ctx context.Context, address common.Address, eventSig common.Hash, wordIndexMin int, wordIndexMax int, wordValue common.Hash, confs Confirmations) ([]Log, error) { +func (o *DbORM) SelectLogsDataWordBetween(ctx context.Context, address common.Address, eventSig common.Hash, wordIndexMin int, wordIndexMax int, wordValue common.Hash, confs Confirmations) ([]Log, error) { args, err := newQueryArgsForEvent(o.chainID, address, eventSig). withWordIndexMin(wordIndexMin). withWordIndexMax(wordIndexMax). 
@@ -676,7 +675,7 @@ func (o *orm) SelectLogsDataWordBetween(ctx context.Context, address common.Addr return logs, nil } -func (o *orm) SelectIndexedLogsTopicGreaterThan(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValueMin common.Hash, confs Confirmations) ([]Log, error) { +func (o *DbORM) SelectIndexedLogsTopicGreaterThan(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValueMin common.Hash, confs Confirmations) ([]Log, error) { args, err := newQueryArgsForEvent(o.chainID, address, eventSig). withTopicIndex(topicIndex). withTopicValueMin(topicValueMin). @@ -703,7 +702,7 @@ func (o *orm) SelectIndexedLogsTopicGreaterThan(ctx context.Context, address com return logs, nil } -func (o *orm) SelectIndexedLogsTopicRange(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValueMin, topicValueMax common.Hash, confs Confirmations) ([]Log, error) { +func (o *DbORM) SelectIndexedLogsTopicRange(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValueMin, topicValueMax common.Hash, confs Confirmations) ([]Log, error) { args, err := newQueryArgsForEvent(o.chainID, address, eventSig). withTopicIndex(topicIndex). withTopicValueMin(topicValueMin). @@ -732,7 +731,7 @@ func (o *orm) SelectIndexedLogsTopicRange(ctx context.Context, address common.Ad return logs, nil } -func (o *orm) SelectIndexedLogs(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, confs Confirmations) ([]Log, error) { +func (o *DbORM) SelectIndexedLogs(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, confs Confirmations) ([]Log, error) { args, err := newQueryArgsForEvent(o.chainID, address, eventSig). withTopicIndex(topicIndex). withTopicValues(topicValues). 
@@ -760,7 +759,7 @@ func (o *orm) SelectIndexedLogs(ctx context.Context, address common.Address, eve } // SelectIndexedLogsByBlockRange finds the indexed logs in a given block range. -func (o *orm) SelectIndexedLogsByBlockRange(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash) ([]Log, error) { +func (o *DbORM) SelectIndexedLogsByBlockRange(ctx context.Context, start, end int64, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash) ([]Log, error) { args, err := newQueryArgsForEvent(o.chainID, address, eventSig). withTopicIndex(topicIndex). withTopicValues(topicValues). @@ -789,7 +788,7 @@ func (o *orm) SelectIndexedLogsByBlockRange(ctx context.Context, start, end int6 return logs, nil } -func (o *orm) SelectIndexedLogsCreatedAfter(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations) ([]Log, error) { +func (o *DbORM) SelectIndexedLogsCreatedAfter(ctx context.Context, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations) ([]Log, error) { args, err := newQueryArgsForEvent(o.chainID, address, eventSig). withBlockTimestampAfter(after). withConfs(confs). @@ -818,7 +817,7 @@ func (o *orm) SelectIndexedLogsCreatedAfter(ctx context.Context, address common. return logs, nil } -func (o *orm) SelectIndexedLogsByTxHash(ctx context.Context, address common.Address, eventSig common.Hash, txHash common.Hash) ([]Log, error) { +func (o *DbORM) SelectIndexedLogsByTxHash(ctx context.Context, address common.Address, eventSig common.Hash, txHash common.Hash) ([]Log, error) { args, err := newQueryArgs(o.chainID). withTxHash(txHash). withAddress(address). 
@@ -845,7 +844,7 @@ func (o *orm) SelectIndexedLogsByTxHash(ctx context.Context, address common.Addr } // SelectIndexedLogsWithSigsExcluding query's for logs that have signature A and exclude logs that have a corresponding signature B, matching is done based on the topic index both logs should be inside the block range and have the minimum number of confirmations -func (o *orm) SelectIndexedLogsWithSigsExcluding(ctx context.Context, sigA, sigB common.Hash, topicIndex int, address common.Address, startBlock, endBlock int64, confs Confirmations) ([]Log, error) { +func (o *DbORM) SelectIndexedLogsWithSigsExcluding(ctx context.Context, sigA, sigB common.Hash, topicIndex int, address common.Address, startBlock, endBlock int64, confs Confirmations) ([]Log, error) { args, err := newQueryArgs(o.chainID). withAddress(address). withTopicIndex(topicIndex). From a889f7f9e88f690e4d96ea79f6a8a203a9b8469d Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Tue, 5 Mar 2024 10:18:39 -0500 Subject: [PATCH 54/65] Update orm.go --- core/chains/evm/logpoller/orm.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index 53a55046394..8d1ea8149e8 100644 --- a/core/chains/evm/logpoller/orm.go +++ b/core/chains/evm/logpoller/orm.go @@ -902,12 +902,3 @@ func nestedBlockNumberQuery(confs Confirmations) string { WHERE evm_chain_id = :evm_chain_id ORDER BY block_number DESC LIMIT 1) ` } - -func UseTopicIndex(index int) (int, error) { - // Only topicIndex 1 through 3 is valid. 0 is the event sig and only 4 total topics are allowed - if !(index == 1 || index == 2 || index == 3) { - return 0, fmt.Errorf("invalid index for topic: %d", index) - } - // Add 1 since postgresql arrays are 1-indexed. 
- return index + 1, nil -} From f585fa3628a6856436416a0a0176946fd2998478 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Tue, 5 Mar 2024 11:13:23 -0500 Subject: [PATCH 55/65] Pass context --- .../ocr2keeper/evmregistry/v20/registry.go | 9 ++---- .../evmregistry/v21/logprovider/provider.go | 4 +-- .../v21/logprovider/provider_life_cycle.go | 8 ++--- .../logprovider/provider_life_cycle_test.go | 5 +-- .../ocr2keeper/evmregistry/v21/registry.go | 12 +++---- .../evmregistry/v21/registry_test.go | 32 +++++++++---------- .../v21/transmit/event_provider.go | 3 +- .../v21/transmit/event_provider_test.go | 5 +-- core/services/relay/evm/ocr2keeper.go | 2 +- 9 files changed, 37 insertions(+), 43 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go index 71d45fcb203..bf1fe7ba1cb 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go @@ -366,11 +366,8 @@ func (r *EvmRegistry) pollLogs() error { { var logs []logpoller.Log - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() if logs, err = r.poller.LogsWithSigs( - ctx, + r.ctx, end.BlockNumber-logEventLookback, end.BlockNumber, upkeepStateEvents, @@ -394,9 +391,7 @@ func UpkeepFilterName(addr common.Address) string { func (r *EvmRegistry) registerEvents(chainID uint64, addr common.Address) error { // Add log filters for the log poller so that it can poll and find the logs that // we need - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - return r.poller.RegisterFilter(ctx, logpoller.Filter{ + return r.poller.RegisterFilter(r.ctx, logpoller.Filter{ Name: UpkeepFilterName(addr), EventSigs: append(upkeepStateEvents, upkeepActiveEvents...), Addresses: []common.Address{addr}, diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index caac8eb393d..b7f445517b5 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -61,13 +61,13 @@ type LogTriggersLifeCycle interface { // RegisterFilter registers the filter (if valid) for the given upkeepID. RegisterFilter(ctx context.Context, opts FilterOptions) error // UnregisterFilter removes the filter for the given upkeepID. - UnregisterFilter(upkeepID *big.Int) error + UnregisterFilter(ctx context.Context, upkeepID *big.Int) error } type LogEventProvider interface { ocr2keepers.LogEventProvider LogTriggersLifeCycle - RefreshActiveUpkeeps(ids ...*big.Int) ([]*big.Int, error) + RefreshActiveUpkeeps(ctx context.Context, ids ...*big.Int) ([]*big.Int, error) Start(context.Context) error io.Closer diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go index 9109890392f..ae6a373ad22 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go @@ -21,7 +21,7 @@ var ( LogBackfillBuffer = 100 ) -func (p *logEventProvider) RefreshActiveUpkeeps(ids ...*big.Int) ([]*big.Int, error) { +func (p *logEventProvider) RefreshActiveUpkeeps(ctx context.Context, ids ...*big.Int) ([]*big.Int, error) { // Exploratory: investigate how we can batch the refresh if len(ids) == 0 { return nil, nil @@ -41,7 +41,7 @@ func (p *logEventProvider) RefreshActiveUpkeeps(ids ...*big.Int) ([]*big.Int, er if len(inactiveIDs) > 0 { p.lggr.Debugw("Removing inactive upkeeps", "upkeeps", len(inactiveIDs)) for _, id := range inactiveIDs { - if err := p.UnregisterFilter(id); err != nil { + if err := p.UnregisterFilter(ctx, id); 
err != nil { merr = errors.Join(merr, fmt.Errorf("failed to unregister filter: %s", id.String())) } } @@ -143,11 +143,9 @@ func (p *logEventProvider) register(ctx context.Context, lpFilter logpoller.Filt return nil } -func (p *logEventProvider) UnregisterFilter(upkeepID *big.Int) error { +func (p *logEventProvider) UnregisterFilter(ctx context.Context, upkeepID *big.Int) error { // Filter might have been unregistered already, only try to unregister if it exists if p.poller.HasFilter(p.filterName(upkeepID)) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() if err := p.poller.UnregisterFilter(ctx, p.filterName(upkeepID)); err != nil { return fmt.Errorf("failed to unregister upkeep filter %s: %w", upkeepID.String(), err) } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle_test.go index 80db1241a1f..5d87a986a56 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle_test.go @@ -136,7 +136,7 @@ func TestLogEventProvider_LifeCycle(t *testing.T) { } else { require.NoError(t, err) if tc.unregister { - require.NoError(t, p.UnregisterFilter(tc.upkeepID)) + require.NoError(t, p.UnregisterFilter(ctx, tc.upkeepID)) } } }) @@ -172,11 +172,12 @@ func TestEventLogProvider_RefreshActiveUpkeeps(t *testing.T) { })) require.Equal(t, 2, p.filterStore.Size()) - newIds, err := p.RefreshActiveUpkeeps() + newIds, err := p.RefreshActiveUpkeeps(ctx) require.NoError(t, err) require.Len(t, newIds, 0) mp.On("HasFilter", p.filterName(core.GenUpkeepID(types.LogTrigger, "2222").BigInt())).Return(true) newIds, err = p.RefreshActiveUpkeeps( + ctx, core.GenUpkeepID(types.LogTrigger, "2222").BigInt(), core.GenUpkeepID(types.LogTrigger, "1234").BigInt(), core.GenUpkeepID(types.LogTrigger, 
"123").BigInt()) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go index d6f1fa1e4af..9cd1cc01634 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go @@ -303,7 +303,7 @@ func (r *EvmRegistry) refreshActiveUpkeeps() error { } } - _, err = r.logEventProvider.RefreshActiveUpkeeps(logTriggerIDs...) + _, err = r.logEventProvider.RefreshActiveUpkeeps(r.ctx, logTriggerIDs...) if err != nil { return fmt.Errorf("failed to refresh active upkeep ids in log event provider: %w", err) } @@ -457,13 +457,13 @@ func (r *EvmRegistry) processUpkeepStateLog(l logpoller.Log) error { switch l := abilog.(type) { case *iregistry21.IKeeperRegistryMasterUpkeepPaused: r.lggr.Debugf("KeeperRegistryUpkeepPaused log detected for upkeep ID %s in transaction %s", l.Id.String(), txHash) - r.removeFromActive(l.Id) + r.removeFromActive(r.ctx, l.Id) case *iregistry21.IKeeperRegistryMasterUpkeepCanceled: r.lggr.Debugf("KeeperRegistryUpkeepCanceled log detected for upkeep ID %s in transaction %s", l.Id.String(), txHash) - r.removeFromActive(l.Id) + r.removeFromActive(r.ctx, l.Id) case *iregistry21.IKeeperRegistryMasterUpkeepMigrated: r.lggr.Debugf("KeeperRegistryMasterUpkeepMigrated log detected for upkeep ID %s in transaction %s", l.Id.String(), txHash) - r.removeFromActive(l.Id) + r.removeFromActive(r.ctx, l.Id) case *iregistry21.IKeeperRegistryMasterUpkeepTriggerConfigSet: r.lggr.Debugf("KeeperRegistryUpkeepTriggerConfigSet log detected for upkeep ID %s in transaction %s", l.Id.String(), txHash) if err := r.updateTriggerConfig(l.Id, l.TriggerConfig, rawLog.BlockNumber); err != nil { @@ -512,7 +512,7 @@ func (r *EvmRegistry) registerEvents(_ uint64, addr common.Address) error { } // removeFromActive removes an upkeepID from active list and unregisters the log filter for log upkeeps -func (r *EvmRegistry) 
removeFromActive(id *big.Int) { +func (r *EvmRegistry) removeFromActive(ctx context.Context, id *big.Int) { r.active.Remove(id) uid := &ocr2keepers.UpkeepIdentifier{} @@ -520,7 +520,7 @@ func (r *EvmRegistry) removeFromActive(id *big.Int) { trigger := core.GetUpkeepType(*uid) switch trigger { case types2.LogTrigger: - if err := r.logEventProvider.UnregisterFilter(id); err != nil { + if err := r.logEventProvider.UnregisterFilter(ctx, id); err != nil { r.lggr.Warnw("failed to unregister log filter", "upkeepID", id.String()) } r.lggr.Debugw("unregistered log filter", "upkeepID", id.String()) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go index 2a147b4faa4..10effd7a81f 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go @@ -213,7 +213,7 @@ func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { core.GenUpkeepID(types2.LogTrigger, "abc").BigInt(), }, logEventProvider: &mockLogEventProvider{ - RefreshActiveUpkeepsFn: func(ids ...*big.Int) ([]*big.Int, error) { + RefreshActiveUpkeepsFn: func(ctx context.Context, ids ...*big.Int) ([]*big.Int, error) { // of the ids specified in the test, only one is a valid log trigger upkeep assert.Equal(t, 1, len(ids)) return ids, nil @@ -238,7 +238,7 @@ func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { big.NewInt(-1), }, logEventProvider: &mockLogEventProvider{ - RefreshActiveUpkeepsFn: func(ids ...*big.Int) ([]*big.Int, error) { + RefreshActiveUpkeepsFn: func(ctx context.Context, ids ...*big.Int) ([]*big.Int, error) { // of the ids specified in the test, only one is a valid log trigger upkeep assert.Equal(t, 1, len(ids)) return ids, nil @@ -263,7 +263,7 @@ func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { big.NewInt(-1), }, logEventProvider: &mockLogEventProvider{ - RefreshActiveUpkeepsFn: func(ids 
...*big.Int) ([]*big.Int, error) { + RefreshActiveUpkeepsFn: func(ctx context.Context, ids ...*big.Int) ([]*big.Int, error) { // of the ids specified in the test, only one is a valid log trigger upkeep assert.Equal(t, 1, len(ids)) return ids, nil @@ -292,12 +292,12 @@ func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { big.NewInt(-1), }, logEventProvider: &mockLogEventProvider{ - RefreshActiveUpkeepsFn: func(ids ...*big.Int) ([]*big.Int, error) { + RefreshActiveUpkeepsFn: func(ctx context.Context, ids ...*big.Int) ([]*big.Int, error) { // of the ids specified in the test, only one is a valid log trigger upkeep assert.Equal(t, 1, len(ids)) return ids, nil }, - RegisterFilterFn: func(opts logprovider.FilterOptions) error { + RegisterFilterFn: func(ctx context.Context, opts logprovider.FilterOptions) error { return errors.New("register filter boom") }, }, @@ -346,12 +346,12 @@ func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { big.NewInt(-1), }, logEventProvider: &mockLogEventProvider{ - RefreshActiveUpkeepsFn: func(ids ...*big.Int) ([]*big.Int, error) { + RefreshActiveUpkeepsFn: func(ctx context.Context, ids ...*big.Int) ([]*big.Int, error) { // of the ids specified in the test, only two are a valid log trigger upkeep assert.Equal(t, 2, len(ids)) return ids, nil }, - RegisterFilterFn: func(opts logprovider.FilterOptions) error { + RegisterFilterFn: func(ctx context.Context, opts logprovider.FilterOptions) error { return nil }, }, @@ -399,11 +399,11 @@ func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { return res }(), logEventProvider: &mockLogEventProvider{ - RefreshActiveUpkeepsFn: func(ids ...*big.Int) ([]*big.Int, error) { + RefreshActiveUpkeepsFn: func(ctx context.Context, ids ...*big.Int) ([]*big.Int, error) { assert.Equal(t, logTriggerRefreshBatchSize, len(ids)) return ids, nil }, - RegisterFilterFn: func(opts logprovider.FilterOptions) error { + RegisterFilterFn: func(ctx context.Context, opts logprovider.FilterOptions) error { return 
nil }, }, @@ -451,13 +451,13 @@ func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { return res }(), logEventProvider: &mockLogEventProvider{ - RefreshActiveUpkeepsFn: func(ids ...*big.Int) ([]*big.Int, error) { + RefreshActiveUpkeepsFn: func(ctx context.Context, ids ...*big.Int) ([]*big.Int, error) { if len(ids) != logTriggerRefreshBatchSize { assert.Equal(t, 3, len(ids)) } return ids, nil }, - RegisterFilterFn: func(opts logprovider.FilterOptions) error { + RegisterFilterFn: func(ctx context.Context, opts logprovider.FilterOptions) error { return nil }, }, @@ -527,16 +527,16 @@ func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { type mockLogEventProvider struct { logprovider.LogEventProvider - RefreshActiveUpkeepsFn func(ids ...*big.Int) ([]*big.Int, error) - RegisterFilterFn func(opts logprovider.FilterOptions) error + RefreshActiveUpkeepsFn func(ctx context.Context, ids ...*big.Int) ([]*big.Int, error) + RegisterFilterFn func(ctx context.Context, opts logprovider.FilterOptions) error } -func (p *mockLogEventProvider) RefreshActiveUpkeeps(ids ...*big.Int) ([]*big.Int, error) { - return p.RefreshActiveUpkeepsFn(ids...) +func (p *mockLogEventProvider) RefreshActiveUpkeeps(ctx context.Context, ids ...*big.Int) ([]*big.Int, error) { + return p.RefreshActiveUpkeepsFn(ctx, ids...) 
} func (p *mockLogEventProvider) RegisterFilter(ctx context.Context, opts logprovider.FilterOptions) error { - return p.RegisterFilterFn(opts) + return p.RegisterFilterFn(ctx, opts) } type mockRegistry struct { diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider.go index a627fdd0b1e..bd8c9a528e5 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider.go @@ -48,6 +48,7 @@ func EventProviderFilterName(addr common.Address) string { } func NewTransmitEventProvider( + ctx context.Context, logger logger.Logger, logPoller logpoller.LogPoller, registryAddress common.Address, @@ -60,8 +61,6 @@ func NewTransmitEventProvider( if err != nil { return nil, err } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() err = logPoller.RegisterFilter(ctx, logpoller.Filter{ Name: EventProviderFilterName(contract.Address()), EventSigs: []common.Hash{ diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider_test.go index ac2eb82d49d..62689f71981 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider_test.go @@ -31,7 +31,7 @@ func TestTransmitEventProvider_Sanity(t *testing.T) { lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) - provider, err := NewTransmitEventProvider(logger.TestLogger(t), lp, common.HexToAddress("0x"), client.NewNullClient(big.NewInt(1), logger.TestLogger(t)), 32) + provider, err := NewTransmitEventProvider(ctx, logger.TestLogger(t), lp, common.HexToAddress("0x"), client.NewNullClient(big.NewInt(1), logger.TestLogger(t)), 32) require.NoError(t, 
err) require.NotNil(t, provider) @@ -105,8 +105,9 @@ func TestTransmitEventProvider_ProcessLogs(t *testing.T) { lp := new(mocks.LogPoller) lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) client := evmClientMocks.NewClient(t) + ctx := testutils.Context(t) - provider, err := NewTransmitEventProvider(logger.TestLogger(t), lp, common.HexToAddress("0x"), client, 250) + provider, err := NewTransmitEventProvider(ctx, logger.TestLogger(t), lp, common.HexToAddress("0x"), client, 250) require.NoError(t, err) id := core.GenUpkeepID(types.LogTrigger, "1111111111111111") diff --git a/core/services/relay/evm/ocr2keeper.go b/core/services/relay/evm/ocr2keeper.go index 6563604945c..e1eedd30fd4 100644 --- a/core/services/relay/evm/ocr2keeper.go +++ b/core/services/relay/evm/ocr2keeper.go @@ -114,7 +114,7 @@ func (r *ocr2keeperRelayer) NewOCR2KeeperProvider(rargs commontypes.RelayArgs, p // lookback blocks for transmit event is hard coded and should provide ample time for logs // to be detected in most cases var transmitLookbackBlocks int64 = 250 - transmitEventProvider, err := transmit.NewTransmitEventProvider(r.lggr, client.LogPoller(), addr, client.Client(), transmitLookbackBlocks) + transmitEventProvider, err := transmit.NewTransmitEventProvider(ctx, r.lggr, client.LogPoller(), addr, client.Client(), transmitLookbackBlocks) if err != nil { return nil, err } From 6798f8d349a9421f3e07de8449887b1a2aa76fd3 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Tue, 5 Mar 2024 11:19:39 -0500 Subject: [PATCH 56/65] Pass context --- core/internal/features/ocr2/features_ocr2_test.go | 4 ++-- core/services/ocr2/delegate.go | 7 ++++--- .../plugins/ocr2keeper/evmregistry/v20/log_provider.go | 3 +-- core/services/ocr2/plugins/ocr2keeper/util.go | 4 +++- core/services/relay/evm/contract_transmitter.go | 3 +-- core/services/relay/evm/contract_transmitter_test.go | 3 ++- core/services/relay/evm/evm.go | 1 + 7 files changed, 14 insertions(+), 11 deletions(-) diff --git 
a/core/internal/features/ocr2/features_ocr2_test.go b/core/internal/features/ocr2/features_ocr2_test.go index 636fbeef7a4..5bfa1812a40 100644 --- a/core/internal/features/ocr2/features_ocr2_test.go +++ b/core/internal/features/ocr2/features_ocr2_test.go @@ -591,7 +591,7 @@ juelsPerFeeCoinCacheDuration = "1m" contractABI, err2 := abi.JSON(strings.NewReader(ocr2aggregator.OCR2AggregatorABI)) require.NoError(t, err2) apps[0].GetRelayers().LegacyEVMChains().Slice() - ct, err2 := evm.NewOCRContractTransmitter(ocrContractAddress, apps[0].GetRelayers().LegacyEVMChains().Slice()[0].Client(), contractABI, nil, apps[0].GetRelayers().LegacyEVMChains().Slice()[0].LogPoller(), lggr, nil) + ct, err2 := evm.NewOCRContractTransmitter(testutils.Context(t), ocrContractAddress, apps[0].GetRelayers().LegacyEVMChains().Slice()[0].Client(), contractABI, nil, apps[0].GetRelayers().LegacyEVMChains().Slice()[0].LogPoller(), lggr, nil) require.NoError(t, err2) configDigest, epoch, err2 := ct.LatestConfigDigestAndEpoch(testutils.Context(t)) require.NoError(t, err2) @@ -902,7 +902,7 @@ juelsPerFeeCoinCacheDuration = "1m" // Assert we can read the latest config digest and epoch after a report has been submitted. 
contractABI, err := abi.JSON(strings.NewReader(ocr2aggregator.OCR2AggregatorABI)) require.NoError(t, err) - ct, err := evm.NewOCRContractTransmitter(ocrContractAddress, apps[0].GetRelayers().LegacyEVMChains().Slice()[0].Client(), contractABI, nil, apps[0].GetRelayers().LegacyEVMChains().Slice()[0].LogPoller(), lggr, nil) + ct, err := evm.NewOCRContractTransmitter(testutils.Context(t), ocrContractAddress, apps[0].GetRelayers().LegacyEVMChains().Slice()[0].Client(), contractABI, nil, apps[0].GetRelayers().LegacyEVMChains().Slice()[0].LogPoller(), lggr, nil) require.NoError(t, err) configDigest, epoch, err := ct.LatestConfigDigestAndEpoch(testutils.Context(t)) require.NoError(t, err) diff --git a/core/services/ocr2/delegate.go b/core/services/ocr2/delegate.go index c640f3d9f22..1009574deb1 100644 --- a/core/services/ocr2/delegate.go +++ b/core/services/ocr2/delegate.go @@ -1247,9 +1247,9 @@ func (d *Delegate) newServicesOCR2Keepers( // Future contracts of v2.1 (v2.x) will use the same job spec as v2.1 return d.newServicesOCR2Keepers21(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc, ocrLogger, cfg, spec) case "v2.0": - return d.newServicesOCR2Keepers20(lggr, jb, bootstrapPeers, kb, ocrDB, lc, ocrLogger, cfg, spec) + return d.newServicesOCR2Keepers20(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc, ocrLogger, cfg, spec) default: - return d.newServicesOCR2Keepers20(lggr, jb, bootstrapPeers, kb, ocrDB, lc, ocrLogger, cfg, spec) + return d.newServicesOCR2Keepers20(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc, ocrLogger, cfg, spec) } } @@ -1401,6 +1401,7 @@ func (d *Delegate) newServicesOCR2Keepers21( } func (d *Delegate) newServicesOCR2Keepers20( + ctx context.Context, lggr logger.SugaredLogger, jb job.Job, bootstrapPeers []commontypes.BootstrapperLocator, @@ -1424,7 +1425,7 @@ func (d *Delegate) newServicesOCR2Keepers20( return nil, fmt.Errorf("keepers2.0 services: failed to get chain (%s): %w", rid.ChainID, err2) } - keeperProvider, rgstry, encoder, logProvider, err2 := 
ocr2keeper.EVMDependencies20(jb, d.db, lggr, chain, d.ethKs, d.cfg.Database()) + keeperProvider, rgstry, encoder, logProvider, err2 := ocr2keeper.EVMDependencies20(ctx, jb, d.db, lggr, chain, d.ethKs, d.cfg.Database()) if err2 != nil { return nil, errors.Wrap(err2, "could not build dependencies for ocr2 keepers") } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/log_provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/log_provider.go index f7f3fcd65b0..50c1e5b7c1a 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/log_provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/log_provider.go @@ -48,6 +48,7 @@ func LogProviderFilterName(addr common.Address) string { } func NewLogProvider( + ctx context.Context, logger logger.Logger, logPoller logpoller.LogPoller, registryAddress common.Address, @@ -68,8 +69,6 @@ func NewLogProvider( // Add log filters for the log poller so that it can poll and find the logs that // we need. - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() err = logPoller.RegisterFilter(ctx, logpoller.Filter{ Name: LogProviderFilterName(contract.Address()), EventSigs: []common.Hash{ diff --git a/core/services/ocr2/plugins/ocr2keeper/util.go b/core/services/ocr2/plugins/ocr2keeper/util.go index 53fff8751c3..4fdddfe7f02 100644 --- a/core/services/ocr2/plugins/ocr2keeper/util.go +++ b/core/services/ocr2/plugins/ocr2keeper/util.go @@ -1,6 +1,7 @@ package ocr2keeper import ( + "context" "fmt" "github.com/smartcontractkit/chainlink-common/pkg/types" @@ -67,6 +68,7 @@ func EVMProvider(db *sqlx.DB, chain legacyevm.Chain, lggr logger.Logger, spec jo } func EVMDependencies20( + ctx context.Context, spec job.Job, db *sqlx.DB, lggr logger.Logger, @@ -95,7 +97,7 @@ func EVMDependencies20( // to be detected in most cases var lookbackBlocks int64 = 250 // TODO: accept a version of the registry contract and use the correct interfaces - logProvider, err := 
evmregistry20.NewLogProvider(lggr, chain.LogPoller(), rAddr, chain.Client(), lookbackBlocks) + logProvider, err := evmregistry20.NewLogProvider(ctx, lggr, chain.LogPoller(), rAddr, chain.Client(), lookbackBlocks) return keeperProvider, registry, encoder, logProvider, err } diff --git a/core/services/relay/evm/contract_transmitter.go b/core/services/relay/evm/contract_transmitter.go index ad673236f65..af0f83f6979 100644 --- a/core/services/relay/evm/contract_transmitter.go +++ b/core/services/relay/evm/contract_transmitter.go @@ -55,6 +55,7 @@ func transmitterFilterName(addr common.Address) string { } func NewOCRContractTransmitter( + ctx context.Context, address gethcommon.Address, caller contractReader, contractABI abi.ABI, @@ -68,8 +69,6 @@ func NewOCRContractTransmitter( return nil, errors.New("invalid ABI, missing transmitted") } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() err := lp.RegisterFilter(ctx, logpoller.Filter{Name: transmitterFilterName(address), EventSigs: []common.Hash{transmitted.ID}, Addresses: []common.Address{address}}) if err != nil { return nil, err diff --git a/core/services/relay/evm/contract_transmitter_test.go b/core/services/relay/evm/contract_transmitter_test.go index a51c2fde0bd..930ef0249e6 100644 --- a/core/services/relay/evm/contract_transmitter_test.go +++ b/core/services/relay/evm/contract_transmitter_test.go @@ -36,6 +36,7 @@ func TestContractTransmitter(t *testing.T) { lggr := logger.TestLogger(t) c := evmclimocks.NewClient(t) lp := lpmocks.NewLogPoller(t) + ctx := testutils.Context(t) // scanLogs = false digestAndEpochDontScanLogs, _ := hex.DecodeString( "0000000000000000000000000000000000000000000000000000000000000000" + // false @@ -44,7 +45,7 @@ func TestContractTransmitter(t *testing.T) { c.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(digestAndEpochDontScanLogs, nil).Once() contractABI, _ := abi.JSON(strings.NewReader(ocr2aggregator.OCR2AggregatorABI)) 
lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) - ot, err := NewOCRContractTransmitter(gethcommon.Address{}, c, contractABI, mockTransmitter{}, lp, lggr, func(b []byte) (*txmgr.TxMeta, error) { + ot, err := NewOCRContractTransmitter(ctx, gethcommon.Address{}, c, contractABI, mockTransmitter{}, lp, lggr, func(b []byte) (*txmgr.TxMeta, error) { return &txmgr.TxMeta{}, nil }) require.NoError(t, err) diff --git a/core/services/relay/evm/evm.go b/core/services/relay/evm/evm.go index db504872925..b90bdac9c36 100644 --- a/core/services/relay/evm/evm.go +++ b/core/services/relay/evm/evm.go @@ -525,6 +525,7 @@ func newOnChainContractTransmitter(ctx context.Context, lggr logger.Logger, rarg } return NewOCRContractTransmitter( + ctx, configWatcher.contractAddress, configWatcher.chain.Client(), transmissionContractABI, From 4116ba2275f8f25e77d9b1a90e619893632155c2 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Tue, 5 Mar 2024 11:42:13 -0500 Subject: [PATCH 57/65] Update orm.go --- core/chains/evm/logpoller/orm.go | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index 8d1ea8149e8..d1011040a73 100644 --- a/core/chains/evm/logpoller/orm.go +++ b/core/chains/evm/logpoller/orm.go @@ -333,7 +333,7 @@ func (o *DbORM) InsertLogs(ctx context.Context, logs []Log) error { return err } return o.Transaction(ctx, func(orm *DbORM) error { - return o.insertLogsWithinTx(ctx, logs, orm.db.(*sqlx.Tx)) + return orm.insertLogsWithinTx(ctx, logs, orm.db.(*sqlx.Tx)) }) } @@ -349,21 +349,14 @@ func (o *DbORM) InsertLogsWithBlock(ctx context.Context, logs []Log, block LogPo // Block and logs goes with the same TX to ensure atomicity return o.Transaction(ctx, func(orm *DbORM) error { - if err := o.insertBlockWithinTx(ctx, orm.db.(*sqlx.Tx), block.BlockHash, block.BlockNumber, block.BlockTimestamp, block.FinalizedBlockNumber); err != nil { + err := orm.InsertBlock(ctx, 
block.BlockHash, block.BlockNumber, block.BlockTimestamp, block.FinalizedBlockNumber) + if err != nil { return err } - return o.insertLogsWithinTx(ctx, logs, orm.db.(*sqlx.Tx)) + return orm.insertLogsWithinTx(ctx, logs, orm.db.(*sqlx.Tx)) }) } -func (o *DbORM) insertBlockWithinTx(ctx context.Context, tx sqlutil.Queryer, blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, finalizedBlock int64) error { - query := `INSERT INTO evm.log_poller_blocks (evm_chain_id, block_hash, block_number, block_timestamp, finalized_block_number, created_at) - VALUES ($1, $2, $3, $4, $5, NOW()) - ON CONFLICT DO NOTHING` - _, err := tx.ExecContext(ctx, query, ubig.New(o.chainID), blockHash.Bytes(), blockNumber, blockTimestamp, finalizedBlock) - return err -} - func (o *DbORM) insertLogsWithinTx(ctx context.Context, logs []Log, tx sqlutil.Queryer) error { batchInsertSize := 4000 for i := 0; i < len(logs); i += batchInsertSize { From 1263bf1adf1acab90228d8eabb635335be066bf3 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Tue, 5 Mar 2024 11:45:14 -0500 Subject: [PATCH 58/65] Use testcontext --- integration-tests/smoke/log_poller_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/integration-tests/smoke/log_poller_test.go b/integration-tests/smoke/log_poller_test.go index 3b659ba0c36..f6c349581ba 100644 --- a/integration-tests/smoke/log_poller_test.go +++ b/integration-tests/smoke/log_poller_test.go @@ -98,7 +98,7 @@ func executeBasicLogPollerTest(t *testing.T) { lpTestEnv := prepareEnvironment(l, t, &testConfig) testEnv := lpTestEnv.testEnv - ctx := context.Background() + ctx := testcontext.Get(t) // Register log triggered upkeep for each combination of log emitter contract and event signature (topic) // We need to register a separate upkeep for each event signature, because log trigger doesn't support multiple topics (even if log poller does) @@ -179,7 +179,7 @@ func executeLogPollerReplay(t *testing.T, consistencyTimeout string) { lpTestEnv 
:= prepareEnvironment(l, t, &testConfig) testEnv := lpTestEnv.testEnv - ctx := context.Background() + ctx := testcontext.Get(t) // Save block number before starting to emit events, so that we can later use it when querying logs sb, err := testEnv.EVMClient.LatestBlockNumber(testcontext.Get(t)) From 5859e8ee7c32681bc0f798d1b054d641bcf41be4 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 6 Mar 2024 12:41:20 -0500 Subject: [PATCH 59/65] Initialize context --- .../ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go index bf1fe7ba1cb..9fc2d7891f2 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go @@ -102,6 +102,9 @@ func NewEVMRegistryService(addr common.Address, client legacyevm.Chain, lggr log enc: EVMAutomationEncoder20{}, } + r.ctx, r.cancel = context.WithCancel(context.Background()) + r.reInit = time.NewTimer(reInitializationDelay) + if err := r.registerEvents(client.ID().Uint64(), addr); err != nil { return nil, fmt.Errorf("logPoller error while registering automation events: %w", err) } @@ -200,13 +203,10 @@ func (r *EvmRegistry) Name() string { return r.lggr.Name() } -func (r *EvmRegistry) Start(ctx context.Context) error { +func (r *EvmRegistry) Start(_ context.Context) error { return r.sync.StartOnce("AutomationRegistry", func() error { r.mu.Lock() defer r.mu.Unlock() - r.ctx, r.cancel = context.WithCancel(context.Background()) - r.reInit = time.NewTimer(reInitializationDelay) - // initialize the upkeep keys; if the reInit timer returns, do it again { go func(cx context.Context, tmr *time.Timer, lggr logger.Logger, f func() error) { From 1269d73a22094b97dd6f73359446875ae192bd87 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 13 Mar 2024 
10:12:39 -0400 Subject: [PATCH 60/65] Update context --- core/services/relay/evm/functions/logpoller_wrapper.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/core/services/relay/evm/functions/logpoller_wrapper.go b/core/services/relay/evm/functions/logpoller_wrapper.go index 02d91373660..471f18b4b0e 100644 --- a/core/services/relay/evm/functions/logpoller_wrapper.go +++ b/core/services/relay/evm/functions/logpoller_wrapper.go @@ -19,7 +19,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/functions/config" evmRelayTypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types" - "github.com/smartcontractkit/chainlink/v2/core/utils" ) type logPollerWrapper struct { @@ -339,17 +338,16 @@ func (l *logPollerWrapper) checkForRouteUpdates() { updateOnce := func() { // NOTE: timeout == frequency here, could be changed to a separate config value - timeoutCtx, cancel := utils.ContextFromChanWithTimeout(l.stopCh, time.Duration(l.pluginConfig.ContractUpdateCheckFrequencySec)*time.Second) + timeout := time.Duration(l.pluginConfig.ContractUpdateCheckFrequencySec) * time.Second + ctx, cancel := l.stopCh.CtxCancel(context.WithTimeout(context.Background(), timeout)) defer cancel() - active, proposed, err := l.getCurrentCoordinators(timeoutCtx) + active, proposed, err := l.getCurrentCoordinators(ctx) if err != nil { l.lggr.Errorw("LogPollerWrapper: error calling getCurrentCoordinators", "err", err) return } - handleRouteCtx, handleRouteCancel := utils.ContextFromChan(l.stopCh) - defer handleRouteCancel() - l.handleRouteUpdate(handleRouteCtx, active, proposed) + l.handleRouteUpdate(ctx, active, proposed) } updateOnce() // update once right away From 8823302badcdc7fc538b39199f75b6885d65a986 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 13 Mar 2024 10:24:39 -0400 Subject: [PATCH 61/65] Propagate context --- 
core/services/relay/evm/config_poller.go | 8 +++----- core/services/relay/evm/config_poller_test.go | 13 +++++++------ core/services/relay/evm/evm.go | 14 +++++++++----- core/services/relay/evm/llo_config_provider.go | 6 ++++-- core/services/relay/evm/ocr2keeper.go | 5 +++-- core/services/relay/evm/ocr2vrf.go | 7 ++++--- .../services/relay/evm/standard_config_provider.go | 8 +++++--- 7 files changed, 35 insertions(+), 26 deletions(-) diff --git a/core/services/relay/evm/config_poller.go b/core/services/relay/evm/config_poller.go index 63977295248..2280d60d7ee 100644 --- a/core/services/relay/evm/config_poller.go +++ b/core/services/relay/evm/config_poller.go @@ -70,13 +70,11 @@ type CPConfig struct { LogDecoder LogDecoder } -func NewConfigPoller(lggr logger.Logger, cfg CPConfig) (evmRelayTypes.ConfigPoller, error) { - return newConfigPoller(lggr, cfg.Client, cfg.DestinationChainPoller, cfg.AggregatorContractAddress, cfg.ConfigStoreAddress, cfg.LogDecoder) +func NewConfigPoller(ctx context.Context, lggr logger.Logger, cfg CPConfig) (evmRelayTypes.ConfigPoller, error) { + return newConfigPoller(ctx, lggr, cfg.Client, cfg.DestinationChainPoller, cfg.AggregatorContractAddress, cfg.ConfigStoreAddress, cfg.LogDecoder) } -func newConfigPoller(lggr logger.Logger, client client.Client, destChainPoller logpoller.LogPoller, aggregatorContractAddr common.Address, configStoreAddr *common.Address, ld LogDecoder) (*configPoller, error) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() +func newConfigPoller(ctx context.Context, lggr logger.Logger, client client.Client, destChainPoller logpoller.LogPoller, aggregatorContractAddr common.Address, configStoreAddr *common.Address, ld LogDecoder) (*configPoller, error) { err := destChainPoller.RegisterFilter(ctx, logpoller.Filter{Name: configPollerFilterName(aggregatorContractAddr), EventSigs: []common.Hash{ld.EventSig()}, Addresses: []common.Address{aggregatorContractAddr}}) if err != nil { return nil, err 
diff --git a/core/services/relay/evm/config_poller_test.go b/core/services/relay/evm/config_poller_test.go index 70be2c0367b..4778c983c9c 100644 --- a/core/services/relay/evm/config_poller_test.go +++ b/core/services/relay/evm/config_poller_test.go @@ -55,6 +55,7 @@ func TestConfigPoller(t *testing.T) { var b *backends.SimulatedBackend var linkTokenAddress common.Address var accessAddress common.Address + ctx := testutils.Context(t) ld := OCR2AggregatorLogDecoder @@ -103,7 +104,7 @@ func TestConfigPoller(t *testing.T) { } t.Run("LatestConfig errors if there is no config in logs and config store is unconfigured", func(t *testing.T) { - cp, err := NewConfigPoller(lggr, CPConfig{ethClient, lp, ocrAddress, nil, ld}) + cp, err := NewConfigPoller(ctx, lggr, CPConfig{ethClient, lp, ocrAddress, nil, ld}) require.NoError(t, err) _, err = cp.LatestConfig(testutils.Context(t), 0) @@ -112,7 +113,7 @@ func TestConfigPoller(t *testing.T) { }) t.Run("happy path (with config store)", func(t *testing.T) { - cp, err := NewConfigPoller(lggr, CPConfig{ethClient, lp, ocrAddress, &configStoreContractAddr, ld}) + cp, err := NewConfigPoller(ctx, lggr, CPConfig{ethClient, lp, ocrAddress, &configStoreContractAddr, ld}) require.NoError(t, err) // Should have no config to begin with. 
_, configDigest, err := cp.LatestConfigDetails(testutils.Context(t)) @@ -183,7 +184,7 @@ func TestConfigPoller(t *testing.T) { mp.On("LatestLogByEventSigWithConfs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, sql.ErrNoRows) t.Run("if callLatestConfigDetails succeeds", func(t *testing.T) { - cp, err := newConfigPoller(lggr, ethClient, mp, ocrAddress, &configStoreContractAddr, ld) + cp, err := newConfigPoller(ctx, lggr, ethClient, mp, ocrAddress, &configStoreContractAddr, ld) require.NoError(t, err) t.Run("when config has not been set, returns zero values", func(t *testing.T) { @@ -220,7 +221,7 @@ func TestConfigPoller(t *testing.T) { failingClient := new(evmClientMocks.Client) failingClient.On("ConfiguredChainID").Return(big.NewInt(42)) failingClient.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("something exploded")) - cp, err := newConfigPoller(lggr, failingClient, mp, ocrAddress, &configStoreContractAddr, ld) + cp, err := newConfigPoller(ctx, lggr, failingClient, mp, ocrAddress, &configStoreContractAddr, ld) require.NoError(t, err) cp.configStoreContractAddr = &configStoreContractAddr @@ -259,7 +260,7 @@ func TestConfigPoller(t *testing.T) { mp.On("LatestLogByEventSigWithConfs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, sql.ErrNoRows) t.Run("if callReadConfig succeeds", func(t *testing.T) { - cp, err := newConfigPoller(lggr, ethClient, mp, ocrAddress, &configStoreContractAddr, ld) + cp, err := newConfigPoller(ctx, lggr, ethClient, mp, ocrAddress, &configStoreContractAddr, ld) require.NoError(t, err) t.Run("when config has not been set, returns error", func(t *testing.T) { @@ -321,7 +322,7 @@ func TestConfigPoller(t *testing.T) { // initial call to retrieve config store address from aggregator return *callArgs.To == ocrAddress }), mock.Anything).Return(nil, errors.New("something exploded")).Once() - cp, err := newConfigPoller(lggr, failingClient, mp, ocrAddress, 
&configStoreContractAddr, ld) + cp, err := newConfigPoller(ctx, lggr, failingClient, mp, ocrAddress, &configStoreContractAddr, ld) require.NoError(t, err) _, err = cp.LatestConfig(testutils.Context(t), 0) diff --git a/core/services/relay/evm/evm.go b/core/services/relay/evm/evm.go index aab5bdc5fde..c65c3fc6638 100644 --- a/core/services/relay/evm/evm.go +++ b/core/services/relay/evm/evm.go @@ -166,7 +166,7 @@ func (r *Relayer) NewPluginProvider(rargs commontypes.RelayArgs, pargs commontyp lggr := r.lggr.Named("PluginProvider").Named(rargs.ExternalJobID.String()) - configWatcher, err := newStandardConfigProvider(r.lggr, r.chain, types.NewRelayOpts(rargs)) + configWatcher, err := newStandardConfigProvider(ctx, r.lggr, r.chain, types.NewRelayOpts(rargs)) if err != nil { return nil, err } @@ -250,6 +250,10 @@ func (r *Relayer) NewMercuryProvider(rargs commontypes.RelayArgs, pargs commonty } func (r *Relayer) NewLLOProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.LLOProvider, error) { + + // TODO https://smartcontract-it.atlassian.net/browse/BCF-2887 + ctx := context.Background() + relayOpts := types.NewRelayOpts(rargs) var relayConfig types.RelayConfig { @@ -271,7 +275,7 @@ func (r *Relayer) NewLLOProvider(rargs commontypes.RelayArgs, pargs commontypes. 
if relayConfig.ChainID.String() != r.chain.ID().String() { return nil, fmt.Errorf("internal error: chain id in spec does not match this relayer's chain: have %s expected %s", relayConfig.ChainID.String(), r.chain.ID().String()) } - cp, err := newLLOConfigProvider(r.lggr, r.chain, relayOpts) + cp, err := newLLOConfigProvider(ctx, r.lggr, r.chain, relayOpts) if err != nil { return nil, pkgerrors.WithStack(err) } @@ -344,11 +348,11 @@ func (r *Relayer) NewConfigProvider(args commontypes.RelayArgs) (configProvider switch args.ProviderType { case "median": - configProvider, err = newStandardConfigProvider(lggr, r.chain, relayOpts) + configProvider, err = newStandardConfigProvider(ctx, lggr, r.chain, relayOpts) case "mercury": configProvider, err = newMercuryConfigProvider(ctx, lggr, r.chain, relayOpts) case "llo": - configProvider, err = newLLOConfigProvider(lggr, r.chain, relayOpts) + configProvider, err = newLLOConfigProvider(ctx, lggr, r.chain, relayOpts) default: return nil, fmt.Errorf("unrecognized provider type: %q", args.ProviderType) } @@ -563,7 +567,7 @@ func (r *Relayer) NewMedianProvider(rargs commontypes.RelayArgs, pargs commontyp } contractID := common.HexToAddress(relayOpts.ContractID) - configWatcher, err := newStandardConfigProvider(lggr, r.chain, relayOpts) + configWatcher, err := newStandardConfigProvider(ctx, lggr, r.chain, relayOpts) if err != nil { return nil, err } diff --git a/core/services/relay/evm/llo_config_provider.go b/core/services/relay/evm/llo_config_provider.go index bd8dbac8460..6efd0ccada2 100644 --- a/core/services/relay/evm/llo_config_provider.go +++ b/core/services/relay/evm/llo_config_provider.go @@ -1,6 +1,8 @@ package evm import ( + "context" + "github.com/ethereum/go-ethereum/common" pkgerrors "github.com/pkg/errors" @@ -10,12 +12,12 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types" ) -func newLLOConfigProvider(lggr logger.Logger, chain legacyevm.Chain, opts *types.RelayOpts) (*configWatcher, 
error) { +func newLLOConfigProvider(ctx context.Context, lggr logger.Logger, chain legacyevm.Chain, opts *types.RelayOpts) (*configWatcher, error) { if !common.IsHexAddress(opts.ContractID) { return nil, pkgerrors.Errorf("invalid contractID, expected hex address") } aggregatorAddress := common.HexToAddress(opts.ContractID) configDigester := llo.NewOffchainConfigDigester(chain.Config().EVM().ChainID(), aggregatorAddress) - return newContractConfigProvider(lggr, chain, opts, aggregatorAddress, ChannelVerifierLogDecoder, configDigester) + return newContractConfigProvider(ctx, lggr, chain, opts, aggregatorAddress, ChannelVerifierLogDecoder, configDigester) } diff --git a/core/services/relay/evm/ocr2keeper.go b/core/services/relay/evm/ocr2keeper.go index e1eedd30fd4..b39d970f204 100644 --- a/core/services/relay/evm/ocr2keeper.go +++ b/core/services/relay/evm/ocr2keeper.go @@ -88,7 +88,7 @@ func (r *ocr2keeperRelayer) NewOCR2KeeperProvider(rargs commontypes.RelayArgs, p // TODO https://smartcontract-it.atlassian.net/browse/BCF-2887 ctx := context.Background() - cfgWatcher, err := newOCR2KeeperConfigProvider(r.lggr, r.chain, rargs) + cfgWatcher, err := newOCR2KeeperConfigProvider(ctx, r.lggr, r.chain, rargs) if err != nil { return nil, err } @@ -208,7 +208,7 @@ func (c *ocr2keeperProvider) Codec() commontypes.Codec { return nil } -func newOCR2KeeperConfigProvider(lggr logger.Logger, chain legacyevm.Chain, rargs commontypes.RelayArgs) (*configWatcher, error) { +func newOCR2KeeperConfigProvider(ctx context.Context, lggr logger.Logger, chain legacyevm.Chain, rargs commontypes.RelayArgs) (*configWatcher, error) { var relayConfig types.RelayConfig err := json.Unmarshal(rargs.RelayConfig, &relayConfig) if err != nil { @@ -221,6 +221,7 @@ func newOCR2KeeperConfigProvider(lggr logger.Logger, chain legacyevm.Chain, rarg contractAddress := common.HexToAddress(rargs.ContractID) configPoller, err := NewConfigPoller( + ctx, lggr.With("contractID", rargs.ContractID), CPConfig{ 
chain.Client(), diff --git a/core/services/relay/evm/ocr2vrf.go b/core/services/relay/evm/ocr2vrf.go index 98753655550..07edd1c5ac6 100644 --- a/core/services/relay/evm/ocr2vrf.go +++ b/core/services/relay/evm/ocr2vrf.go @@ -64,7 +64,7 @@ func (r *ocr2vrfRelayer) NewDKGProvider(rargs commontypes.RelayArgs, pargs commo // TODO https://smartcontract-it.atlassian.net/browse/BCF-2887 ctx := context.Background() - configWatcher, err := newOCR2VRFConfigProvider(r.lggr, r.chain, rargs) + configWatcher, err := newOCR2VRFConfigProvider(ctx, r.lggr, r.chain, rargs) if err != nil { return nil, err } @@ -91,7 +91,7 @@ func (r *ocr2vrfRelayer) NewOCR2VRFProvider(rargs commontypes.RelayArgs, pargs c // TODO https://smartcontract-it.atlassian.net/browse/BCF-2887 ctx := context.Background() - configWatcher, err := newOCR2VRFConfigProvider(r.lggr, r.chain, rargs) + configWatcher, err := newOCR2VRFConfigProvider(ctx, r.lggr, r.chain, rargs) if err != nil { return nil, err } @@ -140,7 +140,7 @@ func (c *ocr2vrfProvider) Codec() commontypes.Codec { return nil } -func newOCR2VRFConfigProvider(lggr logger.Logger, chain legacyevm.Chain, rargs commontypes.RelayArgs) (*configWatcher, error) { +func newOCR2VRFConfigProvider(ctx context.Context, lggr logger.Logger, chain legacyevm.Chain, rargs commontypes.RelayArgs) (*configWatcher, error) { var relayConfig types.RelayConfig err := json.Unmarshal(rargs.RelayConfig, &relayConfig) if err != nil { @@ -152,6 +152,7 @@ func newOCR2VRFConfigProvider(lggr logger.Logger, chain legacyevm.Chain, rargs c contractAddress := common.HexToAddress(rargs.ContractID) configPoller, err := NewConfigPoller( + ctx, lggr.With("contractID", rargs.ContractID), CPConfig{ chain.Client(), diff --git a/core/services/relay/evm/standard_config_provider.go b/core/services/relay/evm/standard_config_provider.go index 0de48240b7d..59f91c52f4a 100644 --- a/core/services/relay/evm/standard_config_provider.go +++ b/core/services/relay/evm/standard_config_provider.go @@ -1,6 +1,7 
@@ package evm import ( + "context" "errors" "fmt" @@ -14,7 +15,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types" ) -func newStandardConfigProvider(lggr logger.Logger, chain legacyevm.Chain, opts *types.RelayOpts) (*configWatcher, error) { +func newStandardConfigProvider(ctx context.Context, lggr logger.Logger, chain legacyevm.Chain, opts *types.RelayOpts) (*configWatcher, error) { if !common.IsHexAddress(opts.ContractID) { return nil, errors.New("invalid contractID, expected hex address") } @@ -24,10 +25,10 @@ func newStandardConfigProvider(lggr logger.Logger, chain legacyevm.Chain, opts * ChainID: chain.Config().EVM().ChainID().Uint64(), ContractAddress: aggregatorAddress, } - return newContractConfigProvider(lggr, chain, opts, aggregatorAddress, OCR2AggregatorLogDecoder, offchainConfigDigester) + return newContractConfigProvider(ctx, lggr, chain, opts, aggregatorAddress, OCR2AggregatorLogDecoder, offchainConfigDigester) } -func newContractConfigProvider(lggr logger.Logger, chain legacyevm.Chain, opts *types.RelayOpts, aggregatorAddress common.Address, ld LogDecoder, digester ocrtypes.OffchainConfigDigester) (*configWatcher, error) { +func newContractConfigProvider(ctx context.Context, lggr logger.Logger, chain legacyevm.Chain, opts *types.RelayOpts, aggregatorAddress common.Address, ld LogDecoder, digester ocrtypes.OffchainConfigDigester) (*configWatcher, error) { var cp types.ConfigPoller relayConfig, err := opts.RelayConfig() @@ -35,6 +36,7 @@ func newContractConfigProvider(lggr logger.Logger, chain legacyevm.Chain, opts * return nil, fmt.Errorf("failed to get relay config: %w", err) } cp, err = NewConfigPoller( + ctx, lggr, CPConfig{ chain.Client(), From 942f6874eea8d330e2e6027dc4b937b052b70210 Mon Sep 17 00:00:00 2001 From: Jordan Krage Date: Wed, 13 Mar 2024 09:32:03 -0500 Subject: [PATCH 62/65] core/services/chainlink: start using sqlutil.DB instead of pg.Q (#12386) --- .../evm/forwarders/forwarder_manager.go | 5 +- 
.../evm/forwarders/forwarder_manager_test.go | 2 +- core/chains/evm/forwarders/mocks/orm.go | 4 +- core/chains/evm/forwarders/orm.go | 10 +-- core/chains/evm/forwarders/orm_test.go | 4 +- core/chains/evm/headtracker/orm.go | 4 +- core/chains/evm/logpoller/observability.go | 4 +- core/chains/evm/logpoller/orm.go | 13 ++-- core/chains/evm/txmgr/builder.go | 6 +- core/chains/evm/txmgr/txmgr_test.go | 1 + core/chains/legacyevm/chain.go | 16 +++-- core/chains/legacyevm/chain_test.go | 1 + core/chains/legacyevm/evm_txm.go | 5 +- core/cmd/ocr2vrf_configure_commands.go | 4 +- core/cmd/shell.go | 19 +++--- core/cmd/shell_local_test.go | 2 + core/internal/cltest/cltest.go | 2 + core/internal/cltest/factories.go | 3 +- core/internal/features/features_test.go | 2 +- .../features/ocr2/features_ocr2_test.go | 2 +- core/internal/mocks/application.go | 22 +++++++ core/internal/testutils/evmtest/evmtest.go | 1 + core/scripts/go.mod | 4 +- core/scripts/go.sum | 10 +-- core/services/chainlink/application.go | 61 +++++++++++-------- .../relayer_chain_interoperators_test.go | 2 + core/services/chainlink/relayer_factory.go | 2 +- .../keeper/registry1_1_synchronizer_test.go | 15 ++--- .../keeper/registry1_2_synchronizer_test.go | 24 +++----- .../keeper/registry1_3_synchronizer_test.go | 33 ++++------ .../plugins/ocr2keeper/integration_test.go | 2 +- .../internal/ocr2vrf_integration_test.go | 2 +- core/services/pg/q.go | 17 ++---- core/services/pg/sqlx.go | 1 + .../promreporter/prom_reporter_test.go | 1 + core/services/vrf/delegate_test.go | 2 +- core/services/webhook/authorizer.go | 10 +-- core/services/webhook/authorizer_test.go | 6 +- core/web/evm_forwarders_controller.go | 8 +-- core/web/pipeline_runs_controller.go | 2 +- go.mod | 6 +- go.sum | 10 +-- integration-tests/go.mod | 6 +- integration-tests/go.sum | 10 +-- integration-tests/load/go.mod | 6 +- integration-tests/load/go.sum | 10 +-- .../universal/log_poller/helpers.go | 12 ++-- 47 files changed, 214 insertions(+), 180 
deletions(-) diff --git a/core/chains/evm/forwarders/forwarder_manager.go b/core/chains/evm/forwarders/forwarder_manager.go index e48913d6bea..491b144a338 100644 --- a/core/chains/evm/forwarders/forwarder_manager.go +++ b/core/chains/evm/forwarders/forwarder_manager.go @@ -10,10 +10,9 @@ import ( "github.com/ethereum/go-ethereum/core/types" pkgerrors "github.com/pkg/errors" - "github.com/jmoiron/sqlx" - "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/services" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" "github.com/smartcontractkit/chainlink-common/pkg/utils" evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" @@ -55,7 +54,7 @@ type FwdMgr struct { wg sync.WaitGroup } -func NewFwdMgr(db *sqlx.DB, client evmclient.Client, logpoller evmlogpoller.LogPoller, l logger.Logger, cfg Config) *FwdMgr { +func NewFwdMgr(db sqlutil.DB, client evmclient.Client, logpoller evmlogpoller.LogPoller, l logger.Logger, cfg Config) *FwdMgr { lggr := logger.Sugared(logger.Named(l, "EVMForwarderManager")) fwdMgr := FwdMgr{ logger: lggr, diff --git a/core/chains/evm/forwarders/forwarder_manager_test.go b/core/chains/evm/forwarders/forwarder_manager_test.go index 89743ad0432..6752b75eaf3 100644 --- a/core/chains/evm/forwarders/forwarder_manager_test.go +++ b/core/chains/evm/forwarders/forwarder_manager_test.go @@ -89,7 +89,7 @@ func TestFwdMgr_MaybeForwardTransaction(t *testing.T) { require.NoError(t, err) cleanupCalled := false - cleanup := func(tx sqlutil.Queryer, evmChainId int64, addr common.Address) error { + cleanup := func(tx sqlutil.DB, evmChainId int64, addr common.Address) error { require.Equal(t, testutils.FixtureChainID.Int64(), evmChainId) require.Equal(t, forwarderAddr, addr) require.NotNil(t, tx) diff --git a/core/chains/evm/forwarders/mocks/orm.go b/core/chains/evm/forwarders/mocks/orm.go index 5786a1cd277..babde57611f 100644 --- a/core/chains/evm/forwarders/mocks/orm.go +++ 
b/core/chains/evm/forwarders/mocks/orm.go @@ -49,7 +49,7 @@ func (_m *ORM) CreateForwarder(ctx context.Context, addr common.Address, evmChai } // DeleteForwarder provides a mock function with given fields: ctx, id, cleanup -func (_m *ORM) DeleteForwarder(ctx context.Context, id int64, cleanup func(sqlutil.Queryer, int64, common.Address) error) error { +func (_m *ORM) DeleteForwarder(ctx context.Context, id int64, cleanup func(sqlutil.DB, int64, common.Address) error) error { ret := _m.Called(ctx, id, cleanup) if len(ret) == 0 { @@ -57,7 +57,7 @@ func (_m *ORM) DeleteForwarder(ctx context.Context, id int64, cleanup func(sqlut } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, int64, func(sqlutil.Queryer, int64, common.Address) error) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, int64, func(sqlutil.DB, int64, common.Address) error) error); ok { r0 = rf(ctx, id, cleanup) } else { r0 = ret.Error(0) diff --git a/core/chains/evm/forwarders/orm.go b/core/chains/evm/forwarders/orm.go index 8f40dd4e396..dc50cd4dfb8 100644 --- a/core/chains/evm/forwarders/orm.go +++ b/core/chains/evm/forwarders/orm.go @@ -19,17 +19,17 @@ type ORM interface { CreateForwarder(ctx context.Context, addr common.Address, evmChainId big.Big) (fwd Forwarder, err error) FindForwarders(ctx context.Context, offset, limit int) ([]Forwarder, int, error) FindForwardersByChain(ctx context.Context, evmChainId big.Big) ([]Forwarder, error) - DeleteForwarder(ctx context.Context, id int64, cleanup func(tx sqlutil.Queryer, evmChainId int64, addr common.Address) error) error + DeleteForwarder(ctx context.Context, id int64, cleanup func(tx sqlutil.DB, evmChainId int64, addr common.Address) error) error FindForwardersInListByChain(ctx context.Context, evmChainId big.Big, addrs []common.Address) ([]Forwarder, error) } type DbORM struct { - db sqlutil.Queryer + db sqlutil.DB } var _ ORM = &DbORM{} -func NewORM(db sqlutil.Queryer) *DbORM { +func NewORM(db sqlutil.DB) *DbORM { return 
&DbORM{db: db} } @@ -38,7 +38,7 @@ func (o *DbORM) Transaction(ctx context.Context, fn func(*DbORM) error) (err err } // new returns a NewORM like o, but backed by q. -func (o *DbORM) new(q sqlutil.Queryer) *DbORM { return NewORM(q) } +func (o *DbORM) new(q sqlutil.DB) *DbORM { return NewORM(q) } // CreateForwarder creates the Forwarder address associated with the current EVM chain id. func (o *DbORM) CreateForwarder(ctx context.Context, addr common.Address, evmChainId big.Big) (fwd Forwarder, err error) { @@ -50,7 +50,7 @@ func (o *DbORM) CreateForwarder(ctx context.Context, addr common.Address, evmCha // DeleteForwarder removes a forwarder address. // If cleanup is non-nil, it can be used to perform any chain- or contract-specific cleanup that need to happen atomically // on forwarder deletion. If cleanup returns an error, forwarder deletion will be aborted. -func (o *DbORM) DeleteForwarder(ctx context.Context, id int64, cleanup func(tx sqlutil.Queryer, evmChainID int64, addr common.Address) error) (err error) { +func (o *DbORM) DeleteForwarder(ctx context.Context, id int64, cleanup func(tx sqlutil.DB, evmChainID int64, addr common.Address) error) (err error) { return o.Transaction(ctx, func(orm *DbORM) error { var dest struct { EvmChainId int64 diff --git a/core/chains/evm/forwarders/orm_test.go b/core/chains/evm/forwarders/orm_test.go index a662be80cf3..e54fe8bf925 100644 --- a/core/chains/evm/forwarders/orm_test.go +++ b/core/chains/evm/forwarders/orm_test.go @@ -18,7 +18,7 @@ import ( type TestORM struct { ORM - db sqlutil.Queryer + db sqlutil.DB } func setupORM(t *testing.T) *TestORM { @@ -54,7 +54,7 @@ func Test_DeleteForwarder(t *testing.T) { rets := []error{ErrCleaningUp, nil, nil, ErrCleaningUp} expected := []error{ErrCleaningUp, nil, sql.ErrNoRows, sql.ErrNoRows} - testCleanupFn := func(q sqlutil.Queryer, evmChainID int64, addr common.Address) error { + testCleanupFn := func(q sqlutil.DB, evmChainID int64, addr common.Address) error { require.Less(t, 
cleanupCalled, len(rets)) cleanupCalled++ return rets[cleanupCalled-1] diff --git a/core/chains/evm/headtracker/orm.go b/core/chains/evm/headtracker/orm.go index d3834b932ca..99601cf0e5c 100644 --- a/core/chains/evm/headtracker/orm.go +++ b/core/chains/evm/headtracker/orm.go @@ -31,11 +31,11 @@ var _ ORM = &DbORM{} type DbORM struct { chainID ubig.Big - db sqlutil.Queryer + db sqlutil.DB } // NewORM creates an ORM scoped to chainID. -func NewORM(chainID big.Int, db sqlutil.Queryer) *DbORM { +func NewORM(chainID big.Int, db sqlutil.DB) *DbORM { return &DbORM{ chainID: ubig.Big(chainID), db: db, diff --git a/core/chains/evm/logpoller/observability.go b/core/chains/evm/logpoller/observability.go index 7d864e1374d..c3e162d260b 100644 --- a/core/chains/evm/logpoller/observability.go +++ b/core/chains/evm/logpoller/observability.go @@ -6,11 +6,11 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/jmoiron/sqlx" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" ) type queryType string @@ -76,7 +76,7 @@ type ObservedORM struct { // NewObservedORM creates an observed version of log poller's ORM created by NewORM // Please see ObservedLogPoller for more details on how latencies are measured -func NewObservedORM(chainID *big.Int, db *sqlx.DB, lggr logger.Logger) *ObservedORM { +func NewObservedORM(chainID *big.Int, db sqlutil.DB, lggr logger.Logger) *ObservedORM { return &ObservedORM{ ORM: NewORM(chainID, db, lggr), queryDuration: lpQueryDuration, diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index d1011040a73..07935fc593b 100644 --- a/core/chains/evm/logpoller/orm.go +++ b/core/chains/evm/logpoller/orm.go @@ -9,7 +9,6 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/jmoiron/sqlx" pkgerrors "github.com/pkg/errors" 
"github.com/smartcontractkit/chainlink-common/pkg/logger" @@ -62,14 +61,14 @@ type ORM interface { type DbORM struct { chainID *big.Int - db sqlutil.Queryer + db sqlutil.DB lggr logger.Logger } var _ ORM = &DbORM{} // NewORM creates an DbORM scoped to chainID. -func NewORM(chainID *big.Int, db sqlutil.Queryer, lggr logger.Logger) *DbORM { +func NewORM(chainID *big.Int, db sqlutil.DB, lggr logger.Logger) *DbORM { return &DbORM{ chainID: chainID, db: db, @@ -82,7 +81,7 @@ func (o *DbORM) Transaction(ctx context.Context, fn func(*DbORM) error) (err err } // new returns a NewORM like o, but backed by q. -func (o *DbORM) new(q sqlutil.Queryer) *DbORM { return NewORM(o.chainID, q, o.lggr) } +func (o *DbORM) new(q sqlutil.DB) *DbORM { return NewORM(o.chainID, q, o.lggr) } // InsertBlock is idempotent to support replays. func (o *DbORM) InsertBlock(ctx context.Context, blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, finalizedBlock int64) error { @@ -333,7 +332,7 @@ func (o *DbORM) InsertLogs(ctx context.Context, logs []Log) error { return err } return o.Transaction(ctx, func(orm *DbORM) error { - return orm.insertLogsWithinTx(ctx, logs, orm.db.(*sqlx.Tx)) + return orm.insertLogsWithinTx(ctx, logs, orm.db) }) } @@ -353,11 +352,11 @@ func (o *DbORM) InsertLogsWithBlock(ctx context.Context, logs []Log, block LogPo if err != nil { return err } - return orm.insertLogsWithinTx(ctx, logs, orm.db.(*sqlx.Tx)) + return orm.insertLogsWithinTx(ctx, logs, orm.db) }) } -func (o *DbORM) insertLogsWithinTx(ctx context.Context, logs []Log, tx sqlutil.Queryer) error { +func (o *DbORM) insertLogsWithinTx(ctx context.Context, logs []Log, tx sqlutil.DB) error { batchInsertSize := 4000 for i := 0; i < len(logs); i += batchInsertSize { start, end := i, i+batchInsertSize diff --git a/core/chains/evm/txmgr/builder.go b/core/chains/evm/txmgr/builder.go index d6b5a59e7de..d4420302598 100644 --- a/core/chains/evm/txmgr/builder.go +++ b/core/chains/evm/txmgr/builder.go @@ -7,6 +7,7 
@@ import ( "github.com/jmoiron/sqlx" "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" "github.com/smartcontractkit/chainlink/v2/common/txmgr" txmgrtypes "github.com/smartcontractkit/chainlink/v2/common/txmgr/types" evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" @@ -20,7 +21,8 @@ import ( // NewTxm constructs the necessary dependencies for the EvmTxm (broadcaster, confirmer, etc) and returns a new EvmTxManager func NewTxm( - db *sqlx.DB, + sqlxDB *sqlx.DB, + db sqlutil.DB, chainConfig ChainConfig, fCfg FeeConfig, txConfig config.Transactions, @@ -44,7 +46,7 @@ func NewTxm( checker := &CheckerFactory{Client: client} // create tx attempt builder txAttemptBuilder := NewEvmTxAttemptBuilder(*client.ConfiguredChainID(), fCfg, keyStore, estimator) - txStore := NewTxStore(db, lggr, dbConfig) + txStore := NewTxStore(sqlxDB, lggr, dbConfig) txNonceSyncer := NewNonceSyncer(txStore, lggr, client) txmCfg := NewEvmTxmConfig(chainConfig) // wrap Evm specific config diff --git a/core/chains/evm/txmgr/txmgr_test.go b/core/chains/evm/txmgr/txmgr_test.go index 57fb3186171..332031bc776 100644 --- a/core/chains/evm/txmgr/txmgr_test.go +++ b/core/chains/evm/txmgr/txmgr_test.go @@ -69,6 +69,7 @@ func makeTestEvmTxm( ) return txmgr.NewTxm( + db, db, ccfg, fcfg, diff --git a/core/chains/legacyevm/chain.go b/core/chains/legacyevm/chain.go index a8ea2f6686c..27c49a1afdc 100644 --- a/core/chains/legacyevm/chain.go +++ b/core/chains/legacyevm/chain.go @@ -15,6 +15,7 @@ import ( common "github.com/smartcontractkit/chainlink-common/pkg/chains" "github.com/smartcontractkit/chainlink-common/pkg/services" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" "github.com/smartcontractkit/chainlink-common/pkg/types" "github.com/smartcontractkit/chainlink-common/pkg/utils/mailbox" @@ -166,7 +167,8 @@ type ChainOpts struct { MailMon *mailbox.Monitor GasEstimator gas.EvmFeeEstimator - *sqlx.DB + 
SqlxDB *sqlx.DB // Deprecated: use DB instead + DB sqlutil.DB // TODO BCF-2513 remove test code from the API // Gen-functions are useful for dependency injection by tests @@ -187,6 +189,9 @@ func (o ChainOpts) Validate() error { if o.MailMon == nil { err = errors.Join(err, errors.New("nil MailMon")) } + if o.SqlxDB == nil { + err = errors.Join(err, errors.New("nil SqlxDB")) + } if o.DB == nil { err = errors.Join(err, errors.New("nil DB")) } @@ -223,14 +228,13 @@ func newChain(ctx context.Context, cfg *evmconfig.ChainScoped, nodes []*toml.Nod client = opts.GenEthClient(chainID) } - db := opts.DB headBroadcaster := headtracker.NewHeadBroadcaster(l) headSaver := headtracker.NullSaver var headTracker httypes.HeadTracker if !cfg.EVMRPCEnabled() { headTracker = headtracker.NullTracker } else if opts.GenHeadTracker == nil { - orm := headtracker.NewORM(*chainID, db) + orm := headtracker.NewORM(*chainID, opts.DB) headSaver = headtracker.NewHeadSaver(l, orm, cfg.EVM(), cfg.EVM().HeadTracker()) headTracker = headtracker.NewHeadTracker(l, client, cfg.EVM(), cfg.EVM().HeadTracker(), headBroadcaster, headSaver, opts.MailMon) } else { @@ -252,12 +256,12 @@ func newChain(ctx context.Context, cfg *evmconfig.ChainScoped, nodes []*toml.Nod LogPrunePageSize: int64(cfg.EVM().LogPrunePageSize()), BackupPollerBlockDelay: int64(cfg.EVM().BackupLogPollerBlockDelay()), } - logPoller = logpoller.NewLogPoller(logpoller.NewObservedORM(chainID, db, l), client, l, lpOpts) + logPoller = logpoller.NewLogPoller(logpoller.NewObservedORM(chainID, opts.DB, l), client, l, lpOpts) } } // note: gas estimator is started as a part of the txm - txm, gasEstimator, err := newEvmTxm(db, cfg.EVM(), cfg.EVMRPCEnabled(), cfg.Database(), cfg.Database().Listener(), client, l, logPoller, opts) + txm, gasEstimator, err := newEvmTxm(opts.SqlxDB, opts.DB, cfg.EVM(), cfg.EVMRPCEnabled(), cfg.Database(), cfg.Database().Listener(), client, l, logPoller, opts) if err != nil { return nil, fmt.Errorf("failed to instantiate 
EvmTxm for chain with ID %s: %w", chainID.String(), err) } @@ -280,7 +284,7 @@ func newChain(ctx context.Context, cfg *evmconfig.ChainScoped, nodes []*toml.Nod if !cfg.EVMRPCEnabled() { logBroadcaster = &log.NullBroadcaster{ErrMsg: fmt.Sprintf("Ethereum is disabled for chain %d", chainID)} } else if opts.GenLogBroadcaster == nil { - logORM := log.NewORM(db, l, cfg.Database(), *chainID) + logORM := log.NewORM(opts.SqlxDB, l, cfg.Database(), *chainID) logBroadcaster = log.NewBroadcaster(logORM, client, cfg.EVM(), l, highestSeenHead, opts.MailMon) } else { logBroadcaster = opts.GenLogBroadcaster(chainID) diff --git a/core/chains/legacyevm/chain_test.go b/core/chains/legacyevm/chain_test.go index e639db6e7cc..5dd7eb1c6ed 100644 --- a/core/chains/legacyevm/chain_test.go +++ b/core/chains/legacyevm/chain_test.go @@ -65,6 +65,7 @@ func TestChainOpts_Validate(t *testing.T) { o := legacyevm.ChainOpts{ AppConfig: tt.fields.AppConfig, MailMon: tt.fields.MailMon, + SqlxDB: tt.fields.DB, DB: tt.fields.DB, } if err := o.Validate(); (err != nil) != tt.wantErr { diff --git a/core/chains/legacyevm/evm_txm.go b/core/chains/legacyevm/evm_txm.go index 1606ea1b244..4ef515759f2 100644 --- a/core/chains/legacyevm/evm_txm.go +++ b/core/chains/legacyevm/evm_txm.go @@ -5,6 +5,7 @@ import ( "github.com/jmoiron/sqlx" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" evmconfig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" @@ -14,7 +15,8 @@ import ( ) func newEvmTxm( - db *sqlx.DB, + sqlxDB *sqlx.DB, + db sqlutil.DB, cfg evmconfig.EVM, evmRPCEnabled bool, databaseConfig txmgr.DatabaseConfig, @@ -51,6 +53,7 @@ func newEvmTxm( if opts.GenTxManager == nil { txm, err = txmgr.NewTxm( + sqlxDB, db, cfg, txmgr.NewEvmTxmFeeConfig(cfg.GasEstimator()), diff --git a/core/cmd/ocr2vrf_configure_commands.go 
b/core/cmd/ocr2vrf_configure_commands.go index 8e3026bfc95..1f9e3f0bc98 100644 --- a/core/cmd/ocr2vrf_configure_commands.go +++ b/core/cmd/ocr2vrf_configure_commands.go @@ -209,7 +209,7 @@ func (s *Shell) ConfigureOCR2VRFNode(c *cli.Context, owner *bind.TransactOpts, e if err != nil { return nil, err } - err = s.authorizeForwarder(c, ldb.DB(), lggr, chainID, ec, owner, sendingKeysAddresses) + err = s.authorizeForwarder(c, ldb.DB(), chainID, ec, owner, sendingKeysAddresses) if err != nil { return nil, err } @@ -319,7 +319,7 @@ func (s *Shell) appendForwarders(ctx context.Context, chainID int64, ks keystore return sendingKeys, sendingKeysAddresses, nil } -func (s *Shell) authorizeForwarder(c *cli.Context, db *sqlx.DB, lggr logger.Logger, chainID int64, ec *ethclient.Client, owner *bind.TransactOpts, sendingKeysAddresses []common.Address) error { +func (s *Shell) authorizeForwarder(c *cli.Context, db *sqlx.DB, chainID int64, ec *ethclient.Client, owner *bind.TransactOpts, sendingKeysAddresses []common.Address) error { ctx := s.ctx() // Replace the transmitter ID with the forwarder address. forwarderAddress := c.String("forwarder-address") diff --git a/core/cmd/shell.go b/core/cmd/shell.go index 5ca938b1b40..0eb909623e5 100644 --- a/core/cmd/shell.go +++ b/core/cmd/shell.go @@ -33,7 +33,9 @@ import ( "github.com/jmoiron/sqlx" "github.com/smartcontractkit/chainlink-common/pkg/loop" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" "github.com/smartcontractkit/chainlink-common/pkg/utils/mailbox" + "github.com/smartcontractkit/chainlink/v2/core/services/pg" "github.com/smartcontractkit/chainlink/v2/core/build" "github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm" @@ -143,7 +145,7 @@ type AppFactory interface { type ChainlinkAppFactory struct{} // NewApplication returns a new instance of the node with the given config. 
-func (n ChainlinkAppFactory) NewApplication(ctx context.Context, cfg chainlink.GeneralConfig, appLggr logger.Logger, db *sqlx.DB) (app chainlink.Application, err error) { +func (n ChainlinkAppFactory) NewApplication(ctx context.Context, cfg chainlink.GeneralConfig, appLggr logger.Logger, sqlxDB *sqlx.DB) (app chainlink.Application, err error) { err = initGlobals(cfg.Prometheus(), cfg.Tracing(), appLggr) if err != nil { appLggr.Errorf("Failed to initialize globals: %v", err) @@ -154,12 +156,14 @@ func (n ChainlinkAppFactory) NewApplication(ctx context.Context, cfg chainlink.G return nil, err } - err = handleNodeVersioning(ctx, db, appLggr, cfg.RootDir(), cfg.Database(), cfg.WebServer().HTTPPort()) + db := sqlutil.NewWrappedDB(sqlxDB, appLggr, sqlutil.TimeoutHook(pg.DefaultQueryTimeout), sqlutil.MonitorHook(cfg.Database().LogSQL)) + + err = handleNodeVersioning(ctx, sqlxDB, appLggr, cfg.RootDir(), cfg.Database(), cfg.WebServer().HTTPPort()) if err != nil { return nil, err } - keyStore := keystore.New(db, utils.GetScryptParams(cfg), appLggr, cfg.Database()) + keyStore := keystore.New(sqlxDB, utils.GetScryptParams(cfg), appLggr, cfg.Database()) mailMon := mailbox.NewMonitor(cfg.AppID().String(), appLggr.Named("Mailbox")) loopRegistry := plugins.NewLoopRegistry(appLggr, cfg.Tracing()) @@ -180,7 +184,7 @@ func (n ChainlinkAppFactory) NewApplication(ctx context.Context, cfg chainlink.G evmFactoryCfg := chainlink.EVMFactoryConfig{ CSAETHKeystore: keyStore, - ChainOpts: legacyevm.ChainOpts{AppConfig: cfg, MailMon: mailMon, DB: db}, + ChainOpts: legacyevm.ChainOpts{AppConfig: cfg, MailMon: mailMon, SqlxDB: sqlxDB, DB: sqlxDB}, } // evm always enabled for backward compatibility // TODO BCF-2510 this needs to change in order to clear the path for EVM extraction @@ -190,7 +194,7 @@ func (n ChainlinkAppFactory) NewApplication(ctx context.Context, cfg chainlink.G cosmosCfg := chainlink.CosmosFactoryConfig{ Keystore: keyStore.Cosmos(), TOMLConfigs: cfg.CosmosConfigs(), - DB: db, 
+ DB: sqlxDB, QConfig: cfg.Database(), } initOps = append(initOps, chainlink.InitCosmos(ctx, relayerFactory, cosmosCfg)) @@ -224,10 +228,11 @@ func (n ChainlinkAppFactory) NewApplication(ctx context.Context, cfg chainlink.G restrictedClient := clhttp.NewRestrictedHTTPClient(cfg.Database(), appLggr) unrestrictedClient := clhttp.NewUnrestrictedHTTPClient() - externalInitiatorManager := webhook.NewExternalInitiatorManager(db, unrestrictedClient, appLggr, cfg.Database()) + externalInitiatorManager := webhook.NewExternalInitiatorManager(sqlxDB, unrestrictedClient, appLggr, cfg.Database()) return chainlink.NewApplication(chainlink.ApplicationOpts{ Config: cfg, - SqlxDB: db, + SqlxDB: sqlxDB, + DB: db, KeyStore: keyStore, RelayerChainInteroperators: relayChainInterops, MailMon: mailMon, diff --git a/core/cmd/shell_local_test.go b/core/cmd/shell_local_test.go index d6f4946dd9d..0dcf77d0f8e 100644 --- a/core/cmd/shell_local_test.go +++ b/core/cmd/shell_local_test.go @@ -91,6 +91,7 @@ func TestShell_RunNodeWithPasswords(t *testing.T) { ChainOpts: legacyevm.ChainOpts{ AppConfig: cfg, MailMon: &mailbox.Monitor{}, + SqlxDB: db, DB: db, }, } @@ -195,6 +196,7 @@ func TestShell_RunNodeWithAPICredentialsFile(t *testing.T) { ChainOpts: legacyevm.ChainOpts{ AppConfig: cfg, MailMon: &mailbox.Monitor{}, + SqlxDB: db, DB: db, }, } diff --git a/core/internal/cltest/cltest.go b/core/internal/cltest/cltest.go index 08766d64c8b..9ed0d83d993 100644 --- a/core/internal/cltest/cltest.go +++ b/core/internal/cltest/cltest.go @@ -363,6 +363,7 @@ func NewApplicationWithConfig(t testing.TB, cfg chainlink.GeneralConfig, flagsAn ChainOpts: legacyevm.ChainOpts{ AppConfig: cfg, MailMon: mailMon, + SqlxDB: db, DB: db, }, CSAETHKeystore: keyStore, @@ -418,6 +419,7 @@ func NewApplicationWithConfig(t testing.TB, cfg chainlink.GeneralConfig, flagsAn Config: cfg, MailMon: mailMon, SqlxDB: db, + DB: db, KeyStore: keyStore, RelayerChainInteroperators: relayChainInterops, Logger: lggr, diff --git 
a/core/internal/cltest/factories.go b/core/internal/cltest/factories.go index 306d7468c39..44626b4f3b8 100644 --- a/core/internal/cltest/factories.go +++ b/core/internal/cltest/factories.go @@ -23,6 +23,7 @@ import ( "github.com/jmoiron/sqlx" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" txmgrcommon "github.com/smartcontractkit/chainlink/v2/common/txmgr" txmgrtypes "github.com/smartcontractkit/chainlink/v2/common/txmgr/types" "github.com/smartcontractkit/chainlink/v2/core/auth" @@ -314,7 +315,7 @@ func MustGenerateRandomKeyState(_ testing.TB) ethkey.State { return ethkey.State{Address: NewEIP55Address()} } -func MustInsertHead(t *testing.T, db *sqlx.DB, cfg pg.QConfig, number int64) evmtypes.Head { +func MustInsertHead(t *testing.T, db sqlutil.DB, number int64) evmtypes.Head { h := evmtypes.NewHead(big.NewInt(number), evmutils.NewHash(), evmutils.NewHash(), 0, ubig.New(&FixtureChainID)) horm := headtracker.NewORM(FixtureChainID, db) diff --git a/core/internal/features/features_test.go b/core/internal/features/features_test.go index b08bf943574..9c6d9cb8b62 100644 --- a/core/internal/features/features_test.go +++ b/core/internal/features/features_test.go @@ -774,7 +774,7 @@ func setupForwarderEnabledNode(t *testing.T, owner *bind.TransactOpts, portV2 in b.Commit() // add forwarder address to be tracked in db - forwarderORM := forwarders.NewORM(app.GetSqlxDB()) + forwarderORM := forwarders.NewORM(app.GetDB()) chainID := ubig.Big(*b.Blockchain().Config().ChainID) _, err = forwarderORM.CreateForwarder(testutils.Context(t), forwarder, chainID) require.NoError(t, err) diff --git a/core/internal/features/ocr2/features_ocr2_test.go b/core/internal/features/ocr2/features_ocr2_test.go index 5bfa1812a40..ce0f3087187 100644 --- a/core/internal/features/ocr2/features_ocr2_test.go +++ b/core/internal/features/ocr2/features_ocr2_test.go @@ -171,7 +171,7 @@ func setupNodeOCR2( b.Commit() // add forwarder address to be tracked in db - forwarderORM := 
forwarders.NewORM(app.GetSqlxDB()) + forwarderORM := forwarders.NewORM(app.GetDB()) chainID := ubig.Big(*b.Blockchain().Config().ChainID) _, err2 = forwarderORM.CreateForwarder(testutils.Context(t), faddr, chainID) require.NoError(t, err2) diff --git a/core/internal/mocks/application.go b/core/internal/mocks/application.go index 20874e4b60e..cb0415e9203 100644 --- a/core/internal/mocks/application.go +++ b/core/internal/mocks/application.go @@ -31,6 +31,8 @@ import ( sessions "github.com/smartcontractkit/chainlink/v2/core/sessions" + sqlutil "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" + sqlx "github.com/jmoiron/sqlx" txmgr "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr" @@ -205,6 +207,26 @@ func (_m *Application) GetConfig() chainlink.GeneralConfig { return r0 } +// GetDB provides a mock function with given fields: +func (_m *Application) GetDB() sqlutil.DB { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetDB") + } + + var r0 sqlutil.DB + if rf, ok := ret.Get(0).(func() sqlutil.DB); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(sqlutil.DB) + } + } + + return r0 +} + // GetExternalInitiatorManager provides a mock function with given fields: func (_m *Application) GetExternalInitiatorManager() webhook.ExternalInitiatorManager { ret := _m.Called() diff --git a/core/internal/testutils/evmtest/evmtest.go b/core/internal/testutils/evmtest/evmtest.go index cc56c3c9e9b..83c356bf1a3 100644 --- a/core/internal/testutils/evmtest/evmtest.go +++ b/core/internal/testutils/evmtest/evmtest.go @@ -89,6 +89,7 @@ func NewChainRelayExtOpts(t testing.TB, testopts TestChainOpts) legacyevm.ChainR AppConfig: testopts.GeneralConfig, MailMon: testopts.MailMon, GasEstimator: testopts.GasEstimator, + SqlxDB: testopts.DB, DB: testopts.DB, }, } diff --git a/core/scripts/go.mod b/core/scripts/go.mod index 9b7952c10a5..e068e5d5cef 100644 --- a/core/scripts/go.mod +++ b/core/scripts/go.mod @@ -21,13 
+21,13 @@ require ( github.com/prometheus/client_golang v1.17.0 github.com/shopspring/decimal v1.3.1 github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35 - github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69 + github.com/smartcontractkit/chainlink-common v0.1.7-0.20240312193929-9bf02a194958 github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868 github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000 github.com/smartcontractkit/libocr v0.0.0-20240229181116-bfb2432a7a66 github.com/spf13/cobra v1.6.1 github.com/spf13/viper v1.15.0 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/umbracle/ethgo v0.1.3 github.com/umbracle/fastrlp v0.0.0-20220527094140-59d5dd30e722 github.com/urfave/cli v1.22.14 diff --git a/core/scripts/go.sum b/core/scripts/go.sum index 077f4538d01..9f923016fa9 100644 --- a/core/scripts/go.sum +++ b/core/scripts/go.sum @@ -1174,8 +1174,8 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35 h1:GNhRKD3izyzAoGMXDvVUAwEuzz4Atdj3U3RH7eak5Is= github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35/go.mod h1:2I0dWdYdK6jHPnSYYy7Y7Xp7L0YTnJ3KZtkhLQflsTU= -github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69 h1:LsusfMA80iEYoFOad9gcuLRQYdi0rP7PX/dsXq6Y7yw= -github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69/go.mod h1:6aXWSEQawX2oZXcPPOdxnEGufAhj7PqPKolXf6ijRGA= +github.com/smartcontractkit/chainlink-common v0.1.7-0.20240312193929-9bf02a194958 h1:3AspKDXioDI0ROiFby3bcgWdRaDh3OYa8mPsud0HjHg= +github.com/smartcontractkit/chainlink-common v0.1.7-0.20240312193929-9bf02a194958/go.mod 
h1:/bJGelrpXvCcDCuaIgt91UN4B9YxZdK1O7VX5lzbysI= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8/go.mod h1:a65NtrK4xZb01mf0dDNghPkN2wXgcqFQ55ADthVBgMc= github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540 h1:xFSv8561jsLtF6gYZr/zW2z5qUUAkcFkApin2mnbYTo= @@ -1233,8 +1233,9 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1246,8 +1247,9 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify 
v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= diff --git a/core/services/chainlink/application.go b/core/services/chainlink/application.go index d95458838bc..beaa532c73c 100644 --- a/core/services/chainlink/application.go +++ b/core/services/chainlink/application.go @@ -20,6 +20,7 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/loop" commonservices "github.com/smartcontractkit/chainlink-common/pkg/services" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" "github.com/smartcontractkit/chainlink-common/pkg/utils" "github.com/smartcontractkit/chainlink-common/pkg/utils/mailbox" "github.com/smartcontractkit/chainlink/v2/core/capabilities" @@ -74,7 +75,8 @@ type Application interface { GetLogger() logger.SugaredLogger GetAuditLogger() audit.AuditLogger GetHealthChecker() services.Checker - GetSqlxDB() *sqlx.DB + GetSqlxDB() *sqlx.DB // Deprecated: use GetDB + GetDB() sqlutil.DB GetConfig() GeneralConfig SetLogLevel(lvl zapcore.Level) error GetKeyStore() keystore.Master @@ -140,7 +142,8 @@ type ChainlinkApplication struct { logger logger.SugaredLogger AuditLogger audit.AuditLogger closeLogger func() error - sqlxDB *sqlx.DB + sqlxDB *sqlx.DB // Deprecated: use db instead + db sqlutil.DB secretGenerator SecretGenerator profiler *pyroscope.Profiler loopRegistry *plugins.LoopRegistry @@ -153,7 +156,8 @@ type ApplicationOpts struct { Config GeneralConfig Logger logger.Logger MailMon *mailbox.Monitor - SqlxDB *sqlx.DB + SqlxDB *sqlx.DB // Deprecated: use DB instead + DB sqlutil.DB KeyStore keystore.Master RelayerChainInteroperators *CoreRelayerChainInteroperators AuditLogger audit.AuditLogger @@ 
-176,7 +180,7 @@ type ApplicationOpts struct { func NewApplication(opts ApplicationOpts) (Application, error) { var srvcs []services.ServiceCtx auditLogger := opts.AuditLogger - db := opts.SqlxDB + sqlxDB := opts.SqlxDB cfg := opts.Config relayerChainInterops := opts.RelayerChainInteroperators mailMon := opts.MailMon @@ -257,12 +261,12 @@ func NewApplication(opts ApplicationOpts) (Application, error) { srvcs = append(srvcs, mailMon) srvcs = append(srvcs, relayerChainInterops.Services()...) - promReporter := promreporter.NewPromReporter(db.DB, legacyEVMChains, globalLogger) + promReporter := promreporter.NewPromReporter(sqlxDB.DB, legacyEVMChains, globalLogger) srvcs = append(srvcs, promReporter) // Initialize Local Users ORM and Authentication Provider specified in config // BasicAdminUsersORM is initialized and required regardless of separate Authentication Provider - localAdminUsersORM := localauth.NewORM(db, cfg.WebServer().SessionTimeout().Duration(), globalLogger, cfg.Database(), auditLogger) + localAdminUsersORM := localauth.NewORM(sqlxDB, cfg.WebServer().SessionTimeout().Duration(), globalLogger, cfg.Database(), auditLogger) // Initialize Sessions ORM based on environment configured authenticator // localDB auth or remote LDAP auth @@ -274,26 +278,26 @@ func NewApplication(opts ApplicationOpts) (Application, error) { case sessions.LDAPAuth: var err error authenticationProvider, err = ldapauth.NewLDAPAuthenticator( - db, cfg.Database(), cfg.WebServer().LDAP(), cfg.Insecure().DevWebServer(), globalLogger, auditLogger, + sqlxDB, cfg.Database(), cfg.WebServer().LDAP(), cfg.Insecure().DevWebServer(), globalLogger, auditLogger, ) if err != nil { return nil, errors.Wrap(err, "NewApplication: failed to initialize LDAP Authentication module") } - sessionReaper = ldapauth.NewLDAPServerStateSync(db, cfg.Database(), cfg.WebServer().LDAP(), globalLogger) + sessionReaper = ldapauth.NewLDAPServerStateSync(sqlxDB, cfg.Database(), cfg.WebServer().LDAP(), globalLogger) case 
sessions.LocalAuth: - authenticationProvider = localauth.NewORM(db, cfg.WebServer().SessionTimeout().Duration(), globalLogger, cfg.Database(), auditLogger) - sessionReaper = localauth.NewSessionReaper(db.DB, cfg.WebServer(), globalLogger) + authenticationProvider = localauth.NewORM(sqlxDB, cfg.WebServer().SessionTimeout().Duration(), globalLogger, cfg.Database(), auditLogger) + sessionReaper = localauth.NewSessionReaper(sqlxDB.DB, cfg.WebServer(), globalLogger) default: return nil, errors.Errorf("NewApplication: Unexpected 'AuthenticationMethod': %s supported values: %s, %s", authMethod, sessions.LocalAuth, sessions.LDAPAuth) } var ( - pipelineORM = pipeline.NewORM(db, globalLogger, cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) - bridgeORM = bridges.NewORM(db, globalLogger, cfg.Database()) - mercuryORM = mercury.NewORM(db, globalLogger, cfg.Database()) + pipelineORM = pipeline.NewORM(sqlxDB, globalLogger, cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + bridgeORM = bridges.NewORM(sqlxDB, globalLogger, cfg.Database()) + mercuryORM = mercury.NewORM(sqlxDB, globalLogger, cfg.Database()) pipelineRunner = pipeline.NewRunner(pipelineORM, bridgeORM, cfg.JobPipeline(), cfg.WebServer(), legacyEVMChains, keyStore.Eth(), keyStore.VRF(), globalLogger, restrictedHTTPClient, unrestrictedHTTPClient) - jobORM = job.NewORM(db, pipelineORM, bridgeORM, keyStore, globalLogger, cfg.Database()) - txmORM = txmgr.NewTxStore(db, globalLogger, cfg.Database()) + jobORM = job.NewORM(sqlxDB, pipelineORM, bridgeORM, keyStore, globalLogger, cfg.Database()) + txmORM = txmgr.NewTxStore(sqlxDB, globalLogger, cfg.Database()) streamRegistry = streams.NewRegistry(globalLogger, pipelineRunner) ) @@ -313,14 +317,14 @@ func NewApplication(opts ApplicationOpts) (Application, error) { legacyEVMChains, mailMon), job.Keeper: keeper.NewDelegate( - db, + sqlxDB, jobORM, pipelineRunner, globalLogger, legacyEVMChains, mailMon), job.VRF: vrf.NewDelegate( - db, + sqlxDB, keyStore, pipelineRunner, 
pipelineORM, @@ -346,7 +350,7 @@ func NewApplication(opts ApplicationOpts) (Application, error) { job.Gateway: gateway.NewDelegate( legacyEVMChains, keyStore.Eth(), - db, + sqlxDB, cfg.Database(), globalLogger), job.Stream: streams.NewDelegate( @@ -372,7 +376,7 @@ func NewApplication(opts ApplicationOpts) (Application, error) { jobORM, pipelineORM, pipelineRunner, - db, + sqlxDB, legacyEVMChains, globalLogger, ) @@ -385,7 +389,7 @@ func NewApplication(opts ApplicationOpts) (Application, error) { if err := ocrcommon.ValidatePeerWrapperConfig(cfg.P2P()); err != nil { return nil, err } - peerWrapper = ocrcommon.NewSingletonPeerWrapper(keyStore, cfg.P2P(), cfg.OCR(), cfg.Database(), db, globalLogger) + peerWrapper = ocrcommon.NewSingletonPeerWrapper(keyStore, cfg.P2P(), cfg.OCR(), cfg.Database(), sqlxDB, globalLogger) srvcs = append(srvcs, peerWrapper) } else { return nil, fmt.Errorf("P2P stack required for OCR or OCR2") @@ -393,7 +397,7 @@ func NewApplication(opts ApplicationOpts) (Application, error) { if cfg.OCR().Enabled() { delegates[job.OffchainReporting] = ocr.NewDelegate( - db, + sqlxDB, jobORM, keyStore, pipelineRunner, @@ -412,7 +416,7 @@ func NewApplication(opts ApplicationOpts) (Application, error) { registrarConfig := plugins.NewRegistrarConfig(opts.GRPCOpts, opts.LoopRegistry.Register) ocr2DelegateConfig := ocr2.NewDelegateConfig(cfg.OCR2(), cfg.Mercury(), cfg.Threshold(), cfg.Insecure(), cfg.JobPipeline(), cfg.Database(), registrarConfig) delegates[job.OffchainReporting2] = ocr2.NewDelegate( - db, + sqlxDB, jobORM, bridgeORM, mercuryORM, @@ -432,7 +436,7 @@ func NewApplication(opts ApplicationOpts) (Application, error) { registry, ) delegates[job.Bootstrap] = ocrbootstrap.NewDelegateBootstrap( - db, + sqlxDB, jobORM, peerWrapper, globalLogger, @@ -450,7 +454,7 @@ func NewApplication(opts ApplicationOpts) (Application, error) { for _, c := range legacyEVMChains.Slice() { lbs = append(lbs, c.LogBroadcaster()) } - jobSpawner := job.NewSpawner(jobORM, 
cfg.Database(), healthChecker, delegates, db, globalLogger, lbs) + jobSpawner := job.NewSpawner(jobORM, cfg.Database(), healthChecker, delegates, sqlxDB, globalLogger, lbs) srvcs = append(srvcs, jobSpawner, pipelineRunner) // We start the log poller after the job spawner @@ -463,11 +467,11 @@ func NewApplication(opts ApplicationOpts) (Application, error) { var feedsService feeds.Service if cfg.Feature().FeedsManager() { - feedsORM := feeds.NewORM(db, opts.Logger, cfg.Database()) + feedsORM := feeds.NewORM(sqlxDB, opts.Logger, cfg.Database()) feedsService = feeds.NewService( feedsORM, jobORM, - db, + sqlxDB, jobSpawner, keyStore, cfg.Insecure(), @@ -518,6 +522,7 @@ func NewApplication(opts ApplicationOpts) (Application, error) { loopRegistry: loopRegistry, sqlxDB: opts.SqlxDB, + db: opts.DB, // NOTE: Can keep things clean by putting more things in srvcs instead of manually start/closing srvcs: srvcs, @@ -826,6 +831,10 @@ func (app *ChainlinkApplication) GetSqlxDB() *sqlx.DB { return app.sqlxDB } +func (app *ChainlinkApplication) GetDB() sqlutil.DB { + return app.db +} + // Returns the configuration to use for creating and authenticating // new WebAuthn credentials func (app *ChainlinkApplication) GetWebAuthnConfiguration() sessions.WebAuthnConfiguration { diff --git a/core/services/chainlink/relayer_chain_interoperators_test.go b/core/services/chainlink/relayer_chain_interoperators_test.go index ea1a9ec3746..4cb6f57f8ba 100644 --- a/core/services/chainlink/relayer_chain_interoperators_test.go +++ b/core/services/chainlink/relayer_chain_interoperators_test.go @@ -206,6 +206,7 @@ func TestCoreRelayerChainInteroperators(t *testing.T) { ChainOpts: legacyevm.ChainOpts{ AppConfig: cfg, MailMon: &mailbox.Monitor{}, + SqlxDB: db, DB: db, }, CSAETHKeystore: keyStore, @@ -280,6 +281,7 @@ func TestCoreRelayerChainInteroperators(t *testing.T) { AppConfig: cfg, MailMon: &mailbox.Monitor{}, + SqlxDB: db, DB: db, }, CSAETHKeystore: keyStore, diff --git 
a/core/services/chainlink/relayer_factory.go b/core/services/chainlink/relayer_factory.go index c42ca77dc39..f5cb1badb95 100644 --- a/core/services/chainlink/relayer_factory.go +++ b/core/services/chainlink/relayer_factory.go @@ -67,7 +67,7 @@ func (r *RelayerFactory) NewEVM(ctx context.Context, config EVMFactoryConfig) (m } relayerOpts := evmrelay.RelayerOpts{ - DB: ccOpts.DB, + DB: ccOpts.SqlxDB, QConfig: ccOpts.AppConfig.Database(), CSAETHKeystore: config.CSAETHKeystore, MercuryPool: r.MercuryPool, diff --git a/core/services/keeper/registry1_1_synchronizer_test.go b/core/services/keeper/registry1_1_synchronizer_test.go index a4f03d4d34a..e0c2ebb2b3a 100644 --- a/core/services/keeper/registry1_1_synchronizer_test.go +++ b/core/services/keeper/registry1_1_synchronizer_test.go @@ -229,8 +229,7 @@ func Test_RegistrySynchronizer1_1_ConfigSetLog(t *testing.T) { registryMock.MockResponse("getKeeperList", []common.Address{fromAddress}).Once() registryMock.MockResponse("getConfig", newConfig).Once() - cfg := configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash} log := registry1_1.KeeperRegistryConfigSet{} logBroadcast := logmocks.NewBroadcast(t) @@ -276,8 +275,7 @@ func Test_RegistrySynchronizer1_1_KeepersUpdatedLog(t *testing.T) { registryMock.MockResponse("getConfig", registryConfig1_1).Once() registryMock.MockResponse("getKeeperList", addresses).Once() - cfg := configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash} log := registry1_1.KeeperRegistryKeepersUpdated{} logBroadcast := logmocks.NewBroadcast(t) @@ -316,8 +314,7 @@ func Test_RegistrySynchronizer1_1_UpkeepCanceledLog(t *testing.T) { cltest.WaitForCount(t, db, "keeper_registries", 1) cltest.WaitForCount(t, db, "upkeep_registrations", 3) - cfg := 
configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash} log := registry1_1.KeeperRegistryUpkeepCanceled{Id: big.NewInt(1)} logBroadcast := logmocks.NewBroadcast(t) @@ -357,8 +354,7 @@ func Test_RegistrySynchronizer1_1_UpkeepRegisteredLog(t *testing.T) { registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_1ABI, contractAddress) registryMock.MockResponse("getUpkeep", upkeepConfig1_1).Once() - cfg := configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash} log := registry1_1.KeeperRegistryUpkeepRegistered{Id: big.NewInt(1)} logBroadcast := logmocks.NewBroadcast(t) @@ -399,8 +395,7 @@ func Test_RegistrySynchronizer1_1_UpkeepPerformedLog(t *testing.T) { pgtest.MustExec(t, db, `UPDATE upkeep_registrations SET last_run_block_height = 100`) - cfg := configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash, BlockNumber: 200} log := registry1_1.KeeperRegistryUpkeepPerformed{Id: big.NewInt(0), From: fromAddress} logBroadcast := logmocks.NewBroadcast(t) diff --git a/core/services/keeper/registry1_2_synchronizer_test.go b/core/services/keeper/registry1_2_synchronizer_test.go index b7456ad94e4..387452dddf9 100644 --- a/core/services/keeper/registry1_2_synchronizer_test.go +++ b/core/services/keeper/registry1_2_synchronizer_test.go @@ -252,8 +252,7 @@ func Test_RegistrySynchronizer1_2_ConfigSetLog(t *testing.T) { Keepers: []common.Address{fromAddress}, }).Once() - cfg := configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash} log := registry1_2.KeeperRegistryConfigSet{} 
logBroadcast := logmocks.NewBroadcast(t) @@ -303,8 +302,7 @@ func Test_RegistrySynchronizer1_2_KeepersUpdatedLog(t *testing.T) { Keepers: addresses, }).Once() - cfg := configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash} log := registry1_2.KeeperRegistryKeepersUpdated{} logBroadcast := logmocks.NewBroadcast(t) @@ -345,8 +343,7 @@ func Test_RegistrySynchronizer1_2_UpkeepCanceledLog(t *testing.T) { cltest.WaitForCount(t, db, "keeper_registries", 1) cltest.WaitForCount(t, db, "upkeep_registrations", 3) - cfg := configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash} log := registry1_2.KeeperRegistryUpkeepCanceled{Id: big.NewInt(3)} logBroadcast := logmocks.NewBroadcast(t) @@ -387,8 +384,7 @@ func Test_RegistrySynchronizer1_2_UpkeepRegisteredLog(t *testing.T) { registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_2ABI, contractAddress) registryMock.MockResponse("getUpkeep", upkeepConfig1_2).Once() - cfg := configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash} log := registry1_2.KeeperRegistryUpkeepRegistered{Id: big.NewInt(420)} logBroadcast := logmocks.NewBroadcast(t) @@ -430,8 +426,7 @@ func Test_RegistrySynchronizer1_2_UpkeepPerformedLog(t *testing.T) { pgtest.MustExec(t, db, `UPDATE upkeep_registrations SET last_run_block_height = 100`) - cfg := configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash, BlockNumber: 200} log := registry1_2.KeeperRegistryUpkeepPerformed{Id: big.NewInt(3), From: fromAddress} logBroadcast := logmocks.NewBroadcast(t) @@ -495,8 
+490,7 @@ func Test_RegistrySynchronizer1_2_UpkeepGasLimitSetLog(t *testing.T) { newConfig.ExecuteGas = 4_000_000 // change from default registryMock.MockResponse("getUpkeep", newConfig).Once() - cfg := configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash} log := registry1_2.KeeperRegistryUpkeepGasLimitSet{Id: big.NewInt(3), GasLimit: big.NewInt(4_000_000)} logBroadcast := logmocks.NewBroadcast(t) @@ -537,8 +531,7 @@ func Test_RegistrySynchronizer1_2_UpkeepReceivedLog(t *testing.T) { registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_2ABI, contractAddress) registryMock.MockResponse("getUpkeep", upkeepConfig1_2).Once() - cfg := configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash} log := registry1_2.KeeperRegistryUpkeepReceived{Id: big.NewInt(420)} logBroadcast := logmocks.NewBroadcast(t) @@ -576,8 +569,7 @@ func Test_RegistrySynchronizer1_2_UpkeepMigratedLog(t *testing.T) { cltest.WaitForCount(t, db, "keeper_registries", 1) cltest.WaitForCount(t, db, "upkeep_registrations", 3) - cfg := configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash} log := registry1_2.KeeperRegistryUpkeepMigrated{Id: big.NewInt(3)} logBroadcast := logmocks.NewBroadcast(t) diff --git a/core/services/keeper/registry1_3_synchronizer_test.go b/core/services/keeper/registry1_3_synchronizer_test.go index 77bb873e1d0..6fc919775cc 100644 --- a/core/services/keeper/registry1_3_synchronizer_test.go +++ b/core/services/keeper/registry1_3_synchronizer_test.go @@ -257,8 +257,7 @@ func Test_RegistrySynchronizer1_3_ConfigSetLog(t *testing.T) { Keepers: []common.Address{fromAddress}, }).Once() - cfg := 
configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash} log := registry1_3.KeeperRegistryConfigSet{} logBroadcast := logmocks.NewBroadcast(t) @@ -308,8 +307,7 @@ func Test_RegistrySynchronizer1_3_KeepersUpdatedLog(t *testing.T) { Keepers: addresses, }).Once() - cfg := configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash} log := registry1_3.KeeperRegistryKeepersUpdated{} logBroadcast := logmocks.NewBroadcast(t) @@ -350,8 +348,7 @@ func Test_RegistrySynchronizer1_3_UpkeepCanceledLog(t *testing.T) { cltest.WaitForCount(t, db, "keeper_registries", 1) cltest.WaitForCount(t, db, "upkeep_registrations", 3) - cfg := configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash} log := registry1_3.KeeperRegistryUpkeepCanceled{Id: big.NewInt(3)} logBroadcast := logmocks.NewBroadcast(t) @@ -392,8 +389,7 @@ func Test_RegistrySynchronizer1_3_UpkeepRegisteredLog(t *testing.T) { registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_3ABI, contractAddress) registryMock.MockResponse("getUpkeep", upkeepConfig1_3).Once() - cfg := configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash} log := registry1_3.KeeperRegistryUpkeepRegistered{Id: big.NewInt(420)} logBroadcast := logmocks.NewBroadcast(t) @@ -435,8 +431,7 @@ func Test_RegistrySynchronizer1_3_UpkeepPerformedLog(t *testing.T) { pgtest.MustExec(t, db, `UPDATE upkeep_registrations SET last_run_block_height = 100`) - cfg := configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := 
cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash, BlockNumber: 200} log := registry1_3.KeeperRegistryUpkeepPerformed{Id: big.NewInt(3), From: fromAddress} logBroadcast := logmocks.NewBroadcast(t) @@ -500,8 +495,7 @@ func Test_RegistrySynchronizer1_3_UpkeepGasLimitSetLog(t *testing.T) { newConfig.ExecuteGas = 4_000_000 // change from default registryMock.MockResponse("getUpkeep", newConfig).Once() - cfg := configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash} log := registry1_3.KeeperRegistryUpkeepGasLimitSet{Id: big.NewInt(3), GasLimit: big.NewInt(4_000_000)} logBroadcast := logmocks.NewBroadcast(t) @@ -542,8 +536,7 @@ func Test_RegistrySynchronizer1_3_UpkeepReceivedLog(t *testing.T) { registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_3ABI, contractAddress) registryMock.MockResponse("getUpkeep", upkeepConfig1_3).Once() - cfg := configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash} log := registry1_3.KeeperRegistryUpkeepReceived{Id: big.NewInt(420)} logBroadcast := logmocks.NewBroadcast(t) @@ -581,8 +574,7 @@ func Test_RegistrySynchronizer1_3_UpkeepMigratedLog(t *testing.T) { cltest.WaitForCount(t, db, "keeper_registries", 1) cltest.WaitForCount(t, db, "upkeep_registrations", 3) - cfg := configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash} log := registry1_3.KeeperRegistryUpkeepMigrated{Id: big.NewInt(3)} logBroadcast := logmocks.NewBroadcast(t) @@ -622,8 +614,7 @@ func Test_RegistrySynchronizer1_3_UpkeepPausedLog_UpkeepUnpausedLog(t *testing.T cltest.WaitForCount(t, db, "keeper_registries", 1) cltest.WaitForCount(t, db, "upkeep_registrations", 3) - 
cfg := configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash} log := registry1_3.KeeperRegistryUpkeepPaused{Id: upkeepId} logBroadcast := logmocks.NewBroadcast(t) @@ -638,8 +629,7 @@ func Test_RegistrySynchronizer1_3_UpkeepPausedLog_UpkeepUnpausedLog(t *testing.T cltest.WaitForCount(t, db, "upkeep_registrations", 2) - cfg = configtest.NewGeneralConfig(t, nil) - head = cltest.MustInsertHead(t, db, cfg.Database(), 2) + head = cltest.MustInsertHead(t, db, 2) rawLog = types.Log{BlockHash: head.Hash} unpausedlog := registry1_3.KeeperRegistryUpkeepUnpaused{Id: upkeepId} logBroadcast = logmocks.NewBroadcast(t) @@ -691,8 +681,7 @@ func Test_RegistrySynchronizer1_3_UpkeepCheckDataUpdatedLog(t *testing.T) { cltest.WaitForCount(t, db, "keeper_registries", 1) cltest.WaitForCount(t, db, "upkeep_registrations", 1) - cfg := configtest.NewGeneralConfig(t, nil) - head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + head := cltest.MustInsertHead(t, db, 1) rawLog := types.Log{BlockHash: head.Hash} _ = logmocks.NewBroadcast(t) newCheckData := []byte("Chainlink") diff --git a/core/services/ocr2/plugins/ocr2keeper/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/integration_test.go index f779f266b6f..236e89ae671 100644 --- a/core/services/ocr2/plugins/ocr2keeper/integration_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/integration_test.go @@ -436,7 +436,7 @@ func setupForwarderForNode( backend.Commit() // add forwarder address to be tracked in db - forwarderORM := forwarders.NewORM(app.GetSqlxDB()) + forwarderORM := forwarders.NewORM(app.GetDB()) chainID := ubig.Big(*backend.Blockchain().Config().ChainID) _, err = forwarderORM.CreateForwarder(testutils.Context(t), faddr, chainID) require.NoError(t, err) diff --git a/core/services/ocr2/plugins/ocr2vrf/internal/ocr2vrf_integration_test.go 
b/core/services/ocr2/plugins/ocr2vrf/internal/ocr2vrf_integration_test.go index 38f2aa5f855..8f743a370c2 100644 --- a/core/services/ocr2/plugins/ocr2vrf/internal/ocr2vrf_integration_test.go +++ b/core/services/ocr2/plugins/ocr2vrf/internal/ocr2vrf_integration_test.go @@ -285,7 +285,7 @@ func setupNodeOCR2( b.Commit() // Add the forwarder to the node's forwarder manager. - forwarderORM := forwarders.NewORM(app.GetSqlxDB()) + forwarderORM := forwarders.NewORM(app.GetDB()) chainID := ubig.Big(*b.Blockchain().Config().ChainID) _, err = forwarderORM.CreateForwarder(testutils.Context(t), faddr, chainID) require.NoError(t, err) diff --git a/core/services/pg/q.go b/core/services/pg/q.go index 52225ac6168..30f2d01c511 100644 --- a/core/services/pg/q.go +++ b/core/services/pg/q.go @@ -10,22 +10,16 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/jmoiron/sqlx" "github.com/lib/pq" "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - - "github.com/jmoiron/sqlx" "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" ) -var promSQLQueryTime = promauto.NewHistogram(prometheus.HistogramOpts{ - Name: "sql_query_timeout_percent", - Help: "SQL query time as a pecentage of timeout.", - Buckets: []float64{10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120}, -}) - +// QOpt is deprecated. Use [sqlutil.DB] with [sqlutil.QueryHook]s instead. +// // QOpt pattern for ORM methods aims to clarify usage and remove some common footguns, notably: // // 1. It should be easy and obvious how to pass a parent context or a transaction into an ORM method @@ -114,6 +108,7 @@ type QConfig interface { // // This is not the prettiest construct but without macros its about the best we // can do. 
+// Deprecated: Use a `sqlutil.DB` with `sqlutil.QueryHook`s instead type Q struct { Queryer ParentCtx context.Context @@ -385,5 +380,5 @@ func (q *queryLogger) postSqlLog(ctx context.Context, begin time.Time) { q.logger.Warnw("SLOW SQL QUERY", kvs...) } - promSQLQueryTime.Observe(pct) + sqlutil.PromSQLQueryTime.Observe(pct) } diff --git a/core/services/pg/sqlx.go b/core/services/pg/sqlx.go index c252edf9f5a..1316ba9c103 100644 --- a/core/services/pg/sqlx.go +++ b/core/services/pg/sqlx.go @@ -12,6 +12,7 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" ) +// Queryer is deprecated. Use sqlutil.DB instead type Queryer interface { sqlx.Ext sqlx.ExtContext diff --git a/core/services/promreporter/prom_reporter_test.go b/core/services/promreporter/prom_reporter_test.go index 9e634779af2..a2a744ae924 100644 --- a/core/services/promreporter/prom_reporter_test.go +++ b/core/services/promreporter/prom_reporter_test.go @@ -48,6 +48,7 @@ func newLegacyChainContainer(t *testing.T, db *sqlx.DB) legacyevm.LegacyChainCon lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr), ethClient, lggr, lpOpts) txm, err := txmgr.NewTxm( + db, db, evmConfig, evmConfig.GasEstimator(), diff --git a/core/services/vrf/delegate_test.go b/core/services/vrf/delegate_test.go index 7376716d53b..29bbe41d288 100644 --- a/core/services/vrf/delegate_test.go +++ b/core/services/vrf/delegate_test.go @@ -82,7 +82,7 @@ func buildVrfUni(t *testing.T, db *sqlx.DB, cfg chainlink.GeneralConfig) vrfUniv btORM := bridges.NewORM(db, lggr, cfg.Database()) ks := keystore.NewInMemory(db, utils.FastScryptParams, lggr, cfg.Database()) _, dbConfig, evmConfig := txmgr.MakeTestConfigs(t) - txm, err := txmgr.NewTxm(db, evmConfig, evmConfig.GasEstimator(), evmConfig.Transactions(), dbConfig, dbConfig.Listener(), ec, logger.TestLogger(t), nil, ks.Eth(), nil) + txm, err := txmgr.NewTxm(db, db, evmConfig, evmConfig.GasEstimator(), evmConfig.Transactions(), dbConfig, 
dbConfig.Listener(), ec, logger.TestLogger(t), nil, ks.Eth(), nil) orm := headtracker.NewORM(*testutils.FixtureChainID, db) require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), cltest.Head(51))) jrm := job.NewORM(db, prm, btORM, ks, lggr, cfg.Database()) diff --git a/core/services/webhook/authorizer.go b/core/services/webhook/authorizer.go index 91aac9cc5fb..88a26188948 100644 --- a/core/services/webhook/authorizer.go +++ b/core/services/webhook/authorizer.go @@ -2,10 +2,10 @@ package webhook import ( "context" - "database/sql" "github.com/google/uuid" + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" "github.com/smartcontractkit/chainlink/v2/core/bridges" "github.com/smartcontractkit/chainlink/v2/core/sessions" ) @@ -24,7 +24,7 @@ var ( _ Authorizer = &neverAuthorizer{} ) -func NewAuthorizer(db *sql.DB, user *sessions.User, ei *bridges.ExternalInitiator) Authorizer { +func NewAuthorizer(db sqlutil.DB, user *sessions.User, ei *bridges.ExternalInitiator) Authorizer { if user != nil { return &alwaysAuthorizer{} } else if ei != nil { @@ -34,11 +34,11 @@ func NewAuthorizer(db *sql.DB, user *sessions.User, ei *bridges.ExternalInitiato } type eiAuthorizer struct { - db *sql.DB + db sqlutil.DB ei bridges.ExternalInitiator } -func NewEIAuthorizer(db *sql.DB, ei bridges.ExternalInitiator) *eiAuthorizer { +func NewEIAuthorizer(db sqlutil.DB, ei bridges.ExternalInitiator) *eiAuthorizer { return &eiAuthorizer{db, ei} } @@ -46,7 +46,7 @@ func (ea *eiAuthorizer) CanRun(ctx context.Context, config AuthorizerConfig, job if !config.ExternalInitiatorsEnabled() { return false, nil } - row := ea.db.QueryRowContext(ctx, ` + row := ea.db.QueryRowxContext(ctx, ` SELECT EXISTS ( SELECT 1 FROM external_initiator_webhook_specs JOIN jobs ON external_initiator_webhook_specs.webhook_spec_id = jobs.webhook_spec_id diff --git a/core/services/webhook/authorizer_test.go b/core/services/webhook/authorizer_test.go index b6eb2feaccb..35292c6bbb9 100644 --- 
a/core/services/webhook/authorizer_test.go +++ b/core/services/webhook/authorizer_test.go @@ -51,7 +51,7 @@ func Test_Authorizer(t *testing.T) { require.NoError(t, err) t.Run("no user no ei never authorizes", func(t *testing.T) { - a := webhook.NewAuthorizer(db.DB, nil, nil) + a := webhook.NewAuthorizer(db, nil, nil) can, err := a.CanRun(testutils.Context(t), nil, jobWithFooAndBarEI.ExternalJobID) require.NoError(t, err) @@ -65,7 +65,7 @@ func Test_Authorizer(t *testing.T) { }) t.Run("with user no ei always authorizes", func(t *testing.T) { - a := webhook.NewAuthorizer(db.DB, &sessions.User{}, nil) + a := webhook.NewAuthorizer(db, &sessions.User{}, nil) can, err := a.CanRun(testutils.Context(t), nil, jobWithFooAndBarEI.ExternalJobID) require.NoError(t, err) @@ -79,7 +79,7 @@ func Test_Authorizer(t *testing.T) { }) t.Run("no user with ei authorizes conditionally", func(t *testing.T) { - a := webhook.NewAuthorizer(db.DB, nil, &eiFoo) + a := webhook.NewAuthorizer(db, nil, &eiFoo) can, err := a.CanRun(testutils.Context(t), eiEnabledCfg{}, jobWithFooAndBarEI.ExternalJobID) require.NoError(t, err) diff --git a/core/web/evm_forwarders_controller.go b/core/web/evm_forwarders_controller.go index 02eb6d7e566..674d0285d81 100644 --- a/core/web/evm_forwarders_controller.go +++ b/core/web/evm_forwarders_controller.go @@ -26,7 +26,7 @@ type EVMForwardersController struct { // Index lists EVM forwarders. 
func (cc *EVMForwardersController) Index(c *gin.Context, size, page, offset int) { - orm := forwarders.NewORM(cc.App.GetSqlxDB()) + orm := forwarders.NewORM(cc.App.GetDB()) fwds, count, err := orm.FindForwarders(c.Request.Context(), 0, size) if err != nil { @@ -56,7 +56,7 @@ func (cc *EVMForwardersController) Track(c *gin.Context) { jsonAPIError(c, http.StatusUnprocessableEntity, err) return } - orm := forwarders.NewORM(cc.App.GetSqlxDB()) + orm := forwarders.NewORM(cc.App.GetDB()) fwd, err := orm.CreateForwarder(c.Request.Context(), request.Address, *request.EVMChainID) if err != nil { @@ -80,7 +80,7 @@ func (cc *EVMForwardersController) Delete(c *gin.Context) { return } - filterCleanup := func(tx sqlutil.Queryer, evmChainID int64, addr common.Address) error { + filterCleanup := func(tx sqlutil.DB, evmChainID int64, addr common.Address) error { chain, err2 := cc.App.GetRelayers().LegacyEVMChains().Get(big.NewInt(evmChainID).String()) if err2 != nil { // If the chain id doesn't even exist, or logpoller is disabled, then there isn't any filter to clean up. Returning an error @@ -95,7 +95,7 @@ func (cc *EVMForwardersController) Delete(c *gin.Context) { return chain.LogPoller().UnregisterFilter(c.Request.Context(), forwarders.FilterName(addr)) } - orm := forwarders.NewORM(cc.App.GetSqlxDB()) + orm := forwarders.NewORM(cc.App.GetDB()) err = orm.DeleteForwarder(c.Request.Context(), id, filterCleanup) if err != nil { diff --git a/core/web/pipeline_runs_controller.go b/core/web/pipeline_runs_controller.go index a1c8da6f748..3892da749ea 100644 --- a/core/web/pipeline_runs_controller.go +++ b/core/web/pipeline_runs_controller.go @@ -105,7 +105,7 @@ func (prc *PipelineRunsController) Create(c *gin.Context) { user, isUser := auth.GetAuthenticatedUser(c) ei, _ := auth.GetAuthenticatedExternalInitiator(c) - authorizer := webhook.NewAuthorizer(prc.App.GetSqlxDB().DB, user, ei) + authorizer := webhook.NewAuthorizer(prc.App.GetDB(), user, ei) // Is it a UUID? 
Then process it as a webhook job jobUUID, err := uuid.Parse(idStr) diff --git a/go.mod b/go.mod index bc500ce3b40..db44a921acc 100644 --- a/go.mod +++ b/go.mod @@ -69,7 +69,7 @@ require ( github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 github.com/smartcontractkit/chain-selectors v1.0.10 github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35 - github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69 + github.com/smartcontractkit/chainlink-common v0.1.7-0.20240312193929-9bf02a194958 github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540 github.com/smartcontractkit/chainlink-feeds v0.0.0-20240119021347-3c541a78cdb8 @@ -81,7 +81,7 @@ require ( github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 github.com/smartcontractkit/wsrpc v0.7.2 github.com/spf13/cast v1.6.0 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a github.com/tidwall/gjson v1.17.0 github.com/ugorji/go/codec v1.2.12 @@ -283,7 +283,7 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/viper v1.15.0 // indirect github.com/status-im/keycard-go v0.2.0 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.4.2 // indirect github.com/supranational/blst v0.3.11 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect diff --git a/go.sum b/go.sum index 0477adbed3c..04666567f44 100644 --- a/go.sum +++ b/go.sum @@ -1169,8 +1169,8 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= github.com/smartcontractkit/chainlink-automation 
v1.0.2-0.20240311111125-22812a072c35 h1:GNhRKD3izyzAoGMXDvVUAwEuzz4Atdj3U3RH7eak5Is= github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35/go.mod h1:2I0dWdYdK6jHPnSYYy7Y7Xp7L0YTnJ3KZtkhLQflsTU= -github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69 h1:LsusfMA80iEYoFOad9gcuLRQYdi0rP7PX/dsXq6Y7yw= -github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69/go.mod h1:6aXWSEQawX2oZXcPPOdxnEGufAhj7PqPKolXf6ijRGA= +github.com/smartcontractkit/chainlink-common v0.1.7-0.20240312193929-9bf02a194958 h1:3AspKDXioDI0ROiFby3bcgWdRaDh3OYa8mPsud0HjHg= +github.com/smartcontractkit/chainlink-common v0.1.7-0.20240312193929-9bf02a194958/go.mod h1:/bJGelrpXvCcDCuaIgt91UN4B9YxZdK1O7VX5lzbysI= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8/go.mod h1:a65NtrK4xZb01mf0dDNghPkN2wXgcqFQ55ADthVBgMc= github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540 h1:xFSv8561jsLtF6gYZr/zW2z5qUUAkcFkApin2mnbYTo= @@ -1229,8 +1229,9 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod 
h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1242,8 +1243,9 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= diff --git a/integration-tests/go.mod b/integration-tests/go.mod index a85871465ac..8d1bc95790a 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -23,7 +23,7 @@ require ( github.com/segmentio/ksuid v1.0.4 github.com/slack-go/slack v0.12.2 github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35 - github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69 + github.com/smartcontractkit/chainlink-common v0.1.7-0.20240312193929-9bf02a194958 github.com/smartcontractkit/chainlink-testing-framework v1.26.0 github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868 github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000 @@ -31,7 +31,7 @@ require ( github.com/smartcontractkit/seth v0.1.2 github.com/smartcontractkit/wasp v0.4.5 github.com/spf13/cobra v1.8.0 - 
github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/test-go/testify v1.1.4 github.com/testcontainers/testcontainers-go v0.28.0 github.com/umbracle/ethgo v0.1.3 @@ -390,7 +390,7 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/viper v1.15.0 // indirect github.com/status-im/keycard-go v0.2.0 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.4.2 // indirect github.com/supranational/blst v0.3.11 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect diff --git a/integration-tests/go.sum b/integration-tests/go.sum index e45709fee48..48245dbebe7 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -1516,8 +1516,8 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35 h1:GNhRKD3izyzAoGMXDvVUAwEuzz4Atdj3U3RH7eak5Is= github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35/go.mod h1:2I0dWdYdK6jHPnSYYy7Y7Xp7L0YTnJ3KZtkhLQflsTU= -github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69 h1:LsusfMA80iEYoFOad9gcuLRQYdi0rP7PX/dsXq6Y7yw= -github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69/go.mod h1:6aXWSEQawX2oZXcPPOdxnEGufAhj7PqPKolXf6ijRGA= +github.com/smartcontractkit/chainlink-common v0.1.7-0.20240312193929-9bf02a194958 h1:3AspKDXioDI0ROiFby3bcgWdRaDh3OYa8mPsud0HjHg= +github.com/smartcontractkit/chainlink-common v0.1.7-0.20240312193929-9bf02a194958/go.mod h1:/bJGelrpXvCcDCuaIgt91UN4B9YxZdK1O7VX5lzbysI= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= github.com/smartcontractkit/chainlink-cosmos 
v0.4.1-0.20240213120401-01a23955f9f8/go.mod h1:a65NtrK4xZb01mf0dDNghPkN2wXgcqFQ55ADthVBgMc= github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540 h1:xFSv8561jsLtF6gYZr/zW2z5qUUAkcFkApin2mnbYTo= @@ -1588,8 +1588,9 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1601,8 +1602,9 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod 
h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod index f432efaa614..c584ed98047 100644 --- a/integration-tests/load/go.mod +++ b/integration-tests/load/go.mod @@ -15,7 +15,7 @@ require ( github.com/rs/zerolog v1.30.0 github.com/slack-go/slack v0.12.2 github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35 - github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69 + github.com/smartcontractkit/chainlink-common v0.1.7-0.20240312193929-9bf02a194958 github.com/smartcontractkit/chainlink-testing-framework v1.26.0 github.com/smartcontractkit/chainlink/integration-tests v0.0.0-20240214231432-4ad5eb95178c github.com/smartcontractkit/chainlink/v2 v2.9.0-beta0.0.20240216210048-da02459ddad8 @@ -23,7 +23,7 @@ require ( github.com/smartcontractkit/seth v0.1.2 github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 github.com/smartcontractkit/wasp v0.4.6 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 go.uber.org/ratelimit v0.3.0 ) @@ -380,7 +380,7 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/viper v1.15.0 // indirect github.com/status-im/keycard-go v0.2.0 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.4.2 // indirect github.com/supranational/blst v0.3.11 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum index 1a5729f5ebd..21285d58e1c 100644 --- a/integration-tests/load/go.sum +++ b/integration-tests/load/go.sum @@ -1499,8 +1499,8 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq 
github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35 h1:GNhRKD3izyzAoGMXDvVUAwEuzz4Atdj3U3RH7eak5Is= github.com/smartcontractkit/chainlink-automation v1.0.2-0.20240311111125-22812a072c35/go.mod h1:2I0dWdYdK6jHPnSYYy7Y7Xp7L0YTnJ3KZtkhLQflsTU= -github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69 h1:LsusfMA80iEYoFOad9gcuLRQYdi0rP7PX/dsXq6Y7yw= -github.com/smartcontractkit/chainlink-common v0.1.7-0.20240306173252-5cbf83ca3a69/go.mod h1:6aXWSEQawX2oZXcPPOdxnEGufAhj7PqPKolXf6ijRGA= +github.com/smartcontractkit/chainlink-common v0.1.7-0.20240312193929-9bf02a194958 h1:3AspKDXioDI0ROiFby3bcgWdRaDh3OYa8mPsud0HjHg= +github.com/smartcontractkit/chainlink-common v0.1.7-0.20240312193929-9bf02a194958/go.mod h1:/bJGelrpXvCcDCuaIgt91UN4B9YxZdK1O7VX5lzbysI= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240213120401-01a23955f9f8/go.mod h1:a65NtrK4xZb01mf0dDNghPkN2wXgcqFQ55ADthVBgMc= github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240220203239-09be0ea34540 h1:xFSv8561jsLtF6gYZr/zW2z5qUUAkcFkApin2mnbYTo= @@ -1573,8 +1573,9 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod 
h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1586,8 +1587,9 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= diff --git a/integration-tests/universal/log_poller/helpers.go b/integration-tests/universal/log_poller/helpers.go index 8752e344330..5ed0c443a74 100644 --- a/integration-tests/universal/log_poller/helpers.go +++ b/integration-tests/universal/log_poller/helpers.go @@ -137,8 +137,8 @@ var registerSingleTopicFilter = func(registry contracts.KeeperRegistry, upkeepID // return nil // } -// NewOrm returns a new logpoller.orm instance -func NewOrm(logger core_logger.SugaredLogger, chainID *big.Int, postgresDb *ctf_test_env.PostgresDb) (logpoller.ORM, *sqlx.DB, error) { +// NewORM returns a new logpoller.orm instance +func NewORM(logger core_logger.SugaredLogger, chainID *big.Int, postgresDb 
*ctf_test_env.PostgresDb) (logpoller.ORM, *sqlx.DB, error) { dsn := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=disable", "127.0.0.1", postgresDb.ExternalPort, postgresDb.User, postgresDb.Password, postgresDb.DbName) db, err := sqlx.Open("postgres", dsn) if err != nil { @@ -171,7 +171,7 @@ func GetExpectedFilters(logEmitters []*contracts.LogEmitter, cfg *lp_config.Conf // NodeHasExpectedFilters returns true if the provided node has all the expected filters registered func NodeHasExpectedFilters(ctx context.Context, expectedFilters []ExpectedFilter, logger core_logger.SugaredLogger, chainID *big.Int, postgresDb *ctf_test_env.PostgresDb) (bool, string, error) { - orm, db, err := NewOrm(logger, chainID, postgresDb) + orm, db, err := NewORM(logger, chainID, postgresDb) if err != nil { return false, "", err } @@ -306,7 +306,7 @@ func LogPollerHasFinalisedEndBlock(endBlock int64, chainID *big.Int, l zerolog.L case <-ctx.Done(): return default: - orm, db, err := NewOrm(coreLogger, chainID, clNode.PostgresDb) + orm, db, err := NewORM(coreLogger, chainID, clNode.PostgresDb) if err != nil { r <- boolQueryResult{ nodeName: clNode.ContainerName, @@ -400,7 +400,7 @@ func ClNodesHaveExpectedLogCount(startBlock, endBlock int64, chainID *big.Int, e case <-ctx.Done(): return default: - orm, db, err := NewOrm(coreLogger, chainID, clNode.PostgresDb) + orm, db, err := NewORM(coreLogger, chainID, clNode.PostgresDb) if err != nil { resultChan <- logQueryResult{ nodeName: clNode.ContainerName, @@ -523,7 +523,7 @@ func GetMissingLogs(startBlock, endBlock int64, logEmitters []*contracts.LogEmit nodeName := clnodeCluster.Nodes[i].ContainerName l.Debug().Str("Node name", nodeName).Msg("Fetching log poller logs") - orm, db, err := NewOrm(coreLogger, evmClient.GetChainID(), clnodeCluster.Nodes[i].PostgresDb) + orm, db, err := NewORM(coreLogger, evmClient.GetChainID(), clnodeCluster.Nodes[i].PostgresDb) if err != nil { r <- dbQueryResult{ err: err, From 
87519d4b3bbc00edbb3822837966b880da6f40ad Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 13 Mar 2024 11:38:02 -0400 Subject: [PATCH 63/65] Check bind errors --- core/chains/evm/logpoller/orm.go | 152 ++++++++++++++++++++++++------- 1 file changed, 120 insertions(+), 32 deletions(-) diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index d1011040a73..6f1e2132720 100644 --- a/core/chains/evm/logpoller/orm.go +++ b/core/chains/evm/logpoller/orm.go @@ -99,7 +99,11 @@ func (o *DbORM) InsertBlock(ctx context.Context, blockHash common.Hash, blockNum (evm_chain_id, block_hash, block_number, block_timestamp, finalized_block_number, created_at) VALUES (:evm_chain_id, :block_hash, :block_number, :block_timestamp, :finalized_block_number, NOW()) ON CONFLICT DO NOTHING` - query, sqlArgs, _ := o.db.BindNamed(query, args) + query, sqlArgs, err := o.db.BindNamed(query, args) + if err != nil { + return err + } + _, err = o.db.ExecContext(ctx, query, sqlArgs...) return err } @@ -145,7 +149,11 @@ func (o *DbORM) InsertFilter(ctx context.Context, filter Filter) (err error) { topicsColumns.String(), topicsSql.String()) - query, sqlArgs, _ := o.db.BindNamed(query, args) + query, sqlArgs, err := o.db.BindNamed(query, args) + if err != nil { + return err + } + _, err = o.db.ExecContext(ctx, query, sqlArgs...) 
return err } @@ -221,8 +229,11 @@ func (o *DbORM) SelectLatestLogByEventSigWithConfs(ctx context.Context, eventSig ORDER BY (block_number, log_index) DESC LIMIT 1`, nestedBlockNumberQuery(confs)) var l Log - query, sqlArgs, _ := o.db.BindNamed(query, args) - if err := o.db.GetContext(ctx, &l, query, sqlArgs...); err != nil { + query, sqlArgs, err := o.db.BindNamed(query, args) + if err != nil { + return nil, err + } + if err = o.db.GetContext(ctx, &l, query, sqlArgs...); err != nil { return nil, err } return &l, nil @@ -242,13 +253,17 @@ func (o *DbORM) DeleteBlocksBefore(ctx context.Context, end int64, limit int64) ) AND evm_chain_id = $2`, end, ubig.New(o.chainID), limit) - rowsAffected, _ := result.RowsAffected() - return rowsAffected, err + if err != nil { + return 0, err + } + return result.RowsAffected() } result, err := o.db.ExecContext(ctx, `DELETE FROM evm.log_poller_blocks WHERE block_number <= $1 AND evm_chain_id = $2`, end, ubig.New(o.chainID)) - rowsAffected, _ := result.RowsAffected() - return rowsAffected, err + if err != nil { + return 0, err + } + return result.RowsAffected() } func (o *DbORM) DeleteLogsAndBlocksAfter(ctx context.Context, start int64) error { @@ -323,8 +338,10 @@ func (o *DbORM) DeleteExpiredLogs(ctx context.Context, limit int64) (int64, erro ubig.New(o.chainID)) } - rowsAffected, _ := result.RowsAffected() - return rowsAffected, err + if err != nil { + return 0, err + } + return result.RowsAffected() } // InsertLogs is idempotent to support replays. @@ -371,9 +388,12 @@ func (o *DbORM) insertLogsWithinTx(ctx context.Context, logs []Log, tx sqlutil.Q (:evm_chain_id, :log_index, :block_hash, :block_number, :block_timestamp, :address, :event_sig, :topics, :tx_hash, :data, NOW()) ON CONFLICT DO NOTHING` - query, sqlArgs, _ := o.db.BindNamed(query, logs[start:end]) - _, err := tx.ExecContext(ctx, query, sqlArgs...) 
+ query, sqlArgs, err := o.db.BindNamed(query, logs[start:end]) + if err != nil { + return err + } + _, err = tx.ExecContext(ctx, query, sqlArgs...) if err != nil { if pkgerrors.Is(err, context.DeadlineExceeded) && batchInsertSize > 500 { // In case of DB timeouts, try to insert again with a smaller batch upto a limit @@ -412,7 +432,11 @@ func (o *DbORM) SelectLogsByBlockRange(ctx context.Context, start, end int64) ([ ORDER BY (block_number, log_index)` var logs []Log - query, sqlArgs, _ := o.db.BindNamed(query, args) + query, sqlArgs, err := o.db.BindNamed(query, args) + if err != nil { + return nil, err + } + err = o.db.SelectContext(ctx, &logs, query, sqlArgs...) if err != nil { return nil, err @@ -439,7 +463,11 @@ func (o *DbORM) SelectLogs(ctx context.Context, start, end int64, address common ORDER BY (block_number, log_index)` var logs []Log - query, sqlArgs, _ := o.db.BindNamed(query, args) + query, sqlArgs, err := o.db.BindNamed(query, args) + if err != nil { + return nil, err + } + err = o.db.SelectContext(ctx, &logs, query, sqlArgs...) 
if err != nil { return nil, err @@ -467,8 +495,12 @@ func (o *DbORM) SelectLogsCreatedAfter(ctx context.Context, address common.Addre ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log - query, sqlArgs, _ := o.db.BindNamed(query, args) - if err := o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { + query, sqlArgs, err := o.db.BindNamed(query, args) + if err != nil { + return nil, err + } + + if err = o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { return nil, err } return logs, nil @@ -494,7 +526,11 @@ func (o *DbORM) SelectLogsWithSigs(ctx context.Context, start, end int64, addres AND block_number BETWEEN :start_block AND :end_block ORDER BY (block_number, log_index)` - query, sqlArgs, _ := o.db.BindNamed(query, args) + query, sqlArgs, err := o.db.BindNamed(query, args) + if err != nil { + return nil, err + } + err = o.db.SelectContext(ctx, &logs, query, sqlArgs...) if pkgerrors.Is(err, sql.ErrNoRows) { return nil, nil @@ -518,7 +554,11 @@ func (o *DbORM) GetBlocksRange(ctx context.Context, start int64, end int64) ([]L ORDER BY block_number ASC` var blocks []LogPollerBlock - query, sqlArgs, _ := o.db.BindNamed(query, args) + query, sqlArgs, err := o.db.BindNamed(query, args) + if err != nil { + return nil, err + } + err = o.db.SelectContext(ctx, &blocks, query, sqlArgs...) 
if err != nil { return nil, err @@ -551,8 +591,12 @@ func (o *DbORM) SelectLatestLogEventSigsAddrsWithConfs(ctx context.Context, from ORDER BY block_number ASC`, nestedBlockNumberQuery(confs)) var logs []Log - query, sqlArgs, _ := o.db.BindNamed(query, args) - if err := o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { + query, sqlArgs, err := o.db.BindNamed(query, args) + if err != nil { + return nil, err + } + + if err = o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { return nil, pkgerrors.Wrap(err, "failed to execute query") } return logs, nil @@ -578,8 +622,12 @@ func (o *DbORM) SelectLatestBlockByEventSigsAddrsWithConfs(ctx context.Context, AND block_number <= %s`, nestedBlockNumberQuery(confs)) var blockNumber int64 - query, sqlArgs, _ := o.db.BindNamed(query, args) - if err := o.db.GetContext(ctx, &blockNumber, query, sqlArgs...); err != nil { + query, sqlArgs, err := o.db.BindNamed(query, args) + if err != nil { + return 0, err + } + + if err = o.db.GetContext(ctx, &blockNumber, query, sqlArgs...); err != nil { return 0, err } return blockNumber, nil @@ -606,7 +654,11 @@ func (o *DbORM) SelectLogsDataWordRange(ctx context.Context, address common.Addr ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log - query, sqlArgs, _ := o.db.BindNamed(query, args) + query, sqlArgs, err := o.db.BindNamed(query, args) + if err != nil { + return nil, err + } + if err := o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { return nil, err } @@ -633,7 +685,11 @@ func (o *DbORM) SelectLogsDataWordGreaterThan(ctx context.Context, address commo ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log - query, sqlArgs, _ := o.db.BindNamed(query, args) + query, sqlArgs, err := o.db.BindNamed(query, args) + if err != nil { + return nil, err + } + if err := o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { return nil, err } @@ -661,7 +717,11 @@ func (o *DbORM) 
SelectLogsDataWordBetween(ctx context.Context, address common.Ad ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log - query, sqlArgs, _ := o.db.BindNamed(query, args) + query, sqlArgs, err := o.db.BindNamed(query, args) + if err != nil { + return nil, err + } + if err := o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { return nil, err } @@ -688,7 +748,11 @@ func (o *DbORM) SelectIndexedLogsTopicGreaterThan(ctx context.Context, address c ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log - query, sqlArgs, _ := o.db.BindNamed(query, args) + query, sqlArgs, err := o.db.BindNamed(query, args) + if err != nil { + return nil, err + } + if err := o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { return nil, err } @@ -717,7 +781,11 @@ func (o *DbORM) SelectIndexedLogsTopicRange(ctx context.Context, address common. ORDER BY (evm.logs.block_number, evm.logs.log_index)`, nestedBlockNumberQuery(confs)) var logs []Log - query, sqlArgs, _ := o.db.BindNamed(query, args) + query, sqlArgs, err := o.db.BindNamed(query, args) + if err != nil { + return nil, err + } + if err := o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { return nil, err } @@ -744,7 +812,11 @@ func (o *DbORM) SelectIndexedLogs(ctx context.Context, address common.Address, e ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log - query, sqlArgs, _ := o.db.BindNamed(query, args) + query, sqlArgs, err := o.db.BindNamed(query, args) + if err != nil { + return nil, err + } + if err := o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { return nil, err } @@ -773,7 +845,11 @@ func (o *DbORM) SelectIndexedLogsByBlockRange(ctx context.Context, start, end in ORDER BY (block_number, log_index)` var logs []Log - query, sqlArgs, _ := o.db.BindNamed(query, args) + query, sqlArgs, err := o.db.BindNamed(query, args) + if err != nil { + return nil, err + } + err = 
o.db.SelectContext(ctx, &logs, query, sqlArgs...) if err != nil { return nil, err @@ -803,7 +879,11 @@ func (o *DbORM) SelectIndexedLogsCreatedAfter(ctx context.Context, address commo ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log - query, sqlArgs, _ := o.db.BindNamed(query, args) + query, sqlArgs, err := o.db.BindNamed(query, args) + if err != nil { + return nil, err + } + if err := o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { return nil, err } @@ -828,7 +908,11 @@ func (o *DbORM) SelectIndexedLogsByTxHash(ctx context.Context, address common.Ad ORDER BY (block_number, log_index)` var logs []Log - query, sqlArgs, _ := o.db.BindNamed(query, args) + query, sqlArgs, err := o.db.BindNamed(query, args) + if err != nil { + return nil, err + } + err = o.db.SelectContext(ctx, &logs, query, sqlArgs...) if err != nil { return nil, err @@ -872,7 +956,11 @@ func (o *DbORM) SelectIndexedLogsWithSigsExcluding(ctx context.Context, sigA, si ORDER BY block_number,log_index ASC`, nestedQuery, nestedQuery) var logs []Log - query, sqlArgs, _ := o.db.BindNamed(query, args) + query, sqlArgs, err := o.db.BindNamed(query, args) + if err != nil { + return nil, err + } + if err := o.db.SelectContext(ctx, &logs, query, sqlArgs...); err != nil { return nil, err } From 12adced26f55d91ad8e5a3ac662723a06a42271a Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 13 Mar 2024 17:45:13 -0400 Subject: [PATCH 64/65] Add close timeout --- core/services/relay/evm/chain_reader.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/services/relay/evm/chain_reader.go b/core/services/relay/evm/chain_reader.go index ed8c34b4831..ff4f026d118 100644 --- a/core/services/relay/evm/chain_reader.go +++ b/core/services/relay/evm/chain_reader.go @@ -5,6 +5,7 @@ import ( "fmt" "reflect" "strings" + "time" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/google/uuid" @@ -118,8 +119,7 @@ func (cr *chainReader) Start(ctx 
context.Context) error { func (cr *chainReader) Close() error { return cr.StopOnce("ChainReader", func() error { - // TODO: Propagate context - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() return cr.contractBindings.ForEach(ctx, readBinding.Unregister) }) From 38ede16169fda9a3188154f34915569d1934ebbd Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 14 Mar 2024 09:45:12 -0400 Subject: [PATCH 65/65] Add changeset --- .changeset/strong-ears-heal.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changeset/strong-ears-heal.md diff --git a/.changeset/strong-ears-heal.md b/.changeset/strong-ears-heal.md new file mode 100644 index 00000000000..b6332407ea5 --- /dev/null +++ b/.changeset/strong-ears-heal.md @@ -0,0 +1,5 @@ +--- +"chainlink": patch +--- + +Refactor EVM ORMs to remove pg dependency