diff --git a/.changeset/dull-ants-collect.md b/.changeset/dull-ants-collect.md
new file mode 100644
index 00000000000..78f608e3418
--- /dev/null
+++ b/.changeset/dull-ants-collect.md
@@ -0,0 +1,9 @@
+---
+"chainlink": patch
+---
+
+Fixed local finality violation caused by an RPC lagging behind on the latest finalized block.
+
+Added `EVM.FinalizedBlockOffset` and `EVM.NodePool.EnforceRepeatableRead` config options.
+With `EnforceRepeatableRead = true`, an RPC is considered healthy only if its most recent finalized block is greater than or equal to the highest finalized block observed by the Node minus `FinalizedBlockOffset`.
+#bugfix
diff --git a/common/client/ctx.go b/common/client/ctx.go
new file mode 100644
index 00000000000..57b2fc8a866
--- /dev/null
+++ b/common/client/ctx.go
@@ -0,0 +1,19 @@
+package client
+
+import "context"
+
+type multiNodeContextKey int
+
+const (
+	contextKeyHeathCheckRequest multiNodeContextKey = iota + 1
+)
+
+// CtxAddHealthCheckFlag returns a copy of ctx marked as originating from a node health check request.
+func CtxAddHealthCheckFlag(ctx context.Context) context.Context {
+	return context.WithValue(ctx, contextKeyHeathCheckRequest, struct{}{})
+}
+
+// CtxIsHeathCheckRequest reports whether ctx was marked by CtxAddHealthCheckFlag.
+func CtxIsHeathCheckRequest(ctx context.Context) bool {
+	return ctx.Value(contextKeyHeathCheckRequest) != nil
+}
diff --git a/common/client/ctx_test.go b/common/client/ctx_test.go
new file mode 100644
index 00000000000..822b36c3f81
--- /dev/null
+++ b/common/client/ctx_test.go
@@ -0,0 +1,16 @@
+package client
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
+)
+
+func TestContext(t *testing.T) {
+	ctx := tests.Context(t)
+	assert.False(t, CtxIsHeathCheckRequest(ctx), "expected false for test context")
+	ctx = CtxAddHealthCheckFlag(ctx)
+	assert.True(t, CtxIsHeathCheckRequest(ctx), "expected context to contain the healthcheck flag")
+}
diff --git a/common/client/mock_node_client_test.go b/common/client/mock_node_client_test.go
index b9b9470de29..a7c0e4dbdb8 100644
--- a/common/client/mock_node_client_test.go
+++ b/common/client/mock_node_client_test.go
@@ -116,6 +116,34 @@ func (_m *mockNodeClient[CHAIN_ID, HEAD]) DisconnectAll() {
 	_m.Called()
 }
 
+// GetInterceptedChainInfo provides a mock function with given fields:
+func (_m *mockNodeClient[CHAIN_ID, HEAD]) GetInterceptedChainInfo() (ChainInfo, ChainInfo) {
+	ret := _m.Called()
+
+	if len(ret) == 0 {
+		panic("no return value specified for GetInterceptedChainInfo")
+	}
+
+	var r0 ChainInfo
+	var r1 ChainInfo
+	if rf, ok := ret.Get(0).(func() (ChainInfo, ChainInfo)); ok {
+		return rf()
+	}
+	if rf, ok := ret.Get(0).(func() ChainInfo); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Get(0).(ChainInfo)
+	}
+
+	if rf, ok := ret.Get(1).(func() ChainInfo); ok {
+		r1 = rf()
+	} else {
+		r1 = ret.Get(1).(ChainInfo)
+	}
+
+	return r0, r1
+}
+
 // IsSyncing provides a mock function with given fields: ctx
 func (_m *mockNodeClient[CHAIN_ID, HEAD]) IsSyncing(ctx context.Context) (bool, error) {
 	ret := _m.Called(ctx)
@@ -177,32 +205,29 @@ func (_m *mockNodeClient[CHAIN_ID, HEAD]) SetAliveLoopSub(_a0 types.Subscription
 	_m.Called(_a0)
 }
 
-// Subscribe provides a mock function with given fields: ctx, channel, args
-func (_m *mockNodeClient[CHAIN_ID, HEAD]) Subscribe(ctx context.Context, channel chan<- HEAD, args ...interface{}) (types.Subscription, error) {
-	var _ca []interface{}
-	_ca = append(_ca, ctx, channel)
-	_ca = append(_ca, args...)
-	ret := _m.Called(_ca...)
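Before continuing with the generated mocks, it may help to see the health rule from the changeset above reduced to code. This is a standalone sketch with illustrative names, not the PR's implementation (the real check is `isFinalizedBlockOutOfSync` in `common/client/node_fsm.go`, further down in this diff):

```go
package main

import "fmt"

// repeatableReadHealthy reports whether an RPC satisfies the rule described in
// the changeset: its most recent finalized block must be greater than or equal
// to the highest finalized block observed by the Node minus FinalizedBlockOffset.
func repeatableReadHealthy(rpcFinalized, highestObservedFinalized int64, finalizedBlockOffset uint32) bool {
	return rpcFinalized >= highestObservedFinalized-int64(finalizedBlockOffset)
}

func main() {
	// With the pool's highest observed finalized block at 100 and an offset
	// of 2, an RPC whose finalized block is at least 98 is still healthy.
	fmt.Println(repeatableReadHealthy(98, 100, 2)) // true
	fmt.Println(repeatableReadHealthy(97, 100, 2)) // false
}
```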
+// SubscribeNewHead provides a mock function with given fields: ctx, channel +func (_m *mockNodeClient[CHAIN_ID, HEAD]) SubscribeNewHead(ctx context.Context, channel chan<- HEAD) (types.Subscription, error) { + ret := _m.Called(ctx, channel) if len(ret) == 0 { - panic("no return value specified for Subscribe") + panic("no return value specified for SubscribeNewHead") } var r0 types.Subscription var r1 error - if rf, ok := ret.Get(0).(func(context.Context, chan<- HEAD, ...interface{}) (types.Subscription, error)); ok { - return rf(ctx, channel, args...) + if rf, ok := ret.Get(0).(func(context.Context, chan<- HEAD) (types.Subscription, error)); ok { + return rf(ctx, channel) } - if rf, ok := ret.Get(0).(func(context.Context, chan<- HEAD, ...interface{}) types.Subscription); ok { - r0 = rf(ctx, channel, args...) + if rf, ok := ret.Get(0).(func(context.Context, chan<- HEAD) types.Subscription); ok { + r0 = rf(ctx, channel) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(types.Subscription) } } - if rf, ok := ret.Get(1).(func(context.Context, chan<- HEAD, ...interface{}) error); ok { - r1 = rf(ctx, channel, args...) + if rf, ok := ret.Get(1).(func(context.Context, chan<- HEAD) error); ok { + r1 = rf(ctx, channel) } else { r1 = ret.Error(1) } diff --git a/common/client/mock_node_test.go b/common/client/mock_node_test.go index 67ee5a4ba99..5109eb6bb90 100644 --- a/common/client/mock_node_test.go +++ b/common/client/mock_node_test.go @@ -4,11 +4,9 @@ package client import ( context "context" - big "math/big" - - mock "github.com/stretchr/testify/mock" types "github.com/smartcontractkit/chainlink/v2/common/types" + mock "github.com/stretchr/testify/mock" ) // mockNode is an autogenerated mock type for the Node type @@ -52,6 +50,24 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC]) ConfiguredChainID() CHAIN_ID { return r0 } +// HighestUserObservations provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, HEAD, RPC]) HighestUserObservations() ChainInfo { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HighestUserObservations") + } + + var r0 ChainInfo + if rf, ok := ret.Get(0).(func() ChainInfo); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(ChainInfo) + } + + return r0 +} + // Name provides a mock function with given fields: func (_m *mockNode[CHAIN_ID, HEAD, RPC]) Name() string { ret := _m.Called() @@ -106,6 +122,11 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC]) RPC() RPC { return r0 } +// SetPoolChainInfoProvider provides a mock function with given fields: _a0 +func (_m *mockNode[CHAIN_ID, HEAD, RPC]) SetPoolChainInfoProvider(_a0 PoolChainInfoProvider) { + _m.Called(_a0) +} + // Start provides a mock function with given fields: _a0 func (_m *mockNode[CHAIN_ID, HEAD, RPC]) Start(_a0 context.Context) error { ret := _m.Called(_a0) @@ -143,7 +164,7 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC]) State() nodeState { } // StateAndLatest provides a mock function with given fields: -func (_m *mockNode[CHAIN_ID, HEAD, RPC]) StateAndLatest() (nodeState, int64, *big.Int) { +func (_m *mockNode[CHAIN_ID, HEAD, RPC]) StateAndLatest() (nodeState, ChainInfo) { ret := _m.Called() if len(ret) == 0 { @@ -151,9 +172,8 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC]) StateAndLatest() (nodeState, int64, *bi } var r0 nodeState - var r1 int64 - var r2 *big.Int - if rf, ok := ret.Get(0).(func() (nodeState, int64, *big.Int)); ok { + var r1 ChainInfo + if rf, ok := ret.Get(0).(func() (nodeState, ChainInfo)); ok { return rf() } if rf, ok := ret.Get(0).(func() nodeState); ok { @@ -162,21 
+182,13 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC]) StateAndLatest() (nodeState, int64, *bi r0 = ret.Get(0).(nodeState) } - if rf, ok := ret.Get(1).(func() int64); ok { + if rf, ok := ret.Get(1).(func() ChainInfo); ok { r1 = rf() } else { - r1 = ret.Get(1).(int64) - } - - if rf, ok := ret.Get(2).(func() *big.Int); ok { - r2 = rf() - } else { - if ret.Get(2) != nil { - r2 = ret.Get(2).(*big.Int) - } + r1 = ret.Get(1).(ChainInfo) } - return r0, r1, r2 + return r0, r1 } // String provides a mock function with given fields: diff --git a/common/client/mock_pool_chain_info_provider_test.go b/common/client/mock_pool_chain_info_provider_test.go new file mode 100644 index 00000000000..4e4955e7381 --- /dev/null +++ b/common/client/mock_pool_chain_info_provider_test.go @@ -0,0 +1,70 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +package client + +import mock "github.com/stretchr/testify/mock" + +// mockPoolChainInfoProvider is an autogenerated mock type for the PoolChainInfoProvider type +type mockPoolChainInfoProvider struct { + mock.Mock +} + +// HighestUserObservations provides a mock function with given fields: +func (_m *mockPoolChainInfoProvider) HighestUserObservations() ChainInfo { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HighestUserObservations") + } + + var r0 ChainInfo + if rf, ok := ret.Get(0).(func() ChainInfo); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(ChainInfo) + } + + return r0 +} + +// LatestChainInfo provides a mock function with given fields: +func (_m *mockPoolChainInfoProvider) LatestChainInfo() (int, ChainInfo) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LatestChainInfo") + } + + var r0 int + var r1 ChainInfo + if rf, ok := ret.Get(0).(func() (int, ChainInfo)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func() ChainInfo); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(ChainInfo) + } + + return r0, r1 +} + +// newMockPoolChainInfoProvider creates a new instance of mockPoolChainInfoProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func newMockPoolChainInfoProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *mockPoolChainInfoProvider { + mock := &mockPoolChainInfoProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/client/mock_rpc_test.go b/common/client/mock_rpc_test.go index e3e7c59f2e2..81bac04547d 100644 --- a/common/client/mock_rpc_test.go +++ b/common/client/mock_rpc_test.go @@ -366,6 +366,34 @@ func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS return r0, r1 } +// GetInterceptedChainInfo provides a mock function with given fields: +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) GetInterceptedChainInfo() (ChainInfo, ChainInfo) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetInterceptedChainInfo") + } + + var r0 ChainInfo + var r1 ChainInfo + if rf, ok := ret.Get(0).(func() (ChainInfo, ChainInfo)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() ChainInfo); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(ChainInfo) + } + + if rf, ok := ret.Get(1).(func() ChainInfo); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(ChainInfo) + } + + return r0, r1 +} + // IsSyncing provides a mock function with given fields: ctx func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) IsSyncing(ctx context.Context) (bool, error) { ret := _m.Called(ctx) @@ -637,32 +665,29 @@ func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS return r0 } -// Subscribe provides a mock function with given fields: ctx, channel, args -func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) Subscribe(ctx context.Context, channel chan<- HEAD, args ...interface{}) (types.Subscription, error) { - var _ca []interface{} - _ca = append(_ca, ctx, channel) - _ca = append(_ca, args...) - ret := _m.Called(_ca...) +// SubscribeNewHead provides a mock function with given fields: ctx, channel +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) SubscribeNewHead(ctx context.Context, channel chan<- HEAD) (types.Subscription, error) { + ret := _m.Called(ctx, channel) if len(ret) == 0 { - panic("no return value specified for Subscribe") + panic("no return value specified for SubscribeNewHead") } var r0 types.Subscription var r1 error - if rf, ok := ret.Get(0).(func(context.Context, chan<- HEAD, ...interface{}) (types.Subscription, error)); ok { - return rf(ctx, channel, args...) + if rf, ok := ret.Get(0).(func(context.Context, chan<- HEAD) (types.Subscription, error)); ok { + return rf(ctx, channel) } - if rf, ok := ret.Get(0).(func(context.Context, chan<- HEAD, ...interface{}) types.Subscription); ok { - r0 = rf(ctx, channel, args...) + if rf, ok := ret.Get(0).(func(context.Context, chan<- HEAD) types.Subscription); ok { + r0 = rf(ctx, channel) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(types.Subscription) } } - if rf, ok := ret.Get(1).(func(context.Context, chan<- HEAD, ...interface{}) error); ok { - r1 = rf(ctx, channel, args...) 
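Stepping out of the generated code for a moment: below is a minimal sketch of how a test in this package might drive the `mockPoolChainInfoProvider` defined above, assuming the testify imports already used by the neighboring test files; the returned values are invented for illustration:

```go
func TestPoolChainInfoProviderMockSketch(t *testing.T) {
	provider := newMockPoolChainInfoProvider(t)
	// Pretend the pool has two live nodes whose best latest block is 42.
	provider.On("LatestChainInfo").Return(2, ChainInfo{BlockNumber: 42}).Once()

	liveNodes, info := provider.LatestChainInfo()
	assert.Equal(t, 2, liveNodes)
	assert.Equal(t, int64(42), info.BlockNumber)
}
```

Because `newMockPoolChainInfoProvider` registers a cleanup that asserts expectations, the `.Once()` expectation above would fail the test if `LatestChainInfo` were never called.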
+ if rf, ok := ret.Get(1).(func(context.Context, chan<- HEAD) error); ok { + r1 = rf(ctx, channel) } else { r1 = ret.Error(1) } diff --git a/common/client/mocks/config.go b/common/client/mocks/config.go index 306965a9f5d..d1007f39f0f 100644 --- a/common/client/mocks/config.go +++ b/common/client/mocks/config.go @@ -3,9 +3,10 @@ package mocks import "time" type ChainConfig struct { - IsFinalityTagEnabled bool - FinalityDepthVal uint32 - NoNewHeadsThresholdVal time.Duration + IsFinalityTagEnabled bool + FinalityDepthVal uint32 + NoNewHeadsThresholdVal time.Duration + FinalizedBlockOffsetVal uint32 } func (t ChainConfig) NodeNoNewHeadsThreshold() time.Duration { @@ -19,3 +20,7 @@ func (t ChainConfig) FinalityDepth() uint32 { func (t ChainConfig) FinalityTagEnabled() bool { return t.IsFinalityTagEnabled } + +func (t ChainConfig) FinalizedBlockOffset() uint32 { + return t.FinalizedBlockOffsetVal +} diff --git a/common/client/multi_node.go b/common/client/multi_node.go index 0fc095c2931..c53e5d33b7e 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -90,18 +90,19 @@ type multiNode[ BATCH_ELEM any, ] struct { services.StateMachine - nodes []Node[CHAIN_ID, HEAD, RPC_CLIENT] - sendonlys []SendOnlyNode[CHAIN_ID, RPC_CLIENT] - chainID CHAIN_ID - lggr logger.SugaredLogger - selectionMode string - noNewHeadsThreshold time.Duration - nodeSelector NodeSelector[CHAIN_ID, HEAD, RPC_CLIENT] - leaseDuration time.Duration - leaseTicker *time.Ticker - chainFamily string - reportInterval time.Duration - sendTxSoftTimeout time.Duration // defines max waiting time from first response til responses evaluation + nodes []Node[CHAIN_ID, HEAD, RPC_CLIENT] + sendonlys []SendOnlyNode[CHAIN_ID, RPC_CLIENT] + chainID CHAIN_ID + lggr logger.SugaredLogger + selectionMode string + noNewHeadsThreshold time.Duration + nodeSelector NodeSelector[CHAIN_ID, HEAD, RPC_CLIENT] + leaseDuration time.Duration + leaseTicker *time.Ticker + chainFamily string + reportInterval time.Duration + deathDeclarationDelay time.Duration + sendTxSoftTimeout time.Duration // defines max waiting time from first response til responses evaluation activeMu sync.RWMutex activeNode Node[CHAIN_ID, HEAD, RPC_CLIENT] @@ -137,6 +138,7 @@ func NewMultiNode[ chainFamily string, classifySendTxError func(tx TX, err error) SendTxReturnCode, sendTxSoftTimeout time.Duration, + deathDeclarationDelay time.Duration, ) MultiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM] { nodeSelector := newNodeSelector(selectionMode, nodes) // Prometheus' default interval is 15s, set this to under 7.5s to avoid @@ -146,19 +148,20 @@ func NewMultiNode[ sendTxSoftTimeout = QueryTimeout / 2 } c := &multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]{ - nodes: nodes, - sendonlys: sendonlys, - chainID: chainID, - lggr: logger.Sugared(lggr).Named("MultiNode").With("chainID", chainID.String()), - selectionMode: selectionMode, - noNewHeadsThreshold: noNewHeadsThreshold, - nodeSelector: nodeSelector, - chStop: make(services.StopChan), - leaseDuration: leaseDuration, - chainFamily: chainFamily, - classifySendTxError: classifySendTxError, - reportInterval: reportInterval, - sendTxSoftTimeout: sendTxSoftTimeout, + nodes: nodes, + sendonlys: sendonlys, + chainID: chainID, + lggr: logger.Sugared(lggr).Named("MultiNode").With("chainID", chainID.String()), + selectionMode: selectionMode, + noNewHeadsThreshold: noNewHeadsThreshold, + 
nodeSelector:          nodeSelector,
+		chStop:                make(services.StopChan),
+		leaseDuration:         leaseDuration,
+		chainFamily:           chainFamily,
+		classifySendTxError:   classifySendTxError,
+		reportInterval:        reportInterval,
+		deathDeclarationDelay: deathDeclarationDelay,
+		sendTxSoftTimeout:     sendTxSoftTimeout,
 	}
 
 	c.lggr.Debugf("The MultiNode is configured to use NodeSelectionMode: %s", selectionMode)
@@ -180,14 +183,7 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP
 		if n.ConfiguredChainID().String() != c.chainID.String() {
 			return ms.CloseBecause(fmt.Errorf("node %s has configured chain ID %s which does not match multinode configured chain ID of %s", n.String(), n.ConfiguredChainID().String(), c.chainID.String()))
 		}
-		rawNode, ok := n.(*node[CHAIN_ID, HEAD, RPC_CLIENT])
-		if ok {
-			// This is a bit hacky but it allows the node to be aware of
-			// pool state and prevent certain state transitions that might
-			// otherwise leave no nodes available. It is better to have one
-			// node in a degraded state than no nodes at all.
-			rawNode.nLiveNodes = c.nLiveNodes
-		}
+		n.SetPoolChainInfoProvider(c)
 		// node will handle its own redialing and automatic recovery
 		if err := ms.Start(ctx, n); err != nil {
 			return err
@@ -253,6 +249,9 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP
 		return // another goroutine beat us here
 	}
 
+	if c.activeNode != nil {
+		c.activeNode.UnsubscribeAllExceptAliveLoop()
+	}
 	c.activeNode = c.nodeSelector.Select()
 
 	if c.activeNode == nil {
@@ -265,22 +264,37 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP
 	return c.activeNode, err
 }
 
-// nLiveNodes returns the number of currently alive nodes, as well as the highest block number and greatest total difficulty.
-// totalDifficulty will be 0 if all nodes return nil.
-func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) nLiveNodes() (nLiveNodes int, blockNumber int64, totalDifficulty *big.Int) {
-	totalDifficulty = big.NewInt(0)
+// LatestChainInfo - returns the number of live nodes available in the pool, so we can prevent the last alive node in a pool from being marked as out-of-sync,
+// along with the highest ChainInfo most recently received by the alive nodes.
+// E.g. if Node A's most recent block is 10 (highest seen: 15) and Node B's most recent block is 12 (highest seen: 14), this method returns 12.
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) LatestChainInfo() (int, ChainInfo) { + var nLiveNodes int + ch := ChainInfo{ + TotalDifficulty: big.NewInt(0), + } for _, n := range c.nodes { - if s, num, td := n.StateAndLatest(); s == nodeStateAlive { + if s, nodeChainInfo := n.StateAndLatest(); s == nodeStateAlive { nLiveNodes++ - if num > blockNumber { - blockNumber = num - } - if td != nil && td.Cmp(totalDifficulty) > 0 { - totalDifficulty = td - } + ch.BlockNumber = max(ch.BlockNumber, nodeChainInfo.BlockNumber) + ch.FinalizedBlockNumber = max(ch.FinalizedBlockNumber, nodeChainInfo.FinalizedBlockNumber) + ch.TotalDifficulty = MaxTotalDifficulty(ch.TotalDifficulty, nodeChainInfo.TotalDifficulty) } } - return + return nLiveNodes, ch +} + +// HighestUserObservations - returns highest ChainInfo ever observed by any user of the MultiNode +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) HighestUserObservations() ChainInfo { + ch := ChainInfo{ + TotalDifficulty: big.NewInt(0), + } + for _, n := range c.nodes { + nodeChainInfo := n.HighestUserObservations() + ch.BlockNumber = max(ch.BlockNumber, nodeChainInfo.BlockNumber) + ch.FinalizedBlockNumber = max(ch.FinalizedBlockNumber, nodeChainInfo.FinalizedBlockNumber) + ch.TotalDifficulty = MaxTotalDifficulty(ch.TotalDifficulty, nodeChainInfo.TotalDifficulty) + } + return ch } func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) checkLease() { @@ -295,10 +309,13 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP } c.activeMu.Lock() + defer c.activeMu.Unlock() if bestNode != c.activeNode { + if c.activeNode != nil { + c.activeNode.UnsubscribeAllExceptAliveLoop() + } c.activeNode = bestNode } - c.activeMu.Unlock() } func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) checkLeaseLoop() { @@ -319,7 +336,16 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) runLoop() { defer c.wg.Done() - c.report() + nodeStates := make([]nodeWithState, len(c.nodes)) + for i, n := range c.nodes { + nodeStates[i] = nodeWithState{ + Node: n.String(), + State: n.State().String(), + DeadSince: nil, + } + } + + c.report(nodeStates) monitor := time.NewTicker(utils.WithJitter(c.reportInterval)) defer monitor.Stop() @@ -327,44 +353,54 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP for { select { case <-monitor.C: - c.report() + c.report(nodeStates) case <-c.chStop: return } } } -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) report() { - type nodeWithState struct { - Node string - State string - } +type nodeWithState struct { + Node string + State string + DeadSince *time.Time +} - var total, dead int +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) report(nodesStateInfo []nodeWithState) { + start := time.Now() + var dead int counts := make(map[nodeState]int) - nodeStates := make([]nodeWithState, len(c.nodes)) for i, n := range c.nodes { state := n.State() - 
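The two aggregation methods above are easy to conflate, so here is a standalone sketch (simplified types, not the PR's code) that replays the Node A/B example from the `LatestChainInfo` doc comment:

```go
package main

import "fmt"

// view holds, per node, the block most recently received and the highest ever seen.
type view struct{ latest, highestEver int64 }

func main() {
	// Node A most recently saw block 10 (highest ever: 15);
	// Node B most recently saw block 12 (highest ever: 14).
	nodes := []view{{latest: 10, highestEver: 15}, {latest: 12, highestEver: 14}}

	var latestChainInfo, highestUserObservations int64
	for _, n := range nodes {
		latestChainInfo = max(latestChainInfo, n.latest)                      // LatestChainInfo view
		highestUserObservations = max(highestUserObservations, n.highestEver) // HighestUserObservations view
	}
	fmt.Println(latestChainInfo, highestUserObservations) // 12 15
}
```

`max` here is the Go 1.21 builtin, which the diff itself already relies on.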
nodeStates[i] = nodeWithState{n.String(), state.String()} - total++ - if state != nodeStateAlive { + counts[state]++ + nodesStateInfo[i].State = state.String() + if state == nodeStateAlive { + nodesStateInfo[i].DeadSince = nil + continue + } + + if nodesStateInfo[i].DeadSince == nil { + nodesStateInfo[i].DeadSince = &start + } + + if start.Sub(*nodesStateInfo[i].DeadSince) >= c.deathDeclarationDelay { dead++ } - counts[state]++ } for _, state := range allNodeStates { count := counts[state] PromMultiNodeRPCNodeStates.WithLabelValues(c.chainFamily, c.chainID.String(), state.String()).Set(float64(count)) } + total := len(c.nodes) live := total - dead - c.lggr.Tracew(fmt.Sprintf("MultiNode state: %d/%d nodes are alive", live, total), "nodeStates", nodeStates) + c.lggr.Tracew(fmt.Sprintf("MultiNode state: %d/%d nodes are alive", live, total), "nodeStates", nodesStateInfo) if total == dead { rerr := fmt.Errorf("no primary nodes available: 0/%d nodes are alive", total) - c.lggr.Criticalw(rerr.Error(), "nodeStates", nodeStates) + c.lggr.Criticalw(rerr.Error(), "nodeStates", nodesStateInfo) c.SvcErrBuffer.Append(rerr) } else if dead > 0 { - c.lggr.Errorw(fmt.Sprintf("At least one primary node is dead: %d/%d nodes are alive", live, total), "nodeStates", nodeStates) + c.lggr.Errorw(fmt.Sprintf("At least one primary node is dead: %d/%d nodes are alive", live, total), "nodeStates", nodesStateInfo) } } @@ -779,12 +815,12 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP return n.RPC().SimulateTransaction(ctx, tx) } -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) Subscribe(ctx context.Context, channel chan<- HEAD, args ...interface{}) (s types.Subscription, err error) { +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) SubscribeNewHead(ctx context.Context, channel chan<- HEAD) (s types.Subscription, err error) { n, err := c.selectNode() if err != nil { return s, err } - return n.RPC().Subscribe(ctx, channel, args...) 
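The `DeadSince` bookkeeping in `report` above means a flapping node is not counted as dead immediately; it must remain non-alive for at least `deathDeclarationDelay`. A self-contained sketch of that counting rule, with an invented helper name:

```go
package main

import (
	"fmt"
	"time"
)

// countDead counts nodes that have been non-alive (DeadSince != nil) for at
// least delay, mirroring the per-node rule applied in report.
func countDead(deadSince []*time.Time, now time.Time, delay time.Duration) int {
	dead := 0
	for _, since := range deadSince {
		if since != nil && now.Sub(*since) >= delay {
			dead++
		}
	}
	return dead
}

func main() {
	now := time.Now()
	flapping := now.Add(-5 * time.Second) // failed recently: not yet declared dead
	down := now.Add(-5 * time.Minute)     // failed long ago: declared dead
	fmt.Println(countDead([]*time.Time{nil, &flapping, &down}, now, time.Minute)) // 1
}
```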
+ return n.RPC().SubscribeNewHead(ctx, channel) } func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) TokenBalance(ctx context.Context, account ADDR, tokenAddr ADDR) (b *big.Int, err error) { diff --git a/common/client/multi_node_test.go b/common/client/multi_node_test.go index 3076d99b618..ffef0c29d56 100644 --- a/common/client/multi_node_test.go +++ b/common/client/multi_node_test.go @@ -29,16 +29,17 @@ type testMultiNode struct { } type multiNodeOpts struct { - logger logger.Logger - selectionMode string - leaseDuration time.Duration - noNewHeadsThreshold time.Duration - nodes []Node[types.ID, types.Head[Hashable], multiNodeRPCClient] - sendonlys []SendOnlyNode[types.ID, multiNodeRPCClient] - chainID types.ID - chainFamily string - classifySendTxError func(tx any, err error) SendTxReturnCode - sendTxSoftTimeout time.Duration + logger logger.Logger + selectionMode string + leaseDuration time.Duration + noNewHeadsThreshold time.Duration + nodes []Node[types.ID, types.Head[Hashable], multiNodeRPCClient] + sendonlys []SendOnlyNode[types.ID, multiNodeRPCClient] + chainID types.ID + chainFamily string + classifySendTxError func(tx any, err error) SendTxReturnCode + sendTxSoftTimeout time.Duration + deathDeclarationDelay time.Duration } func newTestMultiNode(t *testing.T, opts multiNodeOpts) testMultiNode { @@ -49,7 +50,7 @@ func newTestMultiNode(t *testing.T, opts multiNodeOpts) testMultiNode { result := NewMultiNode[types.ID, *big.Int, Hashable, Hashable, any, Hashable, any, any, types.Receipt[Hashable, Hashable], Hashable, types.Head[Hashable], multiNodeRPCClient, any](opts.logger, opts.selectionMode, opts.leaseDuration, opts.noNewHeadsThreshold, opts.nodes, opts.sendonlys, - opts.chainID, opts.chainFamily, opts.classifySendTxError, opts.sendTxSoftTimeout) + opts.chainID, opts.chainFamily, opts.classifySendTxError, opts.sendTxSoftTimeout, opts.deathDeclarationDelay) return testMultiNode{ result.(*multiNode[types.ID, *big.Int, Hashable, Hashable, any, Hashable, any, any, types.Receipt[Hashable, Hashable], Hashable, types.Head[Hashable], multiNodeRPCClient, any]), @@ -67,14 +68,21 @@ func newHealthyNode(t *testing.T, chainID types.ID) *mockNode[types.ID, types.He } func newNodeWithState(t *testing.T, chainID types.ID, state nodeState) *mockNode[types.ID, types.Head[Hashable], multiNodeRPCClient] { + node := newDialableNode(t, chainID) + node.On("State").Return(state).Maybe() + return node +} + +func newDialableNode(t *testing.T, chainID types.ID) *mockNode[types.ID, types.Head[Hashable], multiNodeRPCClient] { node := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) node.On("ConfiguredChainID").Return(chainID).Once() node.On("Start", mock.Anything).Return(nil).Once() node.On("Close").Return(nil).Once() - node.On("State").Return(state).Maybe() node.On("String").Return(fmt.Sprintf("healthy_node_%d", rand.Int())).Maybe() + node.On("SetPoolChainInfoProvider", mock.Anything).Once() return node } + func TestMultiNode_Dial(t *testing.T) { t.Parallel() @@ -111,6 +119,7 @@ func TestMultiNode_Dial(t *testing.T) { node := newMockNode(t) chainID := types.RandomID() node.On("ConfiguredChainID").Return(chainID).Once() + node.On("SetPoolChainInfoProvider", mock.Anything).Once() expectedError := errors.New("failed to start node") node.On("Start", mock.Anything).Return(expectedError).Once() mn := newTestMultiNode(t, multiNodeOpts{ @@ -128,6 +137,7 @@ func TestMultiNode_Dial(t *testing.T) { node1 := 
newHealthyNode(t, chainID) node2 := newMockNode(t) node2.On("ConfiguredChainID").Return(chainID).Once() + node2.On("SetPoolChainInfoProvider", mock.Anything).Once() expectedError := errors.New("failed to start node") node2.On("Start", mock.Anything).Return(expectedError).Once() @@ -219,6 +229,7 @@ func TestMultiNode_Report(t *testing.T) { logger: lggr, }) mn.reportInterval = tests.TestInterval + mn.deathDeclarationDelay = tests.TestInterval defer func() { assert.NoError(t, mn.Close()) }() err := mn.Dial(tests.Context(t)) require.NoError(t, err) @@ -236,6 +247,7 @@ func TestMultiNode_Report(t *testing.T) { logger: lggr, }) mn.reportInterval = tests.TestInterval + mn.deathDeclarationDelay = tests.TestInterval defer func() { assert.NoError(t, mn.Close()) }() err := mn.Dial(tests.Context(t)) require.NoError(t, err) @@ -377,6 +389,7 @@ func TestMultiNode_selectNode(t *testing.T) { chainID := types.RandomID() oldBest := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) oldBest.On("String").Return("oldBest").Maybe() + oldBest.On("UnsubscribeAllExceptAliveLoop").Once() newBest := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) newBest.On("String").Return("newBest").Maybe() mn := newTestMultiNode(t, multiNodeOpts{ @@ -417,49 +430,94 @@ func TestMultiNode_selectNode(t *testing.T) { }) } -func TestMultiNode_nLiveNodes(t *testing.T) { +func TestMultiNode_ChainInfo(t *testing.T) { t.Parallel() type nodeParams struct { - BlockNumber int64 - TotalDifficulty *big.Int - State nodeState + LatestChainInfo ChainInfo + HighestUserObservations ChainInfo + State nodeState } testCases := []struct { - Name string - ExpectedNLiveNodes int - ExpectedBlockNumber int64 - ExpectedTotalDifficulty *big.Int - NodeParams []nodeParams + Name string + ExpectedNLiveNodes int + ExpectedLatestChainInfo ChainInfo + ExpectedHighestUserObservations ChainInfo + NodeParams []nodeParams }{ { - Name: "no nodes", - ExpectedTotalDifficulty: big.NewInt(0), + Name: "no nodes", + ExpectedLatestChainInfo: ChainInfo{ + TotalDifficulty: big.NewInt(0), + }, + ExpectedHighestUserObservations: ChainInfo{ + TotalDifficulty: big.NewInt(0), + }, }, { - Name: "Best node is not healthy", - ExpectedTotalDifficulty: big.NewInt(10), - ExpectedBlockNumber: 20, - ExpectedNLiveNodes: 3, + Name: "Best node is not healthy", + ExpectedNLiveNodes: 3, + ExpectedLatestChainInfo: ChainInfo{ + BlockNumber: 20, + FinalizedBlockNumber: 10, + TotalDifficulty: big.NewInt(10), + }, + ExpectedHighestUserObservations: ChainInfo{ + BlockNumber: 1005, + FinalizedBlockNumber: 995, + TotalDifficulty: big.NewInt(2005), + }, NodeParams: []nodeParams{ { - State: nodeStateOutOfSync, - BlockNumber: 1000, - TotalDifficulty: big.NewInt(2000), + State: nodeStateOutOfSync, + LatestChainInfo: ChainInfo{ + BlockNumber: 1000, + FinalizedBlockNumber: 990, + TotalDifficulty: big.NewInt(2000), + }, + HighestUserObservations: ChainInfo{ + BlockNumber: 1005, + FinalizedBlockNumber: 995, + TotalDifficulty: big.NewInt(2005), + }, }, { - State: nodeStateAlive, - BlockNumber: 20, - TotalDifficulty: big.NewInt(9), + State: nodeStateAlive, + LatestChainInfo: ChainInfo{ + BlockNumber: 20, + FinalizedBlockNumber: 10, + TotalDifficulty: big.NewInt(9), + }, + HighestUserObservations: ChainInfo{ + BlockNumber: 25, + FinalizedBlockNumber: 15, + TotalDifficulty: big.NewInt(14), + }, }, { - State: nodeStateAlive, - BlockNumber: 19, - TotalDifficulty: big.NewInt(10), + State: nodeStateAlive, + LatestChainInfo: ChainInfo{ + BlockNumber: 19, + FinalizedBlockNumber: 9, + 
TotalDifficulty:      big.NewInt(10),
+					},
+					HighestUserObservations: ChainInfo{
+						BlockNumber:          24,
+						FinalizedBlockNumber: 14,
+						TotalDifficulty:      big.NewInt(15),
+					},
 				},
 				{
-					State:           nodeStateAlive,
-					BlockNumber:     11,
-					TotalDifficulty: nil,
+					State: nodeStateAlive,
+					LatestChainInfo: ChainInfo{
+						BlockNumber:          11,
+						FinalizedBlockNumber: 1,
+						TotalDifficulty:      nil,
+					},
+					HighestUserObservations: ChainInfo{
+						BlockNumber:          16,
+						FinalizedBlockNumber: 6,
+						TotalDifficulty:      nil,
+					},
 				},
 			},
 		},
@@ -475,14 +533,17 @@ func TestMultiNode_nLiveNodes(t *testing.T) {
 		t.Run(tc.Name, func(t *testing.T) {
 			for _, params := range tc.NodeParams {
 				node := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
-				node.On("StateAndLatest").Return(params.State, params.BlockNumber, params.TotalDifficulty)
+				node.On("StateAndLatest").Return(params.State, params.LatestChainInfo)
+				node.On("HighestUserObservations").Return(params.HighestUserObservations)
 				mn.nodes = append(mn.nodes, node)
 			}
 
-			nNodes, blockNum, td := mn.nLiveNodes()
+			nNodes, latestChainInfo := mn.LatestChainInfo()
 			assert.Equal(t, tc.ExpectedNLiveNodes, nNodes)
-			assert.Equal(t, tc.ExpectedTotalDifficulty, td)
-			assert.Equal(t, tc.ExpectedBlockNumber, blockNum)
+			assert.Equal(t, tc.ExpectedLatestChainInfo, latestChainInfo)
+
+			highestChainInfo := mn.HighestUserObservations()
+			assert.Equal(t, tc.ExpectedHighestUserObservations, highestChainInfo)
 		})
 	}
 }
diff --git a/common/client/node.go b/common/client/node.go
index 869ea89c039..7871c622eb4 100644
--- a/common/client/node.go
+++ b/common/client/node.go
@@ -4,7 +4,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"math/big"
 	"net/url"
 	"sync"
 	"time"
@@ -44,12 +43,15 @@ type NodeConfig interface {
 	SyncThreshold() uint32
 	NodeIsSyncingEnabled() bool
 	FinalizedBlockPollInterval() time.Duration
+	EnforceRepeatableRead() bool
+	DeathDeclarationDelay() time.Duration
 }
 
 type ChainConfig interface {
 	NodeNoNewHeadsThreshold() time.Duration
 	FinalityDepth() uint32
 	FinalityTagEnabled() bool
+	FinalizedBlockOffset() uint32
 }
 
 //go:generate mockery --quiet --name Node --structname mockNode --filename "mock_node_test.go" --inpackage --case=underscore
@@ -58,15 +60,21 @@ type Node[
 	CHAIN_ID types.ID,
 	HEAD Head,
 	RPC NodeClient[CHAIN_ID, HEAD],
 ] interface {
-	// State returns nodeState
+	// State returns the most accurate state of the Node at the moment of the call.
+	// While some of the checks may be performed in the background and State may return a cached value,
+	// critical checks, like `FinalizedBlockOutOfSync`, must be executed upon every call.
 	State() nodeState
-	// StateAndLatest returns nodeState with the latest received block number & total difficulty.
-	StateAndLatest() (nodeState, int64, *big.Int)
+	// StateAndLatest returns nodeState with the latest ChainInfo observed by Node during current lifecycle.
+	StateAndLatest() (nodeState, ChainInfo)
+	// HighestUserObservations - returns the highest ChainInfo ever observed by the underlying RPC, excluding results of health check requests
+	HighestUserObservations() ChainInfo
+	SetPoolChainInfoProvider(PoolChainInfoProvider)
 	// Name is a unique identifier for this node.
Name() string String() string RPC() RPC SubscribersCount() int32 + // UnsubscribeAllExceptAliveLoop - closes all subscriptions except the aliveLoop subscription UnsubscribeAllExceptAliveLoop() ConfiguredChainID() CHAIN_ID Order() int32 @@ -96,20 +104,12 @@ type node[ stateMu sync.RWMutex // protects state* fields state nodeState - // Each node is tracking the last received head number and total difficulty - stateLatestBlockNumber int64 - stateLatestTotalDifficulty *big.Int - stateLatestFinalizedBlockNumber int64 + + poolInfoProvider PoolChainInfoProvider stopCh services.StopChan // wg waits for subsidiary goroutines wg sync.WaitGroup - - // nLiveNodes is a passed in function that allows this node to: - // 1. see how many live nodes there are in total, so we can prevent the last alive node in a pool from being - // moved to out-of-sync state. It is better to have one out-of-sync node than no nodes at all. - // 2. compare against the highest head (by number or difficulty) to ensure we don't fall behind too far. - nLiveNodes func() (count int, blockNumber int64, totalDifficulty *big.Int) } func NewNode[ @@ -150,7 +150,6 @@ func NewNode[ "nodeOrder", n.order, ) n.lfcLog = logger.Named(lggr, "Lifecycle") - n.stateLatestBlockNumber = -1 n.rpc = rpc n.chainFamily = chainFamily return n @@ -243,7 +242,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) verifyChainID(callerCtx context.Context, lgg promPoolRPCNodeVerifiesFailed.WithLabelValues(n.chainFamily, n.chainID.String(), n.name).Inc() } - st := n.State() + st := n.getCachedState() switch st { case nodeStateClosed: // The node is already closed, and any subsequent transition is invalid. @@ -258,7 +257,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) verifyChainID(callerCtx context.Context, lgg var err error if chainID, err = n.rpc.ChainID(callerCtx); err != nil { promFailed() - lggr.Errorw("Failed to verify chain ID for node", "err", err, "nodeState", n.State()) + lggr.Errorw("Failed to verify chain ID for node", "err", err, "nodeState", n.getCachedState()) return nodeStateUnreachable } else if chainID.String() != n.chainID.String() { promFailed() @@ -269,7 +268,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) verifyChainID(callerCtx context.Context, lgg n.name, errInvalidChainID, ) - lggr.Errorw("Failed to verify RPC node; remote endpoint returned the wrong chain ID", "err", err, "nodeState", n.State()) + lggr.Errorw("Failed to verify RPC node; remote endpoint returned the wrong chain ID", "err", err, "nodeState", n.getCachedState()) return nodeStateInvalidChainID } @@ -282,7 +281,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) verifyChainID(callerCtx context.Context, lgg // Returns desired state if one of the verifications fails. Otherwise, returns nodeStateAlive. func (n *node[CHAIN_ID, HEAD, RPC]) createVerifiedConn(ctx context.Context, lggr logger.Logger) nodeState { if err := n.rpc.Dial(ctx); err != nil { - n.lfcLog.Errorw("Dial failed: Node is unreachable", "err", err, "nodeState", n.State()) + n.lfcLog.Errorw("Dial failed: Node is unreachable", "err", err, "nodeState", n.getCachedState()) return nodeStateUnreachable } @@ -300,12 +299,12 @@ func (n *node[CHAIN_ID, HEAD, RPC]) verifyConn(ctx context.Context, lggr logger. 
if n.nodePoolCfg.NodeIsSyncingEnabled() { isSyncing, err := n.rpc.IsSyncing(ctx) if err != nil { - lggr.Errorw("Unexpected error while verifying RPC node synchronization status", "err", err, "nodeState", n.State()) + lggr.Errorw("Unexpected error while verifying RPC node synchronization status", "err", err, "nodeState", n.getCachedState()) return nodeStateUnreachable } if isSyncing { - lggr.Errorw("Verification failed: Node is syncing", "nodeState", n.State()) + lggr.Errorw("Verification failed: Node is syncing", "nodeState", n.getCachedState()) return nodeStateSyncing } } @@ -323,3 +322,9 @@ func (n *node[CHAIN_ID, HEAD, RPC]) disconnectAll() { func (n *node[CHAIN_ID, HEAD, RPC]) Order() int32 { return n.order } + +func (n *node[CHAIN_ID, HEAD, RPC]) newCtx() (context.Context, context.CancelFunc) { + ctx, cancel := n.stopCh.NewCtx() + ctx = CtxAddHealthCheckFlag(ctx) + return ctx, cancel +} diff --git a/common/client/node_fsm.go b/common/client/node_fsm.go index e9105dcc060..5a5e2554431 100644 --- a/common/client/node_fsm.go +++ b/common/client/node_fsm.go @@ -63,6 +63,8 @@ func (n nodeState) String() string { return "Closed" case nodeStateSyncing: return "Syncing" + case nodeStateFinalizedBlockOutOfSync: + return "FinalizedBlockOutOfSync" default: return fmt.Sprintf("nodeState(%d)", n) } @@ -98,6 +100,8 @@ const ( // to other primary nodes configured in the MultiNode. In contrast, `nodeStateSyncing` represents the internal state of // the node (RPC). nodeStateSyncing + // nodeStateFinalizedBlockOutOfSync - node is lagging behind on latest finalized block + nodeStateFinalizedBlockOutOfSync // nodeStateLen tracks the number of states nodeStateLen ) @@ -115,15 +119,59 @@ func init() { // State allows reading the current state of the node. func (n *node[CHAIN_ID, HEAD, RPC]) State() nodeState { + n.stateMu.RLock() + defer n.stateMu.RUnlock() + return n.recalculateState() +} + +func (n *node[CHAIN_ID, HEAD, RPC]) getCachedState() nodeState { n.stateMu.RLock() defer n.stateMu.RUnlock() return n.state } -func (n *node[CHAIN_ID, HEAD, RPC]) StateAndLatest() (nodeState, int64, *big.Int) { +func (n *node[CHAIN_ID, HEAD, RPC]) recalculateState() nodeState { + if n.state != nodeStateAlive { + return n.state + } + + // double check that node is not lagging on finalized block + if n.nodePoolCfg.EnforceRepeatableRead() && n.isFinalizedBlockOutOfSync() { + return nodeStateFinalizedBlockOutOfSync + } + + return nodeStateAlive +} + +func (n *node[CHAIN_ID, HEAD, RPC]) isFinalizedBlockOutOfSync() bool { + if n.poolInfoProvider == nil { + return false + } + + highestObservedByCaller := n.poolInfoProvider.HighestUserObservations() + latest, _ := n.rpc.GetInterceptedChainInfo() + if n.chainCfg.FinalityTagEnabled() { + return latest.FinalizedBlockNumber < highestObservedByCaller.FinalizedBlockNumber-int64(n.chainCfg.FinalizedBlockOffset()) + } + + return latest.BlockNumber < highestObservedByCaller.BlockNumber-int64(n.chainCfg.FinalizedBlockOffset()) +} + +// StateAndLatest returns nodeState with the latest ChainInfo observed by Node during current lifecycle. 
+func (n *node[CHAIN_ID, HEAD, RPC]) StateAndLatest() (nodeState, ChainInfo) { n.stateMu.RLock() defer n.stateMu.RUnlock() - return n.state, n.stateLatestBlockNumber, n.stateLatestTotalDifficulty + latest, _ := n.rpc.GetInterceptedChainInfo() + return n.recalculateState(), latest +} + +// HighestUserObservations - returns highest ChainInfo ever observed by external user of the Node +func (n *node[CHAIN_ID, HEAD, RPC]) HighestUserObservations() ChainInfo { + _, highestUserObservations := n.rpc.GetInterceptedChainInfo() + return highestUserObservations +} +func (n *node[CHAIN_ID, HEAD, RPC]) SetPoolChainInfoProvider(poolInfoProvider PoolChainInfoProvider) { + n.poolInfoProvider = poolInfoProvider } // setState is only used by internal state management methods. @@ -243,7 +291,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToUnreachable(fn func()) { } func (n *node[CHAIN_ID, HEAD, RPC]) declareState(state nodeState) { - if n.State() == nodeStateClosed { + if n.getCachedState() == nodeStateClosed { return } switch state { diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index 5947774e202..39e17bb4972 100644 --- a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -56,20 +56,11 @@ func zombieNodeCheckInterval(noNewHeadsThreshold time.Duration) time.Duration { return utils.WithJitter(interval) } -func (n *node[CHAIN_ID, HEAD, RPC]) setLatestReceived(blockNumber int64, totalDifficulty *big.Int) { - n.stateMu.Lock() - defer n.stateMu.Unlock() - n.stateLatestBlockNumber = blockNumber - n.stateLatestTotalDifficulty = totalDifficulty -} - const ( msgCannotDisable = "but cannot disable this connection because there are no other RPC endpoints, or all other RPC endpoints are dead." msgDegradedState = "Chainlink is now operating in a degraded state and urgent action is required to resolve the issue" ) -const rpcSubscriptionMethodNewHeads = "newHeads" - // Node is a FSM // Each state has a loop that goes with it, which monitors the node and moves it into another state as necessary. // Only one loop must run at a time. 
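The `newCtx` helper added to `common/client/node.go` above tags every lifecycle-loop request with the health check flag, which is what allows `GetInterceptedChainInfo` to maintain two separate views. The actual interception happens in the chain-specific RPC client, which is outside this section; the following is only a sketch of the idea, with an invented `sketchRPC` type and assuming the package's existing `context` and `sync` imports:

```go
type sketchRPC struct {
	mu                      sync.Mutex
	latest                  ChainInfo
	highestUserObservations ChainInfo
}

// onNewHead records a head observed while serving a request. Every request
// updates the "latest" view, but only requests that are not flagged as health
// checks (see CtxIsHeathCheckRequest in common/client/ctx.go) may raise the
// user-observation highs, so background polling cannot mask a lagging RPC.
func (r *sketchRPC) onNewHead(ctx context.Context, blockNumber int64) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.latest.BlockNumber = blockNumber
	if !CtxIsHeathCheckRequest(ctx) {
		r.highestUserObservations.BlockNumber = max(r.highestUserObservations.BlockNumber, blockNumber)
	}
}

// GetInterceptedChainInfo exposes both views, matching the shape of the
// NodeClient method added in this PR.
func (r *sketchRPC) GetInterceptedChainInfo() (latest, highestUserObservations ChainInfo) {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.latest, r.highestUserObservations
}
```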
@@ -79,12 +70,12 @@ const rpcSubscriptionMethodNewHeads = "newHeads" // Should only be run ONCE per node, after a successful Dial func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { defer n.wg.Done() - ctx, cancel := n.stopCh.NewCtx() + ctx, cancel := n.newCtx() defer cancel() { // sanity check - state := n.State() + state := n.getCachedState() switch state { case nodeStateAlive: case nodeStateClosed: @@ -99,12 +90,12 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { pollInterval := n.nodePoolCfg.PollInterval() lggr := logger.Sugared(n.lfcLog).Named("Alive").With("noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold, "pollInterval", pollInterval, "pollFailureThreshold", pollFailureThreshold) - lggr.Tracew("Alive loop starting", "nodeState", n.State()) + lggr.Tracew("Alive loop starting", "nodeState", n.getCachedState()) headsC := make(chan HEAD) - sub, err := n.rpc.Subscribe(ctx, headsC, rpcSubscriptionMethodNewHeads) + sub, err := n.rpc.SubscribeNewHead(ctx, headsC) if err != nil { - lggr.Errorw("Initial subscribe for heads failed", "nodeState", n.State()) + lggr.Errorw("Initial subscribe for heads failed", "nodeState", n.getCachedState()) n.declareUnreachable() return } @@ -116,7 +107,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { var outOfSyncT *time.Ticker var outOfSyncTC <-chan time.Time if noNewHeadsTimeoutThreshold > 0 { - lggr.Debugw("Head liveness checking enabled", "nodeState", n.State()) + lggr.Debugw("Head liveness checking enabled", "nodeState", n.getCachedState()) outOfSyncT = time.NewTicker(noNewHeadsTimeoutThreshold) defer outOfSyncT.Stop() outOfSyncTC = outOfSyncT.C @@ -148,7 +139,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { pollFinalizedHeadCh = pollT.C } - _, highestReceivedBlockNumber, _ := n.StateAndLatest() + localHighestChainInfo, _ := n.rpc.GetInterceptedChainInfo() var pollFailures uint32 for { @@ -157,7 +148,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { return case <-pollCh: promPoolRPCNodePolls.WithLabelValues(n.chainID.String(), n.name).Inc() - lggr.Tracew("Polling for version", "nodeState", n.State(), "pollFailures", pollFailures) + lggr.Tracew("Polling for version", "nodeState", n.getCachedState(), "pollFailures", pollFailures) version, err := func(ctx context.Context) (string, error) { ctx, cancel := context.WithTimeout(ctx, pollInterval) defer cancel() @@ -169,16 +160,16 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { promPoolRPCNodePollsFailed.WithLabelValues(n.chainID.String(), n.name).Inc() pollFailures++ } - lggr.Warnw(fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", n.String()), "err", err, "pollFailures", pollFailures, "nodeState", n.State()) + lggr.Warnw(fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", n.String()), "err", err, "pollFailures", pollFailures, "nodeState", n.getCachedState()) } else { - lggr.Debugw("Version poll successful", "nodeState", n.State(), "clientVersion", version) + lggr.Debugw("Version poll successful", "nodeState", n.getCachedState(), "clientVersion", version) promPoolRPCNodePollsSuccess.WithLabelValues(n.chainID.String(), n.name).Inc() pollFailures = 0 } if pollFailureThreshold > 0 && pollFailures >= pollFailureThreshold { - lggr.Errorw(fmt.Sprintf("RPC endpoint failed to respond to %d consecutive polls", pollFailures), "pollFailures", pollFailures, "nodeState", n.State()) - if n.nLiveNodes != nil { - if l, _, _ := n.nLiveNodes(); l < 2 { + lggr.Errorw(fmt.Sprintf("RPC endpoint failed to respond to %d consecutive polls", pollFailures), 
"pollFailures", pollFailures, "nodeState", n.getCachedState()) + if n.poolInfoProvider != nil { + if l, _ := n.poolInfoProvider.LatestChainInfo(); l < 2 { lggr.Criticalf("RPC endpoint failed to respond to polls; %s %s", msgCannotDisable, msgDegradedState) continue } @@ -186,10 +177,10 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { n.declareUnreachable() return } - _, num, td := n.StateAndLatest() - if outOfSync, liveNodes := n.syncStatus(num, td); outOfSync { + _, ci := n.StateAndLatest() + if outOfSync, liveNodes := n.syncStatus(ci.BlockNumber, ci.TotalDifficulty); outOfSync { // note: there must be another live node for us to be out of sync - lggr.Errorw("RPC endpoint has fallen behind", "blockNumber", num, "totalDifficulty", td, "nodeState", n.State()) + lggr.Errorw("RPC endpoint has fallen behind", "blockNumber", ci.BlockNumber, "totalDifficulty", ci.TotalDifficulty, "nodeState", n.getCachedState()) if liveNodes < 2 { lggr.Criticalf("RPC endpoint has fallen behind; %s %s", msgCannotDisable, msgDegradedState) continue @@ -199,40 +190,39 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } case bh, open := <-headsC: if !open { - lggr.Errorw("Subscription channel unexpectedly closed", "nodeState", n.State()) + lggr.Errorw("Subscription channel unexpectedly closed", "nodeState", n.getCachedState()) n.declareUnreachable() return } promPoolRPCNodeNumSeenBlocks.WithLabelValues(n.chainID.String(), n.name).Inc() lggr.Tracew("Got head", "head", bh) - if bh.BlockNumber() > highestReceivedBlockNumber { + if bh.BlockNumber() > localHighestChainInfo.BlockNumber { promPoolRPCNodeHighestSeenBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(bh.BlockNumber())) - lggr.Tracew("Got higher block number, resetting timer", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.State()) - highestReceivedBlockNumber = bh.BlockNumber() + lggr.Tracew("Got higher block number, resetting timer", "latestReceivedBlockNumber", localHighestChainInfo.BlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.getCachedState()) + localHighestChainInfo.BlockNumber = bh.BlockNumber() } else { - lggr.Tracew("Ignoring previously seen block number", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.State()) + lggr.Tracew("Ignoring previously seen block number", "latestReceivedBlockNumber", localHighestChainInfo.BlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.getCachedState()) } if outOfSyncT != nil { outOfSyncT.Reset(noNewHeadsTimeoutThreshold) } - n.setLatestReceived(bh.BlockNumber(), bh.BlockDifficulty()) if !n.chainCfg.FinalityTagEnabled() { latestFinalizedBN := max(bh.BlockNumber()-int64(n.chainCfg.FinalityDepth()), 0) - if latestFinalizedBN > n.stateLatestFinalizedBlockNumber { + if latestFinalizedBN > localHighestChainInfo.FinalizedBlockNumber { promPoolRPCNodeHighestFinalizedBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(latestFinalizedBN)) - n.stateLatestFinalizedBlockNumber = latestFinalizedBN + localHighestChainInfo.FinalizedBlockNumber = latestFinalizedBN } } case err := <-sub.Err(): - lggr.Errorw("Subscription was terminated", "err", err, "nodeState", n.State()) + lggr.Errorw("Subscription was terminated", "err", err, "nodeState", n.getCachedState()) n.declareUnreachable() return case <-outOfSyncTC: // We haven't received a head on the channel for at least the // threshold amount of time, mark it broken - lggr.Errorw(fmt.Sprintf("RPC endpoint detected out of 
sync; no new heads received for %s (last head received was %v)", noNewHeadsTimeoutThreshold, highestReceivedBlockNumber), "nodeState", n.State(), "latestReceivedBlockNumber", highestReceivedBlockNumber, "noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold) - if n.nLiveNodes != nil { - if l, _, _ := n.nLiveNodes(); l < 2 { + lggr.Errorw(fmt.Sprintf("RPC endpoint detected out of sync; no new heads received for %s (last head received was %v)", noNewHeadsTimeoutThreshold, localHighestChainInfo.BlockNumber), "nodeState", n.getCachedState(), "latestReceivedBlockNumber", localHighestChainInfo.BlockNumber, "noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold) + if n.poolInfoProvider != nil { + if l, _ := n.poolInfoProvider.LatestChainInfo(); l < 2 { lggr.Criticalf("RPC endpoint detected out of sync; %s %s", msgCannotDisable, msgDegradedState) // We don't necessarily want to wait the full timeout to check again, we should // check regularly and log noisily in this state @@ -240,7 +230,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { continue } } - n.declareOutOfSync(func(num int64, td *big.Int) bool { return num < highestReceivedBlockNumber }) + n.declareOutOfSync(func(num int64, td *big.Int) bool { return num < localHighestChainInfo.BlockNumber }) return case <-pollFinalizedHeadCh: latestFinalized, err := func(ctx context.Context) (HEAD, error) { @@ -259,9 +249,9 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } latestFinalizedBN := latestFinalized.BlockNumber() - if latestFinalizedBN > n.stateLatestFinalizedBlockNumber { + if latestFinalizedBN > localHighestChainInfo.FinalizedBlockNumber { promPoolRPCNodeHighestFinalizedBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(latestFinalizedBN)) - n.stateLatestFinalizedBlockNumber = latestFinalizedBN + localHighestChainInfo.FinalizedBlockNumber = latestFinalizedBN } } } @@ -276,7 +266,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) isOutOfSync(num int64, td *big.Int) (outOfSy // Always returns outOfSync false for SyncThreshold 0. // liveNodes is only included when outOfSync is true. 
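Before the `syncStatus` implementation just below, here is a simplified sketch of its two comparison modes (stdlib `big.Int` arithmetic stands in for the `bigmath` helper used by the real code):

```go
package main

import (
	"fmt"
	"math/big"
)

// behindByHead mirrors the head-based selection modes: a node is out of sync
// once its block number is more than SyncThreshold behind the best node.
func behindByHead(num, highest int64, threshold uint32) bool {
	return num < highest-int64(threshold)
}

// behindByTotalDifficulty mirrors NodeSelectionModeTotalDifficulty.
func behindByTotalDifficulty(td, greatest *big.Int, threshold uint32) bool {
	bigThreshold := big.NewInt(int64(threshold))
	return td.Cmp(new(big.Int).Sub(greatest, bigThreshold)) < 0
}

func main() {
	fmt.Println(behindByHead(95, 100, 5)) // false: exactly at the threshold
	fmt.Println(behindByHead(94, 100, 5)) // true: more than SyncThreshold behind
	fmt.Println(behindByTotalDifficulty(big.NewInt(94), big.NewInt(100), 5)) // true
}
```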
func (n *node[CHAIN_ID, HEAD, RPC]) syncStatus(num int64, td *big.Int) (outOfSync bool, liveNodes int) { - if n.nLiveNodes == nil { + if n.poolInfoProvider == nil { return // skip for tests } threshold := n.nodePoolCfg.SyncThreshold() @@ -284,14 +274,14 @@ func (n *node[CHAIN_ID, HEAD, RPC]) syncStatus(num int64, td *big.Int) (outOfSyn return // disabled } // Check against best node - ln, highest, greatest := n.nLiveNodes() + ln, ci := n.poolInfoProvider.LatestChainInfo() mode := n.nodePoolCfg.SelectionMode() switch mode { case NodeSelectionModeHighestHead, NodeSelectionModeRoundRobin, NodeSelectionModePriorityLevel: - return num < highest-int64(threshold), ln + return num < ci.BlockNumber-int64(threshold), ln case NodeSelectionModeTotalDifficulty: bigThreshold := big.NewInt(int64(threshold)) - return td.Cmp(bigmath.Sub(greatest, bigThreshold)) < 0, ln + return td.Cmp(bigmath.Sub(ci.TotalDifficulty, bigThreshold)) < 0, ln default: panic("unrecognized NodeSelectionMode: " + mode) } @@ -305,12 +295,12 @@ const ( // outOfSyncLoop takes an OutOfSync node and waits until isOutOfSync returns false to go back to live status func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td *big.Int) bool) { defer n.wg.Done() - ctx, cancel := n.stopCh.NewCtx() + ctx, cancel := n.newCtx() defer cancel() { // sanity check - state := n.State() + state := n.getCachedState() switch state { case nodeStateOutOfSync: case nodeStateClosed: @@ -323,7 +313,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td outOfSyncAt := time.Now() lggr := logger.Sugared(logger.Named(n.lfcLog, "OutOfSync")) - lggr.Debugw("Trying to revive out-of-sync RPC node", "nodeState", n.State()) + lggr.Debugw("Trying to revive out-of-sync RPC node", "nodeState", n.getCachedState()) // Need to redial since out-of-sync nodes are automatically disconnected state := n.createVerifiedConn(ctx, lggr) @@ -332,12 +322,12 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td return } - lggr.Tracew("Successfully subscribed to heads feed on out-of-sync RPC node", "nodeState", n.State()) + lggr.Tracew("Successfully subscribed to heads feed on out-of-sync RPC node", "nodeState", n.getCachedState()) ch := make(chan HEAD) - sub, err := n.rpc.Subscribe(ctx, ch, rpcSubscriptionMethodNewHeads) + sub, err := n.rpc.SubscribeNewHead(ctx, ch) if err != nil { - lggr.Errorw("Failed to subscribe heads on out-of-sync RPC node", "nodeState", n.State(), "err", err) + lggr.Errorw("Failed to subscribe heads on out-of-sync RPC node", "nodeState", n.getCachedState(), "err", err) n.declareUnreachable() return } @@ -349,28 +339,27 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td return case head, open := <-ch: if !open { - lggr.Error("Subscription channel unexpectedly closed", "nodeState", n.State()) + lggr.Error("Subscription channel unexpectedly closed", "nodeState", n.getCachedState()) n.declareUnreachable() return } - n.setLatestReceived(head.BlockNumber(), head.BlockDifficulty()) if !isOutOfSync(head.BlockNumber(), head.BlockDifficulty()) { // back in-sync! flip back into alive loop - lggr.Infow(fmt.Sprintf("%s: %s. Node was out-of-sync for %s", msgInSync, n.String(), time.Since(outOfSyncAt)), "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "nodeState", n.State()) + lggr.Infow(fmt.Sprintf("%s: %s. 
Node was out-of-sync for %s", msgInSync, n.String(), time.Since(outOfSyncAt)), "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "nodeState", n.getCachedState()) n.declareInSync() return } - lggr.Debugw(msgReceivedBlock, "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "nodeState", n.State()) + lggr.Debugw(msgReceivedBlock, "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "nodeState", n.getCachedState()) case <-time.After(zombieNodeCheckInterval(n.chainCfg.NodeNoNewHeadsThreshold())): - if n.nLiveNodes != nil { - if l, _, _ := n.nLiveNodes(); l < 1 { + if n.poolInfoProvider != nil { + if l, _ := n.poolInfoProvider.LatestChainInfo(); l < 1 { lggr.Critical("RPC endpoint is still out of sync, but there are no other available nodes. This RPC node will be forcibly moved back into the live pool in a degraded state") n.declareInSync() return } } case err := <-sub.Err(): - lggr.Errorw("Subscription was terminated", "nodeState", n.State(), "err", err) + lggr.Errorw("Subscription was terminated", "nodeState", n.getCachedState(), "err", err) n.declareUnreachable() return } @@ -379,12 +368,12 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td func (n *node[CHAIN_ID, HEAD, RPC]) unreachableLoop() { defer n.wg.Done() - ctx, cancel := n.stopCh.NewCtx() + ctx, cancel := n.newCtx() defer cancel() { // sanity check - state := n.State() + state := n.getCachedState() switch state { case nodeStateUnreachable: case nodeStateClosed: @@ -397,7 +386,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) unreachableLoop() { unreachableAt := time.Now() lggr := logger.Sugared(logger.Named(n.lfcLog, "Unreachable")) - lggr.Debugw("Trying to revive unreachable RPC node", "nodeState", n.State()) + lggr.Debugw("Trying to revive unreachable RPC node", "nodeState", n.getCachedState()) dialRetryBackoff := iutils.NewRedialBackoff() @@ -406,11 +395,11 @@ func (n *node[CHAIN_ID, HEAD, RPC]) unreachableLoop() { case <-ctx.Done(): return case <-time.After(dialRetryBackoff.Duration()): - lggr.Tracew("Trying to re-dial RPC node", "nodeState", n.State()) + lggr.Tracew("Trying to re-dial RPC node", "nodeState", n.getCachedState()) err := n.rpc.Dial(ctx) if err != nil { - lggr.Errorw(fmt.Sprintf("Failed to redial RPC node; still unreachable: %v", err), "err", err, "nodeState", n.State()) + lggr.Errorw(fmt.Sprintf("Failed to redial RPC node; still unreachable: %v", err), "err", err, "nodeState", n.getCachedState()) continue } @@ -422,7 +411,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) unreachableLoop() { n.setState(nodeStateUnreachable) continue case nodeStateAlive: - lggr.Infow(fmt.Sprintf("Successfully redialled and verified RPC node %s. Node was offline for %s", n.String(), time.Since(unreachableAt)), "nodeState", n.State()) + lggr.Infow(fmt.Sprintf("Successfully redialled and verified RPC node %s. 
Node was offline for %s", n.String(), time.Since(unreachableAt)), "nodeState", n.getCachedState()) fallthrough default: n.declareState(state) @@ -434,12 +423,12 @@ func (n *node[CHAIN_ID, HEAD, RPC]) unreachableLoop() { func (n *node[CHAIN_ID, HEAD, RPC]) invalidChainIDLoop() { defer n.wg.Done() - ctx, cancel := n.stopCh.NewCtx() + ctx, cancel := n.newCtx() defer cancel() { // sanity check - state := n.State() + state := n.getCachedState() switch state { case nodeStateInvalidChainID: case nodeStateClosed: @@ -460,7 +449,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) invalidChainIDLoop() { return } - lggr.Debugw(fmt.Sprintf("Periodically re-checking RPC node %s with invalid chain ID", n.String()), "nodeState", n.State()) + lggr.Debugw(fmt.Sprintf("Periodically re-checking RPC node %s with invalid chain ID", n.String()), "nodeState", n.getCachedState()) chainIDRecheckBackoff := iutils.NewRedialBackoff() @@ -474,7 +463,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) invalidChainIDLoop() { case nodeStateInvalidChainID: continue case nodeStateAlive: - lggr.Infow(fmt.Sprintf("Successfully verified RPC node. Node was offline for %s", time.Since(invalidAt)), "nodeState", n.State()) + lggr.Infow(fmt.Sprintf("Successfully verified RPC node. Node was offline for %s", time.Since(invalidAt)), "nodeState", n.getCachedState()) fallthrough default: n.declareState(state) @@ -486,12 +475,12 @@ func (n *node[CHAIN_ID, HEAD, RPC]) invalidChainIDLoop() { func (n *node[CHAIN_ID, HEAD, RPC]) syncingLoop() { defer n.wg.Done() - ctx, cancel := n.stopCh.NewCtx() + ctx, cancel := n.newCtx() defer cancel() { // sanity check - state := n.State() + state := n.getCachedState() switch state { case nodeStateSyncing: case nodeStateClosed: @@ -504,7 +493,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) syncingLoop() { syncingAt := time.Now() lggr := logger.Sugared(logger.Named(n.lfcLog, "Syncing")) - lggr.Debugw(fmt.Sprintf("Periodically re-checking RPC node %s with syncing status", n.String()), "nodeState", n.State()) + lggr.Debugw(fmt.Sprintf("Periodically re-checking RPC node %s with syncing status", n.String()), "nodeState", n.getCachedState()) // Need to redial since syncing nodes are automatically disconnected state := n.createVerifiedConn(ctx, lggr) if state != nodeStateSyncing { @@ -519,20 +508,20 @@ func (n *node[CHAIN_ID, HEAD, RPC]) syncingLoop() { case <-ctx.Done(): return case <-time.After(recheckBackoff.Duration()): - lggr.Tracew("Trying to recheck if the node is still syncing", "nodeState", n.State()) + lggr.Tracew("Trying to recheck if the node is still syncing", "nodeState", n.getCachedState()) isSyncing, err := n.rpc.IsSyncing(ctx) if err != nil { - lggr.Errorw("Unexpected error while verifying RPC node synchronization status", "err", err, "nodeState", n.State()) + lggr.Errorw("Unexpected error while verifying RPC node synchronization status", "err", err, "nodeState", n.getCachedState()) n.declareUnreachable() return } if isSyncing { - lggr.Errorw("Verification failed: Node is syncing", "nodeState", n.State()) + lggr.Errorw("Verification failed: Node is syncing", "nodeState", n.getCachedState()) continue } - lggr.Infow(fmt.Sprintf("Successfully verified RPC node. Node was syncing for %s", time.Since(syncingAt)), "nodeState", n.State()) + lggr.Infow(fmt.Sprintf("Successfully verified RPC node. 
Node was syncing for %s", time.Since(syncingAt)), "nodeState", n.getCachedState()) n.declareAlive() return } diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go index 4bdfd698f7a..863a15a1fad 100644 --- a/common/client/node_lifecycle_test.go +++ b/common/client/node_lifecycle_test.go @@ -15,7 +15,6 @@ import ( "go.uber.org/zap" "github.com/smartcontractkit/chainlink-common/pkg/logger" - bigmath "github.com/smartcontractkit/chainlink-common/pkg/utils/big_math" "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" clientMocks "github.com/smartcontractkit/chainlink/v2/common/client/mocks" @@ -49,8 +48,8 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() expectedError := errors.New("failed to subscribe to rpc") - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(nil, expectedError).Once() rpc.On("DisconnectAll").Once() + rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(nil, expectedError).Once() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -67,6 +66,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { rpc: rpc, lggr: lggr, }) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() defer func() { assert.NoError(t, node.close()) }() sub := mocks.NewSubscription(t) @@ -74,7 +74,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { close(errChan) sub.On("Err").Return((<-chan error)(errChan)).Once() sub.On("Unsubscribe").Once() - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(sub, nil).Once() + rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil).Once() rpc.On("SetAliveLoopSub", sub).Once() // disconnects all on transfer to unreachable rpc.On("DisconnectAll").Once() @@ -89,13 +89,14 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) sub.On("Unsubscribe").Once() - opts.rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(sub, nil).Once() + opts.rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil).Once() opts.rpc.On("SetAliveLoopSub", sub).Once() return newDialedNode(t, opts) } t.Run("Stays alive and waits for signal", func(t *testing.T) { t.Parallel() rpc := newMockNodeClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newSubscribedNode(t, testNodeOpts{ config: testNodeConfig{}, @@ -111,6 +112,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { t.Run("stays alive while below pollFailureThreshold and resets counter on success", func(t *testing.T) { t.Parallel() rpc := newMockNodeClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) const pollFailureThreshold = 3 node := newSubscribedNode(t, testNodeOpts{ @@ -152,6 +154,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { t.Run("with threshold poll failures, transitions to unreachable", func(t *testing.T) { t.Parallel() rpc := newMockNodeClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) const pollFailureThreshold = 3 node := newSubscribedNode(t, 
testNodeOpts{ @@ -189,9 +192,12 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { lggr: lggr, }) defer func() { assert.NoError(t, node.close()) }() - node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { - return 1, 20, big.NewInt(10) - } + poolInfo := newMockPoolChainInfoProvider(t) + poolInfo.On("LatestChainInfo").Return(1, ChainInfo{ + BlockNumber: 20, + }).Once() + node.SetPoolChainInfoProvider(poolInfo) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{BlockNumber: 20}, ChainInfo{BlockNumber: 20}) pollError := errors.New("failed to get ClientVersion") rpc.On("ClientVersion", mock.Anything).Return("", pollError) node.declareAlive() @@ -213,10 +219,14 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { lggr: lggr, }) defer func() { assert.NoError(t, node.close()) }() - node.stateLatestBlockNumber = 20 - node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { - return 10, syncThreshold + node.stateLatestBlockNumber + 1, big.NewInt(10) - } + const mostRecentBlock = 20 + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{BlockNumber: mostRecentBlock}, ChainInfo{BlockNumber: 30}) + poolInfo := newMockPoolChainInfoProvider(t) + poolInfo.On("LatestChainInfo").Return(10, ChainInfo{ + BlockNumber: syncThreshold + mostRecentBlock + 1, + TotalDifficulty: big.NewInt(10), + }).Once() + node.SetPoolChainInfoProvider(poolInfo) rpc.On("ClientVersion", mock.Anything).Return("", nil) // tries to redial in outOfSync rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Run(func(_ mock.Arguments) { @@ -246,10 +256,14 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { lggr: lggr, }) defer func() { assert.NoError(t, node.close()) }() - node.stateLatestBlockNumber = 20 - node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { - return 1, syncThreshold + node.stateLatestBlockNumber + 1, big.NewInt(10) - } + const mostRecentBlock = 20 + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{BlockNumber: mostRecentBlock}, ChainInfo{BlockNumber: 30}) + poolInfo := newMockPoolChainInfoProvider(t) + poolInfo.On("LatestChainInfo").Return(1, ChainInfo{ + BlockNumber: syncThreshold + mostRecentBlock + 1, + TotalDifficulty: big.NewInt(10), + }).Once() + node.SetPoolChainInfoProvider(poolInfo) rpc.On("ClientVersion", mock.Anything).Return("", nil) node.declareAlive() tests.AssertLogEventually(t, observedLogs, fmt.Sprintf("RPC endpoint has fallen behind; %s %s", msgCannotDisable, msgDegradedState)) @@ -268,19 +282,17 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { lggr: lggr, }) defer func() { assert.NoError(t, node.close()) }() - node.stateLatestBlockNumber = 20 - node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { - return 1, node.stateLatestBlockNumber + 100, big.NewInt(10) - } + const mostRecentBlock = 20 + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{BlockNumber: mostRecentBlock}, ChainInfo{BlockNumber: 30}) rpc.On("ClientVersion", mock.Anything).Return("", nil) node.declareAlive() tests.AssertLogCountEventually(t, observedLogs, "Version poll successful", 2) assert.Equal(t, nodeStateAlive, node.State()) }) - t.Run("when no new heads received for threshold, transitions to out of sync", func(t *testing.T) { t.Parallel() rpc := newMockNodeClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() node := newSubscribedNode(t, testNodeOpts{ config: testNodeConfig{}, chainConfig: clientMocks.ChainConfig{ @@ -307,6 
+319,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { t.Run("when no new heads received for threshold but we are the last live node, forcibly stays alive", func(t *testing.T) { t.Parallel() rpc := newMockNodeClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newSubscribedNode(t, testNodeOpts{ config: testNodeConfig{}, @@ -317,21 +330,24 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { rpc: rpc, }) defer func() { assert.NoError(t, node.close()) }() - node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { - return 1, 20, big.NewInt(10) - } + poolInfo := newMockPoolChainInfoProvider(t) + poolInfo.On("LatestChainInfo").Return(1, ChainInfo{ + BlockNumber: 20, + TotalDifficulty: big.NewInt(10), + }).Once() + node.SetPoolChainInfoProvider(poolInfo) node.declareAlive() tests.AssertLogEventually(t, observedLogs, fmt.Sprintf("RPC endpoint detected out of sync; %s %s", msgCannotDisable, msgDegradedState)) assert.Equal(t, nodeStateAlive, node.State()) }) - t.Run("rpc closed head channel", func(t *testing.T) { t.Parallel() rpc := newMockNodeClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) sub.On("Unsubscribe").Once() - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) { + rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { ch := args.Get(1).(chan<- Head) close(ch) }).Return(sub, nil).Once() @@ -354,30 +370,6 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { tests.AssertLogEventually(t, observedLogs, "Subscription channel unexpectedly closed") assert.Equal(t, nodeStateUnreachable, node.State()) }) - t.Run("updates block number and difficulty on new head", func(t *testing.T) { - t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) - sub := mocks.NewSubscription(t) - sub.On("Err").Return((<-chan error)(nil)) - sub.On("Unsubscribe").Once() - expectedBlockNumber := rand.Int64() - expectedDiff := big.NewInt(rand.Int64()) - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) { - ch := args.Get(1).(chan<- Head) - go writeHeads(t, ch, head{BlockNumber: expectedBlockNumber, BlockDifficulty: expectedDiff}) - }).Return(sub, nil).Once() - rpc.On("SetAliveLoopSub", sub).Once() - node := newDialedNode(t, testNodeOpts{ - config: testNodeConfig{}, - rpc: rpc, - }) - defer func() { assert.NoError(t, node.close()) }() - node.declareAlive() - tests.AssertEventually(t, func() bool { - state, block, diff := node.StateAndLatest() - return state == nodeStateAlive && block == expectedBlockNumber == bigmath.Equal(diff, expectedDiff) - }) - }) t.Run("If finality tag is not enabled updates finalized block metric using finality depth and latest head", func(t *testing.T) { t.Parallel() rpc := newMockNodeClient[types.ID, Head](t) @@ -387,7 +379,8 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { const blockNumber = 1000 const finalityDepth = 10 const expectedBlock = 990 - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) { + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { ch := 
args.Get(1).(chan<- Head) go writeHeads(t, ch, head{BlockNumber: blockNumber - 1}, head{BlockNumber: blockNumber}, head{BlockNumber: blockNumber - 1}) }).Return(sub, nil).Once() @@ -413,11 +406,12 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { t.Run("Logs warning if failed to get finalized block", func(t *testing.T) { t.Parallel() rpc := newMockNodeClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() rpc.On("LatestFinalizedBlock", mock.Anything).Return(newMockHead(t), errors.New("failed to get finalized block")) sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) sub.On("Unsubscribe").Once() - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(sub, nil).Once() + rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil).Once() rpc.On("SetAliveLoopSub", sub).Once() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ @@ -440,10 +434,11 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { head := newMockHead(t) head.On("IsValid").Return(false) rpc.On("LatestFinalizedBlock", mock.Anything).Return(head, nil) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) sub.On("Unsubscribe").Once() - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(sub, nil).Once() + rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil).Once() rpc.On("SetAliveLoopSub", sub).Once() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ @@ -470,12 +465,13 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) sub.On("Unsubscribe").Once() - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) { + rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { ch := args.Get(1).(chan<- Head) // ensure that "calculated" finalized head is larger than actual, to ensure we are correctly setting // the metric go writeHeads(t, ch, head{BlockNumber: expectedBlock*2 + finalityDepth}) }).Return(sub, nil).Once() + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() rpc.On("SetAliveLoopSub", sub).Once() name := "node-" + rand.Str(5) node := newDialedNode(t, testNodeOpts{ @@ -531,8 +527,9 @@ func setupRPCForAliveLoop(t *testing.T, rpc *mockNodeClient[types.ID, Head]) { aliveSubscription := mocks.NewSubscription(t) aliveSubscription.On("Err").Return((<-chan error)(nil)).Maybe() aliveSubscription.On("Unsubscribe").Maybe() - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(aliveSubscription, nil).Maybe() + rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(aliveSubscription, nil).Maybe() rpc.On("SetAliveLoopSub", mock.Anything).Maybe() + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Maybe() } func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { @@ -577,7 +574,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) outOfSyncSubscription.On("Unsubscribe").Once() heads := []head{{BlockNumber: 7}, {BlockNumber: 11}, {BlockNumber: 13}} - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) 
{ + rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { ch := args.Get(1).(chan<- Head) go writeHeads(t, ch, heads...) }).Return(outOfSyncSubscription, nil).Once() @@ -701,7 +698,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() expectedError := errors.New("failed to subscribe") - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(nil, expectedError) + rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(nil, expectedError) rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() node.declareOutOfSync(stubIsOutOfSync) tests.AssertEventually(t, func() bool { @@ -728,7 +725,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { errChan <- errors.New("subscription was terminate") sub.On("Err").Return((<-chan error)(errChan)) sub.On("Unsubscribe").Once() - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(sub, nil).Once() + rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil).Once() rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() node.declareOutOfSync(stubIsOutOfSync) tests.AssertLogEventually(t, observedLogs, "Subscription was terminated") @@ -754,7 +751,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) sub.On("Unsubscribe").Once() - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) { + rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { ch := args.Get(1).(chan<- Head) close(ch) }).Return(sub, nil).Once() @@ -785,10 +782,11 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) outOfSyncSubscription.On("Unsubscribe").Once() const highestBlock = 1000 - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) { + rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { ch := args.Get(1).(chan<- Head) go writeHeads(t, ch, head{BlockNumber: highestBlock - 1}, head{BlockNumber: highestBlock}) }).Return(outOfSyncSubscription, nil).Once() + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{BlockNumber: highestBlock}, ChainInfo{BlockNumber: highestBlock}) setupRPCForAliveLoop(t, rpc) @@ -815,9 +813,13 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { lggr: lggr, }) defer func() { assert.NoError(t, node.close()) }() - node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { - return 0, 100, big.NewInt(200) - } + poolInfo := newMockPoolChainInfoProvider(t) + poolInfo.On("LatestChainInfo").Return(0, ChainInfo{ + BlockNumber: 100, + TotalDifficulty: big.NewInt(200), + }) + node.SetPoolChainInfoProvider(poolInfo) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{BlockNumber: 0}, ChainInfo{BlockNumber: 0}) rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() @@ -825,7 +827,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { outOfSyncSubscription := mocks.NewSubscription(t) outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) outOfSyncSubscription.On("Unsubscribe").Once() - rpc.On("Subscribe", mock.Anything, mock.Anything, 
rpcSubscriptionMethodNewHeads).Return(outOfSyncSubscription, nil).Once() + rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(outOfSyncSubscription, nil).Once() setupRPCForAliveLoop(t, rpc) @@ -1304,9 +1306,8 @@ func TestUnit_NodeLifecycle_syncStatus(t *testing.T) { }) t.Run("skip if syncThreshold is not configured", func(t *testing.T) { node := newTestNode(t, testNodeOpts{}) - node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { - return - } + poolInfo := newMockPoolChainInfoProvider(t) + node.SetPoolChainInfoProvider(poolInfo) outOfSync, liveNodes := node.syncStatus(0, nil) assert.Equal(t, false, outOfSync) assert.Equal(t, 0, liveNodes) @@ -1315,9 +1316,9 @@ node := newTestNode(t, testNodeOpts{ config: testNodeConfig{syncThreshold: 1}, }) - node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { - return - } + poolInfo := newMockPoolChainInfoProvider(t) + poolInfo.On("LatestChainInfo").Return(1, ChainInfo{}).Once() + node.SetPoolChainInfoProvider(poolInfo) assert.Panics(t, func() { _, _ = node.syncStatus(0, nil) }) @@ -1361,9 +1362,12 @@ func TestUnit_NodeLifecycle_syncStatus(t *testing.T) { selectionMode: selectionMode, }, }) - node.nLiveNodes = func() (int, int64, *big.Int) { - return nodesNum, highestBlock, big.NewInt(totalDifficulty) - } + poolInfo := newMockPoolChainInfoProvider(t) + poolInfo.On("LatestChainInfo").Return(nodesNum, ChainInfo{ + BlockNumber: highestBlock, + TotalDifficulty: big.NewInt(totalDifficulty), + }) + node.SetPoolChainInfoProvider(poolInfo) for _, td := range []int64{totalDifficulty - syncThreshold - 1, totalDifficulty - syncThreshold, totalDifficulty, totalDifficulty + 1} { for _, testCase := range testCases { t.Run(fmt.Sprintf("%s: SelectionModeVal: %s: total difficulty: %d", testCase.name, selectionMode, td), func(t *testing.T) { @@ -1413,9 +1417,13 @@ func TestUnit_NodeLifecycle_syncStatus(t *testing.T) { selectionMode: NodeSelectionModeTotalDifficulty, }, }) - node.nLiveNodes = func() (int, int64, *big.Int) { - return nodesNum, highestBlock, big.NewInt(totalDifficulty) - } + + poolInfo := newMockPoolChainInfoProvider(t) + poolInfo.On("LatestChainInfo").Return(nodesNum, ChainInfo{ + BlockNumber: highestBlock, + TotalDifficulty: big.NewInt(totalDifficulty), + }) + node.SetPoolChainInfoProvider(poolInfo) for _, hb := range []int64{highestBlock - syncThreshold - 1, highestBlock - syncThreshold, highestBlock, highestBlock + 1} { for _, testCase := range testCases { t.Run(fmt.Sprintf("%s: SelectionModeVal: %s: highest block: %d", testCase.name, NodeSelectionModeTotalDifficulty, hb), func(t *testing.T) { @@ -1573,3 +1581,98 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { }) }) } + +func TestNode_State(t *testing.T) { + t.Run("If not Alive, returns as is", func(t *testing.T) { + for state := nodeState(0); state < nodeStateLen; state++ { + if state == nodeStateAlive { + continue + } + + node := newTestNode(t, testNodeOpts{}) + node.setState(state) + assert.Equal(t, state, node.State()) + } + }) + t.Run("If repeatable read is not enforced, returns alive", func(t *testing.T) { + node := newTestNode(t, testNodeOpts{}) + node.setState(nodeStateAlive) + assert.Equal(t, nodeStateAlive, node.State()) + }) + testCases := []struct { + Name string + FinalizedBlockOffsetVal uint32 + IsFinalityTagEnabled bool + PoolChainInfo ChainInfo + NodeChainInfo ChainInfo + ExpectedState nodeState + }{ + { + Name: "If finality lag does not exceed 
offset, returns alive (FinalityDepth)", + FinalizedBlockOffsetVal: 15, + PoolChainInfo: ChainInfo{ + BlockNumber: 20, + }, + NodeChainInfo: ChainInfo{ + BlockNumber: 5, + }, + ExpectedState: nodeStateAlive, + }, + { + Name: "If finality lag does not exceed offset, returns alive (FinalityTag)", + FinalizedBlockOffsetVal: 15, + IsFinalityTagEnabled: true, + PoolChainInfo: ChainInfo{ + FinalizedBlockNumber: 20, + }, + NodeChainInfo: ChainInfo{ + FinalizedBlockNumber: 5, + }, + ExpectedState: nodeStateAlive, + }, + { + Name: "If finality lag exceeds offset, returns nodeStateFinalizedBlockOutOfSync (FinalityDepth)", + FinalizedBlockOffsetVal: 15, + PoolChainInfo: ChainInfo{ + BlockNumber: 20, + }, + NodeChainInfo: ChainInfo{ + BlockNumber: 4, + }, + ExpectedState: nodeStateFinalizedBlockOutOfSync, + }, + { + Name: "If finality lag exceeds offset, returns nodeStateFinalizedBlockOutOfSync (FinalityTag)", + FinalizedBlockOffsetVal: 15, + IsFinalityTagEnabled: true, + PoolChainInfo: ChainInfo{ + FinalizedBlockNumber: 20, + }, + NodeChainInfo: ChainInfo{ + FinalizedBlockNumber: 4, + }, + ExpectedState: nodeStateFinalizedBlockOutOfSync, + }, + } + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + rpc := newMockNodeClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(tc.NodeChainInfo, tc.PoolChainInfo).Once() + node := newTestNode(t, testNodeOpts{ + config: testNodeConfig{ + enforceRepeatableRead: true, + }, + chainConfig: clientMocks.ChainConfig{ + FinalizedBlockOffsetVal: tc.FinalizedBlockOffsetVal, + IsFinalityTagEnabled: tc.IsFinalityTagEnabled, + }, + rpc: rpc, + }) + poolInfo := newMockPoolChainInfoProvider(t) + poolInfo.On("HighestUserObservations").Return(tc.PoolChainInfo).Once() + node.SetPoolChainInfoProvider(poolInfo) + node.setState(nodeStateAlive) + assert.Equal(t, tc.ExpectedState, node.State()) + }) + } +} diff --git a/common/client/node_selector_highest_head.go b/common/client/node_selector_highest_head.go index 99a130004a9..25a931fc01b 100644 --- a/common/client/node_selector_highest_head.go +++ b/common/client/node_selector_highest_head.go @@ -24,7 +24,8 @@ func (s highestHeadNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, HE var highestHeadNumber int64 = math.MinInt64 var highestHeadNodes []Node[CHAIN_ID, HEAD, RPC] for _, n := range s { - state, currentHeadNumber, _ := n.StateAndLatest() + state, currentChainInfo := n.StateAndLatest() + currentHeadNumber := currentChainInfo.BlockNumber if state == nodeStateAlive && currentHeadNumber >= highestHeadNumber { if highestHeadNumber < currentHeadNumber { highestHeadNumber = currentHeadNumber diff --git a/common/client/node_selector_highest_head_test.go b/common/client/node_selector_highest_head_test.go index 6e47bbedcae..e245924589c 100644 --- a/common/client/node_selector_highest_head_test.go +++ b/common/client/node_selector_highest_head_test.go @@ -24,13 +24,13 @@ func TestHighestHeadNodeSelector(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) if i == 0 { // first node is out of sync - node.On("StateAndLatest").Return(nodeStateOutOfSync, int64(-1), nil) + node.On("StateAndLatest").Return(nodeStateOutOfSync, ChainInfo{BlockNumber: int64(-1)}) } else if i == 1 { // second node is alive, LatestReceivedBlockNumber = 1 - node.On("StateAndLatest").Return(nodeStateAlive, int64(1), nil) + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(1)}) } else { // third node is alive, LatestReceivedBlockNumber = 2 (best node) - 
node.On("StateAndLatest").Return(nodeStateAlive, int64(2), nil) + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(2)}) } node.On("Order").Maybe().Return(int32(1)) nodes = append(nodes, node) @@ -42,7 +42,7 @@ func TestHighestHeadNodeSelector(t *testing.T) { t.Run("stick to the same node", func(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) // fourth node is alive, LatestReceivedBlockNumber = 2 (same as 3rd) - node.On("StateAndLatest").Return(nodeStateAlive, int64(2), nil) + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(2)}) node.On("Order").Return(int32(1)) nodes = append(nodes, node) @@ -53,7 +53,7 @@ func TestHighestHeadNodeSelector(t *testing.T) { t.Run("another best node", func(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) // fifth node is alive, LatestReceivedBlockNumber = 3 (better than 3rd and 4th) - node.On("StateAndLatest").Return(nodeStateAlive, int64(3), nil) + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(3)}) node.On("Order").Return(int32(1)) nodes = append(nodes, node) @@ -63,10 +63,10 @@ func TestHighestHeadNodeSelector(t *testing.T) { t.Run("nodes never update latest block number", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, int64(-1), nil) + node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(-1)}) node1.On("Order").Return(int32(1)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, int64(-1), nil) + node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(-1)}) node2.On("Order").Return(int32(1)) selector := newNodeSelector(NodeSelectionModeHighestHead, []Node[types.ID, Head, nodeClient]{node1, node2}) assert.Same(t, node1, selector.Select()) @@ -83,10 +83,10 @@ func TestHighestHeadNodeSelector_None(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) if i == 0 { // first node is out of sync - node.On("StateAndLatest").Return(nodeStateOutOfSync, int64(-1), nil) + node.On("StateAndLatest").Return(nodeStateOutOfSync, ChainInfo{BlockNumber: int64(-1)}) } else { // others are unreachable - node.On("StateAndLatest").Return(nodeStateUnreachable, int64(1), nil) + node.On("StateAndLatest").Return(nodeStateUnreachable, ChainInfo{BlockNumber: int64(-1)}) } nodes = append(nodes, node) } @@ -104,7 +104,7 @@ func TestHighestHeadNodeSelectorWithOrder(t *testing.T) { t.Run("same head and order", func(t *testing.T) { for i := 0; i < 3; i++ { node := newMockNode[types.ID, Head, nodeClient](t) - node.On("StateAndLatest").Return(nodeStateAlive, int64(1), nil) + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(1)}) node.On("Order").Return(int32(2)) nodes = append(nodes, node) } @@ -115,15 +115,15 @@ func TestHighestHeadNodeSelectorWithOrder(t *testing.T) { t.Run("same head but different order", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, int64(3), nil) + node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(3)}) node1.On("Order").Return(int32(3)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, int64(3), nil) + node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(3)}) node2.On("Order").Return(int32(1)) node3 := newMockNode[types.ID, Head, nodeClient](t) - 
node3.On("StateAndLatest").Return(nodeStateAlive, int64(3), nil) + node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(3)}) node3.On("Order").Return(int32(2)) nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3} @@ -134,15 +134,15 @@ func TestHighestHeadNodeSelectorWithOrder(t *testing.T) { t.Run("different head but same order", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, int64(1), nil) + node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(1)}) node1.On("Order").Maybe().Return(int32(3)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, int64(2), nil) + node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(2)}) node2.On("Order").Maybe().Return(int32(3)) node3 := newMockNode[types.ID, Head, nodeClient](t) - node3.On("StateAndLatest").Return(nodeStateAlive, int64(3), nil) + node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(3)}) node3.On("Order").Return(int32(3)) nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3} @@ -153,19 +153,19 @@ func TestHighestHeadNodeSelectorWithOrder(t *testing.T) { t.Run("different head and different order", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, int64(10), nil) + node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(10)}) node1.On("Order").Maybe().Return(int32(3)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, int64(11), nil) + node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(11)}) node2.On("Order").Maybe().Return(int32(4)) node3 := newMockNode[types.ID, Head, nodeClient](t) - node3.On("StateAndLatest").Return(nodeStateAlive, int64(11), nil) + node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(11)}) node3.On("Order").Maybe().Return(int32(3)) node4 := newMockNode[types.ID, Head, nodeClient](t) - node4.On("StateAndLatest").Return(nodeStateAlive, int64(10), nil) + node4.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: int64(10)}) node4.On("Order").Maybe().Return(int32(1)) nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3, node4} diff --git a/common/client/node_selector_total_difficulty.go b/common/client/node_selector_total_difficulty.go index 35491503bcc..6b45e75528b 100644 --- a/common/client/node_selector_total_difficulty.go +++ b/common/client/node_selector_total_difficulty.go @@ -27,11 +27,12 @@ func (s totalDifficultyNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID var aliveNodes []Node[CHAIN_ID, HEAD, RPC] for _, n := range s { - state, _, currentTD := n.StateAndLatest() + state, currentChainInfo := n.StateAndLatest() if state != nodeStateAlive { continue } + currentTD := currentChainInfo.TotalDifficulty aliveNodes = append(aliveNodes, n) if currentTD != nil && (highestTD == nil || currentTD.Cmp(highestTD) >= 0) { if highestTD == nil || currentTD.Cmp(highestTD) > 0 { diff --git a/common/client/node_selector_total_difficulty_test.go b/common/client/node_selector_total_difficulty_test.go index 5c43cdd8472..0bc214918d7 100644 --- a/common/client/node_selector_total_difficulty_test.go +++ b/common/client/node_selector_total_difficulty_test.go @@ -1,7 +1,7 @@ package client import ( - big "math/big" + "math/big" "testing" 
"github.com/smartcontractkit/chainlink/v2/common/types" @@ -24,13 +24,13 @@ func TestTotalDifficultyNodeSelector(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) if i == 0 { // first node is out of sync - node.On("StateAndLatest").Return(nodeStateOutOfSync, int64(-1), nil) + node.On("StateAndLatest").Return(nodeStateOutOfSync, ChainInfo{BlockNumber: -1}) } else if i == 1 { // second node is alive - node.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(7)) + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(7)}) } else { // third node is alive and best - node.On("StateAndLatest").Return(nodeStateAlive, int64(2), big.NewInt(8)) + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 2, TotalDifficulty: big.NewInt(8)}) } node.On("Order").Maybe().Return(int32(1)) nodes = append(nodes, node) @@ -42,7 +42,7 @@ func TestTotalDifficultyNodeSelector(t *testing.T) { t.Run("stick to the same node", func(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) // fourth node is alive (same as 3rd) - node.On("StateAndLatest").Return(nodeStateAlive, int64(2), big.NewInt(8)) + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 2, TotalDifficulty: big.NewInt(8)}) node.On("Order").Maybe().Return(int32(1)) nodes = append(nodes, node) @@ -53,7 +53,7 @@ func TestTotalDifficultyNodeSelector(t *testing.T) { t.Run("another best node", func(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) // fifth node is alive (better than 3rd and 4th) - node.On("StateAndLatest").Return(nodeStateAlive, int64(3), big.NewInt(11)) + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 3, TotalDifficulty: big.NewInt(11)}) node.On("Order").Maybe().Return(int32(1)) nodes = append(nodes, node) @@ -63,10 +63,10 @@ func TestTotalDifficultyNodeSelector(t *testing.T) { t.Run("nodes never update latest block number", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, int64(-1), nil) + node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: -1, TotalDifficulty: nil}) node1.On("Order").Maybe().Return(int32(1)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, int64(-1), nil) + node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: -1, TotalDifficulty: nil}) node2.On("Order").Maybe().Return(int32(1)) nodes := []Node[types.ID, Head, nodeClient]{node1, node2} @@ -85,10 +85,10 @@ func TestTotalDifficultyNodeSelector_None(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) if i == 0 { // first node is out of sync - node.On("StateAndLatest").Return(nodeStateOutOfSync, int64(-1), nil) + node.On("StateAndLatest").Return(nodeStateOutOfSync, ChainInfo{BlockNumber: -1, TotalDifficulty: nil}) } else { // others are unreachable - node.On("StateAndLatest").Return(nodeStateUnreachable, int64(1), big.NewInt(7)) + node.On("StateAndLatest").Return(nodeStateUnreachable, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(7)}) } nodes = append(nodes, node) } @@ -106,7 +106,7 @@ func TestTotalDifficultyNodeSelectorWithOrder(t *testing.T) { t.Run("same td and order", func(t *testing.T) { for i := 0; i < 3; i++ { node := newMockNode[types.ID, Head, nodeClient](t) - node.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(10)) + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, 
TotalDifficulty: big.NewInt(10)}) node.On("Order").Return(int32(2)) nodes = append(nodes, node) } @@ -117,15 +117,15 @@ func TestTotalDifficultyNodeSelectorWithOrder(t *testing.T) { t.Run("same td but different order", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, int64(3), big.NewInt(10)) + node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 3, TotalDifficulty: big.NewInt(10)}) node1.On("Order").Return(int32(3)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, int64(3), big.NewInt(10)) + node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 3, TotalDifficulty: big.NewInt(10)}) node2.On("Order").Return(int32(1)) node3 := newMockNode[types.ID, Head, nodeClient](t) - node3.On("StateAndLatest").Return(nodeStateAlive, int64(3), big.NewInt(10)) + node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 3, TotalDifficulty: big.NewInt(10)}) node3.On("Order").Return(int32(2)) nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3} @@ -136,15 +136,15 @@ func TestTotalDifficultyNodeSelectorWithOrder(t *testing.T) { t.Run("different td but same order", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(10)) + node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(10)}) node1.On("Order").Maybe().Return(int32(3)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(11)) + node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(11)}) node2.On("Order").Maybe().Return(int32(3)) node3 := newMockNode[types.ID, Head, nodeClient](t) - node3.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(12)) + node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(12)}) node3.On("Order").Return(int32(3)) nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3} @@ -155,19 +155,19 @@ func TestTotalDifficultyNodeSelectorWithOrder(t *testing.T) { t.Run("different head and different order", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(100)) + node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(100)}) node1.On("Order").Maybe().Return(int32(4)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(110)) + node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(110)}) node2.On("Order").Maybe().Return(int32(5)) node3 := newMockNode[types.ID, Head, nodeClient](t) - node3.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(110)) + node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(110)}) node3.On("Order").Maybe().Return(int32(1)) node4 := newMockNode[types.ID, Head, nodeClient](t) - node4.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(105)) + node4.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1, TotalDifficulty: big.NewInt(105)}) node4.On("Order").Maybe().Return(int32(2)) nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3, 
node4} diff --git a/common/client/node_test.go b/common/client/node_test.go index a97f26555a9..3b971e84902 100644 --- a/common/client/node_test.go +++ b/common/client/node_test.go @@ -17,7 +17,9 @@ type testNodeConfig struct { selectionMode string syncThreshold uint32 nodeIsSyncingEnabled bool + enforceRepeatableRead bool finalizedBlockPollInterval time.Duration + deathDeclarationDelay time.Duration } func (n testNodeConfig) PollFailureThreshold() uint32 { @@ -44,6 +46,14 @@ func (n testNodeConfig) FinalizedBlockPollInterval() time.Duration { return n.finalizedBlockPollInterval } +func (n testNodeConfig) EnforceRepeatableRead() bool { + return n.enforceRepeatableRead +} + +func (n testNodeConfig) DeathDeclarationDelay() time.Duration { + return n.deathDeclarationDelay +} + type testNode struct { *node[types.ID, Head, NodeClient[types.ID, Head]] } diff --git a/common/client/types.go b/common/client/types.go index a27e6a50b73..3d548b9deba 100644 --- a/common/client/types.go +++ b/common/client/types.go @@ -66,6 +66,7 @@ type NodeClient[ connection[CHAIN_ID, HEAD] DialHTTP() error + // DisconnectAll - cancels all inflight requests, terminates all subscriptions and resets latest ChainInfo. DisconnectAll() Close() ClientVersion(context.Context) (string, error) @@ -74,6 +75,17 @@ type NodeClient[ UnsubscribeAllExceptAliveLoop() IsSyncing(ctx context.Context) (bool, error) LatestFinalizedBlock(ctx context.Context) (HEAD, error) + // GetInterceptedChainInfo - returns the latest ChainInfo and the highest ChainInfo observed by the application layer. + // latest ChainInfo is the most recent value received within a NodeClient's current lifecycle between Dial and DisconnectAll. + // highestUserObservations ChainInfo is the highest ChainInfo observed excluding health check calls. + // Its values must not be reset. + // The results of corresponding calls, to get the most recent head and the latest finalized head, must be + // intercepted and reflected in ChainInfo before being returned to a caller. Otherwise, MultiNode is not able to + // provide the repeatable read guarantee. + // DisconnectAll must reset latest ChainInfo to its default value. + // Ensure the implementation is free of the race condition in which values are reset before a request completes, + // leaving latest ChainInfo with information from the previous cycle. + GetInterceptedChainInfo() (latest, highestUserObservations ChainInfo) } // clientAPI includes all the direct RPC methods required by the generalized common client to implement its own. @@ -145,5 +157,41 @@ type connection[ ] interface { ChainID(ctx context.Context) (CHAIN_ID, error) Dial(ctx context.Context) error - Subscribe(ctx context.Context, channel chan<- HEAD, args ...interface{}) (types.Subscription, error) + SubscribeNewHead(ctx context.Context, channel chan<- HEAD) (types.Subscription, error) +} + +// PoolChainInfoProvider - provides aggregation of the node pool's ChainInfo + +//go:generate mockery --quiet --name PoolChainInfoProvider --structname mockPoolChainInfoProvider --filename "mock_pool_chain_info_provider_test.go" --inpackage --case=underscore +type PoolChainInfoProvider interface { + // LatestChainInfo - returns the number of live nodes available in the pool, so we can prevent the last alive node in a pool from being + // moved to the out-of-sync state. It is better to have one out-of-sync node than no nodes at all. + // It also returns the highest latest ChainInfo within the alive nodes. E.g. the most recent and highest block numbers + // observed by Node A are 10 and 15; by Node B, 12 and 14. 
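+ // (The aggregation takes the maximum over the nodes' latest block numbers, 10 vs 12 here, not over their highest-ever observations.)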
This method will return 12. + LatestChainInfo() (int, ChainInfo) + // HighestUserObservations - returns highest ChainInfo ever observed by any user of MultiNode. + HighestUserObservations() ChainInfo +} + +// ChainInfo - defines RPC's or MultiNode's view on the chain +type ChainInfo struct { + BlockNumber int64 + FinalizedBlockNumber int64 + TotalDifficulty *big.Int +} + +func MaxTotalDifficulty(a, b *big.Int) *big.Int { + if a == nil { + if b == nil { + return nil + } + + return big.NewInt(0).Set(b) + } + + if b == nil || a.Cmp(b) >= 0 { + return big.NewInt(0).Set(a) + } + + return big.NewInt(0).Set(b) } diff --git a/common/client/types_test.go b/common/client/types_test.go new file mode 100644 index 00000000000..68d7a3fe78e --- /dev/null +++ b/common/client/types_test.go @@ -0,0 +1,34 @@ +package client + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMaxDifficulty(t *testing.T) { + cases := []struct { + A, B, Result *big.Int + }{ + { + A: nil, B: nil, Result: nil, + }, + { + A: nil, B: big.NewInt(1), Result: big.NewInt(1), + }, + { + A: big.NewInt(1), B: big.NewInt(1), Result: big.NewInt(1), + }, + { + A: big.NewInt(1), B: big.NewInt(2), Result: big.NewInt(2), + }, + } + + for _, test := range cases { + actualResult := MaxTotalDifficulty(test.A, test.B) + assert.Equal(t, test.Result, actualResult, "expected max(%v, %v) to produce %v", test.A, test.B, test.Result) + inverted := MaxTotalDifficulty(test.B, test.A) + assert.Equal(t, actualResult, inverted, "expected max(%v, %v) == max(%v, %v)", test.A, test.B, test.B, test.A) + } +} diff --git a/common/headtracker/head_listener.go b/common/headtracker/head_listener.go index 15977c4dfe4..25715b35280 100644 --- a/common/headtracker/head_listener.go +++ b/common/headtracker/head_listener.go @@ -36,7 +36,7 @@ type headHandler[H types.Head[BLOCK_HASH], BLOCK_HASH types.Hashable] func(ctx c type HeadListener[H types.Head[BLOCK_HASH], BLOCK_HASH types.Hashable] interface { // ListenForNewHeads kicks off the listen loop (not thread safe) // done() must be executed upon leaving ListenForNewHeads() - ListenForNewHeads(handleNewHead headHandler[H, BLOCK_HASH], done func()) + ListenForNewHeads(onSubscribe func(), handleNewHead headHandler[H, BLOCK_HASH], done func()) // ReceivingHeads returns true if the listener is receiving heads (thread safe) ReceivingHeads() bool @@ -88,7 +88,7 @@ func (hl *headListener[HTH, S, ID, BLOCK_HASH]) Name() string { return hl.logger.Name() } -func (hl *headListener[HTH, S, ID, BLOCK_HASH]) ListenForNewHeads(handleNewHead headHandler[HTH, BLOCK_HASH], done func()) { +func (hl *headListener[HTH, S, ID, BLOCK_HASH]) ListenForNewHeads(onSubscription func(), handleNewHead headHandler[HTH, BLOCK_HASH], done func()) { defer done() defer hl.unsubscribe() @@ -99,6 +99,8 @@ func (hl *headListener[HTH, S, ID, BLOCK_HASH]) ListenForNewHeads(handleNewHead if !hl.subscribe(ctx) { break } + + onSubscription() err := hl.receiveHeaders(ctx, handleNewHead) if ctx.Err() != nil { break diff --git a/common/headtracker/head_tracker.go b/common/headtracker/head_tracker.go index 48c4859a64c..bc5f5274c0c 100644 --- a/common/headtracker/head_tracker.go +++ b/common/headtracker/head_tracker.go @@ -40,8 +40,11 @@ const HeadsBufferSize = 10 type HeadTracker[H types.Head[BLOCK_HASH], BLOCK_HASH types.Hashable] interface { services.Service // Backfill given a head will fill in any missing heads up to latestFinalized - Backfill(ctx context.Context, headWithChain, latestFinalized H) (err error) + Backfill(ctx 
context.Context, headWithChain H) (err error) LatestChain() H + // LatestAndFinalizedBlock - returns latest and latest finalized blocks. + // NOTE: Returns latest finalized block as is, ignoring the FinalityTagBypass feature flag. + LatestAndFinalizedBlock(ctx context.Context) (latest, finalized H, err error) } type headTracker[ @@ -114,16 +117,15 @@ func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) Start(ctx context.Context) error // anyway when we connect (but we should not rely on this because it is // not specced). If it happens this is fine, and the head will be // ignored as a duplicate. - err := ht.handleInitialHead(ctx) - if err != nil { - if ctx.Err() != nil { - return ctx.Err() + onSubscribe := func() { + err := ht.handleInitialHead(ctx) + if err != nil { + ht.log.Errorw("Error handling initial head", "err", err.Error()) } - ht.log.Errorw("Error handling initial head", "err", err.Error()) } ht.wgDone.Add(3) - go ht.headListener.ListenForNewHeads(ht.handleNewHead, ht.wgDone.Done) + go ht.headListener.ListenForNewHeads(onSubscribe, ht.handleNewHead, ht.wgDone.Done) go ht.backfillLoop() go ht.broadcastLoop() @@ -145,7 +147,7 @@ func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) handleInitialHead(ctx context.Con } ht.log.Debugw("Got initial head", "head", initialHead, "blockNumber", initialHead.BlockNumber(), "blockHash", initialHead.BlockHash()) - latestFinalized, err := ht.calculateLatestFinalized(ctx, initialHead) + latestFinalized, err := ht.calculateLatestFinalized(ctx, initialHead, ht.htConfig.FinalityTagBypass()) if err != nil { return fmt.Errorf("failed to calculate latest finalized head: %w", err) } @@ -195,7 +197,12 @@ func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) HealthReport() map[string]error { return report } -func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) Backfill(ctx context.Context, headWithChain, latestFinalized HTH) (err error) { +func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) Backfill(ctx context.Context, headWithChain HTH) (err error) { + latestFinalized, err := ht.calculateLatestFinalized(ctx, headWithChain, ht.htConfig.FinalityTagBypass()) + if err != nil { + return fmt.Errorf("failed to calculate finalized block: %w", err) + } + if !latestFinalized.IsValid() { return errors.New("can not perform backfill without a valid latestFinalized head") } @@ -208,6 +215,11 @@ func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) Backfill(ctx context.Context, hea return errors.New(errMsg) } + if headWithChain.BlockNumber()-latestFinalized.BlockNumber() > int64(ht.htConfig.MaxAllowedFinalityDepth()) { + return fmt.Errorf("gap between latest finalized block (%d) and current head (%d) is too large (> %d)", + latestFinalized.BlockNumber(), headWithChain.BlockNumber(), ht.htConfig.MaxAllowedFinalityDepth()) + } + return ht.backfill(ctx, headWithChain, latestFinalized) } @@ -317,13 +329,7 @@ func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) backfillLoop() { break } { - latestFinalized, err := ht.calculateLatestFinalized(ctx, head) - if err != nil { - ht.log.Warnw("Failed to calculate finalized block", "err", err) - continue - } - - err = ht.Backfill(ctx, head, latestFinalized) + err := ht.Backfill(ctx, head) if err != nil { ht.log.Warnw("Unexpected error while backfilling heads", "err", err) } else if ctx.Err() != nil { @@ -335,12 +341,58 @@ func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) backfillLoop() { } } +// LatestAndFinalizedBlock - returns latest and latest finalized blocks. +// NOTE: Returns latest finalized block as is, ignoring the FinalityTagBypass feature flag. 
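+// An illustrative usage sketch (the calling context and error wrapping are assumptions, not part of this change):
+//
+//	latest, finalized, err := ht.LatestAndFinalizedBlock(ctx)
+//	if err != nil {
+//		return fmt.Errorf("failed to get latest and finalized blocks: %w", err)
+//	}
+//	// latest may still be re-orged, while finalized already accounts for the configured
+//	// FinalizedBlockOffset, so it can serve as a conservative finality boundary.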
+// TODO: BCI-3321 use cached values instead of making RPC requests +func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) LatestAndFinalizedBlock(ctx context.Context) (latest, finalized HTH, err error) { + latest, err = ht.client.HeadByNumber(ctx, nil) + if err != nil { + err = fmt.Errorf("failed to get latest block: %w", err) + return + } + + if !latest.IsValid() { + err = fmt.Errorf("expected latest block to be valid") + return + } + + finalized, err = ht.calculateLatestFinalized(ctx, latest, false) + if err != nil { + err = fmt.Errorf("failed to calculate latest finalized block: %w", err) + return + } + if !finalized.IsValid() { + err = fmt.Errorf("expected finalized block to be valid") + return + } + + return +} + +func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) getHeadAtHeight(ctx context.Context, chainHeadHash BLOCK_HASH, blockHeight int64) (HTH, error) { + chainHead := ht.headSaver.Chain(chainHeadHash) + if chainHead.IsValid() { + // check if provided chain contains a block of specified height + headAtHeight, err := chainHead.HeadAtHeight(blockHeight) + if err == nil { + // we are forced to reload the block due to a type mismatch caused by generics + hthAtHeight := ht.headSaver.Chain(headAtHeight.BlockHash()) + // ensure that the block was not removed from the chain by another goroutine + if hthAtHeight.IsValid() { + return hthAtHeight, nil + } + } + } + + return ht.client.HeadByNumber(ctx, big.NewInt(blockHeight)) +} + // calculateLatestFinalized - returns the latest finalized block. It's expected that currentHead is the head of the // canonical chain. There is no guarantee that the returned block belongs to the canonical chain. Additional verification // must be performed before usage. -func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) calculateLatestFinalized(ctx context.Context, currentHead HTH) (latestFinalized HTH, err error) { - if ht.config.FinalityTagEnabled() && !ht.htConfig.FinalityTagBypass() { - latestFinalized, err = ht.client.LatestFinalizedBlock(ctx) +func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) calculateLatestFinalized(ctx context.Context, currentHead HTH, finalityTagBypass bool) (HTH, error) { + if ht.config.FinalityTagEnabled() && !finalityTagBypass { + latestFinalized, err := ht.client.LatestFinalizedBlock(ctx) if err != nil { return latestFinalized, fmt.Errorf("failed to get latest finalized block: %w", err) } @@ -349,22 +401,22 @@ func (ht *headTracker[HTH, S, ID, BLOCK_HASH]) calculateLatestFinalized(ctx cont return latestFinalized, fmt.Errorf("failed to get valid latest finalized block") } - if currentHead.BlockNumber()-latestFinalized.BlockNumber() > int64(ht.htConfig.MaxAllowedFinalityDepth()) { - return latestFinalized, fmt.Errorf("gap between latest finalized block (%d) and current head (%d) is too large (> %d)", - latestFinalized.BlockNumber(), currentHead.BlockNumber(), ht.htConfig.MaxAllowedFinalityDepth()) + if ht.config.FinalizedBlockOffset() == 0 { + return latestFinalized, nil } - return latestFinalized, nil + finalizedBlockNumber := max(latestFinalized.BlockNumber()-int64(ht.config.FinalizedBlockOffset()), 0) + return ht.getHeadAtHeight(ctx, latestFinalized.BlockHash(), finalizedBlockNumber) } // no need to make an additional RPC call on chains with instant finality - if ht.config.FinalityDepth() == 0 { + if ht.config.FinalityDepth() == 0 && ht.config.FinalizedBlockOffset() == 0 { return currentHead, nil } - finalizedBlockNumber := currentHead.BlockNumber() - int64(ht.config.FinalityDepth()) + finalizedBlockNumber := currentHead.BlockNumber() - 
int64(ht.config.FinalityDepth()) - int64(ht.config.FinalizedBlockOffset()) if finalizedBlockNumber <= 0 { finalizedBlockNumber = 0 } - return ht.client.HeadByNumber(ctx, big.NewInt(finalizedBlockNumber)) + return ht.getHeadAtHeight(ctx, currentHead.BlockHash(), finalizedBlockNumber) } // backfill fetches all missing heads up until the latestFinalizedHead diff --git a/common/headtracker/mocks/head_tracker.go b/common/headtracker/mocks/head_tracker.go index 0e30b44154b..b02c605f535 100644 --- a/common/headtracker/mocks/head_tracker.go +++ b/common/headtracker/mocks/head_tracker.go @@ -15,17 +15,17 @@ type HeadTracker[H types.Head[BLOCK_HASH], BLOCK_HASH types.Hashable] struct { mock.Mock } -// Backfill provides a mock function with given fields: ctx, headWithChain, latestFinalized -func (_m *HeadTracker[H, BLOCK_HASH]) Backfill(ctx context.Context, headWithChain H, latestFinalized H) error { - ret := _m.Called(ctx, headWithChain, latestFinalized) +// Backfill provides a mock function with given fields: ctx, headWithChain +func (_m *HeadTracker[H, BLOCK_HASH]) Backfill(ctx context.Context, headWithChain H) error { + ret := _m.Called(ctx, headWithChain) if len(ret) == 0 { panic("no return value specified for Backfill") } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, H, H) error); ok { - r0 = rf(ctx, headWithChain, latestFinalized) + if rf, ok := ret.Get(0).(func(context.Context, H) error); ok { + r0 = rf(ctx, headWithChain) } else { r0 = ret.Error(0) } @@ -71,6 +71,41 @@ func (_m *HeadTracker[H, BLOCK_HASH]) HealthReport() map[string]error { return r0 } +// LatestAndFinalizedBlock provides a mock function with given fields: ctx +func (_m *HeadTracker[H, BLOCK_HASH]) LatestAndFinalizedBlock(ctx context.Context) (H, H, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for LatestAndFinalizedBlock") + } + + var r0 H + var r1 H + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (H, H, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) H); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(H) + } + + if rf, ok := ret.Get(1).(func(context.Context) H); ok { + r1 = rf(ctx) + } else { + r1 = ret.Get(1).(H) + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + // LatestChain provides a mock function with given fields: func (_m *HeadTracker[H, BLOCK_HASH]) LatestChain() H { ret := _m.Called() diff --git a/common/headtracker/types/config.go b/common/headtracker/types/config.go index e0eb422672d..06ad93d39d2 100644 --- a/common/headtracker/types/config.go +++ b/common/headtracker/types/config.go @@ -6,6 +6,7 @@ type Config interface { BlockEmissionIdleWarningThreshold() time.Duration FinalityDepth() uint32 FinalityTagEnabled() bool + FinalizedBlockOffset() uint32 } type HeadTrackerConfig interface { diff --git a/common/types/head.go b/common/types/head.go index 9d927d4f5e4..77a2a7a0dcf 100644 --- a/common/types/head.go +++ b/common/types/head.go @@ -33,6 +33,9 @@ type Head[BLOCK_HASH Hashable] interface { // If not in chain, returns the zero hash HashAtHeight(blockNum int64) BLOCK_HASH + // HeadAtHeight returns head at specified height or an error, if one does not exist in provided chain. + HeadAtHeight(blockNum int64) (Head[BLOCK_HASH], error) + // Returns the total difficulty of the block. For chains who do not have a concept of block // difficulty, return 0. 
BlockDifficulty() *big.Int diff --git a/common/types/mocks/head.go b/common/types/mocks/head.go index 1234fd38935..102368199ae 100644 --- a/common/types/mocks/head.go +++ b/common/types/mocks/head.go @@ -184,6 +184,36 @@ func (_m *Head[BLOCK_HASH]) HashAtHeight(blockNum int64) BLOCK_HASH { return r0 } +// HeadAtHeight provides a mock function with given fields: blockNum +func (_m *Head[BLOCK_HASH]) HeadAtHeight(blockNum int64) (types.Head[BLOCK_HASH], error) { + ret := _m.Called(blockNum) + + if len(ret) == 0 { + panic("no return value specified for HeadAtHeight") + } + + var r0 types.Head[BLOCK_HASH] + var r1 error + if rf, ok := ret.Get(0).(func(int64) (types.Head[BLOCK_HASH], error)); ok { + return rf(blockNum) + } + if rf, ok := ret.Get(0).(func(int64) types.Head[BLOCK_HASH]); ok { + r0 = rf(blockNum) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Head[BLOCK_HASH]) + } + } + + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(blockNum) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // IsValid provides a mock function with given fields: func (_m *Head[BLOCK_HASH]) IsValid() bool { ret := _m.Called() diff --git a/core/chains/evm/client/chain_client.go b/core/chains/evm/client/chain_client.go index 8d1dcb6cc8c..f18c900b038 100644 --- a/core/chains/evm/client/chain_client.go +++ b/core/chains/evm/client/chain_client.go @@ -60,6 +60,9 @@ type Client interface { HeadByNumber(ctx context.Context, n *big.Int) (*evmtypes.Head, error) HeadByHash(ctx context.Context, n common.Hash) (*evmtypes.Head, error) SubscribeNewHead(ctx context.Context, ch chan<- *evmtypes.Head) (ethereum.Subscription, error) + // LatestFinalizedBlock - returns the latest finalized block as it's returned from an RPC. + // CAUTION: Using this method might cause local finality violations. It's highly recommended + // to use HeadTracker to get latest finalized block. 
LatestFinalizedBlock(ctx context.Context) (head *evmtypes.Head, err error) SendTransactionReturnCode(ctx context.Context, tx *types.Transaction, fromAddress common.Address) (commonclient.SendTxReturnCode, error) @@ -132,6 +135,7 @@ func NewChainClient( chainID *big.Int, chainType chaintype.ChainType, clientErrors evmconfig.ClientErrors, + deathDeclarationDelay time.Duration, ) Client { multiNode := commonclient.NewMultiNode( lggr, @@ -146,6 +150,7 @@ func NewChainClient( return ClassifySendError(err, clientErrors, logger.Sugared(logger.Nop()), tx, common.Address{}, chainType.IsL2()) }, 0, // use the default value provided by the implementation + deathDeclarationDelay, ) return &chainClient{ multiNode: multiNode, @@ -306,12 +311,7 @@ func (c *chainClient) SubscribeFilterLogs(ctx context.Context, q ethereum.Filter } func (c *chainClient) SubscribeNewHead(ctx context.Context, ch chan<- *evmtypes.Head) (ethereum.Subscription, error) { - csf := newChainIDSubForwarder(c.ConfiguredChainID(), ch) - err := csf.start(c.multiNode.Subscribe(ctx, csf.srcCh, "newHeads")) - if err != nil { - return nil, err - } - return csf, nil + return c.multiNode.SubscribeNewHead(ctx, ch) } func (c *chainClient) SuggestGasPrice(ctx context.Context) (p *big.Int, err error) { diff --git a/core/chains/evm/client/chain_client_test.go b/core/chains/evm/client/chain_client_test.go index f18ec539677..33955c16451 100644 --- a/core/chains/evm/client/chain_client_test.go +++ b/core/chains/evm/client/chain_client_test.go @@ -751,7 +751,7 @@ func newMockRpc(t *testing.T) *mocks.RPCClient { mockRpc.On("Close").Return(nil).Once() mockRpc.On("ChainID", mock.Anything).Return(testutils.FixtureChainID, nil).Once() // node does not always manage to fully setup aliveLoop, so we have to make calls optional to avoid flakes - mockRpc.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return(client.NewMockSubscription(), nil).Maybe() + mockRpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(client.NewMockSubscription(), nil).Maybe() mockRpc.On("SetAliveLoopSub", mock.Anything).Return().Maybe() return mockRpc } @@ -777,6 +777,7 @@ func TestChainClient_BatchCallContext(t *testing.T) { } mockRpc := newMockRpc(t) + mockRpc.On("GetInterceptedChainInfo").Return(commonclient.ChainInfo{}, commonclient.ChainInfo{}).Maybe() mockRpc.On("BatchCallContext", mock.Anything, b).Run(func(args mock.Arguments) { reqs := args.Get(1).([]rpc.BatchElem) for i := 0; i < len(reqs); i++ { diff --git a/core/chains/evm/client/chain_id_sub_test.go b/core/chains/evm/client/chain_id_sub_test.go deleted file mode 100644 index f959376acca..00000000000 --- a/core/chains/evm/client/chain_id_sub_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package client - -import ( - "errors" - "math/big" - "testing" - - "github.com/stretchr/testify/assert" - - evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" - ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" -) - -func TestChainIDSubForwarder(t *testing.T) { - t.Parallel() - - chainID := big.NewInt(123) - - t.Run("unsubscribe forwarder", func(t *testing.T) { - t.Parallel() - - ch := make(chan *evmtypes.Head) - forwarder := newChainIDSubForwarder(chainID, ch) - sub := NewMockSubscription() - err := forwarder.start(sub, nil) - assert.NoError(t, err) - forwarder.Unsubscribe() - - assert.True(t, sub.unsubscribed) - _, ok := <-sub.Err() - assert.False(t, ok) - _, ok = <-forwarder.Err() - assert.False(t, ok) - }) - - t.Run("unsubscribe forwarder with error", func(t *testing.T) { - 
t.Parallel() - - ch := make(chan *evmtypes.Head) - forwarder := newChainIDSubForwarder(chainID, ch) - sub := NewMockSubscription() - err := forwarder.start(sub, nil) - assert.NoError(t, err) - sub.Errors <- errors.New("boo") - forwarder.Unsubscribe() - - assert.True(t, sub.unsubscribed) - _, ok := <-sub.Err() - assert.False(t, ok) - _, ok = <-forwarder.Err() - assert.False(t, ok) - }) - - t.Run("unsubscribe forwarder with message", func(t *testing.T) { - t.Parallel() - - ch := make(chan *evmtypes.Head) - forwarder := newChainIDSubForwarder(chainID, ch) - sub := NewMockSubscription() - err := forwarder.start(sub, nil) - assert.NoError(t, err) - forwarder.srcCh <- &evmtypes.Head{} - forwarder.Unsubscribe() - - assert.True(t, sub.unsubscribed) - _, ok := <-sub.Err() - assert.False(t, ok) - _, ok = <-forwarder.Err() - assert.False(t, ok) - }) - - t.Run("non nil error parameter", func(t *testing.T) { - t.Parallel() - - ch := make(chan *evmtypes.Head) - forwarder := newChainIDSubForwarder(chainID, ch) - sub := NewMockSubscription() - errIn := errors.New("foo") - errOut := forwarder.start(sub, errIn) - assert.Equal(t, errIn, errOut) - }) - - t.Run("forwarding", func(t *testing.T) { - t.Parallel() - - ch := make(chan *evmtypes.Head) - forwarder := newChainIDSubForwarder(chainID, ch) - sub := NewMockSubscription() - err := forwarder.start(sub, nil) - assert.NoError(t, err) - - head := &evmtypes.Head{ - ID: 1, - } - forwarder.srcCh <- head - receivedHead := <-ch - assert.Equal(t, head, receivedHead) - assert.Equal(t, ubig.New(chainID), receivedHead.EVMChainID) - - expectedErr := errors.New("error") - sub.Errors <- expectedErr - receivedErr := <-forwarder.Err() - assert.Equal(t, expectedErr, receivedErr) - }) -} diff --git a/core/chains/evm/client/config_builder.go b/core/chains/evm/client/config_builder.go index ae41d40dd33..19e0f14fd67 100644 --- a/core/chains/evm/client/config_builder.go +++ b/core/chains/evm/client/config_builder.go @@ -38,6 +38,9 @@ func NewClientConfigs( noNewHeadsThreshold time.Duration, finalityDepth *uint32, finalityTagEnabled *bool, + finalizedBlockOffset *uint32, + enforceRepeatableRead *bool, + deathDeclarationDelay time.Duration, ) (commonclient.ChainConfig, evmconfig.NodePool, []*toml.Node, error) { nodes, err := parseNodeConfigs(nodeCfgs) @@ -45,21 +48,24 @@ func NewClientConfigs( return nil, nil, nil, err } nodePool := toml.NodePool{ - SelectionMode: selectionMode, - LeaseDuration: commonconfig.MustNewDuration(leaseDuration), - PollFailureThreshold: pollFailureThreshold, - PollInterval: commonconfig.MustNewDuration(pollInterval), - SyncThreshold: syncThreshold, - NodeIsSyncingEnabled: nodeIsSyncingEnabled, + SelectionMode: selectionMode, + LeaseDuration: commonconfig.MustNewDuration(leaseDuration), + PollFailureThreshold: pollFailureThreshold, + PollInterval: commonconfig.MustNewDuration(pollInterval), + SyncThreshold: syncThreshold, + NodeIsSyncingEnabled: nodeIsSyncingEnabled, + EnforceRepeatableRead: enforceRepeatableRead, + DeathDeclarationDelay: commonconfig.MustNewDuration(deathDeclarationDelay), } nodePoolCfg := &evmconfig.NodePoolConfig{C: nodePool} chainConfig := &evmconfig.EVMConfig{ C: &toml.EVMConfig{ Chain: toml.Chain{ - ChainType: chaintype.NewChainTypeConfig(chainType), - FinalityDepth: finalityDepth, - FinalityTagEnabled: finalityTagEnabled, - NoNewHeadsThreshold: commonconfig.MustNewDuration(noNewHeadsThreshold), + ChainType: chaintype.NewChainTypeConfig(chainType), + FinalityDepth: finalityDepth, + FinalityTagEnabled: finalityTagEnabled, + 
NoNewHeadsThreshold:  commonconfig.MustNewDuration(noNewHeadsThreshold),
+				FinalizedBlockOffset: finalizedBlockOffset,
 			},
 		},
 	}
diff --git a/core/chains/evm/client/config_builder_test.go b/core/chains/evm/client/config_builder_test.go
index 0e24161b27b..7c08bf18c1d 100644
--- a/core/chains/evm/client/config_builder_test.go
+++ b/core/chains/evm/client/config_builder_test.go
@@ -23,6 +23,9 @@ func TestClientConfigBuilder(t *testing.T) {
 	syncThreshold := ptr(uint32(5))
 	nodeIsSyncingEnabled := ptr(false)
 	chainTypeStr := ""
+	finalizedBlockOffset := ptr[uint32](16)
+	enforceRepeatableRead := ptr(true)
+	deathDeclarationDelay := time.Second * 3
 	nodeConfigs := []client.NodeConfig{
 		{
 			Name: ptr("foo"),
@@ -34,7 +37,8 @@ func TestClientConfigBuilder(t *testing.T) {
 	finalityTagEnabled := ptr(true)
 	noNewHeadsThreshold := time.Second
 	chainCfg, nodePool, nodes, err := client.NewClientConfigs(selectionMode, leaseDuration, chainTypeStr, nodeConfigs,
-		pollFailureThreshold, pollInterval, syncThreshold, nodeIsSyncingEnabled, noNewHeadsThreshold, finalityDepth, finalityTagEnabled)
+		pollFailureThreshold, pollInterval, syncThreshold, nodeIsSyncingEnabled, noNewHeadsThreshold, finalityDepth,
+		finalityTagEnabled, finalizedBlockOffset, enforceRepeatableRead, deathDeclarationDelay)
 	require.NoError(t, err)
 
 	// Validate node pool configs
@@ -44,6 +48,8 @@ func TestClientConfigBuilder(t *testing.T) {
 	require.Equal(t, pollInterval, nodePool.PollInterval())
 	require.Equal(t, *syncThreshold, nodePool.SyncThreshold())
 	require.Equal(t, *nodeIsSyncingEnabled, nodePool.NodeIsSyncingEnabled())
+	require.Equal(t, *enforceRepeatableRead, nodePool.EnforceRepeatableRead())
+	require.Equal(t, deathDeclarationDelay, nodePool.DeathDeclarationDelay())
 
 	// Validate node configs
 	require.Equal(t, *nodeConfigs[0].Name, *nodes[0].Name)
@@ -54,6 +60,7 @@ func TestClientConfigBuilder(t *testing.T) {
 	require.Equal(t, noNewHeadsThreshold, chainCfg.NodeNoNewHeadsThreshold())
 	require.Equal(t, *finalityDepth, chainCfg.FinalityDepth())
 	require.Equal(t, *finalityTagEnabled, chainCfg.FinalityTagEnabled())
+	require.Equal(t, *finalizedBlockOffset, chainCfg.FinalizedBlockOffset())
 
 	// let the compiler tell us when we do not have sufficient data to create an evm client
 	_ = client.NewEvmClient(nodePool, chainCfg, nil, logger.Test(t), big.NewInt(10), nodes, chaintype.ChainType(chainTypeStr))
diff --git a/core/chains/evm/client/evm_client.go b/core/chains/evm/client/evm_client.go
index 4d309440590..fd7fa5868a4 100644
--- a/core/chains/evm/client/evm_client.go
+++ b/core/chains/evm/client/evm_client.go
@@ -35,5 +35,5 @@ func NewEvmClient(cfg evmconfig.NodePool, chainCfg commonclient.ChainConfig, cli
 	}
 
 	return NewChainClient(lggr, cfg.SelectionMode(), cfg.LeaseDuration(), chainCfg.NodeNoNewHeadsThreshold(),
-		primaries, sendonlys, chainID, chainType, clientErrors)
+		primaries, sendonlys, chainID, chainType, clientErrors, cfg.DeathDeclarationDelay())
 }
diff --git a/core/chains/evm/client/evm_client_test.go b/core/chains/evm/client/evm_client_test.go
index 29113d4c3c9..9ad25f96025 100644
--- a/core/chains/evm/client/evm_client_test.go
+++ b/core/chains/evm/client/evm_client_test.go
@@ -24,6 +24,9 @@ func TestNewEvmClient(t *testing.T) {
 	syncThreshold := ptr(uint32(5))
 	nodeIsSyncingEnabled := ptr(false)
 	chainTypeStr := ""
+	finalizedBlockOffset := ptr[uint32](16)
+	enforceRepeatableRead := ptr(true)
+	deathDeclarationDelay := time.Second * 3
 	nodeConfigs := []client.NodeConfig{
 		{
 			Name: ptr("foo"),
@@ -34,7 +37,8 @@ func TestNewEvmClient(t *testing.T) {
 	finalityDepth :=
ptr(uint32(10)) finalityTagEnabled := ptr(true) chainCfg, nodePool, nodes, err := client.NewClientConfigs(selectionMode, leaseDuration, chainTypeStr, nodeConfigs, - pollFailureThreshold, pollInterval, syncThreshold, nodeIsSyncingEnabled, noNewHeadsThreshold, finalityDepth, finalityTagEnabled) + pollFailureThreshold, pollInterval, syncThreshold, nodeIsSyncingEnabled, noNewHeadsThreshold, finalityDepth, + finalityTagEnabled, finalizedBlockOffset, enforceRepeatableRead, deathDeclarationDelay) require.NoError(t, err) client := client.NewEvmClient(nodePool, chainCfg, nil, logger.Test(t), testutils.FixtureChainID, nodes, chaintype.ChainType(chainTypeStr)) diff --git a/core/chains/evm/client/helpers_test.go b/core/chains/evm/client/helpers_test.go index 391d580c1f6..e1017a5564f 100644 --- a/core/chains/evm/client/helpers_test.go +++ b/core/chains/evm/client/helpers_test.go @@ -87,6 +87,8 @@ type TestNodePoolConfig struct { NodeIsSyncingEnabledVal bool NodeFinalizedBlockPollInterval time.Duration NodeErrors config.ClientErrors + EnforceRepeatableReadVal bool + NodeDeathDeclarationDelay time.Duration } func (tc TestNodePoolConfig) PollFailureThreshold() uint32 { return tc.NodePollFailureThreshold } @@ -109,6 +111,14 @@ func (tc TestNodePoolConfig) Errors() config.ClientErrors { return tc.NodeErrors } +func (tc TestNodePoolConfig) EnforceRepeatableRead() bool { + return tc.EnforceRepeatableReadVal +} + +func (tc TestNodePoolConfig) DeathDeclarationDelay() time.Duration { + return tc.NodeDeathDeclarationDelay +} + func NewChainClientWithTestNode( t *testing.T, nodeCfg commonclient.NodeConfig, @@ -150,7 +160,7 @@ func NewChainClientWithTestNode( var chainType chaintype.ChainType clientErrors := NewTestClientErrors() - c := NewChainClient(lggr, nodeCfg.SelectionMode(), leaseDuration, noNewHeadsThreshold, primaries, sendonlys, chainID, chainType, &clientErrors) + c := NewChainClient(lggr, nodeCfg.SelectionMode(), leaseDuration, noNewHeadsThreshold, primaries, sendonlys, chainID, chainType, &clientErrors, 0) t.Cleanup(c.Close) return c, nil } @@ -165,7 +175,7 @@ func NewChainClientWithEmptyNode( lggr := logger.Test(t) var chainType chaintype.ChainType - c := NewChainClient(lggr, selectionMode, leaseDuration, noNewHeadsThreshold, nil, nil, chainID, chainType, nil) + c := NewChainClient(lggr, selectionMode, leaseDuration, noNewHeadsThreshold, nil, nil, chainID, chainType, nil, 0) t.Cleanup(c.Close) return c } @@ -191,7 +201,7 @@ func NewChainClientWithMockedRpc( cfg, clientMocks.ChainConfig{NoNewHeadsThresholdVal: noNewHeadsThreshold}, lggr, *parsed, nil, "eth-primary-node-0", 1, chainID, 1, rpc, "EVM") primaries := []commonclient.Node[*big.Int, *evmtypes.Head, RPCClient]{n} clientErrors := NewTestClientErrors() - c := NewChainClient(lggr, selectionMode, leaseDuration, noNewHeadsThreshold, primaries, nil, chainID, chainType, &clientErrors) + c := NewChainClient(lggr, selectionMode, leaseDuration, noNewHeadsThreshold, primaries, nil, chainID, chainType, &clientErrors, 0) t.Cleanup(c.Close) return c } diff --git a/core/chains/evm/client/mocks/rpc_client.go b/core/chains/evm/client/mocks/rpc_client.go index 1c39d221361..e6cb07af2f2 100644 --- a/core/chains/evm/client/mocks/rpc_client.go +++ b/core/chains/evm/client/mocks/rpc_client.go @@ -9,6 +9,8 @@ import ( common "github.com/ethereum/go-ethereum/common" + commonclient "github.com/smartcontractkit/chainlink/v2/common/client" + commontypes "github.com/smartcontractkit/chainlink/v2/common/types" context "context" @@ -442,6 +444,34 @@ func (_m *RPCClient) 
FilterEvents(ctx context.Context, query ethereum.FilterQuer return r0, r1 } +// GetInterceptedChainInfo provides a mock function with given fields: +func (_m *RPCClient) GetInterceptedChainInfo() (commonclient.ChainInfo, commonclient.ChainInfo) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetInterceptedChainInfo") + } + + var r0 commonclient.ChainInfo + var r1 commonclient.ChainInfo + if rf, ok := ret.Get(0).(func() (commonclient.ChainInfo, commonclient.ChainInfo)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() commonclient.ChainInfo); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(commonclient.ChainInfo) + } + + if rf, ok := ret.Get(1).(func() commonclient.ChainInfo); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(commonclient.ChainInfo) + } + + return r0, r1 +} + // HeaderByHash provides a mock function with given fields: ctx, h func (_m *RPCClient) HeaderByHash(ctx context.Context, h common.Hash) (*coretypes.Header, error) { ret := _m.Called(ctx, h) @@ -805,32 +835,29 @@ func (_m *RPCClient) SimulateTransaction(ctx context.Context, tx *coretypes.Tran return r0 } -// Subscribe provides a mock function with given fields: ctx, channel, args -func (_m *RPCClient) Subscribe(ctx context.Context, channel chan<- *types.Head, args ...interface{}) (commontypes.Subscription, error) { - var _ca []interface{} - _ca = append(_ca, ctx, channel) - _ca = append(_ca, args...) - ret := _m.Called(_ca...) +// SubscribeFilterLogs provides a mock function with given fields: ctx, q, ch +func (_m *RPCClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- coretypes.Log) (ethereum.Subscription, error) { + ret := _m.Called(ctx, q, ch) if len(ret) == 0 { - panic("no return value specified for Subscribe") + panic("no return value specified for SubscribeFilterLogs") } - var r0 commontypes.Subscription + var r0 ethereum.Subscription var r1 error - if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Head, ...interface{}) (commontypes.Subscription, error)); ok { - return rf(ctx, channel, args...) + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- coretypes.Log) (ethereum.Subscription, error)); ok { + return rf(ctx, q, ch) } - if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Head, ...interface{}) commontypes.Subscription); ok { - r0 = rf(ctx, channel, args...) + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- coretypes.Log) ethereum.Subscription); ok { + r0 = rf(ctx, q, ch) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(commontypes.Subscription) + r0 = ret.Get(0).(ethereum.Subscription) } } - if rf, ok := ret.Get(1).(func(context.Context, chan<- *types.Head, ...interface{}) error); ok { - r1 = rf(ctx, channel, args...) 
+ if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery, chan<- coretypes.Log) error); ok { + r1 = rf(ctx, q, ch) } else { r1 = ret.Error(1) } @@ -838,29 +865,29 @@ func (_m *RPCClient) Subscribe(ctx context.Context, channel chan<- *types.Head, return r0, r1 } -// SubscribeFilterLogs provides a mock function with given fields: ctx, q, ch -func (_m *RPCClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- coretypes.Log) (ethereum.Subscription, error) { - ret := _m.Called(ctx, q, ch) +// SubscribeNewHead provides a mock function with given fields: ctx, channel +func (_m *RPCClient) SubscribeNewHead(ctx context.Context, channel chan<- *types.Head) (commontypes.Subscription, error) { + ret := _m.Called(ctx, channel) if len(ret) == 0 { - panic("no return value specified for SubscribeFilterLogs") + panic("no return value specified for SubscribeNewHead") } - var r0 ethereum.Subscription + var r0 commontypes.Subscription var r1 error - if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- coretypes.Log) (ethereum.Subscription, error)); ok { - return rf(ctx, q, ch) + if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Head) (commontypes.Subscription, error)); ok { + return rf(ctx, channel) } - if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- coretypes.Log) ethereum.Subscription); ok { - r0 = rf(ctx, q, ch) + if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Head) commontypes.Subscription); ok { + r0 = rf(ctx, channel) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(ethereum.Subscription) + r0 = ret.Get(0).(commontypes.Subscription) } } - if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery, chan<- coretypes.Log) error); ok { - r1 = rf(ctx, q, ch) + if rf, ok := ret.Get(1).(func(context.Context, chan<- *types.Head) error); ok { + r1 = rf(ctx, channel) } else { r1 = ret.Error(1) } diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index 5b64900a0cb..6499b18f795 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -101,6 +101,7 @@ type RPCClient interface { SuggestGasPrice(ctx context.Context) (p *big.Int, err error) SuggestGasTipCap(ctx context.Context) (t *big.Int, err error) TransactionReceiptGeth(ctx context.Context, txHash common.Hash) (r *types.Receipt, err error) + GetInterceptedChainInfo() (latest, highestUserObservations commonclient.ChainInfo) } type rawclient struct { @@ -132,6 +133,11 @@ type rpcClient struct { // this rpcClient. Closing and replacing should be serialized through // stateMu since it can happen on state transitions as well as rpcClient Close. chStopInFlight chan struct{} + + // intercepted values seen by callers of the rpcClient excluding health check calls. 
Needed to ensure that MultiNode provides a repeatable-read guarantee.
+	highestUserObservations commonclient.ChainInfo
+	// most recent chain info observed during the current lifecycle (reset on DisconnectAll)
+	latestChainInfo commonclient.ChainInfo
 }
 
 // NewRPCClient returns a new *rpcClient as commonclient.RPC
@@ -289,21 +295,32 @@ func (r *rpcClient) getRPCDomain() string {
 }
 
 // registerSub adds the sub to the rpcClient list
-func (r *rpcClient) registerSub(sub ethereum.Subscription) {
+func (r *rpcClient) registerSub(sub ethereum.Subscription, stopInFLightCh chan struct{}) error {
 	r.stateMu.Lock()
 	defer r.stateMu.Unlock()
+	// ensure that the `sub` belongs to the current life cycle of the `rpcClient` and was not scheduled for
+	// termination by a previous `DisconnectAll` call.
+	select {
+	case <-stopInFLightCh:
+		sub.Unsubscribe()
+		return fmt.Errorf("failed to register subscription - all in-flight requests were canceled")
+	default:
+	}
+	// TODO: BCI-3358 - delete sub when caller unsubscribes.
 	r.subs = append(r.subs, sub)
+	return nil
 }
 
-// disconnectAll disconnects all clients connected to the rpcClient
-// WARNING: NOT THREAD-SAFE
-// This must be called from within the r.stateMu lock
+// DisconnectAll disconnects all clients connected to the rpcClient
 func (r *rpcClient) DisconnectAll() {
+	r.stateMu.Lock()
+	defer r.stateMu.Unlock()
 	if r.ws.rpc != nil {
 		r.ws.rpc.Close()
 	}
 	r.cancelInflightRequests()
 	r.unsubscribeAll()
+	r.latestChainInfo = commonclient.ChainInfo{}
 }
 
 // unsubscribeAll unsubscribes all subscriptions
@@ -388,24 +405,35 @@ func (r *rpcClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) err
 	return err
 }
 
-func (r *rpcClient) Subscribe(ctx context.Context, channel chan<- *evmtypes.Head, args ...interface{}) (commontypes.Subscription, error) {
-	ctx, cancel, ws, _ := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+func (r *rpcClient) SubscribeNewHead(ctx context.Context, channel chan<- *evmtypes.Head) (_ commontypes.Subscription, err error) {
+	ctx, cancel, chStopInFlight, ws, _ := r.acquireQueryCtx(ctx)
 	defer cancel()
+	args := []interface{}{"newHeads"}
 	lggr := r.newRqLggr().With("args", args)
 
 	lggr.Debug("RPC call: evmclient.Client#EthSubscribe")
 	start := time.Now()
-	var sub commontypes.Subscription
-	sub, err := ws.rpc.EthSubscribe(ctx, channel, args...)
- if err == nil { - sub = newSubscriptionErrorWrapper(sub, r.rpcClientErrorPrefix()) - r.registerSub(sub) + defer func() { + duration := time.Since(start) + r.logResult(lggr, err, duration, r.getRPCDomain(), "EthSubscribe") + err = r.wrapWS(err) + }() + subForwarder := newSubForwarder(channel, func(head *evmtypes.Head) *evmtypes.Head { + head.EVMChainID = ubig.New(r.chainID) + r.onNewHead(ctx, chStopInFlight, head) + return head + }, r.wrapRPCClientError) + err = subForwarder.start(ws.rpc.EthSubscribe(ctx, subForwarder.srcCh, args...)) + if err != nil { + return } - duration := time.Since(start) - r.logResult(lggr, err, duration, r.getRPCDomain(), "EthSubscribe") + err = r.registerSub(subForwarder, chStopInFlight) + if err != nil { + return + } - return sub, r.wrapWS(err) + return subForwarder, nil } // GethClient wrappers @@ -513,7 +541,7 @@ func (r *rpcClient) HeaderByHash(ctx context.Context, hash common.Hash) (header return } -func (r *rpcClient) LatestFinalizedBlock(ctx context.Context) (head *evmtypes.Head, err error) { +func (r *rpcClient) LatestFinalizedBlock(ctx context.Context) (*evmtypes.Head, error) { return r.blockByNumber(ctx, rpc.FinalizedBlockNumber.String()) } @@ -523,7 +551,25 @@ func (r *rpcClient) BlockByNumber(ctx context.Context, number *big.Int) (head *e } func (r *rpcClient) blockByNumber(ctx context.Context, number string) (head *evmtypes.Head, err error) { - err = r.CallContext(ctx, &head, "eth_getBlockByNumber", number, false) + ctx, cancel, chStopInFlight, ws, http := r.acquireQueryCtx(ctx) + defer cancel() + const method = "eth_getBlockByNumber" + args := []interface{}{number, false} + lggr := r.newRqLggr().With( + "method", method, + "args", args, + ) + + lggr.Debug("RPC call: evmclient.Client#CallContext") + start := time.Now() + if http != nil { + err = r.wrapHTTP(http.rpc.CallContext(ctx, &head, method, args...)) + } else { + err = r.wrapWS(ws.rpc.CallContext(ctx, &head, method, args...)) + } + duration := time.Since(start) + + r.logResult(lggr, err, duration, r.getRPCDomain(), "CallContext") if err != nil { return nil, err } @@ -532,6 +578,14 @@ func (r *rpcClient) blockByNumber(ctx context.Context, number string) (head *evm return } head.EVMChainID = ubig.New(r.chainID) + + switch number { + case rpc.FinalizedBlockNumber.String(): + r.onNewFinalizedHead(ctx, chStopInFlight, head) + case rpc.LatestBlockNumber.String(): + r.onNewHead(ctx, chStopInFlight, head) + } + return } @@ -958,24 +1012,30 @@ func (r *rpcClient) ClientVersion(ctx context.Context) (version string, err erro return } -func (r *rpcClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (sub ethereum.Subscription, err error) { - ctx, cancel, ws, _ := r.makeLiveQueryCtxAndSafeGetClients(ctx) +func (r *rpcClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (_ ethereum.Subscription, err error) { + ctx, cancel, chStopInFlight, ws, _ := r.acquireQueryCtx(ctx) defer cancel() lggr := r.newRqLggr().With("q", q) lggr.Debug("RPC call: evmclient.Client#SubscribeFilterLogs") start := time.Now() - sub, err = ws.geth.SubscribeFilterLogs(ctx, q, ch) - if err == nil { - sub = newSubscriptionErrorWrapper(sub, r.rpcClientErrorPrefix()) - r.registerSub(sub) + defer func() { + duration := time.Since(start) + r.logResult(lggr, err, duration, r.getRPCDomain(), "SubscribeFilterLogs") + err = r.wrapWS(err) + }() + sub := newSubForwarder(ch, nil, r.wrapRPCClientError) + err = sub.start(ws.geth.SubscribeFilterLogs(ctx, q, sub.srcCh)) + 
if err != nil { + return } - err = r.wrapWS(err) - duration := time.Since(start) - r.logResult(lggr, err, duration, r.getRPCDomain(), "SubscribeFilterLogs") + err = r.registerSub(sub, chStopInFlight) + if err != nil { + return + } - return + return sub, nil } func (r *rpcClient) SuggestGasTipCap(ctx context.Context) (tipCap *big.Int, err error) { @@ -1060,17 +1120,23 @@ func (r *rpcClient) wrapHTTP(err error) error { // makeLiveQueryCtxAndSafeGetClients wraps makeQueryCtx func (r *rpcClient) makeLiveQueryCtxAndSafeGetClients(parentCtx context.Context) (ctx context.Context, cancel context.CancelFunc, ws rawclient, http *rawclient) { + ctx, cancel, _, ws, http = r.acquireQueryCtx(parentCtx) + return +} + +func (r *rpcClient) acquireQueryCtx(parentCtx context.Context) (ctx context.Context, cancel context.CancelFunc, + chStopInFlight chan struct{}, ws rawclient, http *rawclient) { // Need to wrap in mutex because state transition can cancel and replace the // context r.stateMu.RLock() - cancelCh := r.chStopInFlight + chStopInFlight = r.chStopInFlight ws = r.ws if r.http != nil { cp := *r.http http = &cp } r.stateMu.RUnlock() - ctx, cancel = makeQueryCtx(parentCtx, cancelCh) + ctx, cancel = makeQueryCtx(parentCtx, chStopInFlight) return } @@ -1134,6 +1200,49 @@ func Name(r *rpcClient) string { return r.name } +func (r *rpcClient) onNewHead(ctx context.Context, requestCh <-chan struct{}, head *evmtypes.Head) { + if head == nil { + return + } + + r.stateMu.Lock() + defer r.stateMu.Unlock() + if !commonclient.CtxIsHeathCheckRequest(ctx) { + r.highestUserObservations.BlockNumber = max(r.highestUserObservations.BlockNumber, head.Number) + r.highestUserObservations.TotalDifficulty = commonclient.MaxTotalDifficulty(r.highestUserObservations.TotalDifficulty, head.TotalDifficulty) + } + select { + case <-requestCh: // no need to update latestChainInfo, as rpcClient already started new life cycle + return + default: + r.latestChainInfo.BlockNumber = head.Number + r.latestChainInfo.TotalDifficulty = head.TotalDifficulty + } +} + +func (r *rpcClient) onNewFinalizedHead(ctx context.Context, requestCh <-chan struct{}, head *evmtypes.Head) { + if head == nil { + return + } + r.stateMu.Lock() + defer r.stateMu.Unlock() + if !commonclient.CtxIsHeathCheckRequest(ctx) { + r.highestUserObservations.FinalizedBlockNumber = max(r.highestUserObservations.FinalizedBlockNumber, head.Number) + } + select { + case <-requestCh: // no need to update latestChainInfo, as rpcClient already started new life cycle + return + default: + r.latestChainInfo.FinalizedBlockNumber = head.Number + } +} + +func (r *rpcClient) GetInterceptedChainInfo() (latest, highestUserObservations commonclient.ChainInfo) { + r.stateMu.RLock() + defer r.stateMu.RUnlock() + return r.latestChainInfo, r.highestUserObservations +} + func ToBlockNumArg(number *big.Int) string { if number == nil { return "latest" diff --git a/core/chains/evm/client/rpc_client_test.go b/core/chains/evm/client/rpc_client_test.go new file mode 100644 index 00000000000..682c4352457 --- /dev/null +++ b/core/chains/evm/client/rpc_client_test.go @@ -0,0 +1,300 @@ +package client_test + +import ( + "context" + "encoding/json" + "fmt" + "math/big" + "net/url" + "testing" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" + "go.uber.org/zap" + + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" + + 
"github.com/smartcontractkit/chainlink-common/pkg/logger" + + commonclient "github.com/smartcontractkit/chainlink/v2/common/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/testutils" + evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" +) + +func makeNewHeadWSMessage(head *evmtypes.Head) string { + asJSON, err := json.Marshal(head) + if err != nil { + panic(fmt.Errorf("failed to marshal head: %w", err)) + } + return fmt.Sprintf(`{"jsonrpc":"2.0","method":"eth_subscription","params":{"subscription":"0x00","result":%s}}`, string(asJSON)) +} + +func TestRPCClient_SubscribeNewHead(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(tests.Context(t), tests.WaitTimeout(t)) + defer cancel() + + chainId := big.NewInt(123456) + lggr := logger.Test(t) + + serverCallBack := func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + if method == "eth_unsubscribe" { + resp.Result = "true" + return + } + assert.Equal(t, "eth_subscribe", method) + if assert.True(t, params.IsArray()) && assert.Equal(t, "newHeads", params.Array()[0].String()) { + resp.Result = `"0x00"` + } + return + } + t.Run("Updates chain info on new blocks", func(t *testing.T) { + server := testutils.NewWSServer(t, chainId, serverCallBack) + wsURL := server.WSURL() + + rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) + defer rpc.Close() + require.NoError(t, rpc.Dial(ctx)) + // set to default values + latest, highestUserObservations := rpc.GetInterceptedChainInfo() + assert.Equal(t, int64(0), latest.BlockNumber) + assert.Equal(t, int64(0), latest.FinalizedBlockNumber) + assert.Nil(t, latest.TotalDifficulty) + assert.Equal(t, int64(0), highestUserObservations.BlockNumber) + assert.Equal(t, int64(0), highestUserObservations.FinalizedBlockNumber) + assert.Nil(t, highestUserObservations.TotalDifficulty) + + ch := make(chan *evmtypes.Head) + sub, err := rpc.SubscribeNewHead(tests.Context(t), ch) + require.NoError(t, err) + defer sub.Unsubscribe() + go server.MustWriteBinaryMessageSync(t, makeNewHeadWSMessage(&evmtypes.Head{Number: 256, TotalDifficulty: big.NewInt(1000)})) + // received 256 head + <-ch + go server.MustWriteBinaryMessageSync(t, makeNewHeadWSMessage(&evmtypes.Head{Number: 128, TotalDifficulty: big.NewInt(500)})) + // received 128 head + <-ch + + latest, highestUserObservations = rpc.GetInterceptedChainInfo() + assert.Equal(t, int64(128), latest.BlockNumber) + assert.Equal(t, int64(0), latest.FinalizedBlockNumber) + assert.Equal(t, big.NewInt(500), latest.TotalDifficulty) + + assertHighestUserObservations := func(highestUserObservations commonclient.ChainInfo) { + assert.Equal(t, int64(256), highestUserObservations.BlockNumber) + assert.Equal(t, int64(0), highestUserObservations.FinalizedBlockNumber) + assert.Equal(t, big.NewInt(1000), highestUserObservations.TotalDifficulty) + } + + assertHighestUserObservations(highestUserObservations) + + // DisconnectAll resets latest + rpc.DisconnectAll() + + latest, highestUserObservations = rpc.GetInterceptedChainInfo() + assert.Equal(t, int64(0), latest.BlockNumber) + assert.Equal(t, int64(0), latest.FinalizedBlockNumber) + assert.Nil(t, latest.TotalDifficulty) + + assertHighestUserObservations(highestUserObservations) + }) + t.Run("App layer observations are not affected by new block if health check flag is present", func(t *testing.T) { + server := testutils.NewWSServer(t, chainId, serverCallBack) + wsURL 
:= server.WSURL()
+
+		rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary)
+		defer rpc.Close()
+		require.NoError(t, rpc.Dial(ctx))
+		ch := make(chan *evmtypes.Head)
+		sub, err := rpc.SubscribeNewHead(commonclient.CtxAddHealthCheckFlag(tests.Context(t)), ch)
+		require.NoError(t, err)
+		defer sub.Unsubscribe()
+		go server.MustWriteBinaryMessageSync(t, makeNewHeadWSMessage(&evmtypes.Head{Number: 256, TotalDifficulty: big.NewInt(1000)}))
+		// received 256 head
+		<-ch
+
+		latest, highestUserObservations := rpc.GetInterceptedChainInfo()
+		assert.Equal(t, int64(256), latest.BlockNumber)
+		assert.Equal(t, int64(0), latest.FinalizedBlockNumber)
+		assert.Equal(t, big.NewInt(1000), latest.TotalDifficulty)
+
+		assert.Equal(t, int64(0), highestUserObservations.BlockNumber)
+		assert.Equal(t, int64(0), highestUserObservations.FinalizedBlockNumber)
+		assert.Equal(t, (*big.Int)(nil), highestUserObservations.TotalDifficulty)
+	})
+	t.Run("Block's chain ID matches configured", func(t *testing.T) {
+		server := testutils.NewWSServer(t, chainId, serverCallBack)
+		wsURL := server.WSURL()
+		rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary)
+		defer rpc.Close()
+		require.NoError(t, rpc.Dial(ctx))
+		ch := make(chan *evmtypes.Head)
+		sub, err := rpc.SubscribeNewHead(tests.Context(t), ch)
+		require.NoError(t, err)
+		defer sub.Unsubscribe()
+		go server.MustWriteBinaryMessageSync(t, makeNewHeadWSMessage(&evmtypes.Head{Number: 256}))
+		head := <-ch
+		require.Equal(t, chainId, head.ChainID())
+	})
+	t.Run("Failed SubscribeNewHead returns and logs proper error", func(t *testing.T) {
+		server := testutils.NewWSServer(t, chainId, func(reqMethod string, reqParams gjson.Result) (resp testutils.JSONRPCResponse) {
+			return resp
+		})
+		wsURL := server.WSURL()
+		observedLggr, observed := logger.TestObserved(t, zap.DebugLevel)
+		rpc := client.NewRPCClient(observedLggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary)
+		require.NoError(t, rpc.Dial(ctx))
+		server.Close()
+		_, err := rpc.SubscribeNewHead(ctx, make(chan *evmtypes.Head))
+		require.ErrorContains(t, err, "RPCClient returned error (rpc)")
+		tests.AssertLogEventually(t, observed, "evmclient.Client#EthSubscribe RPC call failure")
+	})
+	t.Run("Subscription error is properly wrapped", func(t *testing.T) {
+		server := testutils.NewWSServer(t, chainId, serverCallBack)
+		wsURL := server.WSURL()
+		rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary)
+		defer rpc.Close()
+		require.NoError(t, rpc.Dial(ctx))
+		sub, err := rpc.SubscribeNewHead(ctx, make(chan *evmtypes.Head))
+		require.NoError(t, err)
+		go server.MustWriteBinaryMessageSync(t, "invalid msg")
+		select {
+		case err = <-sub.Err():
+			require.ErrorContains(t, err, "RPCClient returned error (rpc): invalid character")
+		case <-ctx.Done():
+			t.Errorf("Expected subscription to return an error, but the test timed out instead")
+		}
+	})
+}
+
+func TestRPCClient_SubscribeFilterLogs(t *testing.T) {
+	t.Parallel()
+
+	chainId := big.NewInt(123456)
+	lggr := logger.Test(t)
+	ctx, cancel := context.WithTimeout(tests.Context(t), tests.WaitTimeout(t))
+	defer cancel()
+	t.Run("Failed SubscribeFilterLogs logs and returns proper error", func(t *testing.T) {
+		server := testutils.NewWSServer(t, chainId, func(reqMethod string, reqParams gjson.Result) (resp testutils.JSONRPCResponse) {
+			return resp
+		})
+		wsURL := server.WSURL()
+		observedLggr, observed := logger.TestObserved(t, zap.DebugLevel)
+		rpc := client.NewRPCClient(observedLggr,
*wsURL, nil, "rpc", 1, chainId, commonclient.Primary)
+		require.NoError(t, rpc.Dial(ctx))
+		server.Close()
+		_, err := rpc.SubscribeFilterLogs(ctx, ethereum.FilterQuery{}, make(chan types.Log))
+		require.ErrorContains(t, err, "RPCClient returned error (rpc)")
+		tests.AssertLogEventually(t, observed, "evmclient.Client#SubscribeFilterLogs RPC call failure")
+	})
+	t.Run("Subscription error is properly wrapped", func(t *testing.T) {
+		server := testutils.NewWSServer(t, chainId, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) {
+			assert.Equal(t, "eth_subscribe", method)
+			if assert.True(t, params.IsArray()) && assert.Equal(t, "logs", params.Array()[0].String()) {
+				resp.Result = `"0x00"`
+				resp.Notify = "{}"
+			}
+			return resp
+		})
+		wsURL := server.WSURL()
+		rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary)
+		defer rpc.Close()
+		require.NoError(t, rpc.Dial(ctx))
+		sub, err := rpc.SubscribeFilterLogs(ctx, ethereum.FilterQuery{}, make(chan types.Log))
+		require.NoError(t, err)
+		go server.MustWriteBinaryMessageSync(t, "invalid msg")
+		errorCtx, cancel := context.WithTimeout(ctx, tests.DefaultWaitTimeout)
+		defer cancel()
+		select {
+		case err = <-sub.Err():
+			require.ErrorContains(t, err, "RPCClient returned error (rpc): invalid character")
+		case <-errorCtx.Done():
+			t.Errorf("Expected subscription to return an error, but the test timed out instead")
+		}
+	})
+}
+
+func TestRPCClient_LatestFinalizedBlock(t *testing.T) {
+	t.Parallel()
+	ctx, cancel := context.WithTimeout(tests.Context(t), tests.WaitTimeout(t))
+	defer cancel()
+
+	chainId := big.NewInt(123456)
+	lggr := logger.Test(t)
+
+	type rpcServer struct {
+		Head *evmtypes.Head
+		URL  *url.URL
+	}
+	createRPCServer := func() *rpcServer {
+		server := &rpcServer{}
+		server.URL = testutils.NewWSServer(t, chainId, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) {
+			assert.Equal(t, "eth_getBlockByNumber", method)
+			if assert.True(t, params.IsArray()) && assert.Equal(t, "finalized", params.Array()[0].String()) {
+				head := server.Head
+				jsonHead, err := json.Marshal(head)
+				if err != nil {
+					panic(fmt.Errorf("failed to marshal head: %w", err))
+				}
+				resp.Result = string(jsonHead)
+			}
+
+			return
+		}).WSURL()
+
+		return server
+	}
+
+	server := createRPCServer()
+	rpc := client.NewRPCClient(lggr, *server.URL, nil, "rpc", 1, chainId, commonclient.Primary)
+	require.NoError(t, rpc.Dial(ctx))
+	defer rpc.Close()
+	server.Head = &evmtypes.Head{Number: 128}
+	// updates chain info
+	_, err := rpc.LatestFinalizedBlock(ctx)
+	require.NoError(t, err)
+	latest, highestUserObservations := rpc.GetInterceptedChainInfo()
+
+	assert.Equal(t, int64(0), highestUserObservations.BlockNumber)
+	assert.Equal(t, int64(128), highestUserObservations.FinalizedBlockNumber)
+
+	assert.Equal(t, int64(0), latest.BlockNumber)
+	assert.Equal(t, int64(128), latest.FinalizedBlockNumber)
+
+	// a lower block number does not update highestUserObservations
+	server.Head = &evmtypes.Head{Number: 127}
+	_, err = rpc.LatestFinalizedBlock(ctx)
+	require.NoError(t, err)
+	latest, highestUserObservations = rpc.GetInterceptedChainInfo()
+
+	assert.Equal(t, int64(0), highestUserObservations.BlockNumber)
+	assert.Equal(t, int64(128), highestUserObservations.FinalizedBlockNumber)
+
+	assert.Equal(t, int64(0), latest.BlockNumber)
+	assert.Equal(t, int64(127), latest.FinalizedBlockNumber)
+
+	// the health check flag prevents a change in highestUserObservations
+	server.Head = &evmtypes.Head{Number: 256}
+	_, err =
rpc.LatestFinalizedBlock(commonclient.CtxAddHealthCheckFlag(ctx)) + require.NoError(t, err) + latest, highestUserObservations = rpc.GetInterceptedChainInfo() + + assert.Equal(t, int64(0), highestUserObservations.BlockNumber) + assert.Equal(t, int64(128), highestUserObservations.FinalizedBlockNumber) + + assert.Equal(t, int64(0), latest.BlockNumber) + assert.Equal(t, int64(256), latest.FinalizedBlockNumber) + + // DisconnectAll resets latest ChainInfo + rpc.DisconnectAll() + latest, highestUserObservations = rpc.GetInterceptedChainInfo() + assert.Equal(t, int64(0), highestUserObservations.BlockNumber) + assert.Equal(t, int64(128), highestUserObservations.FinalizedBlockNumber) + + assert.Equal(t, int64(0), latest.BlockNumber) + assert.Equal(t, int64(0), latest.FinalizedBlockNumber) +} diff --git a/core/chains/evm/client/sub_error_wrapper.go b/core/chains/evm/client/sub_error_wrapper.go deleted file mode 100644 index 689991ce70f..00000000000 --- a/core/chains/evm/client/sub_error_wrapper.go +++ /dev/null @@ -1,77 +0,0 @@ -package client - -import ( - "fmt" - - commontypes "github.com/smartcontractkit/chainlink/v2/common/types" -) - -// subErrorWrapper - adds specified prefix to a subscription error -type subErrorWrapper struct { - sub commontypes.Subscription - errorPrefix string - - done chan struct{} - unSub chan struct{} - errorCh chan error -} - -func newSubscriptionErrorWrapper(sub commontypes.Subscription, errorPrefix string) *subErrorWrapper { - s := &subErrorWrapper{ - sub: sub, - errorPrefix: errorPrefix, - done: make(chan struct{}), - unSub: make(chan struct{}), - errorCh: make(chan error), - } - - go func() { - for { - select { - // sub.Err channel is closed by sub.Unsubscribe - case err, ok := <-sub.Err(): - if !ok { - // might only happen if someone terminated wrapped subscription - // in any case - do our best to release resources - // we can't call Unsubscribe on root sub as this might cause panic - close(s.errorCh) - close(s.done) - return - } - - select { - case s.errorCh <- fmt.Errorf("%s: %w", s.errorPrefix, err): - case <-s.unSub: - s.close() - return - } - case <-s.unSub: - s.close() - return - } - } - }() - - return s -} - -func (s *subErrorWrapper) close() { - s.sub.Unsubscribe() - close(s.errorCh) - close(s.done) -} - -func (s *subErrorWrapper) Unsubscribe() { - select { - // already unsubscribed - case <-s.done: - // signal unsubscribe - case s.unSub <- struct{}{}: - // wait for unsubscribe to complete - <-s.done - } -} - -func (s *subErrorWrapper) Err() <-chan error { - return s.errorCh -} diff --git a/core/chains/evm/client/sub_error_wrapper_test.go b/core/chains/evm/client/sub_error_wrapper_test.go deleted file mode 100644 index 5dd81069572..00000000000 --- a/core/chains/evm/client/sub_error_wrapper_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package client - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" -) - -func TestSubscriptionErrorWrapper(t *testing.T) { - t.Parallel() - t.Run("Unsubscribe wrapper releases resources", func(t *testing.T) { - t.Parallel() - - mockedSub := NewMockSubscription() - const prefix = "RPC returned error" - wrapper := newSubscriptionErrorWrapper(mockedSub, prefix) - wrapper.Unsubscribe() - - // mock's resources were relased - assert.True(t, mockedSub.unsubscribed) - _, ok := <-mockedSub.Err() - assert.False(t, ok) - // wrapper's channels are closed - _, ok = <-wrapper.Err() - assert.False(t, ok) - // subsequence unsubscribe does not causes panic - 
wrapper.Unsubscribe() - }) - t.Run("Unsubscribe interrupts error delivery", func(t *testing.T) { - t.Parallel() - sub := NewMockSubscription() - const prefix = "RPC returned error" - wrapper := newSubscriptionErrorWrapper(sub, prefix) - sub.Errors <- fmt.Errorf("error") - - wrapper.Unsubscribe() - _, ok := <-wrapper.Err() - assert.False(t, ok) - }) - t.Run("Successfully wraps error", func(t *testing.T) { - t.Parallel() - sub := NewMockSubscription() - const prefix = "RPC returned error" - wrapper := newSubscriptionErrorWrapper(sub, prefix) - sub.Errors <- fmt.Errorf("root error") - - err, ok := <-wrapper.Err() - assert.True(t, ok) - assert.Equal(t, "RPC returned error: root error", err.Error()) - - wrapper.Unsubscribe() - _, ok = <-wrapper.Err() - assert.False(t, ok) - }) - t.Run("Unsubscribe on root does not cause panic", func(t *testing.T) { - t.Parallel() - mockedSub := NewMockSubscription() - wrapper := newSubscriptionErrorWrapper(mockedSub, "") - - mockedSub.Unsubscribe() - // mock's resources were released - assert.True(t, mockedSub.unsubscribed) - _, ok := <-mockedSub.Err() - assert.False(t, ok) - // wrapper's channels are eventually closed - tests.AssertEventually(t, func() bool { - _, ok = <-wrapper.Err() - return !ok - }) - }) -} diff --git a/core/chains/evm/client/chain_id_sub.go b/core/chains/evm/client/sub_forwarder.go similarity index 51% rename from core/chains/evm/client/chain_id_sub.go rename to core/chains/evm/client/sub_forwarder.go index c3162b300c7..93e9b106b4a 100644 --- a/core/chains/evm/client/chain_id_sub.go +++ b/core/chains/evm/client/sub_forwarder.go @@ -1,42 +1,40 @@ package client import ( - "math/big" - "github.com/ethereum/go-ethereum" - - evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" - ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" ) -var _ ethereum.Subscription = &chainIDSubForwarder{} +var _ ethereum.Subscription = &subForwarder[any]{} -// chainIDSubForwarder wraps a head subscription in order to intercept and augment each head with chainID before forwarding. -type chainIDSubForwarder struct { - chainID *big.Int - destCh chan<- *evmtypes.Head +// subForwarder wraps a subscription in order to intercept and augment each result before forwarding. +type subForwarder[T any] struct { + destCh chan<- T - srcCh chan *evmtypes.Head + srcCh chan T srcSub ethereum.Subscription + interceptResult func(T) T + interceptError func(error) error + done chan struct{} err chan error unSub chan struct{} } -func newChainIDSubForwarder(chainID *big.Int, ch chan<- *evmtypes.Head) *chainIDSubForwarder { - return &chainIDSubForwarder{ - chainID: chainID, - destCh: ch, - srcCh: make(chan *evmtypes.Head), - done: make(chan struct{}), - err: make(chan error), - unSub: make(chan struct{}, 1), +func newSubForwarder[T any](destCh chan<- T, interceptResult func(T) T, interceptError func(error) error) *subForwarder[T] { + return &subForwarder[T]{ + interceptResult: interceptResult, + interceptError: interceptError, + destCh: destCh, + srcCh: make(chan T), + done: make(chan struct{}), + err: make(chan error), + unSub: make(chan struct{}, 1), } } // start spawns the forwarding loop for sub. 
-func (c *chainIDSubForwarder) start(sub ethereum.Subscription, err error) error { +func (c *subForwarder[T]) start(sub ethereum.Subscription, err error) error { if err != nil { close(c.srcCh) return err @@ -48,7 +46,7 @@ func (c *chainIDSubForwarder) start(sub ethereum.Subscription, err error) error // forwardLoop receives from src, adds the chainID, and then sends to dest. // It also handles Unsubscribing, which may interrupt either forwarding operation. -func (c *chainIDSubForwarder) forwardLoop() { +func (c *subForwarder[T]) forwardLoop() { // the error channel must be closed when unsubscribing defer close(c.err) defer close(c.done) @@ -56,6 +54,9 @@ func (c *chainIDSubForwarder) forwardLoop() { for { select { case err := <-c.srcSub.Err(): + if c.interceptError != nil { + err = c.interceptError(err) + } select { case c.err <- err: case <-c.unSub: @@ -64,7 +65,9 @@ func (c *chainIDSubForwarder) forwardLoop() { return case h := <-c.srcCh: - h.EVMChainID = ubig.New(c.chainID) + if c.interceptResult != nil { + h = c.interceptResult(h) + } select { case c.destCh <- h: case <-c.unSub: @@ -79,7 +82,7 @@ func (c *chainIDSubForwarder) forwardLoop() { } } -func (c *chainIDSubForwarder) Unsubscribe() { +func (c *subForwarder[T]) Unsubscribe() { // tell forwardLoop to unsubscribe select { case c.unSub <- struct{}{}: @@ -90,6 +93,6 @@ func (c *chainIDSubForwarder) Unsubscribe() { <-c.done } -func (c *chainIDSubForwarder) Err() <-chan error { +func (c *subForwarder[T]) Err() <-chan error { return c.err } diff --git a/core/chains/evm/client/sub_forwarder_test.go b/core/chains/evm/client/sub_forwarder_test.go new file mode 100644 index 00000000000..1bc0122603b --- /dev/null +++ b/core/chains/evm/client/sub_forwarder_test.go @@ -0,0 +1,190 @@ +package client + +import ( + "errors" + "fmt" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" + + commontypes "github.com/smartcontractkit/chainlink/v2/common/types" + evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" + ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" +) + +func TestChainIDSubForwarder(t *testing.T) { + t.Parallel() + + newChainIDSubForwarder := func(chainID *big.Int, ch chan<- *evmtypes.Head) *subForwarder[*evmtypes.Head] { + return newSubForwarder(ch, func(head *evmtypes.Head) *evmtypes.Head { + head.EVMChainID = ubig.New(chainID) + return head + }, nil) + } + + chainID := big.NewInt(123) + + t.Run("unsubscribe forwarder", func(t *testing.T) { + t.Parallel() + + ch := make(chan *evmtypes.Head) + forwarder := newChainIDSubForwarder(chainID, ch) + sub := NewMockSubscription() + err := forwarder.start(sub, nil) + assert.NoError(t, err) + forwarder.Unsubscribe() + + assert.True(t, sub.unsubscribed) + _, ok := <-sub.Err() + assert.False(t, ok) + _, ok = <-forwarder.Err() + assert.False(t, ok) + }) + + t.Run("unsubscribe forwarder with error", func(t *testing.T) { + t.Parallel() + + ch := make(chan *evmtypes.Head) + forwarder := newChainIDSubForwarder(chainID, ch) + sub := NewMockSubscription() + err := forwarder.start(sub, nil) + assert.NoError(t, err) + sub.Errors <- errors.New("boo") + forwarder.Unsubscribe() + + assert.True(t, sub.unsubscribed) + _, ok := <-sub.Err() + assert.False(t, ok) + _, ok = <-forwarder.Err() + assert.False(t, ok) + }) + + t.Run("unsubscribe forwarder with message", func(t *testing.T) { + t.Parallel() + + ch := 
make(chan *evmtypes.Head)
+		forwarder := newChainIDSubForwarder(chainID, ch)
+		sub := NewMockSubscription()
+		err := forwarder.start(sub, nil)
+		assert.NoError(t, err)
+		forwarder.srcCh <- &evmtypes.Head{}
+		forwarder.Unsubscribe()
+
+		assert.True(t, sub.unsubscribed)
+		_, ok := <-sub.Err()
+		assert.False(t, ok)
+		_, ok = <-forwarder.Err()
+		assert.False(t, ok)
+	})
+
+	t.Run("non nil error parameter", func(t *testing.T) {
+		t.Parallel()
+
+		ch := make(chan *evmtypes.Head)
+		forwarder := newChainIDSubForwarder(chainID, ch)
+		sub := NewMockSubscription()
+		errIn := errors.New("foo")
+		errOut := forwarder.start(sub, errIn)
+		assert.Equal(t, errIn, errOut)
+	})
+
+	t.Run("forwarding", func(t *testing.T) {
+		t.Parallel()
+
+		ch := make(chan *evmtypes.Head)
+		forwarder := newChainIDSubForwarder(chainID, ch)
+		sub := NewMockSubscription()
+		err := forwarder.start(sub, nil)
+		assert.NoError(t, err)
+
+		head := &evmtypes.Head{
+			ID: 1,
+		}
+		forwarder.srcCh <- head
+		receivedHead := <-ch
+		assert.Equal(t, head, receivedHead)
+		assert.Equal(t, ubig.New(chainID), receivedHead.EVMChainID)
+
+		expectedErr := errors.New("error")
+		sub.Errors <- expectedErr
+		receivedErr := <-forwarder.Err()
+		assert.Equal(t, expectedErr, receivedErr)
+	})
+}
+
+func TestSubscriptionErrorWrapper(t *testing.T) {
+	t.Parallel()
+	newSubscriptionErrorWrapper := func(t *testing.T, sub commontypes.Subscription, errorPrefix string) ethereum.Subscription {
+		ch := make(chan *evmtypes.Head)
+		result := newSubForwarder(ch, nil, func(err error) error {
+			return fmt.Errorf("%s: %w", errorPrefix, err)
+		})
+		require.NoError(t, result.start(sub, nil))
+		return result
+	}
+	t.Run("Unsubscribe wrapper releases resources", func(t *testing.T) {
+		t.Parallel()
+
+		mockedSub := NewMockSubscription()
+		const prefix = "RPC returned error"
+		wrapper := newSubscriptionErrorWrapper(t, mockedSub, prefix)
+		wrapper.Unsubscribe()
+
+		// mock's resources were released
+		assert.True(t, mockedSub.unsubscribed)
+		_, ok := <-mockedSub.Err()
+		assert.False(t, ok)
+		// wrapper's channels are closed
+		_, ok = <-wrapper.Err()
+		assert.False(t, ok)
+		// a subsequent unsubscribe does not cause a panic
+		wrapper.Unsubscribe()
+	})
+	t.Run("Unsubscribe interrupts error delivery", func(t *testing.T) {
+		t.Parallel()
+		sub := NewMockSubscription()
+		const prefix = "RPC returned error"
+		wrapper := newSubscriptionErrorWrapper(t, sub, prefix)
+		sub.Errors <- fmt.Errorf("error")
+
+		wrapper.Unsubscribe()
+		_, ok := <-wrapper.Err()
+		assert.False(t, ok)
+	})
+	t.Run("Successfully wraps error", func(t *testing.T) {
+		t.Parallel()
+		sub := NewMockSubscription()
+		const prefix = "RPC returned error"
+		wrapper := newSubscriptionErrorWrapper(t, sub, prefix)
+		sub.Errors <- fmt.Errorf("root error")
+
+		err, ok := <-wrapper.Err()
+		assert.True(t, ok)
+		assert.Equal(t, "RPC returned error: root error", err.Error())
+
+		wrapper.Unsubscribe()
+		_, ok = <-wrapper.Err()
+		assert.False(t, ok)
+	})
+	t.Run("Unsubscribe on root does not cause panic", func(t *testing.T) {
+		t.Parallel()
+		mockedSub := NewMockSubscription()
+		wrapper := newSubscriptionErrorWrapper(t, mockedSub, "")
+
+		mockedSub.Unsubscribe()
+		// mock's resources were released
+		assert.True(t, mockedSub.unsubscribed)
+		_, ok := <-mockedSub.Err()
+		assert.False(t, ok)
+		// wrapper's channels are eventually closed
+		tests.AssertEventually(t, func() bool {
+			_, ok = <-wrapper.Err()
+			return !ok
+		})
+	})
+}
diff --git a/core/chains/evm/config/chain_scoped.go b/core/chains/evm/config/chain_scoped.go
index
8064e2de207..db598e3e82b 100644 --- a/core/chains/evm/config/chain_scoped.go +++ b/core/chains/evm/config/chain_scoped.go @@ -179,3 +179,7 @@ func (e *EVMConfig) OperatorFactoryAddress() string { func (e *EVMConfig) LogPrunePageSize() uint32 { return *e.C.LogPrunePageSize } + +func (e *EVMConfig) FinalizedBlockOffset() uint32 { + return *e.C.FinalizedBlockOffset +} diff --git a/core/chains/evm/config/chain_scoped_node_pool.go b/core/chains/evm/config/chain_scoped_node_pool.go index 50269366829..a4974366486 100644 --- a/core/chains/evm/config/chain_scoped_node_pool.go +++ b/core/chains/evm/config/chain_scoped_node_pool.go @@ -38,6 +38,12 @@ func (n *NodePoolConfig) FinalizedBlockPollInterval() time.Duration { return n.C.FinalizedBlockPollInterval.Duration() } -func (n *NodePoolConfig) Errors() ClientErrors { - return &clientErrorsConfig{c: n.C.Errors} +func (n *NodePoolConfig) Errors() ClientErrors { return &clientErrorsConfig{c: n.C.Errors} } + +func (n *NodePoolConfig) EnforceRepeatableRead() bool { + return *n.C.EnforceRepeatableRead +} + +func (n *NodePoolConfig) DeathDeclarationDelay() time.Duration { + return n.C.DeathDeclarationDelay.Duration() } diff --git a/core/chains/evm/config/config.go b/core/chains/evm/config/config.go index b44c112e204..ffb2a496baf 100644 --- a/core/chains/evm/config/config.go +++ b/core/chains/evm/config/config.go @@ -45,6 +45,7 @@ type EVM interface { OperatorFactoryAddress() string RPCDefaultBatchSize() uint32 NodeNoNewHeadsThreshold() time.Duration + FinalizedBlockOffset() uint32 IsEnabled() bool TOMLString() (string, error) @@ -170,6 +171,8 @@ type NodePool interface { NodeIsSyncingEnabled() bool FinalizedBlockPollInterval() time.Duration Errors() ClientErrors + EnforceRepeatableRead() bool + DeathDeclarationDelay() time.Duration } // TODO BCF-2509 does the chainscopedconfig really need the entire app config? 
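Aside (editor's illustration, not part of this diff): the FinalizedBlockOffset arithmetic that calculateLatestFinalized applies in the headtracker change above, reduced to plain numbers. This is a minimal, self-contained Go sketch under stated assumptions: it ignores the finalityTagBypass parameter and the getHeadAtHeight chain lookup, and cfg, finalizedNumber, and the sample values are illustrative names rather than code from this change.

	package main

	import "fmt"

	// cfg mirrors the three config accessors calculateLatestFinalized reads.
	type cfg struct {
		finalityTagEnabled   bool
		finalityDepth        int64
		finalizedBlockOffset int64
	}

	// finalizedNumber reproduces the block-number arithmetic: with a finality
	// tag, the tagged finalized block is shifted back by FinalizedBlockOffset;
	// without one, the current head is shifted back by FinalityDepth plus
	// FinalizedBlockOffset. Both paths floor at genesis (block 0).
	func finalizedNumber(c cfg, currentHead, taggedFinalized int64) int64 {
		if c.finalityTagEnabled {
			return max(taggedFinalized-c.finalizedBlockOffset, 0)
		}
		return max(currentHead-c.finalityDepth-c.finalizedBlockOffset, 0)
	}

	func main() {
		tag := cfg{finalityTagEnabled: true, finalizedBlockOffset: 16}
		depth := cfg{finalityDepth: 50, finalizedBlockOffset: 16}
		fmt.Println(finalizedNumber(tag, 1000, 900)) // 884: offset applied to the tagged block
		fmt.Println(finalizedNumber(depth, 1000, 0)) // 934: 1000 - 50 - 16
	}

With FinalizedBlockOffset = 0 both paths collapse to the previous behavior, which is why the headtracker change keeps the instant-finality shortcut only when FinalityDepth and FinalizedBlockOffset are both zero.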
diff --git a/core/chains/evm/config/config_test.go b/core/chains/evm/config/config_test.go index 26ac2db0852..ba362bda981 100644 --- a/core/chains/evm/config/config_test.go +++ b/core/chains/evm/config/config_test.go @@ -326,6 +326,8 @@ func TestNodePoolConfig(t *testing.T) { require.Equal(t, time.Duration(10000000000), cfg.EVM().NodePool().PollInterval()) require.Equal(t, uint32(5), cfg.EVM().NodePool().PollFailureThreshold()) require.Equal(t, false, cfg.EVM().NodePool().NodeIsSyncingEnabled()) + require.Equal(t, false, cfg.EVM().NodePool().EnforceRepeatableRead()) + require.Equal(t, time.Duration(10000000000), cfg.EVM().NodePool().DeathDeclarationDelay()) } func TestClientErrorsConfig(t *testing.T) { diff --git a/core/chains/evm/config/toml/config.go b/core/chains/evm/config/toml/config.go index d35f9bd0a3b..3e35bb4b55c 100644 --- a/core/chains/evm/config/toml/config.go +++ b/core/chains/evm/config/toml/config.go @@ -358,6 +358,7 @@ type Chain struct { OperatorFactoryAddress *types.EIP55Address RPCDefaultBatchSize *uint32 RPCBlockQueryDelay *uint16 + FinalizedBlockOffset *uint32 Transactions Transactions `toml:",omitempty"` BalanceMonitor BalanceMonitor `toml:",omitempty"` @@ -389,6 +390,11 @@ func (c *Chain) ValidateConfig() (err error) { Msg: "must be greater than or equal to 1"}) } + if *c.FinalizedBlockOffset > *c.HeadTracker.HistoryDepth { + err = multierr.Append(err, commonconfig.ErrInvalid{Name: "HeadTracker.HistoryDepth", Value: *c.HeadTracker.HistoryDepth, + Msg: "must be greater than or equal to FinalizedBlockOffset"}) + } + // AutoPurge configs depend on ChainType so handling validation on per chain basis if c.Transactions.AutoPurge.Enabled != nil && *c.Transactions.AutoPurge.Enabled { chainType := c.ChainType.ChainType() @@ -842,6 +848,8 @@ type NodePool struct { NodeIsSyncingEnabled *bool FinalizedBlockPollInterval *commonconfig.Duration Errors ClientErrors `toml:",omitempty"` + EnforceRepeatableRead *bool + DeathDeclarationDelay *commonconfig.Duration } func (p *NodePool) setFrom(f *NodePool) { @@ -866,6 +874,14 @@ func (p *NodePool) setFrom(f *NodePool) { if v := f.FinalizedBlockPollInterval; v != nil { p.FinalizedBlockPollInterval = v } + + if v := f.EnforceRepeatableRead; v != nil { + p.EnforceRepeatableRead = v + } + + if v := f.DeathDeclarationDelay; v != nil { + p.DeathDeclarationDelay = v + } p.Errors.setFrom(&f.Errors) } diff --git a/core/chains/evm/config/toml/defaults.go b/core/chains/evm/config/toml/defaults.go index e006babfb68..38eef40bf76 100644 --- a/core/chains/evm/config/toml/defaults.go +++ b/core/chains/evm/config/toml/defaults.go @@ -161,6 +161,9 @@ func (c *Chain) SetFrom(f *Chain) { if v := f.RPCBlockQueryDelay; v != nil { c.RPCBlockQueryDelay = v } + if v := f.FinalizedBlockOffset; v != nil { + c.FinalizedBlockOffset = v + } c.Transactions.setFrom(&f.Transactions) c.BalanceMonitor.setFrom(&f.BalanceMonitor) diff --git a/core/chains/evm/config/toml/defaults/fallback.toml b/core/chains/evm/config/toml/defaults/fallback.toml index 5a16aca091c..a11e646e08b 100644 --- a/core/chains/evm/config/toml/defaults/fallback.toml +++ b/core/chains/evm/config/toml/defaults/fallback.toml @@ -14,6 +14,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '3m' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -68,6 +69,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] 
ContractConfirmations = 4 diff --git a/core/chains/evm/forwarders/forwarder_manager_test.go b/core/chains/evm/forwarders/forwarder_manager_test.go index be8513f5925..c3fae5292a2 100644 --- a/core/chains/evm/forwarders/forwarder_manager_test.go +++ b/core/chains/evm/forwarders/forwarder_manager_test.go @@ -23,6 +23,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/forwarders" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" @@ -75,7 +76,8 @@ func TestFwdMgr_MaybeForwardTransaction(t *testing.T) { RpcBatchSize: 2, KeepFinalizedBlocksDepth: 1000, } - lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr), evmClient, lggr, lpOpts) + ht := headtracker.NewSimulatedHeadTracker(evmClient, lpOpts.UseFinalityTag, lpOpts.FinalityDepth) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr), evmClient, lggr, ht, lpOpts) fwdMgr := forwarders.NewFwdMgr(db, evmClient, lp, lggr, evmcfg.EVM()) fwdMgr.ORM = forwarders.NewORM(db) @@ -136,7 +138,8 @@ func TestFwdMgr_AccountUnauthorizedToForward_SkipsForwarding(t *testing.T) { RpcBatchSize: 2, KeepFinalizedBlocksDepth: 1000, } - lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr), evmClient, lggr, lpOpts) + ht := headtracker.NewSimulatedHeadTracker(evmClient, lpOpts.UseFinalityTag, lpOpts.FinalityDepth) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr), evmClient, lggr, ht, lpOpts) fwdMgr := forwarders.NewFwdMgr(db, evmClient, lp, lggr, evmcfg.EVM()) fwdMgr.ORM = forwarders.NewORM(db) @@ -201,7 +204,8 @@ func TestFwdMgr_InvalidForwarderForOCR2FeedsStates(t *testing.T) { RpcBatchSize: 2, KeepFinalizedBlocksDepth: 1000, } - lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr), evmClient, lggr, lpOpts) + ht := headtracker.NewSimulatedHeadTracker(evmClient, lpOpts.UseFinalityTag, lpOpts.FinalityDepth) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr), evmClient, lggr, ht, lpOpts) fwdMgr := forwarders.NewFwdMgr(db, evmClient, lp, lggr, evmcfg.EVM()) fwdMgr.ORM = forwarders.NewORM(db) diff --git a/core/chains/evm/headtracker/head_broadcaster_test.go b/core/chains/evm/headtracker/head_broadcaster_test.go index 2da41de8774..7ac61ab34b0 100644 --- a/core/chains/evm/headtracker/head_broadcaster_test.go +++ b/core/chains/evm/headtracker/head_broadcaster_test.go @@ -60,8 +60,7 @@ func TestHeadBroadcaster_Subscribe(t *testing.T) { chchHeaders <- args.Get(1).(chan<- *evmtypes.Head) }). 
Return(sub, nil) - // 2 for initial and 2 for backfill - ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(testutils.Head(1), nil).Times(4) + ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(testutils.Head(1), nil) sub.On("Unsubscribe").Return() sub.On("Err").Return(nil) diff --git a/core/chains/evm/headtracker/head_listener_test.go b/core/chains/evm/headtracker/head_listener_test.go index 2e1a9c81d5e..29b090bbffe 100644 --- a/core/chains/evm/headtracker/head_listener_test.go +++ b/core/chains/evm/headtracker/head_listener_test.go @@ -70,7 +70,7 @@ func Test_HeadListener_HappyPath(t *testing.T) { done := func() { doneAwaiter.ItHappened() } - go hl.ListenForNewHeads(handler, done) + go hl.ListenForNewHeads(func() {}, handler, done) subscribeAwaiter.AwaitOrFail(t, tests.WaitTimeout(t)) require.Eventually(t, hl.Connected, tests.WaitTimeout(t), tests.TestInterval) @@ -129,7 +129,7 @@ func Test_HeadListener_NotReceivingHeads(t *testing.T) { done := func() { doneAwaiter.ItHappened() } - go hl.ListenForNewHeads(handler, done) + go hl.ListenForNewHeads(func() {}, handler, done) subscribeAwaiter.AwaitOrFail(t, tests.WaitTimeout(t)) @@ -190,7 +190,7 @@ func Test_HeadListener_SubscriptionErr(t *testing.T) { subscribeAwaiter.ItHappened() }) go func() { - hl.ListenForNewHeads(hnh, done) + hl.ListenForNewHeads(func() {}, hnh, done) }() // Put a head on the channel to ensure we test all code paths diff --git a/core/chains/evm/headtracker/head_saver_test.go b/core/chains/evm/headtracker/head_saver_test.go index 9be9f838d08..43e79235e90 100644 --- a/core/chains/evm/headtracker/head_saver_test.go +++ b/core/chains/evm/headtracker/head_saver_test.go @@ -47,6 +47,7 @@ type config struct { finalityDepth uint32 blockEmissionIdleWarningThreshold time.Duration finalityTagEnabled bool + finalizedBlockOffset uint32 } func (c *config) FinalityDepth() uint32 { return c.finalityDepth } @@ -58,6 +59,10 @@ func (c *config) FinalityTagEnabled() bool { return c.finalityTagEnabled } +func (c *config) FinalizedBlockOffset() uint32 { + return c.finalizedBlockOffset +} + type saverOpts struct { headTrackerConfig *headTrackerConfig } diff --git a/core/chains/evm/headtracker/head_tracker.go b/core/chains/evm/headtracker/head_tracker.go index 357c4dae99a..d6c2cdc64e7 100644 --- a/core/chains/evm/headtracker/head_tracker.go +++ b/core/chains/evm/headtracker/head_tracker.go @@ -49,7 +49,10 @@ func (*nullTracker) Ready() error { return nil } func (*nullTracker) HealthReport() map[string]error { return map[string]error{} } func (*nullTracker) Name() string { return "" } func (*nullTracker) SetLogLevel(zapcore.Level) {} -func (*nullTracker) Backfill(ctx context.Context, headWithChain, latestFinalized *evmtypes.Head) (err error) { +func (*nullTracker) Backfill(ctx context.Context, headWithChain *evmtypes.Head) (err error) { return nil } func (*nullTracker) LatestChain() *evmtypes.Head { return nil } +func (*nullTracker) LatestAndFinalizedBlock(ctx context.Context) (latest, finalized *evmtypes.Head, err error) { + return nil, nil, nil +} diff --git a/core/chains/evm/headtracker/head_tracker_test.go b/core/chains/evm/headtracker/head_tracker_test.go index 4da8d27c552..81ba3ea85f0 100644 --- a/core/chains/evm/headtracker/head_tracker_test.go +++ b/core/chains/evm/headtracker/head_tracker_test.go @@ -3,6 +3,7 @@ package headtracker_test import ( "context" "errors" + "fmt" "math/big" "slices" "sync" @@ -20,17 +21,19 @@ import ( "go.uber.org/zap/zaptest/observer" "golang.org/x/exp/maps" + 
"github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/utils/mailbox/mailboxtest" + "github.com/jmoiron/sqlx" commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config" - "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/services" "github.com/smartcontractkit/chainlink-common/pkg/utils/mailbox" - "github.com/smartcontractkit/chainlink-common/pkg/utils/mailbox/mailboxtest" "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" htmocks "github.com/smartcontractkit/chainlink/v2/common/headtracker/mocks" commontypes "github.com/smartcontractkit/chainlink/v2/common/headtracker/types" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/evmtest" evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" evmclimocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks" @@ -60,6 +63,12 @@ func TestHeadTracker_New(t *testing.T) { ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(testutils.Head(0), nil) // finalized ethClient.On("HeadByNumber", mock.Anything, big.NewInt(0)).Return(testutils.Head(0), nil) + mockEth := &testutils.MockEth{ + EthClient: ethClient, + } + ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + Maybe(). + Return(mockEth.NewSub(t), nil) orm := headtracker.NewORM(*testutils.FixtureChainID, db) assert.Nil(t, orm.IdempotentInsertHead(tests.Context(t), testutils.Head(1))) @@ -71,9 +80,10 @@ func TestHeadTracker_New(t *testing.T) { ht := createHeadTracker(t, ethClient, evmcfg.EVM(), evmcfg.EVM().HeadTracker(), orm) ht.Start(t) - latest := ht.headSaver.LatestChain() - require.NotNil(t, latest) - assert.Equal(t, last.Number, latest.Number) + tests.AssertEventually(t, func() bool { + latest := ht.headSaver.LatestChain() + return latest != nil && last.Number == latest.Number + }) } func TestHeadTracker_MarkFinalized_MarksAndTrimsTable(t *testing.T) { @@ -126,7 +136,8 @@ func TestHeadTracker_Get(t *testing.T) { {"nil no initial", nil, nil, big.NewInt(0)}, } - for _, test := range cases { + for i := range cases { + test := cases[i] t.Run(test.name, func(t *testing.T) { db := pgtest.NewSqlxDB(t) config := testutils.NewTestChainScopedConfig(t, nil) @@ -146,9 +157,9 @@ func TestHeadTracker_Get(t *testing.T) { }, func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, ) - ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(testutils.Head(0), nil) + ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(testutils.Head(0), nil).Maybe() - fnCall := ethClient.On("HeadByNumber", mock.Anything, mock.Anything) + fnCall := ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Maybe() fnCall.RunFn = func(args mock.Arguments) { num := args.Get(1).(*big.Int) fnCall.ReturnArguments = mock.Arguments{testutils.Head(num.Int64()), nil} @@ -166,7 +177,10 @@ func TestHeadTracker_Get(t *testing.T) { assert.NoError(t, err) } - assert.Equal(t, test.want, ht.headSaver.LatestChain().ToInt()) + tests.AssertEventually(t, func() bool { + latest := ht.headSaver.LatestChain().ToInt() + return latest != nil && test.want.Cmp(latest) == 0 + }) }) } } @@ -226,19 +240,12 @@ func TestHeadTracker_Start(t *testing.T) { } }) orm := headtracker.NewORM(*testutils.FixtureChainID, db) - ethClient := testutils.NewEthClientMockWithDefaultChain(t) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + mockEth := &testutils.MockEth{EthClient: ethClient} + sub := 
mockEth.NewSub(t) + ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil).Maybe() return createHeadTracker(t, ethClient, config.EVM(), config.EVM().HeadTracker(), orm) } - - t.Run("Fail start if context was canceled", func(t *testing.T) { - ctx, cancel := context.WithCancel(tests.Context(t)) - ht := newHeadTracker(t, opts{}) - ht.ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Run(func(args mock.Arguments) { - cancel() - }).Return(testutils.Head(0), context.Canceled) - err := ht.headTracker.Start(ctx) - require.ErrorIs(t, err, context.Canceled) - }) t.Run("Starts even if failed to get initialHead", func(t *testing.T) { ht := newHeadTracker(t, opts{}) ht.ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(testutils.Head(0), errors.New("failed to get init head")) @@ -268,20 +275,6 @@ func TestHeadTracker_Start(t *testing.T) { ht.Start(t) tests.AssertLogEventually(t, ht.observer, "Error handling initial head") }) - t.Run("Logs error if finality gap is too big", func(t *testing.T) { - ht := newHeadTracker(t, opts{FinalityTagEnable: ptr(true), FinalityTagBypass: ptr(false), MaxAllowedFinalityDepth: ptr(uint32(10))}) - head := testutils.Head(1000) - ht.ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(head, nil).Once() - ht.ethClient.On("LatestFinalizedBlock", mock.Anything).Return(testutils.Head(989), nil).Once() - ht.ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(nil, errors.New("failed to connect")).Maybe() - ht.Start(t) - tests.AssertEventually(t, func() bool { - // must exactly match the error passed to logger - field := zap.String("err", "failed to calculate latest finalized head: gap between latest finalized block (989) and current head (1000) is too large (> 10)") - filtered := ht.observer.FilterMessage("Error handling initial head").FilterField(field) - return filtered.Len() > 0 - }) - }) t.Run("Happy path (finality tag)", func(t *testing.T) { head := testutils.Head(1000) ht := newHeadTracker(t, opts{FinalityTagEnable: ptr(true), FinalityTagBypass: ptr(false)}) @@ -877,10 +870,23 @@ func TestHeadTracker_Backfill(t *testing.T) { ctx := tests.Context(t) type opts struct { - Heads []evmtypes.Head + Heads []evmtypes.Head + FinalityTagEnabled bool + FinalizedBlockOffset uint32 + FinalityDepth uint32 + MaxAllowedFinalityDepth uint32 } newHeadTrackerUniverse := func(t *testing.T, opts opts) *headTrackerUniverse { - evmcfg := testutils.NewTestChainScopedConfig(t, nil) + evmcfg := testutils.NewTestChainScopedConfig(t, func(c *toml.EVMConfig) { + c.FinalityTagEnabled = ptr(opts.FinalityTagEnabled) + c.FinalizedBlockOffset = ptr(opts.FinalizedBlockOffset) + c.FinalityDepth = ptr(opts.FinalityDepth) + c.HeadTracker.FinalityTagBypass = ptr(false) + if opts.MaxAllowedFinalityDepth > 0 { + c.HeadTracker.MaxAllowedFinalityDepth = ptr(opts.MaxAllowedFinalityDepth) + } + }) + db := pgtest.NewSqlxDB(t) orm := headtracker.NewORM(*testutils.FixtureChainID, db) for i := range opts.Heads { @@ -894,26 +900,44 @@ func TestHeadTracker_Backfill(t *testing.T) { return ht } + t.Run("returns error if failed to get latestFinalized block", func(t *testing.T) { + htu := newHeadTrackerUniverse(t, opts{FinalityTagEnabled: true}) + const expectedError = "failed to fetch latest finalized block" + htu.ethClient.On("LatestFinalizedBlock", mock.Anything).Return(nil, errors.New(expectedError)).Once() + + err := htu.headTracker.Backfill(ctx, &h12) + require.ErrorContains(t, err, expectedError) + }) t.Run("returns error if 
latestFinalized is not valid", func(t *testing.T) { - htu := newHeadTrackerUniverse(t, opts{}) + htu := newHeadTrackerUniverse(t, opts{FinalityTagEnabled: true}) + htu.ethClient.On("LatestFinalizedBlock", mock.Anything).Return(nil, nil).Once() + + err := htu.headTracker.Backfill(ctx, &h12) + require.EqualError(t, err, "failed to calculate finalized block: failed to get valid latest finalized block") + }) + t.Run("Returns error if finality gap is too big", func(t *testing.T) { + htu := newHeadTrackerUniverse(t, opts{FinalityTagEnabled: true, MaxAllowedFinalityDepth: 2}) + htu.ethClient.On("LatestFinalizedBlock", mock.Anything).Return(&h9, nil).Once() - err := htu.headTracker.Backfill(ctx, &h12, nil) - require.EqualError(t, err, "can not perform backfill without a valid latestFinalized head") + err := htu.headTracker.Backfill(ctx, &h12) + require.EqualError(t, err, "gap between latest finalized block (9) and current head (12) is too large (> 2)") }) t.Run("Returns error if finalized head is ahead of canonical", func(t *testing.T) { - htu := newHeadTrackerUniverse(t, opts{}) + htu := newHeadTrackerUniverse(t, opts{FinalityTagEnabled: true}) + htu.ethClient.On("LatestFinalizedBlock", mock.Anything).Return(&h14Orphaned, nil).Once() - err := htu.headTracker.Backfill(ctx, &h12, &h14Orphaned) + err := htu.headTracker.Backfill(ctx, &h12) require.EqualError(t, err, "invariant violation: expected head of canonical chain to be ahead of the latestFinalized") }) t.Run("Returns error if finalizedHead is not present in the canonical chain", func(t *testing.T) { - htu := newHeadTrackerUniverse(t, opts{Heads: heads}) + htu := newHeadTrackerUniverse(t, opts{Heads: heads, FinalityTagEnabled: true}) + htu.ethClient.On("LatestFinalizedBlock", mock.Anything).Return(&h14Orphaned, nil).Once() - err := htu.headTracker.Backfill(ctx, &h15, &h14Orphaned) + err := htu.headTracker.Backfill(ctx, &h15) require.EqualError(t, err, "expected finalized block to be present in canonical chain") }) t.Run("Marks all blocks in chain that are older than finalized", func(t *testing.T) { - htu := newHeadTrackerUniverse(t, opts{Heads: heads}) + htu := newHeadTrackerUniverse(t, opts{Heads: heads, FinalityTagEnabled: true}) assertFinalized := func(expectedFinalized bool, msg string, heads ...evmtypes.Head) { for _, h := range heads { @@ -922,18 +946,20 @@ func TestHeadTracker_Backfill(t *testing.T) { } } - err := htu.headTracker.Backfill(ctx, &h15, &h14) + htu.ethClient.On("LatestFinalizedBlock", mock.Anything).Return(&h14, nil).Once() + err := htu.headTracker.Backfill(ctx, &h15) require.NoError(t, err) assertFinalized(true, "expected heads to be marked as finalized after backfill", h14, h13, h12, h11) assertFinalized(false, "expected heads to remain unfinalized", h15, head10) }) t.Run("fetches a missing head", func(t *testing.T) { - htu := newHeadTrackerUniverse(t, opts{Heads: heads}) + htu := newHeadTrackerUniverse(t, opts{Heads: heads, FinalityTagEnabled: true}) + htu.ethClient.On("LatestFinalizedBlock", mock.Anything).Return(&h9, nil).Once() htu.ethClient.On("HeadByHash", mock.Anything, head10.Hash). 
Return(&head10, nil) - err := htu.headTracker.Backfill(ctx, &h12, &h9) + err := htu.headTracker.Backfill(ctx, &h12) require.NoError(t, err) h := htu.headSaver.Chain(h12.Hash) @@ -950,16 +976,16 @@ func TestHeadTracker_Backfill(t *testing.T) { require.NoError(t, err) assert.Equal(t, int64(10), writtenHead.Number) }) - t.Run("fetches only heads that are missing", func(t *testing.T) { - htu := newHeadTrackerUniverse(t, opts{Heads: heads}) + htu := newHeadTrackerUniverse(t, opts{Heads: heads, FinalityTagEnabled: true}) + htu.ethClient.On("LatestFinalizedBlock", mock.Anything).Return(&head8, nil).Once() htu.ethClient.On("HeadByHash", mock.Anything, head10.Hash). Return(&head10, nil) htu.ethClient.On("HeadByHash", mock.Anything, head8.Hash). Return(&head8, nil) - err := htu.headTracker.Backfill(ctx, &h15, &head8) + err := htu.headTracker.Backfill(ctx, &h15) require.NoError(t, err) h := htu.headSaver.Chain(h15.Hash) @@ -971,7 +997,8 @@ func TestHeadTracker_Backfill(t *testing.T) { }) t.Run("abandons backfill and returns error if the eth node returns not found", func(t *testing.T) { - htu := newHeadTrackerUniverse(t, opts{Heads: heads}) + htu := newHeadTrackerUniverse(t, opts{Heads: heads, FinalityTagEnabled: true}) + htu.ethClient.On("LatestFinalizedBlock", mock.Anything).Return(&head8, nil).Once() htu.ethClient.On("HeadByHash", mock.Anything, head10.Hash). Return(&head10, nil). Once() @@ -979,9 +1006,9 @@ func TestHeadTracker_Backfill(t *testing.T) { Return(nil, ethereum.NotFound). Once() - err := htu.headTracker.Backfill(ctx, &h12, &head8) + err := htu.headTracker.Backfill(ctx, &h12) require.Error(t, err) - require.EqualError(t, err, "fetchAndSaveHead failed: not found") + require.ErrorContains(t, err, "fetchAndSaveHead failed: not found") h := htu.headSaver.Chain(h12.Hash) @@ -991,7 +1018,8 @@ func TestHeadTracker_Backfill(t *testing.T) { }) t.Run("abandons backfill and returns error if the context time budget is exceeded", func(t *testing.T) { - htu := newHeadTrackerUniverse(t, opts{Heads: heads}) + htu := newHeadTrackerUniverse(t, opts{Heads: heads, FinalityTagEnabled: true}) + htu.ethClient.On("LatestFinalizedBlock", mock.Anything).Return(&head8, nil).Once() htu.ethClient.On("HeadByHash", mock.Anything, head10.Hash). 
Return(&head10, nil) lctx, cancel := context.WithCancel(ctx) @@ -1000,9 +1028,9 @@ func TestHeadTracker_Backfill(t *testing.T) { cancel() }) - err := htu.headTracker.Backfill(lctx, &h12, &head8) + err := htu.headTracker.Backfill(lctx, &h12) require.Error(t, err) - require.EqualError(t, err, "fetchAndSaveHead failed: context canceled") + require.ErrorContains(t, err, "fetchAndSaveHead failed: context canceled") h := htu.headSaver.Chain(h12.Hash) @@ -1010,17 +1038,17 @@ func TestHeadTracker_Backfill(t *testing.T) { assert.Equal(t, 4, int(h.ChainLength())) assert.Equal(t, int64(9), h.EarliestInChain().BlockNumber()) }) - t.Run("abandons backfill and returns error when fetching a block by hash fails, indicating a reorg", func(t *testing.T) { - htu := newHeadTrackerUniverse(t, opts{}) + htu := newHeadTrackerUniverse(t, opts{FinalityTagEnabled: true}) + htu.ethClient.On("LatestFinalizedBlock", mock.Anything).Return(&h11, nil).Once() htu.ethClient.On("HeadByHash", mock.Anything, h14.Hash).Return(&h14, nil).Once() htu.ethClient.On("HeadByHash", mock.Anything, h13.Hash).Return(&h13, nil).Once() htu.ethClient.On("HeadByHash", mock.Anything, h12.Hash).Return(nil, errors.New("not found")).Once() - err := htu.headTracker.Backfill(ctx, &h15, &h11) + err := htu.headTracker.Backfill(ctx, &h15) require.Error(t, err) - require.EqualError(t, err, "fetchAndSaveHead failed: not found") + require.ErrorContains(t, err, "fetchAndSaveHead failed: not found") h := htu.headSaver.Chain(h14.Hash) @@ -1029,9 +1057,10 @@ func TestHeadTracker_Backfill(t *testing.T) { assert.Equal(t, int64(13), h.EarliestInChain().BlockNumber()) }) t.Run("marks head as finalized, if latestHead = finalizedHead (0 finality depth)", func(t *testing.T) { - htu := newHeadTrackerUniverse(t, opts{Heads: []evmtypes.Head{h15}}) + htu := newHeadTrackerUniverse(t, opts{Heads: []evmtypes.Head{h15}, FinalityTagEnabled: true}) finalizedH15 := h15 // copy h15 to have different addresses - err := htu.headTracker.Backfill(ctx, &h15, &finalizedH15) + htu.ethClient.On("LatestFinalizedBlock", mock.Anything).Return(&finalizedH15, nil).Once() + err := htu.headTracker.Backfill(ctx, &h15) require.NoError(t, err) h := htu.headSaver.LatestChain() @@ -1042,12 +1071,215 @@ func TestHeadTracker_Backfill(t *testing.T) { assert.Equal(t, h15.BlockNumber(), h.BlockNumber()) assert.Equal(t, h15.Hash, h.Hash) }) + t.Run("marks block as finalized according to FinalizedBlockOffset (finality tag)", func(t *testing.T) { + htu := newHeadTrackerUniverse(t, opts{Heads: []evmtypes.Head{h15}, FinalityTagEnabled: true, FinalizedBlockOffset: 2}) + htu.ethClient.On("LatestFinalizedBlock", mock.Anything).Return(&h14, nil).Once() + // calculateLatestFinalizedBlock fetches blocks at LatestFinalized - FinalizedBlockOffset + htu.ethClient.On("HeadByNumber", mock.Anything, big.NewInt(h12.Number)).Return(&h12, nil).Once() + // backfill from 15 to 12 + htu.ethClient.On("HeadByHash", mock.Anything, h12.Hash).Return(&h12, nil).Once() + htu.ethClient.On("HeadByHash", mock.Anything, h13.Hash).Return(&h13, nil).Once() + htu.ethClient.On("HeadByHash", mock.Anything, h14.Hash).Return(&h14, nil).Once() + err := htu.headTracker.Backfill(ctx, &h15) + require.NoError(t, err) + + h := htu.headSaver.LatestChain() + // h - must contain 15, 14, 13, 12 and only 12 is finalized + assert.Equal(t, 4, int(h.ChainLength())) + for ; h.Hash != h12.Hash; h = h.Parent { + assert.False(t, h.IsFinalized) + } + + assert.True(t, h.IsFinalized) + assert.Equal(t, h12.BlockNumber(), h.BlockNumber()) + assert.Equal(t, 
h12.Hash, h.Hash) + }) + t.Run("marks block as finalized according to FinalizedBlockOffset (finality depth)", func(t *testing.T) { + htu := newHeadTrackerUniverse(t, opts{Heads: []evmtypes.Head{h15}, FinalityDepth: 1, FinalizedBlockOffset: 2}) + htu.ethClient.On("HeadByNumber", mock.Anything, big.NewInt(12)).Return(&h12, nil).Once() + + // backfill from 15 to 12 + htu.ethClient.On("HeadByHash", mock.Anything, h14.Hash).Return(&h14, nil).Once() + htu.ethClient.On("HeadByHash", mock.Anything, h13.Hash).Return(&h13, nil).Once() + htu.ethClient.On("HeadByHash", mock.Anything, h12.Hash).Return(&h12, nil).Once() + err := htu.headTracker.Backfill(ctx, &h15) + require.NoError(t, err) + + h := htu.headSaver.LatestChain() + // h - must contain 15, 14, 13, 12 and only 12 is finalized + assert.Equal(t, 4, int(h.ChainLength())) + for ; h.Hash != h12.Hash; h = h.Parent { + assert.False(t, h.IsFinalized) + } + + assert.True(t, h.IsFinalized) + assert.Equal(t, h12.BlockNumber(), h.BlockNumber()) + assert.Equal(t, h12.Hash, h.Hash) + }) + t.Run("marks block as finalized according to FinalizedBlockOffset even with instant finality", func(t *testing.T) { + htu := newHeadTrackerUniverse(t, opts{Heads: []evmtypes.Head{h15}, FinalityDepth: 0, FinalizedBlockOffset: 2}) + htu.ethClient.On("HeadByNumber", mock.Anything, big.NewInt(13)).Return(&h13, nil).Once() + + // backfill from 15 to 13 + htu.ethClient.On("HeadByHash", mock.Anything, h14.Hash).Return(&h14, nil).Once() + htu.ethClient.On("HeadByHash", mock.Anything, h13.Hash).Return(&h13, nil).Once() + err := htu.headTracker.Backfill(ctx, &h15) + require.NoError(t, err) + + h := htu.headSaver.LatestChain() + // h - must contain 15, 14, 13, only 13 is finalized + assert.Equal(t, 3, int(h.ChainLength())) + for ; h.Hash != h13.Hash; h = h.Parent { + assert.False(t, h.IsFinalized) + } + + assert.True(t, h.IsFinalized) + assert.Equal(t, h13.BlockNumber(), h.BlockNumber()) + assert.Equal(t, h13.Hash, h.Hash) + }) +} + +func TestHeadTracker_LatestAndFinalizedBlock(t *testing.T) { + t.Parallel() + + ctx := tests.Context(t) + + h11 := testutils.Head(11) + h11.ParentHash = utils.NewHash() + + h12 := testutils.Head(12) + h12.ParentHash = h11.Hash + + h13 := testutils.Head(13) + h13.ParentHash = h12.Hash + + type opts struct { + Heads []evmtypes.Head + FinalityTagEnabled bool + FinalizedBlockOffset uint32 + FinalityDepth uint32 + } + + newHeadTrackerUniverse := func(t *testing.T, opts opts) *headTrackerUniverse { + evmcfg := testutils.NewTestChainScopedConfig(t, func(c *toml.EVMConfig) { + c.FinalityTagEnabled = ptr(opts.FinalityTagEnabled) + c.FinalizedBlockOffset = ptr(opts.FinalizedBlockOffset) + c.FinalityDepth = ptr(opts.FinalityDepth) + }) + + db := pgtest.NewSqlxDB(t) + orm := headtracker.NewORM(*testutils.FixtureChainID, db) + for i := range opts.Heads { + require.NoError(t, orm.IdempotentInsertHead(tests.Context(t), &opts.Heads[i])) + } + ethClient := evmtest.NewEthClientMock(t) + ethClient.On("ConfiguredChainID", mock.Anything).Return(testutils.FixtureChainID, nil) + ht := createHeadTracker(t, ethClient, evmcfg.EVM(), evmcfg.EVM().HeadTracker(), orm) + _, err := ht.headSaver.Load(tests.Context(t), 0) + require.NoError(t, err) + return ht + } + t.Run("returns error if failed to get latest block", func(t *testing.T) { + htu := newHeadTrackerUniverse(t, opts{FinalityTagEnabled: true}) + const expectedError = "failed to fetch latest block" + htu.ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(nil, errors.New(expectedError)).Once() + + _, _, err := 
htu.headTracker.LatestAndFinalizedBlock(ctx) + require.ErrorContains(t, err, expectedError) + }) + t.Run("returns error if latest block is invalid", func(t *testing.T) { + htu := newHeadTrackerUniverse(t, opts{FinalityTagEnabled: true}) + htu.ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(nil, nil).Once() + + _, _, err := htu.headTracker.LatestAndFinalizedBlock(ctx) + require.ErrorContains(t, err, "expected latest block to be valid") + }) + t.Run("returns error if failed to get latest finalized (finality tag)", func(t *testing.T) { + htu := newHeadTrackerUniverse(t, opts{FinalityTagEnabled: true}) + htu.ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(h13, nil).Once() + const expectedError = "failed to get latest finalized block" + htu.ethClient.On("LatestFinalizedBlock", mock.Anything).Return(nil, fmt.Errorf(expectedError)).Once() + + _, _, err := htu.headTracker.LatestAndFinalizedBlock(ctx) + require.ErrorContains(t, err, expectedError) + }) + t.Run("returns error if latest finalized block is not valid (finality tag)", func(t *testing.T) { + htu := newHeadTrackerUniverse(t, opts{FinalityTagEnabled: true}) + htu.ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(h13, nil).Once() + htu.ethClient.On("LatestFinalizedBlock", mock.Anything).Return(nil, nil).Once() + + _, _, err := htu.headTracker.LatestAndFinalizedBlock(ctx) + require.ErrorContains(t, err, "failed to get valid latest finalized block") + }) + t.Run("returns latest finalized block as is if FinalizedBlockOffset is 0 (finality tag)", func(t *testing.T) { + htu := newHeadTrackerUniverse(t, opts{FinalityTagEnabled: true}) + htu.ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(h13, nil).Once() + htu.ethClient.On("LatestFinalizedBlock", mock.Anything).Return(h11, nil).Once() + + actualL, actualLF, err := htu.headTracker.LatestAndFinalizedBlock(ctx) + require.NoError(t, err) + assert.Equal(t, actualL, h13) + assert.Equal(t, actualLF, h11) + }) + t.Run("returns latest finalized block with offset from cache (finality tag)", func(t *testing.T) { + htu := newHeadTrackerUniverse(t, opts{FinalityTagEnabled: true, FinalizedBlockOffset: 1, Heads: []evmtypes.Head{*h13, *h12, *h11}}) + htu.ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(h13, nil).Once() + htu.ethClient.On("LatestFinalizedBlock", mock.Anything).Return(h12, nil).Once() + + actualL, actualLF, err := htu.headTracker.LatestAndFinalizedBlock(ctx) + require.NoError(t, err) + assert.Equal(t, actualL.Number, h13.Number) + assert.Equal(t, actualLF.Number, h11.Number) + }) + t.Run("returns latest finalized block with offset from RPC (finality tag)", func(t *testing.T) { + htu := newHeadTrackerUniverse(t, opts{FinalityTagEnabled: true, FinalizedBlockOffset: 2, Heads: []evmtypes.Head{*h13, *h12, *h11}}) + htu.ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(h13, nil).Once() + htu.ethClient.On("LatestFinalizedBlock", mock.Anything).Return(h12, nil).Once() + h10 := testutils.Head(10) + htu.ethClient.On("HeadByNumber", mock.Anything, big.NewInt(10)).Return(h10, nil).Once() + + actualL, actualLF, err := htu.headTracker.LatestAndFinalizedBlock(ctx) + require.NoError(t, err) + assert.Equal(t, actualL.Number, h13.Number) + assert.Equal(t, actualLF.Number, h10.Number) + }) + t.Run("returns current head for both latest and finalized for FD = 0 (finality depth)", func(t *testing.T) { + htu := newHeadTrackerUniverse(t, opts{}) + htu.ethClient.On("HeadByNumber", mock.Anything, 
(*big.Int)(nil)).Return(h13, nil).Once() + + actualL, actualLF, err := htu.headTracker.LatestAndFinalizedBlock(ctx) + require.NoError(t, err) + assert.Equal(t, actualL.Number, h13.Number) + assert.Equal(t, actualLF.Number, h13.Number) + }) + t.Run("returns latest finalized block with offset from cache (finality depth)", func(t *testing.T) { + htu := newHeadTrackerUniverse(t, opts{FinalityDepth: 1, FinalizedBlockOffset: 1, Heads: []evmtypes.Head{*h13, *h12, *h11}}) + htu.ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(h13, nil).Once() + + actualL, actualLF, err := htu.headTracker.LatestAndFinalizedBlock(ctx) + require.NoError(t, err) + assert.Equal(t, actualL.Number, h13.Number) + assert.Equal(t, actualLF.Number, h11.Number) + }) + t.Run("returns latest finalized block with offset from RPC (finality depth)", func(t *testing.T) { + htu := newHeadTrackerUniverse(t, opts{FinalityDepth: 1, FinalizedBlockOffset: 2, Heads: []evmtypes.Head{*h13, *h12, *h11}}) + htu.ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(h13, nil).Once() + h10 := testutils.Head(10) + htu.ethClient.On("HeadByNumber", mock.Anything, big.NewInt(10)).Return(h10, nil).Once() + + actualL, actualLF, err := htu.headTracker.LatestAndFinalizedBlock(ctx) + require.NoError(t, err) + assert.Equal(t, actualL.Number, h13.Number) + assert.Equal(t, actualLF.Number, h10.Number) + }) } // BenchmarkHeadTracker_Backfill - benchmarks HeadTracker's Backfill with focus on efficiency after initial // backfill on start up func BenchmarkHeadTracker_Backfill(b *testing.B) { - evmcfg := testutils.NewTestChainScopedConfig(b, nil) + evmcfg := testutils.NewTestChainScopedConfig(b, func(c *toml.EVMConfig) { + c.FinalityTagEnabled = ptr(true) + }) db := pgtest.NewSqlxDB(b) chainID := big.NewInt(evmclient.NullClientChainID) orm := headtracker.NewORM(*chainID, db) @@ -1068,15 +1300,17 @@ func BenchmarkHeadTracker_Backfill(b *testing.B) { number := hash.Big().Int64() return makeBlock(number), nil }) + ethClient.On("LatestFinalizedBlock", mock.Anything).Return(finalized, nil).Once() // run initial backfill to populate the database - err := ht.headTracker.Backfill(ctx, latest, finalized) + err := ht.headTracker.Backfill(ctx, latest) require.NoError(b, err) b.ResetTimer() // focus benchmark on processing of a new latest block for i := 0; i < b.N; i++ { latest = makeBlock(int64(finalityDepth + i)) finalized = makeBlock(int64(i + 1)) - err := ht.headTracker.Backfill(ctx, latest, finalized) + ethClient.On("LatestFinalizedBlock", mock.Anything).Return(finalized, nil).Once() + err := ht.headTracker.Backfill(ctx, latest) require.NoError(b, err) } } @@ -1129,8 +1363,8 @@ type headTrackerUniverse struct { ethClient *evmclimocks.Client } -func (u *headTrackerUniverse) Backfill(ctx context.Context, head, finalizedHead *evmtypes.Head) error { - return u.headTracker.Backfill(ctx, head, finalizedHead) +func (u *headTrackerUniverse) Backfill(ctx context.Context, head *evmtypes.Head) error { + return u.headTracker.Backfill(ctx, head) } func (u *headTrackerUniverse) Start(t *testing.T) { diff --git a/core/chains/evm/headtracker/simulated_head_tracker.go b/core/chains/evm/headtracker/simulated_head_tracker.go new file mode 100644 index 00000000000..e1e550de992 --- /dev/null +++ b/core/chains/evm/headtracker/simulated_head_tracker.go @@ -0,0 +1,53 @@ +package headtracker + +import ( + "context" + "fmt" + "math/big" + + evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + evmtypes 
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" +) + +// simulatedHeadTracker - simplified version of HeadTracker that works with simulated backed +type simulatedHeadTracker struct { + ec evmclient.Client + useFinalityTag bool + finalityDepth int64 +} + +func NewSimulatedHeadTracker(ec evmclient.Client, useFinalityTag bool, finalityDepth int64) *simulatedHeadTracker { + return &simulatedHeadTracker{ + ec: ec, + useFinalityTag: useFinalityTag, + finalityDepth: finalityDepth, + } +} + +func (ht *simulatedHeadTracker) LatestAndFinalizedBlock(ctx context.Context) (*evmtypes.Head, *evmtypes.Head, error) { + latest, err := ht.ec.HeadByNumber(ctx, nil) + if err != nil { + return nil, nil, err + } + + if latest == nil { + return nil, nil, fmt.Errorf("expected latest block to be valid") + } + + var finalizedBlock *evmtypes.Head + if ht.useFinalityTag { + finalizedBlock, err = ht.ec.LatestFinalizedBlock(ctx) + } else { + finalizedBlock, err = ht.ec.HeadByNumber(ctx, big.NewInt(max(latest.Number-ht.finalityDepth, 0))) + } + + if err != nil { + return nil, nil, fmt.Errorf("simulatedHeadTracker failed to get finalized block") + } + + if finalizedBlock == nil { + return nil, nil, fmt.Errorf("expected finalized block to be valid") + } + + return latest, finalizedBlock, nil +} diff --git a/core/chains/evm/logpoller/helper_test.go b/core/chains/evm/logpoller/helper_test.go index 3b2a10df6c8..3f589d84d56 100644 --- a/core/chains/evm/logpoller/helper_test.go +++ b/core/chains/evm/logpoller/helper_test.go @@ -23,6 +23,7 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_emitter" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" @@ -68,10 +69,11 @@ func SetupTH(t testing.TB, opts logpoller.Opts) TestHarness { head := esc.Backend().Blockchain().CurrentHeader() esc.Backend().Blockchain().SetFinalized(head) + headTracker := headtracker.NewSimulatedHeadTracker(esc, opts.UseFinalityTag, opts.FinalityDepth) if opts.PollPeriod == 0 { opts.PollPeriod = 1 * time.Hour } - lp := logpoller.NewLogPoller(o, esc, lggr, opts) + lp := logpoller.NewLogPoller(o, esc, lggr, headTracker, opts) emitterAddress1, _, emitter1, err := log_emitter.DeployLogEmitter(owner, ec) require.NoError(t, err) emitterAddress2, _, emitter2, err := log_emitter.DeployLogEmitter(owner, ec) diff --git a/core/chains/evm/logpoller/log_poller.go b/core/chains/evm/logpoller/log_poller.go index bc0dd40e289..333c5b70f8e 100644 --- a/core/chains/evm/logpoller/log_poller.go +++ b/core/chains/evm/logpoller/log_poller.go @@ -27,6 +27,7 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/types/query" "github.com/smartcontractkit/chainlink-common/pkg/utils" "github.com/smartcontractkit/chainlink-common/pkg/utils/mathutil" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" @@ -88,6 +89,10 @@ type Client interface { ConfiguredChainID() *big.Int } +type HeadTracker interface { + LatestAndFinalizedBlock(ctx context.Context) (latest, finalized *evmtypes.Head, err error) +} + var ( _ LogPollerTest = &logPoller{} ErrReplayRequestAborted = pkgerrors.New("aborted, replay 
request cancelled") @@ -100,6 +105,7 @@ type logPoller struct { services.StateMachine ec Client orm ORM + headTracker HeadTracker lggr logger.SugaredLogger pollPeriod time.Duration // poll period set by block production rate useFinalityTag bool // indicates whether logPoller should use chain's finality or pick a fixed depth for finality @@ -150,11 +156,12 @@ type Opts struct { // // How fast that can be done depends largely on network speed and DB, but even for the fastest // support chain, polygon, which has 2s block times, we need RPCs roughly with <= 500ms latency -func NewLogPoller(orm ORM, ec Client, lggr logger.Logger, opts Opts) *logPoller { +func NewLogPoller(orm ORM, ec Client, lggr logger.Logger, headTracker HeadTracker, opts Opts) *logPoller { return &logPoller{ stopCh: make(chan struct{}), ec: ec, orm: orm, + headTracker: headTracker, lggr: logger.Sugared(logger.Named(lggr, "LogPoller")), replayStart: make(chan int64), replayComplete: make(chan error), @@ -1007,38 +1014,15 @@ func (lp *logPoller) PollAndSaveLogs(ctx context.Context, currentBlockNumber int } } -// Returns information about latestBlock, latestFinalizedBlockNumber -// If finality tag is not enabled, latestFinalizedBlockNumber is calculated as latestBlockNumber - lp.finalityDepth (configured param) -// Otherwise, we return last finalized block number returned from chain +// Returns information about latestBlock, latestFinalizedBlockNumber provided by HeadTracker func (lp *logPoller) latestBlocks(ctx context.Context) (*evmtypes.Head, int64, error) { - // If finality is not enabled, we can only fetch the latest block - if !lp.useFinalityTag { - // Example: - // finalityDepth = 2 - // Blocks: 1->2->3->4->5(latestBlock) - // latestFinalizedBlockNumber would be 3 - latestBlock, err := lp.ec.HeadByNumber(ctx, nil) - if err != nil { - return nil, 0, err - } - if latestBlock == nil { - // Shouldn't happen with a real client, but still better rather to - // return error than panic - return nil, 0, errors.New("latest block is nil") - } - // If chain has fewer blocks than finalityDepth, return 0 - return latestBlock, mathutil.Max(latestBlock.Number-lp.finalityDepth, 0), nil - } - - // If finality is enabled, we need to get the latest and finalized blocks. 
- blocks, err := lp.batchFetchBlocks(ctx, []string{rpc.LatestBlockNumber.String(), rpc.FinalizedBlockNumber.String()}, 2) + latest, finalized, err := lp.headTracker.LatestAndFinalizedBlock(ctx) if err != nil { - return nil, 0, err + return nil, 0, fmt.Errorf("failed to get latest and latest finalized block from HeadTracker: %w", err) } - latest := blocks[0] - finalized := blocks[1] - lp.lggr.Debugw("Latest blocks read from chain", "latest", latest.Number, "finalized", finalized.Number) - return latest, finalized.Number, nil + + lp.lggr.Debugw("Latest blocks read from chain", "latest", latest.Number, "finalized", finalized.BlockNumber()) + return latest, finalized.BlockNumber(), nil } // Find the first place where our chain and their chain have the same block, diff --git a/core/chains/evm/logpoller/log_poller_internal_test.go b/core/chains/evm/logpoller/log_poller_internal_test.go index bc295105874..448710b93f3 100644 --- a/core/chains/evm/logpoller/log_poller_internal_test.go +++ b/core/chains/evm/logpoller/log_poller_internal_test.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "math/big" - "reflect" "strings" "sync" "testing" @@ -22,10 +21,13 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/zap/zapcore" + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" + "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/services" "github.com/smartcontractkit/chainlink-common/pkg/services/servicetest" + htMocks "github.com/smartcontractkit/chainlink/v2/common/headtracker/mocks" evmclimocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks" evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" @@ -73,7 +75,7 @@ func TestLogPoller_RegisterFilter(t *testing.T) { RpcBatchSize: 2, KeepFinalizedBlocksDepth: 1000, } - lp := NewLogPoller(orm, nil, lggr, lpOpts) + lp := NewLogPoller(orm, nil, lggr, nil, lpOpts) // We expect a zero Filter if nothing registered yet. 
f := lp.Filter(nil, nil, nil) @@ -208,8 +210,10 @@ func TestLogPoller_BackupPollerStartup(t *testing.T) { db := pgtest.NewSqlxDB(t) orm := NewORM(chainID, db, lggr) latestBlock := int64(4) + const finalityDepth = 2 - head := evmtypes.Head{Number: latestBlock} + head := &evmtypes.Head{Number: latestBlock} + finalizedHead := &evmtypes.Head{Number: latestBlock - finalityDepth} events := []common.Hash{EmitterABI.Events["Log1"].ID} log1 := types.Log{ Index: 0, @@ -222,20 +226,22 @@ func TestLogPoller_BackupPollerStartup(t *testing.T) { } ec := evmclimocks.NewClient(t) - ec.On("HeadByNumber", mock.Anything, mock.Anything).Return(&head, nil) ec.On("FilterLogs", mock.Anything, mock.Anything).Return([]types.Log{log1}, nil) ec.On("ConfiguredChainID").Return(chainID, nil) + headTracker := htMocks.NewHeadTracker[*evmtypes.Head, common.Hash](t) + headTracker.On("LatestAndFinalizedBlock", mock.Anything).Return(head, finalizedHead, nil) + ctx := testutils.Context(t) lpOpts := Opts{ PollPeriod: time.Hour, - FinalityDepth: 2, + FinalityDepth: finalityDepth, BackfillBatchSize: 3, RpcBatchSize: 2, KeepFinalizedBlocksDepth: 1000, BackupPollerBlockDelay: 0, } - lp := NewLogPoller(orm, ec, lggr, lpOpts) + lp := NewLogPoller(orm, ec, lggr, headTracker, lpOpts) lp.BackupPollAndSaveLogs(ctx) assert.Equal(t, int64(0), lp.backupPollerNextBlock) assert.Equal(t, 1, observedLogs.FilterMessageSnippet("ran before first successful log poller run").Len()) @@ -309,7 +315,14 @@ func TestLogPoller_Replay(t *testing.T) { KeepFinalizedBlocksDepth: 20, BackupPollerBlockDelay: 0, } - lp := NewLogPoller(orm, ec, lggr, lpOpts) + headTracker := htMocks.NewHeadTracker[*evmtypes.Head, common.Hash](t) + + headTracker.On("LatestAndFinalizedBlock", mock.Anything).Return(func(ctx context.Context) (*evmtypes.Head, *evmtypes.Head, error) { + headCopy := head + finalized := &evmtypes.Head{Number: headCopy.Number - lpOpts.FinalityDepth} + return &headCopy, finalized, nil + }) + lp := NewLogPoller(orm, ec, lggr, headTracker, lpOpts) { ctx := testutils.Context(t) @@ -533,10 +546,6 @@ func (lp *logPoller) reset() { func Test_latestBlockAndFinalityDepth(t *testing.T) { lggr := logger.Test(t) - chainID := testutils.FixtureChainID - db := pgtest.NewSqlxDB(t) - orm := NewORM(chainID, db, lggr) - ctx := testutils.Context(t) lpOpts := Opts{ PollPeriod: time.Hour, @@ -545,71 +554,27 @@ func Test_latestBlockAndFinalityDepth(t *testing.T) { KeepFinalizedBlocksDepth: 20, } - t.Run("pick latest block from chain and use finality from config with finality disabled", func(t *testing.T) { - head := evmtypes.Head{Number: 4} - - lpOpts.UseFinalityTag = false - lpOpts.FinalityDepth = int64(3) - ec := evmclimocks.NewClient(t) - ec.On("HeadByNumber", mock.Anything, mock.Anything).Return(&head, nil) + t.Run("headTracker returns an error", func(t *testing.T) { + headTracker := htMocks.NewHeadTracker[*evmtypes.Head, common.Hash](t) + const expectedError = "finalized block is not available yet" + headTracker.On("LatestAndFinalizedBlock", mock.Anything).Return(&evmtypes.Head{}, &evmtypes.Head{}, fmt.Errorf(expectedError)) - lp := NewLogPoller(orm, ec, lggr, lpOpts) - latestBlock, lastFinalizedBlockNumber, err := lp.latestBlocks(ctx) - require.NoError(t, err) - require.Equal(t, latestBlock.Number, head.Number) - require.Equal(t, lpOpts.FinalityDepth, latestBlock.Number-lastFinalizedBlockNumber) + lp := NewLogPoller(nil, nil, lggr, headTracker, lpOpts) + _, _, err := lp.latestBlocks(tests.Context(t)) + require.ErrorContains(t, err, expectedError) }) - - t.Run("finality 
tags in use", func(t *testing.T) { - t.Run("client returns data properly", func(t *testing.T) { - expectedLatestBlockNumber := int64(20) - expectedLastFinalizedBlockNumber := int64(12) - ec := evmclimocks.NewClient(t) - ec.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { - return len(b) == 2 && - reflect.DeepEqual(b[0].Args, []interface{}{"latest", false}) && - reflect.DeepEqual(b[1].Args, []interface{}{"finalized", false}) - })).Return(nil).Run(func(args mock.Arguments) { - elems := args.Get(1).([]rpc.BatchElem) - // Latest block details - *(elems[0].Result.(*evmtypes.Head)) = evmtypes.Head{Number: expectedLatestBlockNumber, Hash: utils.RandomBytes32()} - // Finalized block details - *(elems[1].Result.(*evmtypes.Head)) = evmtypes.Head{Number: expectedLastFinalizedBlockNumber, Hash: utils.RandomBytes32()} - }) - - lpOpts.UseFinalityTag = true - lp := NewLogPoller(orm, ec, lggr, lpOpts) - - latestBlock, lastFinalizedBlockNumber, err := lp.latestBlocks(ctx) - require.NoError(t, err) - require.Equal(t, expectedLatestBlockNumber, latestBlock.Number) - require.Equal(t, expectedLastFinalizedBlockNumber, lastFinalizedBlockNumber) - }) - - t.Run("client returns error for at least one of the calls", func(t *testing.T) { - ec := evmclimocks.NewClient(t) - ec.On("BatchCallContext", mock.Anything, mock.Anything).Return(nil).Run(func(args mock.Arguments) { - elems := args.Get(1).([]rpc.BatchElem) - // Latest block details - *(elems[0].Result.(*evmtypes.Head)) = evmtypes.Head{Number: 10} - // Finalized block details - elems[1].Error = fmt.Errorf("some error") - }) - - lpOpts.UseFinalityTag = true - lp := NewLogPoller(orm, ec, lggr, lpOpts) - _, _, err := lp.latestBlocks(ctx) - require.Error(t, err) - }) - - t.Run("BatchCall returns an error", func(t *testing.T) { - ec := evmclimocks.NewClient(t) - ec.On("BatchCallContext", mock.Anything, mock.Anything).Return(fmt.Errorf("some error")) - lpOpts.UseFinalityTag = true - lp := NewLogPoller(orm, ec, lggr, lpOpts) - _, _, err := lp.latestBlocks(ctx) - require.Error(t, err) - }) + t.Run("headTracker returns valid chain", func(t *testing.T) { + headTracker := htMocks.NewHeadTracker[*evmtypes.Head, common.Hash](t) + finalizedBlock := &evmtypes.Head{Number: 2, IsFinalized: true} + head := &evmtypes.Head{Number: 10} + headTracker.On("LatestAndFinalizedBlock", mock.Anything).Return(head, finalizedBlock, nil) + + lp := NewLogPoller(nil, nil, lggr, headTracker, lpOpts) + latestBlock, finalizedBlockNumber, err := lp.latestBlocks(tests.Context(t)) + require.NoError(t, err) + require.NotNil(t, latestBlock) + assert.Equal(t, head.BlockNumber(), latestBlock.BlockNumber()) + assert.Equal(t, finalizedBlock.Number, finalizedBlockNumber) }) } @@ -653,7 +618,7 @@ func Test_FetchBlocks(t *testing.T) { errors.New("Received unfinalized block 9 while expecting finalized block (latestFinalizedBlockNumber = 5)"), }} - lp := NewLogPoller(orm, ec, lggr, lpOpts) + lp := NewLogPoller(orm, ec, lggr, nil, lpOpts) for _, tc := range cases { for _, lp.useFinalityTag = range []bool{false, true} { blockValidationReq := latestBlock @@ -693,7 +658,7 @@ func benchmarkFilter(b *testing.B, nFilters, nAddresses, nEvents int) { RpcBatchSize: 2, KeepFinalizedBlocksDepth: 1000, } - lp := NewLogPoller(nil, nil, lggr, lpOpts) + lp := NewLogPoller(nil, nil, lggr, nil, lpOpts) for i := 0; i < nFilters; i++ { var addresses []common.Address var events []common.Hash diff --git a/core/chains/evm/logpoller/log_poller_test.go b/core/chains/evm/logpoller/log_poller_test.go 
index 6ef16921503..c83efc83b39 100644 --- a/core/chains/evm/logpoller/log_poller_test.go +++ b/core/chains/evm/logpoller/log_poller_test.go @@ -28,7 +28,9 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" commonutils "github.com/smartcontractkit/chainlink-common/pkg/utils" + htMocks "github.com/smartcontractkit/chainlink/v2/common/headtracker/mocks" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" @@ -717,7 +719,9 @@ func TestLogPoller_SynchronizedWithGeth(t *testing.T) { RpcBatchSize: 2, KeepFinalizedBlocksDepth: 1000, } - lp := logpoller.NewLogPoller(orm, client.NewSimulatedBackendClient(t, ec, chainID), lggr, lpOpts) + simulatedClient := client.NewSimulatedBackendClient(t, ec, chainID) + ht := headtracker.NewSimulatedHeadTracker(simulatedClient, lpOpts.UseFinalityTag, lpOpts.FinalityDepth) + lp := logpoller.NewLogPoller(orm, simulatedClient, lggr, ht, lpOpts) for i := 0; i < finalityDepth; i++ { // Have enough blocks that we could reorg the full finalityDepth-1. ec.Commit() } @@ -1493,7 +1497,7 @@ func TestLogPoller_DBErrorHandling(t *testing.T) { RpcBatchSize: 2, KeepFinalizedBlocksDepth: 1000, } - lp := logpoller.NewLogPoller(o, client.NewSimulatedBackendClient(t, ec, chainID2), lggr, lpOpts) + lp := logpoller.NewLogPoller(o, client.NewSimulatedBackendClient(t, ec, chainID2), lggr, nil, lpOpts) err = lp.Replay(ctx, 5) // block number too high require.ErrorContains(t, err, "Invalid replay block number") @@ -1548,7 +1552,8 @@ func TestTooManyLogResults(t *testing.T) { RpcBatchSize: 10, KeepFinalizedBlocksDepth: 1000, } - lp := logpoller.NewLogPoller(o, ec, lggr, lpOpts) + headTracker := htMocks.NewHeadTracker[*evmtypes.Head, common.Hash](t) + lp := logpoller.NewLogPoller(o, ec, lggr, headTracker, lpOpts) expected := []int64{10, 5, 2, 1} clientErr := client.JsonError{ @@ -1557,9 +1562,13 @@ func TestTooManyLogResults(t *testing.T) { Message: "query returned more than 10000 results. Try with this block range [0x100E698, 0x100E6D4].", } + // Simulate currentBlock = 300 + head := &evmtypes.Head{Number: 300} + finalized := &evmtypes.Head{Number: head.Number - lpOpts.FinalityDepth} + headTracker.On("LatestAndFinalizedBlock", mock.Anything).Return(head, finalized, nil).Once() call1 := ec.On("HeadByNumber", mock.Anything, mock.Anything).Return(func(ctx context.Context, blockNumber *big.Int) (*evmtypes.Head, error) { if blockNumber == nil { - return &evmtypes.Head{Number: 300}, nil // Simulate currentBlock = 300 + require.FailNow(t, "unexpected call to get current head") } return &evmtypes.Head{Number: blockNumber.Int64()}, nil }) @@ -1601,9 +1610,12 @@ func TestTooManyLogResults(t *testing.T) { // Now jump to block 500, but return error no matter how small the block range gets. // Should exit the loop with a critical error instead of hanging. 
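+ // Advance the mocked HeadTracker to block 500 as well: latestBlocks now reads the current head from the head tracker, so a HeadByNumber(nil) call would indicate a regression.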
+ head = &evmtypes.Head{Number: 500} + finalized = &evmtypes.Head{Number: head.Number - lpOpts.FinalityDepth} + headTracker.On("LatestAndFinalizedBlock", mock.Anything).Return(head, finalized, nil).Once() call1.On("HeadByNumber", mock.Anything, mock.Anything).Return(func(ctx context.Context, blockNumber *big.Int) (*evmtypes.Head, error) { if blockNumber == nil { - return &evmtypes.Head{Number: 500}, nil // Simulate currentBlock = 300 + require.FailNow(t, "unexpected call to get current head") } return &evmtypes.Head{Number: blockNumber.Int64()}, nil }) @@ -1938,7 +1950,7 @@ func TestFindLCA(t *testing.T) { KeepFinalizedBlocksDepth: 1000, } - lp := logpoller.NewLogPoller(orm, ec, lggr, lpOpts) + lp := logpoller.NewLogPoller(orm, ec, lggr, nil, lpOpts) t.Run("Fails, if failed to select oldest block", func(t *testing.T) { _, err := lp.FindLCA(ctx) require.ErrorContains(t, err, "failed to select the latest block") diff --git a/core/chains/evm/txmgr/txmgr_test.go b/core/chains/evm/txmgr/txmgr_test.go index 4e187571e57..991315e2ee9 100644 --- a/core/chains/evm/txmgr/txmgr_test.go +++ b/core/chains/evm/txmgr/txmgr_test.go @@ -35,6 +35,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/chains/evm/forwarders" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" gasmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/keystore" ksmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/keystore/mocks" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" @@ -59,7 +60,9 @@ func makeTestEvmTxm( RpcBatchSize: 2, KeepFinalizedBlocksDepth: 1000, } - lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr), ethClient, lggr, lpOpts) + + ht := headtracker.NewSimulatedHeadTracker(ethClient, lpOpts.UseFinalityTag, lpOpts.FinalityDepth) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr), ethClient, lggr, ht, lpOpts) // logic for building components (from evm/evm_txm.go) ------- lggr.Infow("Initializing EVM transaction manager", diff --git a/core/chains/evm/types/models.go b/core/chains/evm/types/models.go index 2af5b81ccf8..a9e5cd5841b 100644 --- a/core/chains/evm/types/models.go +++ b/core/chains/evm/types/models.go @@ -118,16 +118,23 @@ func (h *Head) IsInChain(blockHash common.Hash) bool { // HashAtHeight returns the hash of the block at the given height, if it is in the chain. 
// If not in chain, returns the zero hash func (h *Head) HashAtHeight(blockNum int64) common.Hash { - for { + headAtHeight, err := h.HeadAtHeight(blockNum) + if err != nil { + return common.Hash{} + } + + return headAtHeight.BlockHash() +} + +func (h *Head) HeadAtHeight(blockNum int64) (commontypes.Head[common.Hash], error) { + for h != nil { if h.Number == blockNum { - return h.Hash - } - if h.Parent == nil { - break + return h, nil } + h = h.Parent } - return common.Hash{} + return nil, fmt.Errorf("failed to find head at height %d", blockNum) } // ChainLength returns the length of the chain followed by recursively looking up parents diff --git a/core/chains/evm/types/models_test.go b/core/chains/evm/types/models_test.go index db7b876bb64..6018d68f962 100644 --- a/core/chains/evm/types/models_test.go +++ b/core/chains/evm/types/models_test.go @@ -247,6 +247,26 @@ func TestHead_EarliestInChain(t *testing.T) { assert.Equal(t, int64(1), head.EarliestInChain().BlockNumber()) } +func TestHead_HeadAtHeight(t *testing.T) { + expectedResult := &evmtypes.Head{ + Hash: common.BigToHash(big.NewInt(10)), + Number: 2, + Parent: &evmtypes.Head{ + Number: 1, + }, + } + head := evmtypes.Head{ + Number: 3, + Parent: expectedResult, + } + + headAtHeight, err := head.HeadAtHeight(2) + require.NoError(t, err) + assert.Equal(t, expectedResult, headAtHeight) + _, err = head.HeadAtHeight(0) + assert.Error(t, err, "expected to get an error if head is not in the chain") +} + func TestHead_IsInChain(t *testing.T) { hash1 := utils.NewHash() hash2 := utils.NewHash() diff --git a/core/chains/legacyevm/chain.go b/core/chains/legacyevm/chain.go index 1c94e3d7dfa..b38cd2c4508 100644 --- a/core/chains/legacyevm/chain.go +++ b/core/chains/legacyevm/chain.go @@ -245,7 +245,7 @@ func newChain(ctx context.Context, cfg *evmconfig.ChainScoped, nodes []*toml.Nod LogPrunePageSize: int64(cfg.EVM().LogPrunePageSize()), BackupPollerBlockDelay: int64(cfg.EVM().BackupLogPollerBlockDelay()), } - logPoller = logpoller.NewLogPoller(logpoller.NewObservedORM(chainID, opts.DS, l), client, l, lpOpts) + logPoller = logpoller.NewLogPoller(logpoller.NewObservedORM(chainID, opts.DS, l), client, l, headTracker, lpOpts) } } diff --git a/core/config/docs/chains-evm.toml b/core/config/docs/chains-evm.toml index a222d5269d7..38c8cb8354f 100644 --- a/core/config/docs/chains-evm.toml +++ b/core/config/docs/chains-evm.toml @@ -85,6 +85,18 @@ RPCDefaultBatchSize = 250 # Default # available from the connected node via RPC, due to race conditions in the code of the remote ETH node. In this case you will get false # "zero" blocks that are missing transactions. RPCBlockQueryDelay = 1 # Default +# FinalizedBlockOffset defines the number of blocks by which the latest finalized block will be shifted/delayed. +# For example, suppose RPC returns block 100 as the latest finalized. In that case, the CL Node will treat block `100 - FinalizedBlockOffset` as the latest finalized block, and `latest - FinalityDepth - FinalizedBlockOffset` in case of `FinalityTagEnabled = false`. +# With `EnforceRepeatableRead = true`, an RPC is considered healthy only if its most recent finalized block is greater than or equal to the highest finalized block observed by the CL Node minus `FinalizedBlockOffset`. +# Higher values of `FinalizedBlockOffset` with `EnforceRepeatableRead = true` reduce the number of false `FinalizedBlockOutOfSync` declarations on healthy RPCs that are slightly lagging behind due to network delays.
+# This may increase the number of healthy RPCs and reduce the probability that the CL Node is left without any healthy alternative to the active RPC. +# CAUTION: Setting this to values higher than 0 may delay transaction creation in products (e.g., CCIP, Automation) that base their decisions on finalized on-chain events. +# PoS chains with `FinalityTagEnabled=true` and batched (epoch-based) block finalization (e.g., Ethereum Mainnet) must be treated with special care, as a minor increase in `FinalizedBlockOffset` may lead to significant delays. +# For example, let's say that `FinalizedBlockOffset = 1` and blocks are finalized in batches of 32. +# The latest finalized block on chain is 64, so block 63 is the latest finalized for the CL Node. +# Block 64 will be treated as finalized by the CL Node only when the chain's latest finalized block reaches 65. As the chain finalizes blocks in batches of 32, +# the CL Node has to wait for a whole new batch to be finalized before treating block 64 as finalized. +FinalizedBlockOffset = 0 # Default [EVM.Transactions] # ForwardersEnabled enables or disables sending transactions through forwarder contracts. @@ -368,7 +380,17 @@ NodeIsSyncingEnabled = false # Default # # Set to 0 to disable. FinalizedBlockPollInterval = '5s' # Default - +# EnforceRepeatableRead defines whether Core should only use RPCs whose most recently finalized block is greater than or equal to +# `highest finalized block - FinalizedBlockOffset`. In other words, it excludes RPCs lagging on the latest finalized +# block. +# +# Set to false to disable. +EnforceRepeatableRead = false # Default +# DeathDeclarationDelay defines the minimum duration an RPC must be in an unhealthy state before producing an error log message. +# Larger values might be helpful to reduce the noisiness of health checks like `EnforceRepeatableRead = true`, which might falsely +# trigger a `FinalizedBlockOutOfSync` declaration due to insignificant network delays in broadcasting the finalized state among RPCs. +# An unhealthy RPC will not be picked to handle a request even if this option is set to a nonzero value. +DeathDeclarationDelay = '10s' # Default # **ADVANCED** # Errors enable the node to provide custom regex patterns to match against error messages from RPCs.
[EVM.NodePool.Errors] diff --git a/core/services/chainlink/config_test.go b/core/services/chainlink/config_test.go index b6b656c3a08..4db8fbc9482 100644 --- a/core/services/chainlink/config_test.go +++ b/core/services/chainlink/config_test.go @@ -101,8 +101,9 @@ var ( { ChainID: ubig.NewI(1), Chain: evmcfg.Chain{ - FinalityDepth: ptr[uint32](26), - FinalityTagEnabled: ptr[bool](false), + FinalityDepth: ptr[uint32](26), + FinalityTagEnabled: ptr[bool](false), + FinalizedBlockOffset: ptr[uint32](12), }, Nodes: []*evmcfg.Node{ { @@ -504,6 +505,7 @@ func TestConfig_Marshal(t *testing.T) { FinalityDepth: ptr[uint32](42), FinalityTagEnabled: ptr[bool](false), FlagsContractAddress: mustAddress("0xae4E781a6218A8031764928E88d457937A954fC3"), + FinalizedBlockOffset: ptr[uint32](16), GasEstimator: evmcfg.GasEstimator{ Mode: ptr("SuggestedPrice"), @@ -593,6 +595,8 @@ func TestConfig_Marshal(t *testing.T) { LeaseDuration: &zeroSeconds, NodeIsSyncingEnabled: ptr(true), FinalizedBlockPollInterval: &second, + EnforceRepeatableRead: ptr(true), + DeathDeclarationDelay: &minute, Errors: evmcfg.ClientErrors{ NonceTooLow: ptr[string]("(: |^)nonce too low"), NonceTooHigh: ptr[string]("(: |^)nonce too high"), @@ -990,6 +994,7 @@ NoNewHeadsThreshold = '1m0s' OperatorFactoryAddress = '0xa5B85635Be42F21f94F28034B7DA440EeFF0F418' RPCDefaultBatchSize = 17 RPCBlockQueryDelay = 10 +FinalizedBlockOffset = 16 [EVM.Transactions] ForwardersEnabled = true @@ -1060,6 +1065,8 @@ SyncThreshold = 13 LeaseDuration = '0s' NodeIsSyncingEnabled = true FinalizedBlockPollInterval = '1s' +EnforceRepeatableRead = true +DeathDeclarationDelay = '1m0s' [EVM.NodePool.Errors] NonceTooLow = '(: |^)nonce too low' @@ -1287,10 +1294,11 @@ func TestConfig_Validate(t *testing.T) { - WSURL: missing: required for primary nodes - HTTPURL: missing: required for all nodes - 1.HTTPURL: missing: required for all nodes - - 1: 9 errors: + - 1: 10 errors: - ChainType: invalid value (Foo): must not be set with this chain id - Nodes: missing: must have at least one node - ChainType: invalid value (Foo): must be one of arbitrum, celo, gnosis, kroma, metis, optimismBedrock, scroll, wemix, xlayer, zkevm, zksync or omitted + - HeadTracker.HistoryDepth: invalid value (30): must be greater than or equal to FinalizedBlockOffset - GasEstimator.BumpThreshold: invalid value (0): cannot be 0 if auto-purge feature is enabled for Foo - Transactions.AutoPurge.Threshold: missing: needs to be set if auto-purge feature is enabled for Foo - Transactions.AutoPurge.MinAttempts: missing: needs to be set if auto-purge feature is enabled for Foo diff --git a/core/services/chainlink/testdata/config-full.toml b/core/services/chainlink/testdata/config-full.toml index fd51d523576..c041a2857b4 100644 --- a/core/services/chainlink/testdata/config-full.toml +++ b/core/services/chainlink/testdata/config-full.toml @@ -287,6 +287,7 @@ NoNewHeadsThreshold = '1m0s' OperatorFactoryAddress = '0xa5B85635Be42F21f94F28034B7DA440EeFF0F418' RPCDefaultBatchSize = 17 RPCBlockQueryDelay = 10 +FinalizedBlockOffset = 16 [EVM.Transactions] ForwardersEnabled = true @@ -357,6 +358,8 @@ SyncThreshold = 13 LeaseDuration = '0s' NodeIsSyncingEnabled = true FinalizedBlockPollInterval = '1s' +EnforceRepeatableRead = true +DeathDeclarationDelay = '1m0s' [EVM.NodePool.Errors] NonceTooLow = '(: |^)nonce too low' diff --git a/core/services/chainlink/testdata/config-invalid.toml b/core/services/chainlink/testdata/config-invalid.toml index 68feeeb0451..30fbfff4729 100644 --- 
a/core/services/chainlink/testdata/config-invalid.toml +++ b/core/services/chainlink/testdata/config-invalid.toml @@ -53,6 +53,7 @@ SendOnly = true ChainID = '1' ChainType = 'Foo' FinalityDepth = 32 +FinalizedBlockOffset = 64 [EVM.Transactions.AutoPurge] Enabled = true diff --git a/core/services/chainlink/testdata/config-multi-chain-effective.toml b/core/services/chainlink/testdata/config-multi-chain-effective.toml index 13aac2db7fa..22a49476dc4 100644 --- a/core/services/chainlink/testdata/config-multi-chain-effective.toml +++ b/core/services/chainlink/testdata/config-multi-chain-effective.toml @@ -274,6 +274,7 @@ NoNewHeadsThreshold = '3m0s' OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 12 [EVM.Transactions] ForwardersEnabled = false @@ -328,6 +329,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [EVM.OCR] ContractConfirmations = 4 @@ -370,6 +373,7 @@ NoNewHeadsThreshold = '3m0s' OperatorFactoryAddress = '0x8007e24251b1D2Fc518Eb843A701d9cD21fe0aA3' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [EVM.Transactions] ForwardersEnabled = false @@ -424,6 +428,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [EVM.OCR] ContractConfirmations = 4 @@ -460,6 +466,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 10 +FinalizedBlockOffset = 0 [EVM.Transactions] ForwardersEnabled = false @@ -514,6 +521,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [EVM.OCR] ContractConfirmations = 4 diff --git a/core/services/chainlink/testdata/config-multi-chain.toml b/core/services/chainlink/testdata/config-multi-chain.toml index e45255a4373..5373e0e62d3 100644 --- a/core/services/chainlink/testdata/config-multi-chain.toml +++ b/core/services/chainlink/testdata/config-multi-chain.toml @@ -39,6 +39,7 @@ CPUProfileRate = 7 ChainID = '1' FinalityDepth = 26 FinalityTagEnabled = false +FinalizedBlockOffset = 12 [[EVM.Nodes]] Name = 'primary' diff --git a/core/services/ocr2/plugins/llo/onchain_channel_definition_cache_integration_test.go b/core/services/ocr2/plugins/llo/onchain_channel_definition_cache_integration_test.go index ea8f64c02fa..8529ad89450 100644 --- a/core/services/ocr2/plugins/llo/onchain_channel_definition_cache_integration_test.go +++ b/core/services/ocr2/plugins/llo/onchain_channel_definition_cache_integration_test.go @@ -20,6 +20,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/channel_config_store" "github.com/smartcontractkit/chainlink/v2/core/internal/cltest" @@ -84,8 +85,9 @@ func Test_ChannelDefinitionCache_Integration(t *testing.T) { RpcBatchSize: 2, KeepFinalizedBlocksDepth: 1000, } + ht := headtracker.NewSimulatedHeadTracker(ethClient, lpOpts.UseFinalityTag, lpOpts.FinalityDepth) lp := logpoller.NewLogPoller( - 
logpoller.NewORM(testutils.SimulatedChainID, db, lggr), ethClient, lggr, lpOpts) + logpoller.NewORM(testutils.SimulatedChainID, db, lggr), ethClient, lggr, ht, lpOpts) servicetest.Run(t, lp) cdc := llo.NewChannelDefinitionCache(lggr, orm, lp, configStoreAddress, 0) @@ -156,8 +158,9 @@ func Test_ChannelDefinitionCache_Integration(t *testing.T) { RpcBatchSize: 2, KeepFinalizedBlocksDepth: 1000, } + ht := headtracker.NewSimulatedHeadTracker(ethClient, lpOpts.UseFinalityTag, lpOpts.FinalityDepth) lp := &mockLogPoller{ - LogPoller: logpoller.NewLogPoller(logpoller.NewORM(testutils.SimulatedChainID, db, lggr), ethClient, lggr, lpOpts), + LogPoller: logpoller.NewLogPoller(logpoller.NewORM(testutils.SimulatedChainID, db, lggr), ethClient, lggr, ht, lpOpts), LatestBlockFn: func(ctx context.Context) (int64, error) { return 0, nil }, @@ -198,7 +201,8 @@ func Test_ChannelDefinitionCache_Integration(t *testing.T) { RpcBatchSize: 2, KeepFinalizedBlocksDepth: 1000, } - lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.SimulatedChainID, db, lggr), ethClient, lggr, lpOpts) + ht := headtracker.NewSimulatedHeadTracker(ethClient, lpOpts.UseFinalityTag, lpOpts.FinalityDepth) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.SimulatedChainID, db, lggr), ethClient, lggr, ht, lpOpts) servicetest.Run(t, lp) cdc := llo.NewChannelDefinitionCache(lggr, orm, lp, configStoreAddress, channel2Block.Number().Int64()+1) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go index ace17ca2dbc..cdd800071da 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go @@ -6,8 +6,6 @@ import ( "testing" "time" - "github.com/smartcontractkit/chainlink-automation/pkg/v3/types" - "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/common" @@ -17,10 +15,13 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/zap/zapcore" + "github.com/smartcontractkit/chainlink-automation/pkg/v3/types" + ocr2keepers "github.com/smartcontractkit/chainlink-common/pkg/types/automation" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_upkeep_counter_wrapper" "github.com/smartcontractkit/chainlink/v2/core/internal/cltest" @@ -505,7 +506,8 @@ func setupDependencies(t *testing.T, db *sqlx.DB, backend *backends.SimulatedBac RpcBatchSize: 2, KeepFinalizedBlocksDepth: 1000, } - lp := logpoller.NewLogPoller(lorm, ethClient, pollerLggr, lpOpts) + ht := headtracker.NewSimulatedHeadTracker(ethClient, lpOpts.UseFinalityTag, lpOpts.FinalityDepth) + lp := logpoller.NewLogPoller(lorm, ethClient, pollerLggr, ht, lpOpts) return lp, ethClient } diff --git a/core/services/promreporter/prom_reporter_test.go b/core/services/promreporter/prom_reporter_test.go index f17b4aafed2..95164ecc9a6 100644 --- a/core/services/promreporter/prom_reporter_test.go +++ b/core/services/promreporter/prom_reporter_test.go @@ -13,6 +13,7 @@ import ( 
"github.com/smartcontractkit/chainlink-common/pkg/services/servicetest" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr" evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" @@ -45,7 +46,8 @@ func newLegacyChainContainer(t *testing.T, db *sqlx.DB) legacyevm.LegacyChainCon RpcBatchSize: 2, KeepFinalizedBlocksDepth: 1000, } - lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr), ethClient, lggr, lpOpts) + ht := headtracker.NewSimulatedHeadTracker(ethClient, lpOpts.UseFinalityTag, lpOpts.FinalityDepth) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr), ethClient, lggr, ht, lpOpts) txm, err := txmgr.NewTxm( db, diff --git a/core/services/registrysyncer/syncer_test.go b/core/services/registrysyncer/syncer_test.go index 6804e4bec44..56818c81dc3 100644 --- a/core/services/registrysyncer/syncer_test.go +++ b/core/services/registrysyncer/syncer_test.go @@ -18,7 +18,9 @@ import ( "github.com/stretchr/testify/require" "github.com/smartcontractkit/chainlink-common/pkg/types" + evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" kcr "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" @@ -83,13 +85,16 @@ func newContractReaderFactory(t *testing.T, simulatedBackend *backends.Simulated testutils.SimulatedChainID, ) db := pgtest.NewSqlxDB(t) + const finalityDepth = 2 + ht := headtracker.NewSimulatedHeadTracker(client, false, finalityDepth) lp := logpoller.NewLogPoller( logpoller.NewORM(testutils.SimulatedChainID, db, lggr), client, lggr, + ht, logpoller.Opts{ PollPeriod: 100 * time.Millisecond, - FinalityDepth: 2, + FinalityDepth: finalityDepth, BackfillBatchSize: 3, RpcBatchSize: 2, KeepFinalizedBlocksDepth: 1000, diff --git a/core/services/relay/evm/config_poller_test.go b/core/services/relay/evm/config_poller_test.go index 8c02c4e2e7e..caf48caf490 100644 --- a/core/services/relay/evm/config_poller_test.go +++ b/core/services/relay/evm/config_poller_test.go @@ -32,6 +32,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" evmClientMocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks" evmutils "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" @@ -99,7 +100,8 @@ func TestConfigPoller(t *testing.T) { RpcBatchSize: 2, KeepFinalizedBlocksDepth: 1000, } - lp = logpoller.NewLogPoller(lorm, ethClient, lggr, lpOpts) + ht := headtracker.NewSimulatedHeadTracker(ethClient, lpOpts.UseFinalityTag, lpOpts.FinalityDepth) + lp = logpoller.NewLogPoller(lorm, ethClient, lggr, ht, lpOpts) servicetest.Run(t, lp) } diff --git a/core/services/relay/evm/evmtesting/chain_reader_interface_tester.go b/core/services/relay/evm/evmtesting/chain_reader_interface_tester.go index b9f141b2d0b..92407809351 
100644 --- a/core/services/relay/evm/evmtesting/chain_reader_interface_tester.go +++ b/core/services/relay/evm/evmtesting/chain_reader_interface_tester.go @@ -17,6 +17,7 @@ import ( . "github.com/smartcontractkit/chainlink-common/pkg/types/interfacetests" //nolint common practice to import test mods with . "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/chain_reader_tester" _ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" // force binding for tx type @@ -199,7 +200,8 @@ func (it *EVMChainReaderInterfaceTester[T]) GetChainReader(t T) clcommontypes.Co RpcBatchSize: 1, KeepFinalizedBlocksDepth: 10000, } - lp := logpoller.NewLogPoller(logpoller.NewORM(it.Helper.ChainID(), db, lggr), it.client, lggr, lpOpts) + ht := headtracker.NewSimulatedHeadTracker(it.client, lpOpts.UseFinalityTag, lpOpts.FinalityDepth) + lp := logpoller.NewLogPoller(logpoller.NewORM(it.Helper.ChainID(), db, lggr), it.client, lggr, ht, lpOpts) require.NoError(t, lp.Start(ctx)) // encode and decode the config to ensure the test covers type issues diff --git a/core/services/relay/evm/functions/config_poller_test.go b/core/services/relay/evm/functions/config_poller_test.go index 2d96b2fd15d..c44d64c5ba7 100644 --- a/core/services/relay/evm/functions/config_poller_test.go +++ b/core/services/relay/evm/functions/config_poller_test.go @@ -23,6 +23,7 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/services/servicetest" evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" evmutils "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/link_token_interface" @@ -88,7 +89,8 @@ func runTest(t *testing.T, pluginType functions.FunctionsPluginType, expectedDig RpcBatchSize: 2, KeepFinalizedBlocksDepth: 1000, } - lp := logpoller.NewLogPoller(lorm, ethClient, lggr, lpOpts) + ht := headtracker.NewSimulatedHeadTracker(ethClient, lpOpts.UseFinalityTag, lpOpts.FinalityDepth) + lp := logpoller.NewLogPoller(lorm, ethClient, lggr, ht, lpOpts) servicetest.Run(t, lp) configPoller, err := functions.NewFunctionsConfigPoller(pluginType, lp, lggr) require.NoError(t, err) diff --git a/core/services/relay/evm/mercury/helpers_test.go b/core/services/relay/evm/mercury/helpers_test.go index f2923696bfc..c7c59bf2e11 100644 --- a/core/services/relay/evm/mercury/helpers_test.go +++ b/core/services/relay/evm/mercury/helpers_test.go @@ -20,6 +20,7 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/services/servicetest" evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/verifier" @@ -174,7 +175,8 @@ func SetupTH(t *testing.T, feedID common.Hash) TestHarness { RpcBatchSize: 2, KeepFinalizedBlocksDepth: 1000, } - lp := logpoller.NewLogPoller(lorm, ethClient, lggr, lpOpts) + ht := headtracker.NewSimulatedHeadTracker(ethClient, 
lpOpts.UseFinalityTag, lpOpts.FinalityDepth) + lp := logpoller.NewLogPoller(lorm, ethClient, lggr, ht, lpOpts) servicetest.Run(t, lp) configPoller, err := NewConfigPoller(testutils.Context(t), lggr, lp, verifierAddress, feedID) diff --git a/core/services/vrf/v2/listener_v2_log_listener_test.go b/core/services/vrf/v2/listener_v2_log_listener_test.go index 5b827a5291d..08652455047 100644 --- a/core/services/vrf/v2/listener_v2_log_listener_test.go +++ b/core/services/vrf/v2/listener_v2_log_listener_test.go @@ -21,6 +21,7 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" @@ -102,7 +103,8 @@ func setupVRFLogPollerListenerTH(t *testing.T, RpcBatchSize: rpcBatchSize, KeepFinalizedBlocksDepth: keepFinalizedBlocksDepth, } - lp := logpoller.NewLogPoller(o, esc, lggr, lpOpts) + ht := headtracker.NewSimulatedHeadTracker(esc, lpOpts.UseFinalityTag, lpOpts.FinalityDepth) + lp := logpoller.NewLogPoller(o, esc, lggr, ht, lpOpts) emitterAddress1, _, emitter1, err := log_emitter.DeployLogEmitter(owner, ec) require.NoError(t, err) diff --git a/core/web/resolver/testdata/config-full.toml b/core/web/resolver/testdata/config-full.toml index d69f0aa5064..76326f9a4d4 100644 --- a/core/web/resolver/testdata/config-full.toml +++ b/core/web/resolver/testdata/config-full.toml @@ -287,6 +287,7 @@ NoNewHeadsThreshold = '1m0s' OperatorFactoryAddress = '0xa5B85635Be42F21f94F28034B7DA440EeFF0F418' RPCDefaultBatchSize = 17 RPCBlockQueryDelay = 10 +FinalizedBlockOffset = 0 [EVM.Transactions] ForwardersEnabled = true @@ -356,6 +357,8 @@ SyncThreshold = 13 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [EVM.NodePool.Errors] NonceTooLow = '(: |^)nonce too low' diff --git a/core/web/resolver/testdata/config-multi-chain-effective.toml b/core/web/resolver/testdata/config-multi-chain-effective.toml index 13aac2db7fa..8e6682dee3f 100644 --- a/core/web/resolver/testdata/config-multi-chain-effective.toml +++ b/core/web/resolver/testdata/config-multi-chain-effective.toml @@ -274,6 +274,7 @@ NoNewHeadsThreshold = '3m0s' OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [EVM.Transactions] ForwardersEnabled = false @@ -328,6 +329,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [EVM.OCR] ContractConfirmations = 4 @@ -370,6 +373,7 @@ NoNewHeadsThreshold = '3m0s' OperatorFactoryAddress = '0x8007e24251b1D2Fc518Eb843A701d9cD21fe0aA3' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [EVM.Transactions] ForwardersEnabled = false @@ -424,6 +428,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [EVM.OCR] ContractConfirmations = 4 @@ -460,6 +466,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 10 +FinalizedBlockOffset = 0 [EVM.Transactions] ForwardersEnabled = false @@ 
-514,6 +521,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [EVM.OCR] ContractConfirmations = 4 diff --git a/core/web/resolver/testdata/config-multi-chain.toml b/core/web/resolver/testdata/config-multi-chain.toml index 3598e92cdc2..9abb1719402 100644 --- a/core/web/resolver/testdata/config-multi-chain.toml +++ b/core/web/resolver/testdata/config-multi-chain.toml @@ -39,6 +39,7 @@ CPUProfileRate = 7 ChainID = '1' FinalityDepth = 26 FinalityTagEnabled = false +FinalizedBlockOffset = 0 [EVM.OCR2] [EVM.OCR2.Automation] diff --git a/docs/CONFIG.md b/docs/CONFIG.md index 5f40d9fa69d..c35d90211a8 100644 --- a/docs/CONFIG.md +++ b/docs/CONFIG.md @@ -1776,6 +1776,7 @@ NoNewHeadsThreshold = '3m0s' OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -1830,6 +1831,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -1866,6 +1869,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -1920,6 +1924,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -1956,6 +1962,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -2010,6 +2017,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -2046,6 +2055,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -2100,6 +2110,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -2137,6 +2149,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -2191,6 +2204,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -2227,6 +2242,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -2281,6 +2297,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -2317,6 +2335,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -2371,6 +2390,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false 
FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -2408,6 +2429,7 @@ NoNewHeadsThreshold = '3m0s' OperatorFactoryAddress = '0x8007e24251b1D2Fc518Eb843A701d9cD21fe0aA3' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -2462,6 +2484,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -2498,6 +2522,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 2 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -2552,6 +2577,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -2587,6 +2614,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -2641,6 +2669,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -2676,6 +2706,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -2730,6 +2761,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -2766,6 +2799,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 2 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -2820,6 +2854,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -2857,6 +2893,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -2911,6 +2948,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -2947,6 +2986,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 2 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -3001,6 +3041,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -3037,6 +3079,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 10 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -3091,6 +3134,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -3127,6 +3172,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '12m0s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 15 
+FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -3181,6 +3227,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -3217,6 +3265,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '6m0s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 15 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -3271,6 +3320,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -3307,6 +3358,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 2 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -3361,6 +3413,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -3397,6 +3451,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -3451,6 +3506,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -3487,6 +3544,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '1m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -3541,6 +3599,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -3577,6 +3637,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '1m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -3631,6 +3692,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -3667,6 +3730,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '1m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -3721,6 +3785,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -3758,6 +3824,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -3812,6 +3879,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -3848,6 +3917,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -3902,6 +3972,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -3937,6 +4009,7 @@ 
NonceAutoSync = true NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -3991,6 +4064,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -4027,6 +4102,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -4081,6 +4157,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -4117,6 +4195,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '6m0s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 15 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -4171,6 +4250,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -4207,6 +4288,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -4261,6 +4343,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -4297,6 +4381,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -4351,6 +4436,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -4386,6 +4473,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -4440,6 +4528,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -4476,6 +4566,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '12m0s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -4530,6 +4621,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -4566,6 +4659,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -4620,6 +4714,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -4656,6 +4752,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '12m0s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -4710,6 +4807,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' 
+EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -4746,6 +4845,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 2 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -4800,6 +4900,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -4835,6 +4937,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -4889,6 +4992,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -4925,6 +5030,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -4979,6 +5085,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -5015,6 +5123,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -5069,6 +5178,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -5106,6 +5217,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -5160,6 +5272,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -5196,6 +5310,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '1m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -5250,6 +5365,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -5286,6 +5403,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 2 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -5340,6 +5458,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -5376,6 +5496,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 2 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -5430,6 +5551,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -5466,6 +5589,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '1m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -5520,6 +5644,8 @@ 
SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -5555,6 +5681,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -5609,6 +5736,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -5644,6 +5773,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -5698,6 +5828,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -5733,6 +5865,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -5787,6 +5920,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -5823,6 +5958,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -5877,6 +6013,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -5913,6 +6051,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 10 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -5967,6 +6106,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -6002,6 +6143,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 10 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -6056,6 +6198,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -6092,6 +6236,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -6146,6 +6291,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -6182,6 +6329,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -6236,6 +6384,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -6273,6 +6423,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay 
= 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -6327,6 +6478,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -6364,6 +6517,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -6418,6 +6572,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -6454,6 +6610,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -6508,6 +6665,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -6544,6 +6703,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -6598,6 +6758,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -6634,6 +6796,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -6688,6 +6851,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -6724,6 +6889,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -6778,6 +6944,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -6814,6 +6982,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -6868,6 +7037,8 @@ SyncThreshold = 10 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 1 @@ -6904,6 +7075,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -6958,6 +7130,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -6994,6 +7168,7 @@ NonceAutoSync = true NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [Transactions] ForwardersEnabled = false @@ -7048,6 +7223,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [OCR] ContractConfirmations = 4 @@ -7236,6 +7413,22 @@ 
block, but it is possible to receive a head BEFORE that block is actually available from the connected node via RPC, due to race conditions in the code of the remote ETH node. In this case you will get false "zero" blocks that are missing transactions. +### FinalizedBlockOffset +```toml +FinalizedBlockOffset = 0 # Default +``` +FinalizedBlockOffset defines the number of blocks by which the latest finalized block will be shifted/delayed. +For example, suppose RPC returns block 100 as the latest finalized. In that case, the CL Node will treat block `100 - FinalizedBlockOffset` as the latest finalized block, and `latest - FinalityDepth - FinalizedBlockOffset` in case of `FinalityTagEnabled = false`. +With `EnforceRepeatableRead = true`, an RPC is considered healthy only if its most recent finalized block is greater than or equal to the highest finalized block observed by the CL Node minus `FinalizedBlockOffset`. +Higher values of `FinalizedBlockOffset` with `EnforceRepeatableRead = true` reduce the number of false `FinalizedBlockOutOfSync` declarations on healthy RPCs that are slightly lagging behind due to network delays. +This may increase the number of healthy RPCs and reduce the probability that the CL Node is left without any healthy alternative to the active RPC. +CAUTION: Setting this to values higher than 0 may delay transaction creation in products (e.g., CCIP, Automation) that base their decisions on finalized on-chain events. +PoS chains with `FinalityTagEnabled=true` and batched (epoch-based) block finalization (e.g., Ethereum Mainnet) must be treated with special care, as a minor increase in `FinalizedBlockOffset` may lead to significant delays. +For example, let's say that `FinalizedBlockOffset = 1` and blocks are finalized in batches of 32. +The latest finalized block on chain is 64, so block 63 is the latest finalized for the CL Node. +Block 64 will be treated as finalized by the CL Node only when the chain's latest finalized block reaches 65. As the chain finalizes blocks in batches of 32, +the CL Node has to wait for a whole new batch to be finalized before treating block 64 as finalized. + ## EVM.Transactions ```toml [EVM.Transactions] @@ -7743,6 +7936,8 @@ SyncThreshold = 5 # Default LeaseDuration = '0s' # Default NodeIsSyncingEnabled = false # Default FinalizedBlockPollInterval = '5s' # Default +EnforceRepeatableRead = false # Default +DeathDeclarationDelay = '10s' # Default ``` The node pool manages multiple RPC endpoints. @@ -7816,6 +8011,25 @@ reported based on latest block and finality depth. Set to 0 to disable. +### EnforceRepeatableRead +```toml +EnforceRepeatableRead = false # Default +``` +EnforceRepeatableRead defines whether Core should only use RPCs whose most recently finalized block is greater than or equal to +`highest finalized block - FinalizedBlockOffset`. In other words, it excludes RPCs lagging on the latest finalized +block. + +Set to false to disable. + +### DeathDeclarationDelay +```toml +DeathDeclarationDelay = '10s' # Default +``` +DeathDeclarationDelay defines the minimum duration an RPC must be in an unhealthy state before producing an error log message. +Larger values might be helpful to reduce the noisiness of health checks like `EnforceRepeatableRead = true`, which might falsely +trigger a `FinalizedBlockOutOfSync` declaration due to insignificant network delays in broadcasting the finalized state among RPCs. +An unhealthy RPC will not be picked to handle a request even if this option is set to a nonzero value.
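The finality-offset arithmetic and the repeatable-read health condition documented above can be summarized in a short sketch. Function names here are illustrative only; they are not the actual multinode client implementation.

```go
package main

import "fmt"

// effectiveFinalized applies EVM.FinalizedBlockOffset to what the RPC
// reports, per the two modes described in the docs above.
func effectiveFinalized(latest, latestFinalized, finalityDepth, offset int64, finalityTag bool) int64 {
	if finalityTag {
		// FinalityTagEnabled = true: shift the reported finalized block.
		return latestFinalized - offset
	}
	// FinalityTagEnabled = false: depth-based finality, then the offset.
	return latest - finalityDepth - offset
}

// repeatableReadHealthy is the EnforceRepeatableRead condition: the RPC's
// most recent finalized block must be greater than or equal to the highest
// finalized block observed by the node, minus FinalizedBlockOffset.
func repeatableReadHealthy(rpcFinalized, highestObservedFinalized, offset int64) bool {
	return rpcFinalized >= highestObservedFinalized-offset
}

func main() {
	// RPC reports finalized block 100; with FinalizedBlockOffset = 2 the
	// node treats 98 as the latest finalized block.
	fmt.Println(effectiveFinalized(120, 100, 0, 2, true)) // 98

	// An RPC lagging at 98 while the node has observed 100 stays healthy
	// with FinalizedBlockOffset = 2, but is declared out of sync with 0.
	fmt.Println(repeatableReadHealthy(98, 100, 2)) // true
	fmt.Println(repeatableReadHealthy(98, 100, 0)) // false
}
```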
+ ## EVM.NodePool.Errors :warning: **_ADVANCED_**: _Do not change these settings unless you know what you are doing._ ```toml diff --git a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar index 6e2a40beb7f..e8e1046cef5 100644 --- a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar +++ b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar @@ -330,6 +330,7 @@ NoNewHeadsThreshold = '3m0s' OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [EVM.Transactions] ForwardersEnabled = false @@ -384,6 +385,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [EVM.OCR] ContractConfirmations = 4 diff --git a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar index 5db4e8527d3..c7b651e3aeb 100644 --- a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar +++ b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar @@ -330,6 +330,7 @@ NoNewHeadsThreshold = '3m0s' OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [EVM.Transactions] ForwardersEnabled = false @@ -384,6 +385,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [EVM.OCR] ContractConfirmations = 4 diff --git a/testdata/scripts/node/validate/disk-based-logging.txtar b/testdata/scripts/node/validate/disk-based-logging.txtar index bcf054cbca3..1eee9f595c3 100644 --- a/testdata/scripts/node/validate/disk-based-logging.txtar +++ b/testdata/scripts/node/validate/disk-based-logging.txtar @@ -330,6 +330,7 @@ NoNewHeadsThreshold = '3m0s' OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [EVM.Transactions] ForwardersEnabled = false @@ -384,6 +385,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [EVM.OCR] ContractConfirmations = 4 diff --git a/testdata/scripts/node/validate/invalid.txtar b/testdata/scripts/node/validate/invalid.txtar index 6b5932cfefd..73d557c0ae7 100644 --- a/testdata/scripts/node/validate/invalid.txtar +++ b/testdata/scripts/node/validate/invalid.txtar @@ -320,6 +320,7 @@ NoNewHeadsThreshold = '3m0s' OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 +FinalizedBlockOffset = 0 [EVM.Transactions] ForwardersEnabled = false @@ -374,6 +375,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [EVM.OCR] ContractConfirmations = 4 diff --git a/testdata/scripts/node/validate/valid.txtar b/testdata/scripts/node/validate/valid.txtar index 688829513e9..76a134b7761 100644 --- a/testdata/scripts/node/validate/valid.txtar +++ b/testdata/scripts/node/validate/valid.txtar @@ -327,6 +327,7 @@ NoNewHeadsThreshold = '3m0s' OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 
+FinalizedBlockOffset = 0 [EVM.Transactions] ForwardersEnabled = false @@ -381,6 +382,8 @@ SyncThreshold = 5 LeaseDuration = '0s' NodeIsSyncingEnabled = false FinalizedBlockPollInterval = '5s' +EnforceRepeatableRead = false +DeathDeclarationDelay = '10s' [EVM.OCR] ContractConfirmations = 4
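Beyond the config additions, the other recurring change in this diff is the `logpoller.NewLogPoller` signature, which now takes a head tracker between the logger and the options. The sketch below condenses the test-setup pattern repeated in the hunks above into one hypothetical helper; the `newTestLogPoller` name and package are assumptions, while the constructor calls are taken verbatim from the diff.

```go
package logpollertest // hypothetical package; see the test hunks above

import (
	"testing"
	"time"

	"github.com/jmoiron/sqlx"

	"github.com/smartcontractkit/chainlink-common/pkg/logger"

	"github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
	"github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker"
	"github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
	"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
)

// newTestLogPoller (hypothetical helper) shows the migration applied
// throughout this diff: build a simulated head tracker from the same
// options, then hand it to NewLogPoller, which now receives the
// latest/finalized heads from the tracker instead of discovering them
// itself.
func newTestLogPoller(t *testing.T, db *sqlx.DB, lggr logger.Logger, ethClient *client.SimulatedBackendClient) logpoller.LogPoller {
	t.Helper()
	lpOpts := logpoller.Opts{
		PollPeriod:               time.Millisecond,
		FinalityDepth:            2,
		BackfillBatchSize:        3,
		RpcBatchSize:             2,
		KeepFinalizedBlocksDepth: 1000,
	}
	// New argument: the head tracker is injected into the log poller.
	ht := headtracker.NewSimulatedHeadTracker(ethClient, lpOpts.UseFinalityTag, lpOpts.FinalityDepth)
	return logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr), ethClient, lggr, ht, lpOpts)
}
```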