From 1eaf5e087a5ac204e0b472e1c307722887104678 Mon Sep 17 00:00:00 2001 From: Dmytro Haidashenko <34754799+dhaidashenko@users.noreply.github.com> Date: Tue, 30 Jul 2024 12:24:52 +0200 Subject: [PATCH] No new finalized Heads Implementation (#13907) * No new finalized Heads Implementation * fixed tests * update defaults for NoNewFinalizedHeadsThreshold * Update common/client/node_lifecycle.go Co-authored-by: amit-momin <108959691+amit-momin@users.noreply.github.com> * Update common/client/node_lifecycle_test.go Co-authored-by: amit-momin <108959691+amit-momin@users.noreply.github.com> * Update common/client/node_lifecycle_test.go Co-authored-by: amit-momin <108959691+amit-momin@users.noreply.github.com> * rename HeadIsNotIncreasing to NoNewHead * move and add docs for syncIssue consts * rename syncIssue to syncStatus --------- Co-authored-by: amit-momin <108959691+amit-momin@users.noreply.github.com> --- .changeset/wild-seals-look.md | 5 + common/client/mock_node_client_test.go | 194 +++++-- common/client/mock_rpc_test.go | 138 ++++- common/client/mocks/config.go | 13 +- common/client/models.go | 45 ++ common/client/models_test.go | 34 ++ common/client/multi_node.go | 9 + common/client/node.go | 1 + common/client/node_fsm.go | 7 +- common/client/node_lifecycle.go | 363 ++++++++---- common/client/node_lifecycle_test.go | 524 ++++++++++++++---- common/client/types.go | 6 +- core/chains/evm/client/chain_client_test.go | 2 +- core/chains/evm/client/config_builder.go | 30 +- core/chains/evm/client/config_builder_test.go | 5 +- core/chains/evm/client/evm_client.go | 4 +- core/chains/evm/client/evm_client_test.go | 4 +- core/chains/evm/client/helpers_test.go | 4 +- core/chains/evm/client/mocks/rpc_client.go | 138 ++++- core/chains/evm/client/rpc_client.go | 65 ++- core/chains/evm/client/rpc_client_test.go | 16 +- core/chains/evm/config/chain_scoped.go | 4 + core/chains/evm/config/config.go | 1 + core/chains/evm/config/toml/config.go | 43 +- core/chains/evm/config/toml/defaults.go | 4 + .../config/toml/defaults/Avalanche_Fuji.toml | 1 + .../toml/defaults/Avalanche_Mainnet.toml | 1 + .../evm/config/toml/defaults/BSC_Mainnet.toml | 1 + .../evm/config/toml/defaults/BSC_Testnet.toml | 1 + .../config/toml/defaults/Base_Mainnet.toml | 1 + .../config/toml/defaults/Base_Sepolia.toml | 1 + .../config/toml/defaults/Celo_Mainnet.toml | 1 + .../config/toml/defaults/Celo_Testnet.toml | 1 + .../toml/defaults/Ethereum_Mainnet.toml | 1 + .../config/toml/defaults/Gnosis_Chiado.toml | 1 + .../config/toml/defaults/Gnosis_Mainnet.toml | 1 + .../toml/defaults/Optimism_Mainnet.toml | 1 + .../toml/defaults/Optimism_Sepolia.toml | 1 + .../config/toml/defaults/Polygon_Amoy.toml | 1 + .../config/toml/defaults/Polygon_Mainnet.toml | 1 + .../config/toml/defaults/WeMix_Mainnet.toml | 1 + .../config/toml/defaults/WeMix_Testnet.toml | 1 + .../evm/config/toml/defaults/fallback.toml | 1 + core/config/docs/chains-evm.toml | 5 + core/services/chainlink/config_test.go | 28 +- .../chainlink/testdata/config-full.toml | 1 + .../config-multi-chain-effective.toml | 3 + core/web/resolver/testdata/config-full.toml | 1 + .../config-multi-chain-effective.toml | 3 + docs/CONFIG.md | 68 +++ .../disk-based-logging-disabled.txtar | 1 + .../validate/disk-based-logging-no-dir.txtar | 1 + .../node/validate/disk-based-logging.txtar | 1 + testdata/scripts/node/validate/invalid.txtar | 1 + testdata/scripts/node/validate/valid.txtar | 1 + 55 files changed, 1452 insertions(+), 339 deletions(-) create mode 100644 .changeset/wild-seals-look.md diff --git 
a/.changeset/wild-seals-look.md b/.changeset/wild-seals-look.md new file mode 100644 index 00000000000..3cd854f0e61 --- /dev/null +++ b/.changeset/wild-seals-look.md @@ -0,0 +1,5 @@ +--- +"chainlink": patch +--- + +Added new health check that ensures RPC provides new finalized heads at least every `NoNewFinalizedHeadsThreshold` #added diff --git a/common/client/mock_node_client_test.go b/common/client/mock_node_client_test.go index 5b7abe82121..5643dcde90e 100644 --- a/common/client/mock_node_client_test.go +++ b/common/client/mock_node_client_test.go @@ -400,62 +400,6 @@ func (_c *mockNodeClient_IsSyncing_Call[CHAIN_ID, HEAD]) RunAndReturn(run func(c return _c } -// LatestFinalizedBlock provides a mock function with given fields: ctx -func (_m *mockNodeClient[CHAIN_ID, HEAD]) LatestFinalizedBlock(ctx context.Context) (HEAD, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for LatestFinalizedBlock") - } - - var r0 HEAD - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (HEAD, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) HEAD); ok { - r0 = rf(ctx) - } else { - r0 = ret.Get(0).(HEAD) - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// mockNodeClient_LatestFinalizedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LatestFinalizedBlock' -type mockNodeClient_LatestFinalizedBlock_Call[CHAIN_ID types.ID, HEAD Head] struct { - *mock.Call -} - -// LatestFinalizedBlock is a helper method to define mock.On call -// - ctx context.Context -func (_e *mockNodeClient_Expecter[CHAIN_ID, HEAD]) LatestFinalizedBlock(ctx interface{}) *mockNodeClient_LatestFinalizedBlock_Call[CHAIN_ID, HEAD] { - return &mockNodeClient_LatestFinalizedBlock_Call[CHAIN_ID, HEAD]{Call: _e.mock.On("LatestFinalizedBlock", ctx)} -} - -func (_c *mockNodeClient_LatestFinalizedBlock_Call[CHAIN_ID, HEAD]) Run(run func(ctx context.Context)) *mockNodeClient_LatestFinalizedBlock_Call[CHAIN_ID, HEAD] { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *mockNodeClient_LatestFinalizedBlock_Call[CHAIN_ID, HEAD]) Return(_a0 HEAD, _a1 error) *mockNodeClient_LatestFinalizedBlock_Call[CHAIN_ID, HEAD] { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *mockNodeClient_LatestFinalizedBlock_Call[CHAIN_ID, HEAD]) RunAndReturn(run func(context.Context) (HEAD, error)) *mockNodeClient_LatestFinalizedBlock_Call[CHAIN_ID, HEAD] { - _c.Call.Return(run) - return _c -} - // SetAliveLoopSub provides a mock function with given fields: _a0 func (_m *mockNodeClient[CHAIN_ID, HEAD]) SetAliveLoopSub(_a0 types.Subscription) { _m.Called(_a0) @@ -538,8 +482,8 @@ func (_c *mockNodeClient_SubscribeNewHead_Call[CHAIN_ID, HEAD]) Run(run func(ctx return _c } -func (_c *mockNodeClient_SubscribeNewHead_Call[CHAIN_ID, HEAD]) Return(_a0 types.Subscription, _a1 error) *mockNodeClient_SubscribeNewHead_Call[CHAIN_ID, HEAD] { - _c.Call.Return(_a0, _a1) +func (_c *mockNodeClient_SubscribeNewHead_Call[CHAIN_ID, HEAD]) Return(s types.Subscription, err error) *mockNodeClient_SubscribeNewHead_Call[CHAIN_ID, HEAD] { + _c.Call.Return(s, err) return _c } @@ -548,6 +492,140 @@ func (_c *mockNodeClient_SubscribeNewHead_Call[CHAIN_ID, HEAD]) RunAndReturn(run return _c } +// SubscribeToFinalizedHeads provides a mock function with given fields: _a0 +func (_m *mockNodeClient[CHAIN_ID, HEAD]) 
SubscribeToFinalizedHeads(_a0 context.Context) (<-chan HEAD, types.Subscription, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for SubscribeToFinalizedHeads") + } + + var r0 <-chan HEAD + var r1 types.Subscription + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (<-chan HEAD, types.Subscription, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) <-chan HEAD); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan HEAD) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) types.Subscription); ok { + r1 = rf(_a0) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(types.Subscription) + } + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(_a0) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// mockNodeClient_SubscribeToFinalizedHeads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeToFinalizedHeads' +type mockNodeClient_SubscribeToFinalizedHeads_Call[CHAIN_ID types.ID, HEAD Head] struct { + *mock.Call +} + +// SubscribeToFinalizedHeads is a helper method to define mock.On call +// - _a0 context.Context +func (_e *mockNodeClient_Expecter[CHAIN_ID, HEAD]) SubscribeToFinalizedHeads(_a0 interface{}) *mockNodeClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD] { + return &mockNodeClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD]{Call: _e.mock.On("SubscribeToFinalizedHeads", _a0)} +} + +func (_c *mockNodeClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD]) Run(run func(_a0 context.Context)) *mockNodeClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *mockNodeClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD]) Return(_a0 <-chan HEAD, _a1 types.Subscription, _a2 error) *mockNodeClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD] { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *mockNodeClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD]) RunAndReturn(run func(context.Context) (<-chan HEAD, types.Subscription, error)) *mockNodeClient_SubscribeToFinalizedHeads_Call[CHAIN_ID, HEAD] { + _c.Call.Return(run) + return _c +} + +// SubscribeToHeads provides a mock function with given fields: ctx +func (_m *mockNodeClient[CHAIN_ID, HEAD]) SubscribeToHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SubscribeToHeads") + } + + var r0 <-chan HEAD + var r1 types.Subscription + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (<-chan HEAD, types.Subscription, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) <-chan HEAD); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan HEAD) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) types.Subscription); ok { + r1 = rf(ctx) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(types.Subscription) + } + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// mockNodeClient_SubscribeToHeads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeToHeads' +type mockNodeClient_SubscribeToHeads_Call[CHAIN_ID types.ID, HEAD Head] struct { + *mock.Call +} + +// SubscribeToHeads is a helper method to 
define mock.On call +// - ctx context.Context +func (_e *mockNodeClient_Expecter[CHAIN_ID, HEAD]) SubscribeToHeads(ctx interface{}) *mockNodeClient_SubscribeToHeads_Call[CHAIN_ID, HEAD] { + return &mockNodeClient_SubscribeToHeads_Call[CHAIN_ID, HEAD]{Call: _e.mock.On("SubscribeToHeads", ctx)} +} + +func (_c *mockNodeClient_SubscribeToHeads_Call[CHAIN_ID, HEAD]) Run(run func(ctx context.Context)) *mockNodeClient_SubscribeToHeads_Call[CHAIN_ID, HEAD] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *mockNodeClient_SubscribeToHeads_Call[CHAIN_ID, HEAD]) Return(ch <-chan HEAD, sub types.Subscription, err error) *mockNodeClient_SubscribeToHeads_Call[CHAIN_ID, HEAD] { + _c.Call.Return(ch, sub, err) + return _c +} + +func (_c *mockNodeClient_SubscribeToHeads_Call[CHAIN_ID, HEAD]) RunAndReturn(run func(context.Context) (<-chan HEAD, types.Subscription, error)) *mockNodeClient_SubscribeToHeads_Call[CHAIN_ID, HEAD] { + _c.Call.Return(run) + return _c +} + // SubscribersCount provides a mock function with given fields: func (_m *mockNodeClient[CHAIN_ID, HEAD]) SubscribersCount() int32 { ret := _m.Called() diff --git a/common/client/mock_rpc_test.go b/common/client/mock_rpc_test.go index 36beae901c6..00473c66369 100644 --- a/common/client/mock_rpc_test.go +++ b/common/client/mock_rpc_test.go @@ -1508,8 +1508,8 @@ func (_c *mockRPC_SubscribeNewHead_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_ return _c } -func (_c *mockRPC_SubscribeNewHead_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) Return(_a0 types.Subscription, _a1 error) *mockRPC_SubscribeNewHead_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM] { - _c.Call.Return(_a0, _a1) +func (_c *mockRPC_SubscribeNewHead_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) Return(s types.Subscription, err error) *mockRPC_SubscribeNewHead_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM] { + _c.Call.Return(s, err) return _c } @@ -1518,6 +1518,140 @@ func (_c *mockRPC_SubscribeNewHead_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_ return _c } +// SubscribeToFinalizedHeads provides a mock function with given fields: _a0 +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) SubscribeToFinalizedHeads(_a0 context.Context) (<-chan HEAD, types.Subscription, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for SubscribeToFinalizedHeads") + } + + var r0 <-chan HEAD + var r1 types.Subscription + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (<-chan HEAD, types.Subscription, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) <-chan HEAD); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan HEAD) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) types.Subscription); ok { + r1 = rf(_a0) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(types.Subscription) + } + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(_a0) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// mockRPC_SubscribeToFinalizedHeads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeToFinalizedHeads' +type 
mockRPC_SubscribeToFinalizedHeads_Call[CHAIN_ID types.ID, SEQ types.Sequence, ADDR types.Hashable, BLOCK_HASH types.Hashable, TX interface{}, TX_HASH types.Hashable, EVENT interface{}, EVENT_OPS interface{}, TX_RECEIPT types.Receipt[TX_HASH, BLOCK_HASH], FEE feetypes.Fee, HEAD types.Head[BLOCK_HASH], BATCH_ELEM interface{}] struct { + *mock.Call +} + +// SubscribeToFinalizedHeads is a helper method to define mock.On call +// - _a0 context.Context +func (_e *mockRPC_Expecter[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) SubscribeToFinalizedHeads(_a0 interface{}) *mockRPC_SubscribeToFinalizedHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM] { + return &mockRPC_SubscribeToFinalizedHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]{Call: _e.mock.On("SubscribeToFinalizedHeads", _a0)} +} + +func (_c *mockRPC_SubscribeToFinalizedHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) Run(run func(_a0 context.Context)) *mockRPC_SubscribeToFinalizedHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *mockRPC_SubscribeToFinalizedHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) Return(_a0 <-chan HEAD, _a1 types.Subscription, _a2 error) *mockRPC_SubscribeToFinalizedHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM] { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *mockRPC_SubscribeToFinalizedHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) RunAndReturn(run func(context.Context) (<-chan HEAD, types.Subscription, error)) *mockRPC_SubscribeToFinalizedHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM] { + _c.Call.Return(run) + return _c +} + +// SubscribeToHeads provides a mock function with given fields: ctx +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) SubscribeToHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SubscribeToHeads") + } + + var r0 <-chan HEAD + var r1 types.Subscription + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (<-chan HEAD, types.Subscription, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) <-chan HEAD); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan HEAD) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) types.Subscription); ok { + r1 = rf(ctx) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(types.Subscription) + } + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// mockRPC_SubscribeToHeads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeToHeads' +type mockRPC_SubscribeToHeads_Call[CHAIN_ID types.ID, SEQ types.Sequence, ADDR types.Hashable, BLOCK_HASH types.Hashable, TX interface{}, TX_HASH types.Hashable, EVENT 
interface{}, EVENT_OPS interface{}, TX_RECEIPT types.Receipt[TX_HASH, BLOCK_HASH], FEE feetypes.Fee, HEAD types.Head[BLOCK_HASH], BATCH_ELEM interface{}] struct { + *mock.Call +} + +// SubscribeToHeads is a helper method to define mock.On call +// - ctx context.Context +func (_e *mockRPC_Expecter[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) SubscribeToHeads(ctx interface{}) *mockRPC_SubscribeToHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM] { + return &mockRPC_SubscribeToHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]{Call: _e.mock.On("SubscribeToHeads", ctx)} +} + +func (_c *mockRPC_SubscribeToHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) Run(run func(ctx context.Context)) *mockRPC_SubscribeToHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM] { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *mockRPC_SubscribeToHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) Return(ch <-chan HEAD, sub types.Subscription, err error) *mockRPC_SubscribeToHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM] { + _c.Call.Return(ch, sub, err) + return _c +} + +func (_c *mockRPC_SubscribeToHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) RunAndReturn(run func(context.Context) (<-chan HEAD, types.Subscription, error)) *mockRPC_SubscribeToHeads_Call[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM] { + _c.Call.Return(run) + return _c +} + // SubscribersCount provides a mock function with given fields: func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) SubscribersCount() int32 { ret := _m.Called() diff --git a/common/client/mocks/config.go b/common/client/mocks/config.go index d1007f39f0f..95b57cce0c3 100644 --- a/common/client/mocks/config.go +++ b/common/client/mocks/config.go @@ -3,10 +3,11 @@ package mocks import "time" type ChainConfig struct { - IsFinalityTagEnabled bool - FinalityDepthVal uint32 - NoNewHeadsThresholdVal time.Duration - FinalizedBlockOffsetVal uint32 + IsFinalityTagEnabled bool + FinalityDepthVal uint32 + NoNewHeadsThresholdVal time.Duration + FinalizedBlockOffsetVal uint32 + NoNewFinalizedHeadsThresholdVal time.Duration } func (t ChainConfig) NodeNoNewHeadsThreshold() time.Duration { @@ -24,3 +25,7 @@ func (t ChainConfig) FinalityTagEnabled() bool { func (t ChainConfig) FinalizedBlockOffset() uint32 { return t.FinalizedBlockOffsetVal } + +func (t ChainConfig) NoNewFinalizedHeadsThreshold() time.Duration { + return t.NoNewFinalizedHeadsThresholdVal +} diff --git a/common/client/models.go b/common/client/models.go index 8b616137669..526bb25c887 100644 --- a/common/client/models.go +++ b/common/client/models.go @@ -1,6 +1,7 @@ package client import ( + "bytes" "fmt" ) @@ -74,3 +75,47 @@ func (n NodeTier) String() string { return fmt.Sprintf("NodeTier(%d)", n) } } + +// syncStatus - defines problems related to RPC's state synchronization. 
Can be used as a bitmask to define multiple issues +type syncStatus int + +const ( + // syncStatusSynced - RPC is fully synced + syncStatusSynced = 0 + // syncStatusNotInSyncWithPool - RPC is lagging behind the highest block observed within the pool of RPCs + syncStatusNotInSyncWithPool syncStatus = 1 << iota + // syncStatusNoNewHead - RPC failed to produce a new head for too long + syncStatusNoNewHead + // syncStatusNoNewFinalizedHead - RPC failed to produce a new finalized head for too long + syncStatusNoNewFinalizedHead + syncStatusLen +) + +func (s syncStatus) String() string { + if s == syncStatusSynced { + return "Synced" + } + var result bytes.Buffer + for i := syncStatusNotInSyncWithPool; i < syncStatusLen; i = i << 1 { + if i&s == 0 { + continue + } + result.WriteString(i.string()) + result.WriteString(",") + } + result.Truncate(result.Len() - 1) + return result.String() +} + +func (s syncStatus) string() string { + switch s { + case syncStatusNotInSyncWithPool: + return "NotInSyncWithRPCPool" + case syncStatusNoNewHead: + return "NoNewHead" + case syncStatusNoNewFinalizedHead: + return "NoNewFinalizedHead" + default: + return fmt.Sprintf("syncStatus(%d)", s) + } +} diff --git a/common/client/models_test.go b/common/client/models_test.go index 2d5dc31b373..a10592c3b68 100644 --- a/common/client/models_test.go +++ b/common/client/models_test.go @@ -3,6 +3,8 @@ package client import ( "strings" "testing" + + "github.com/stretchr/testify/assert" ) func TestSendTxReturnCode_String(t *testing.T) { @@ -14,3 +16,35 @@ func TestSendTxReturnCode_String(t *testing.T) { } } } + +func TestSyncStatus_String(t *testing.T) { + t.Run("All of the statuses have proper string representation", func(t *testing.T) { + for i := syncStatusNotInSyncWithPool; i < syncStatusLen; i <<= 1 { + // ensure that i's string representation is not equal to `syncStatus(%d)` + assert.NotContains(t, i.String(), "syncStatus(") + } + }) + t.Run("Unwraps mask", func(t *testing.T) { + testCases := []struct { + Mask syncStatus + ExpectedStr string + }{ + { + ExpectedStr: "Synced", + }, + { + Mask: syncStatusNotInSyncWithPool | syncStatusNoNewHead, + ExpectedStr: "NotInSyncWithRPCPool,NoNewHead", + }, + { + Mask: syncStatusNotInSyncWithPool | syncStatusNoNewHead | syncStatusNoNewFinalizedHead, + ExpectedStr: "NotInSyncWithRPCPool,NoNewHead,NoNewFinalizedHead", + }, + } + for _, testCase := range testCases { + t.Run(testCase.ExpectedStr, func(t *testing.T) { + assert.Equal(t, testCase.ExpectedStr, testCase.Mask.String()) + }) + } + }) +} diff --git a/common/client/multi_node.go b/common/client/multi_node.go index 4d4ea925fe8..c9250a1d620 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -15,6 +15,7 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/assets" "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/services" + feetypes "github.com/smartcontractkit/chainlink/v2/common/fee/types" "github.com/smartcontractkit/chainlink/v2/common/types" ) @@ -821,6 +822,14 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP return n.RPC().SubscribeNewHead(ctx, channel) } +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) SubscribeToHeads(ctx context.Context) (ch <-chan HEAD, sub types.Subscription, err error) { + n, err := c.selectNode() + if err != nil { + return nil, nil, err + } + return n.RPC().SubscribeToHeads(ctx) +} + func (c 
*multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) TokenBalance(ctx context.Context, account ADDR, tokenAddr ADDR) (b *big.Int, err error) { n, err := c.selectNode() if err != nil { diff --git a/common/client/node.go b/common/client/node.go index 5ea31d65961..d6543c772a8 100644 --- a/common/client/node.go +++ b/common/client/node.go @@ -49,6 +49,7 @@ type NodeConfig interface { type ChainConfig interface { NodeNoNewHeadsThreshold() time.Duration + NoNewFinalizedHeadsThreshold() time.Duration FinalityDepth() uint32 FinalityTagEnabled() bool FinalizedBlockOffset() uint32 diff --git a/common/client/node_fsm.go b/common/client/node_fsm.go index 5a5e2554431..e58de071fbc 100644 --- a/common/client/node_fsm.go +++ b/common/client/node_fsm.go @@ -2,7 +2,6 @@ package client import ( "fmt" - "math/big" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -240,11 +239,11 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToInSync(fn func()) { // declareOutOfSync puts a node into OutOfSync state, disconnecting all current // clients and making it unavailable for use until back in-sync. -func (n *node[CHAIN_ID, HEAD, RPC]) declareOutOfSync(isOutOfSync func(num int64, td *big.Int) bool) { +func (n *node[CHAIN_ID, HEAD, RPC]) declareOutOfSync(syncIssues syncStatus) { n.transitionToOutOfSync(func() { - n.lfcLog.Errorw("RPC Node is out of sync", "nodeState", n.state) + n.lfcLog.Errorw("RPC Node is out of sync", "nodeState", n.state, "syncIssues", syncIssues) n.wg.Add(1) - go n.outOfSyncLoop(isOutOfSync) + go n.outOfSyncLoop(syncIssues) }) } diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index 39e17bb4972..40d9a9ef6ef 100644 --- a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -7,6 +7,8 @@ import ( "math/big" "time" + "github.com/smartcontractkit/chainlink/v2/common/types" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -86,33 +88,37 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } noNewHeadsTimeoutThreshold := n.chainCfg.NodeNoNewHeadsThreshold() + noNewFinalizedBlocksTimeoutThreshold := n.chainCfg.NoNewFinalizedHeadsThreshold() pollFailureThreshold := n.nodePoolCfg.PollFailureThreshold() pollInterval := n.nodePoolCfg.PollInterval() lggr := logger.Sugared(n.lfcLog).Named("Alive").With("noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold, "pollInterval", pollInterval, "pollFailureThreshold", pollFailureThreshold) lggr.Tracew("Alive loop starting", "nodeState", n.getCachedState()) - headsC := make(chan HEAD) - sub, err := n.rpc.SubscribeNewHead(ctx, headsC) + headsSub, err := n.registerNewSubscription(ctx, lggr.With("subscriptionType", "heads"), + n.chainCfg.NodeNoNewHeadsThreshold(), n.rpc.SubscribeToHeads) if err != nil { - lggr.Errorw("Initial subscribe for heads failed", "nodeState", n.getCachedState()) + lggr.Errorw("Initial subscribe for heads failed", "nodeState", n.getCachedState(), "err", err) n.declareUnreachable() return } - // TODO: nit fix. 
If multinode switches primary node before we set sub as AliveSub, sub will be closed and we'll - // falsely transition this node to unreachable state - n.rpc.SetAliveLoopSub(sub) - defer sub.Unsubscribe() - - var outOfSyncT *time.Ticker - var outOfSyncTC <-chan time.Time - if noNewHeadsTimeoutThreshold > 0 { - lggr.Debugw("Head liveness checking enabled", "nodeState", n.getCachedState()) - outOfSyncT = time.NewTicker(noNewHeadsTimeoutThreshold) - defer outOfSyncT.Stop() - outOfSyncTC = outOfSyncT.C - } else { - lggr.Debug("Head liveness checking disabled") + + // TODO: will be removed as part of merging effort with BCI-2875 + n.rpc.SetAliveLoopSub(headsSub.sub) + + defer headsSub.Unsubscribe() + + var finalizedHeadsSub headSubscription[HEAD] + if n.chainCfg.FinalityTagEnabled() { + finalizedHeadsSub, err = n.registerNewSubscription(ctx, lggr.With("subscriptionType", "finalizedHeads"), + n.chainCfg.NoNewFinalizedHeadsThreshold(), n.rpc.SubscribeToFinalizedHeads) + if err != nil { + lggr.Errorw("Failed to subscribe to finalized heads", "err", err) + n.declareUnreachable() + return + } + + defer finalizedHeadsSub.Unsubscribe() } var pollCh <-chan time.Time @@ -131,14 +137,6 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { lggr.Debug("Polling disabled") } - var pollFinalizedHeadCh <-chan time.Time - if n.chainCfg.FinalityTagEnabled() && n.nodePoolCfg.FinalizedBlockPollInterval() > 0 { - lggr.Debugw("Finalized block polling enabled") - pollT := time.NewTicker(n.nodePoolCfg.FinalizedBlockPollInterval()) - defer pollT.Stop() - pollFinalizedHeadCh = pollT.C - } - localHighestChainInfo, _ := n.rpc.GetInterceptedChainInfo() var pollFailures uint32 @@ -149,7 +147,8 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { case <-pollCh: promPoolRPCNodePolls.WithLabelValues(n.chainID.String(), n.name).Inc() lggr.Tracew("Polling for version", "nodeState", n.getCachedState(), "pollFailures", pollFailures) - version, err := func(ctx context.Context) (string, error) { + var version string + version, err = func(ctx context.Context) (string, error) { ctx, cancel := context.WithTimeout(ctx, pollInterval) defer cancel() return n.RPC().ClientVersion(ctx) @@ -177,47 +176,33 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { n.declareUnreachable() return } - _, ci := n.StateAndLatest() - if outOfSync, liveNodes := n.syncStatus(ci.BlockNumber, ci.TotalDifficulty); outOfSync { + _, latestChainInfo := n.StateAndLatest() + if outOfSync, liveNodes := n.isOutOfSyncWithPool(latestChainInfo); outOfSync { // note: there must be another live node for us to be out of sync - lggr.Errorw("RPC endpoint has fallen behind", "blockNumber", ci.BlockNumber, "totalDifficulty", ci.TotalDifficulty, "nodeState", n.getCachedState()) + lggr.Errorw("RPC endpoint has fallen behind", "blockNumber", latestChainInfo.BlockNumber, "totalDifficulty", latestChainInfo.TotalDifficulty, "nodeState", n.getCachedState()) if liveNodes < 2 { lggr.Criticalf("RPC endpoint has fallen behind; %s %s", msgCannotDisable, msgDegradedState) continue } - n.declareOutOfSync(n.isOutOfSync) + n.declareOutOfSync(syncStatusNotInSyncWithPool) return } - case bh, open := <-headsC: + case bh, open := <-headsSub.Heads: if !open { lggr.Errorw("Subscription channel unexpectedly closed", "nodeState", n.getCachedState()) n.declareUnreachable() return } - promPoolRPCNodeNumSeenBlocks.WithLabelValues(n.chainID.String(), n.name).Inc() - lggr.Tracew("Got head", "head", bh) - if bh.BlockNumber() > localHighestChainInfo.BlockNumber { - 
promPoolRPCNodeHighestSeenBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(bh.BlockNumber())) - lggr.Tracew("Got higher block number, resetting timer", "latestReceivedBlockNumber", localHighestChainInfo.BlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.getCachedState()) - localHighestChainInfo.BlockNumber = bh.BlockNumber() - } else { - lggr.Tracew("Ignoring previously seen block number", "latestReceivedBlockNumber", localHighestChainInfo.BlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.getCachedState()) - } - if outOfSyncT != nil { - outOfSyncT.Reset(noNewHeadsTimeoutThreshold) - } - if !n.chainCfg.FinalityTagEnabled() { - latestFinalizedBN := max(bh.BlockNumber()-int64(n.chainCfg.FinalityDepth()), 0) - if latestFinalizedBN > localHighestChainInfo.FinalizedBlockNumber { - promPoolRPCNodeHighestFinalizedBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(latestFinalizedBN)) - localHighestChainInfo.FinalizedBlockNumber = latestFinalizedBN - } + + receivedNewHead := n.onNewHead(lggr, &localHighestChainInfo, bh) + if receivedNewHead && noNewHeadsTimeoutThreshold > 0 { + headsSub.ResetTimer(noNewHeadsTimeoutThreshold) } - case err := <-sub.Err(): + case err = <-headsSub.Errors: lggr.Errorw("Subscription was terminated", "err", err, "nodeState", n.getCachedState()) n.declareUnreachable() return - case <-outOfSyncTC: + case <-headsSub.NoNewHeads: // We haven't received a head on the channel for at least the // threshold amount of time, mark it broken lggr.Errorw(fmt.Sprintf("RPC endpoint detected out of sync; no new heads received for %s (last head received was %v)", noNewHeadsTimeoutThreshold, localHighestChainInfo.BlockNumber), "nodeState", n.getCachedState(), "latestReceivedBlockNumber", localHighestChainInfo.BlockNumber, "noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold) @@ -226,47 +211,151 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { lggr.Criticalf("RPC endpoint detected out of sync; %s %s", msgCannotDisable, msgDegradedState) // We don't necessarily want to wait the full timeout to check again, we should // check regularly and log noisily in this state - outOfSyncT.Reset(zombieNodeCheckInterval(noNewHeadsTimeoutThreshold)) + headsSub.ResetTimer(zombieNodeCheckInterval(noNewHeadsTimeoutThreshold)) continue } } - n.declareOutOfSync(func(num int64, td *big.Int) bool { return num < localHighestChainInfo.BlockNumber }) + n.declareOutOfSync(syncStatusNoNewHead) return - case <-pollFinalizedHeadCh: - latestFinalized, err := func(ctx context.Context) (HEAD, error) { - ctx, cancel := context.WithTimeout(ctx, n.nodePoolCfg.FinalizedBlockPollInterval()) - defer cancel() - return n.RPC().LatestFinalizedBlock(ctx) - }(ctx) - if err != nil { - lggr.Warnw("Failed to fetch latest finalized block", "err", err) - continue + case latestFinalized, open := <-finalizedHeadsSub.Heads: + if !open { + lggr.Errorw("Finalized heads subscription channel unexpectedly closed", "nodeState", n.getCachedState()) + n.declareUnreachable() + return } - if !latestFinalized.IsValid() { - lggr.Warn("Latest finalized block is not valid") - continue + receivedNewHead := n.onNewFinalizedHead(lggr, &localHighestChainInfo, latestFinalized) + if receivedNewHead && noNewFinalizedBlocksTimeoutThreshold > 0 { + finalizedHeadsSub.ResetTimer(noNewFinalizedBlocksTimeoutThreshold) } - - latestFinalizedBN := latestFinalized.BlockNumber() - if latestFinalizedBN > localHighestChainInfo.FinalizedBlockNumber { - promPoolRPCNodeHighestFinalizedBlock.WithLabelValues(n.chainID.String(), 
n.name).Set(float64(latestFinalizedBN)) - localHighestChainInfo.FinalizedBlockNumber = latestFinalizedBN + case <-finalizedHeadsSub.NoNewHeads: + // We haven't received a finalized head on the channel for at least the + // threshold amount of time, mark it broken + lggr.Errorw(fmt.Sprintf("RPC's finalized state is out of sync; no new finalized heads received for %s (last finalized head received was %v)", noNewFinalizedBlocksTimeoutThreshold, localHighestChainInfo.FinalizedBlockNumber), "latestReceivedBlockNumber", localHighestChainInfo.BlockNumber) + if n.poolInfoProvider != nil { + if l, _ := n.poolInfoProvider.LatestChainInfo(); l < 2 { + lggr.Criticalf("RPC's finalized state is out of sync; %s %s", msgCannotDisable, msgDegradedState) + // We don't necessarily want to wait the full timeout to check again, we should + // check regularly and log noisily in this state + finalizedHeadsSub.ResetTimer(zombieNodeCheckInterval(noNewFinalizedBlocksTimeoutThreshold)) + continue + } } + n.declareOutOfSync(syncStatusNoNewFinalizedHead) + return + case <-finalizedHeadsSub.Errors: + lggr.Errorw("Finalized heads subscription was terminated", "err", err) + n.declareUnreachable() + return } } } -func (n *node[CHAIN_ID, HEAD, RPC]) isOutOfSync(num int64, td *big.Int) (outOfSync bool) { - outOfSync, _ = n.syncStatus(num, td) - return +type headSubscription[HEAD any] struct { + Heads <-chan HEAD + Errors <-chan error + NoNewHeads <-chan time.Time + + noNewHeadsTicker *time.Ticker + sub types.Subscription + cleanUpTasks []func() +} + +func (sub *headSubscription[HEAD]) ResetTimer(duration time.Duration) { + sub.noNewHeadsTicker.Reset(duration) +} + +func (sub *headSubscription[HEAD]) Unsubscribe() { + for _, doCleanUp := range sub.cleanUpTasks { + doCleanUp() + } +} + +func (n *node[CHAIN_ID, HEAD, PRC]) registerNewSubscription(ctx context.Context, lggr logger.SugaredLogger, + noNewDataThreshold time.Duration, newSub func(ctx context.Context) (<-chan HEAD, types.Subscription, error)) (headSubscription[HEAD], error) { + result := headSubscription[HEAD]{} + var err error + var sub types.Subscription + result.Heads, sub, err = newSub(ctx) + if err != nil { + return result, err + } + + result.Errors = sub.Err() + lggr.Debug("Successfully subscribed") + + // TODO: will be removed as part of merging effort with BCI-2875 + result.sub = sub + //n.stateMu.Lock() + //n.healthCheckSubs = append(n.healthCheckSubs, sub) + //n.stateMu.Unlock() + + result.cleanUpTasks = append(result.cleanUpTasks, sub.Unsubscribe) + + if noNewDataThreshold > 0 { + lggr.Debugw("Subscription liveness checking enabled") + result.noNewHeadsTicker = time.NewTicker(noNewDataThreshold) + result.NoNewHeads = result.noNewHeadsTicker.C + result.cleanUpTasks = append(result.cleanUpTasks, result.noNewHeadsTicker.Stop) + } else { + lggr.Debug("Subscription liveness checking disabled") + } + + return result, nil } -// syncStatus returns outOfSync true if num or td is more than SyncThresold behind the best node. 
+func (n *node[CHAIN_ID, HEAD, RPC]) onNewFinalizedHead(lggr logger.SugaredLogger, chainInfo *ChainInfo, latestFinalized HEAD) bool { + if !latestFinalized.IsValid() { + lggr.Warn("Latest finalized block is not valid") + return false + } + + latestFinalizedBN := latestFinalized.BlockNumber() + lggr.Tracew("Got latest finalized head", "latestFinalized", latestFinalized) + if latestFinalizedBN <= chainInfo.FinalizedBlockNumber { + lggr.Tracew("Ignoring previously seen finalized block number") + return false + } + + promPoolRPCNodeHighestFinalizedBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(latestFinalizedBN)) + chainInfo.FinalizedBlockNumber = latestFinalizedBN + return true +} + +func (n *node[CHAIN_ID, HEAD, RPC]) onNewHead(lggr logger.SugaredLogger, chainInfo *ChainInfo, head HEAD) bool { + if !head.IsValid() { + lggr.Warn("Latest head is not valid") + return false + } + + promPoolRPCNodeNumSeenBlocks.WithLabelValues(n.chainID.String(), n.name).Inc() + lggr.Tracew("Got head", "head", head) + lggr = lggr.With("latestReceivedBlockNumber", chainInfo.BlockNumber, "blockNumber", head.BlockNumber(), "nodeState", n.getCachedState()) + if head.BlockNumber() <= chainInfo.BlockNumber { + lggr.Tracew("Ignoring previously seen block number") + return false + } + + promPoolRPCNodeHighestSeenBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(head.BlockNumber())) + chainInfo.BlockNumber = head.BlockNumber() + + if !n.chainCfg.FinalityTagEnabled() { + latestFinalizedBN := max(head.BlockNumber()-int64(n.chainCfg.FinalityDepth()), 0) + if latestFinalizedBN > chainInfo.FinalizedBlockNumber { + promPoolRPCNodeHighestFinalizedBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(latestFinalizedBN)) + chainInfo.FinalizedBlockNumber = latestFinalizedBN + } + } + + return true +} + +// isOutOfSyncWithPool returns outOfSync true if the node's latest block number or total difficulty is more than SyncThreshold behind the best node. // Always returns outOfSync false for SyncThreshold 0. // liveNodes is only included when outOfSync is true. 
-func (n *node[CHAIN_ID, HEAD, RPC]) syncStatus(num int64, td *big.Int) (outOfSync bool, liveNodes int) { +func (n *node[CHAIN_ID, HEAD, RPC]) isOutOfSyncWithPool(localState ChainInfo) (outOfSync bool, liveNodes int) { if n.poolInfoProvider == nil { + n.lfcLog.Warn("skipping sync state against the pool - should only occur in tests") return // skip for tests } threshold := n.nodePoolCfg.SyncThreshold() @@ -278,22 +367,23 @@ func (n *node[CHAIN_ID, HEAD, RPC]) syncStatus(num int64, td *big.Int) (outOfSyn mode := n.nodePoolCfg.SelectionMode() switch mode { case NodeSelectionModeHighestHead, NodeSelectionModeRoundRobin, NodeSelectionModePriorityLevel: - return num < ci.BlockNumber-int64(threshold), ln + return localState.BlockNumber < ci.BlockNumber-int64(threshold), ln case NodeSelectionModeTotalDifficulty: bigThreshold := big.NewInt(int64(threshold)) - return td.Cmp(bigmath.Sub(ci.TotalDifficulty, bigThreshold)) < 0, ln + return localState.TotalDifficulty.Cmp(bigmath.Sub(ci.TotalDifficulty, bigThreshold)) < 0, ln default: panic("unrecognized NodeSelectionMode: " + mode) } } const ( - msgReceivedBlock = "Received block for RPC node, waiting until back in-sync to mark as live again" - msgInSync = "RPC node back in sync" + msgReceivedBlock = "Received block for RPC node, waiting until back in-sync to mark as live again" + msgReceivedFinalizedBlock = "Received new finalized block for RPC node, waiting until back in-sync to mark as live again" + msgInSync = "RPC node back in sync" ) // outOfSyncLoop takes an OutOfSync node and waits until isOutOfSync returns false to go back to live status -func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td *big.Int) bool) { +func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(syncIssues syncStatus) { defer n.wg.Done() ctx, cancel := n.newCtx() defer cancel() @@ -312,8 +402,9 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td outOfSyncAt := time.Now() - lggr := logger.Sugared(logger.Named(n.lfcLog, "OutOfSync")) - lggr.Debugw("Trying to revive out-of-sync RPC node", "nodeState", n.getCachedState()) + // set logger name to OutOfSync or FinalizedBlockOutOfSync + lggr := logger.Sugared(logger.Named(n.lfcLog, n.getCachedState().String())).With("nodeState", n.getCachedState()) + lggr.Debugw("Trying to revive out-of-sync RPC node") // Need to redial since out-of-sync nodes are automatically disconnected state := n.createVerifiedConn(ctx, lggr) @@ -322,46 +413,118 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td return } - lggr.Tracew("Successfully subscribed to heads feed on out-of-sync RPC node", "nodeState", n.getCachedState()) - - ch := make(chan HEAD) - sub, err := n.rpc.SubscribeNewHead(ctx, ch) + noNewHeadsTimeoutThreshold := n.chainCfg.NodeNoNewHeadsThreshold() + headsSub, err := n.registerNewSubscription(ctx, lggr.With("subscriptionType", "heads"), + noNewHeadsTimeoutThreshold, n.rpc.SubscribeToHeads) if err != nil { - lggr.Errorw("Failed to subscribe heads on out-of-sync RPC node", "nodeState", n.getCachedState(), "err", err) + lggr.Errorw("Failed to subscribe heads on out-of-sync RPC node", "err", err) n.declareUnreachable() return } - defer sub.Unsubscribe() + lggr.Tracew("Successfully subscribed to heads feed on out-of-sync RPC node") + defer headsSub.Unsubscribe() + + noNewFinalizedBlocksTimeoutThreshold := n.chainCfg.NoNewFinalizedHeadsThreshold() + var finalizedHeadsSub headSubscription[HEAD] + if n.chainCfg.FinalityTagEnabled() { + finalizedHeadsSub, err = 
n.registerNewSubscription(ctx, lggr.With("subscriptionType", "finalizedHeads"), + noNewFinalizedBlocksTimeoutThreshold, n.rpc.SubscribeToFinalizedHeads) + if err != nil { + lggr.Errorw("Subscribe to finalized heads failed on out-of-sync RPC node", "err", err) + n.declareUnreachable() + return + } + + lggr.Tracew("Successfully subscribed to finalized heads feed on out-of-sync RPC node") + defer finalizedHeadsSub.Unsubscribe() + } + + _, localHighestChainInfo := n.rpc.GetInterceptedChainInfo() for { + if syncIssues == syncStatusSynced { + // back in-sync! flip back into alive loop + lggr.Infow(fmt.Sprintf("%s: %s. Node was out-of-sync for %s", msgInSync, n.String(), time.Since(outOfSyncAt))) + n.declareInSync() + return + } + select { case <-ctx.Done(): return - case head, open := <-ch: + case head, open := <-headsSub.Heads: if !open { - lggr.Error("Subscription channel unexpectedly closed", "nodeState", n.getCachedState()) + lggr.Errorw("Subscription channel unexpectedly closed", "nodeState", n.getCachedState()) n.declareUnreachable() return } - if !isOutOfSync(head.BlockNumber(), head.BlockDifficulty()) { - // back in-sync! flip back into alive loop - lggr.Infow(fmt.Sprintf("%s: %s. Node was out-of-sync for %s", msgInSync, n.String(), time.Since(outOfSyncAt)), "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "nodeState", n.getCachedState()) - n.declareInSync() - return + + if !n.onNewHead(lggr, &localHighestChainInfo, head) { + continue + } + + // received a new head - clear NoNewHead flag + syncIssues &= ^syncStatusNoNewHead + if outOfSync, _ := n.isOutOfSyncWithPool(localHighestChainInfo); !outOfSync { + // we caught up with the pool - clear NotInSyncWithPool flag + syncIssues &= ^syncStatusNotInSyncWithPool + } else { + // we've received new head, but lagging behind the pool, add NotInSyncWithPool flag to prevent false transition to alive + syncIssues |= syncStatusNotInSyncWithPool + } + + if noNewHeadsTimeoutThreshold > 0 { + headsSub.ResetTimer(noNewHeadsTimeoutThreshold) } - lggr.Debugw(msgReceivedBlock, "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "nodeState", n.getCachedState()) - case <-time.After(zombieNodeCheckInterval(n.chainCfg.NodeNoNewHeadsThreshold())): + + lggr.Debugw(msgReceivedBlock, "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "syncIssues", syncIssues) + case <-time.After(zombieNodeCheckInterval(noNewHeadsTimeoutThreshold)): if n.poolInfoProvider != nil { if l, _ := n.poolInfoProvider.LatestChainInfo(); l < 1 { - lggr.Critical("RPC endpoint is still out of sync, but there are no other available nodes. This RPC node will be forcibly moved back into the live pool in a degraded state") + lggr.Criticalw("RPC endpoint is still out of sync, but there are no other available nodes. This RPC node will be forcibly moved back into the live pool in a degraded state", "syncIssues", syncIssues) n.declareInSync() return } } - case err := <-sub.Err(): - lggr.Errorw("Subscription was terminated", "nodeState", n.getCachedState(), "err", err) + case err := <-headsSub.Errors: + lggr.Errorw("Subscription was terminated", "err", err) + n.declareUnreachable() + return + case <-headsSub.NoNewHeads: + // we are not resetting the timer, as there is no need to add syncStatusNoNewHead until it's removed on new head. + syncIssues |= syncStatusNoNewHead + lggr.Debugw(fmt.Sprintf("No new heads received for %s. 
Node stays out-of-sync due to sync issues: %s", noNewHeadsTimeoutThreshold, syncIssues)) + case latestFinalized, open := <-finalizedHeadsSub.Heads: + if !open { + lggr.Errorw("Finalized heads subscription channel unexpectedly closed") + n.declareUnreachable() + return + } + if !latestFinalized.IsValid() { + lggr.Warn("Latest finalized block is not valid") + continue + } + + receivedNewHead := n.onNewFinalizedHead(lggr, &localHighestChainInfo, latestFinalized) + if !receivedNewHead { + continue + } + + // on new finalized head remove NoNewFinalizedHead flag from the mask + syncIssues &= ^syncStatusNoNewFinalizedHead + if noNewFinalizedBlocksTimeoutThreshold > 0 { + finalizedHeadsSub.ResetTimer(noNewFinalizedBlocksTimeoutThreshold) + } + + lggr.Debugw(msgReceivedFinalizedBlock, "blockNumber", latestFinalized.BlockNumber(), "syncIssues", syncIssues) + case err := <-finalizedHeadsSub.Errors: + lggr.Errorw("Finalized head subscription was terminated", "err", err) n.declareUnreachable() return + case <-finalizedHeadsSub.NoNewHeads: + // we are not resetting the timer, as there is no need to add syncStatusNoNewFinalizedHead until it's removed on new finalized head. + syncIssues |= syncStatusNoNewFinalizedHead + lggr.Debugw(fmt.Sprintf("No new finalized heads received for %s. Node stays out-of-sync due to sync issues: %s", noNewFinalizedBlocksTimeoutThreshold, syncIssues)) } } } diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go index 863a15a1fad..833bccf7f29 100644 --- a/common/client/node_lifecycle_test.go +++ b/common/client/node_lifecycle_test.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "math/big" + "sync" "sync/atomic" "testing" @@ -49,7 +50,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { expectedError := errors.New("failed to subscribe to rpc") rpc.On("DisconnectAll").Once() - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(nil, expectedError).Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(nil, nil, expectedError).Once() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -74,7 +75,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { close(errChan) sub.On("Err").Return((<-chan error)(errChan)).Once() sub.On("Unsubscribe").Once() - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil).Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(nil, sub, nil).Once() rpc.On("SetAliveLoopSub", sub).Once() // disconnects all on transfer to unreachable rpc.On("DisconnectAll").Once() @@ -89,7 +90,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) sub.On("Unsubscribe").Once() - opts.rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil).Once() + opts.rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil) opts.rpc.On("SetAliveLoopSub", sub).Once() return newDialedNode(t, opts) } @@ -105,7 +106,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() node.declareAlive() - tests.AssertLogEventually(t, observedLogs, "Head liveness checking disabled") + tests.AssertLogEventually(t, observedLogs, "Subscription liveness checking disabled") tests.AssertLogEventually(t, observedLogs, "Polling disabled") assert.Equal(t, nodeStateAlive, node.State()) }) @@ -340,18 +341,21 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { 
tests.AssertLogEventually(t, observedLogs, fmt.Sprintf("RPC endpoint detected out of sync; %s %s", msgCannotDisable, msgDegradedState)) assert.Equal(t, nodeStateAlive, node.State()) }) - t.Run("rpc closed head channel", func(t *testing.T) { - t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) - rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + newSub := func(t *testing.T) *mocks.Subscription { sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) sub.On("Unsubscribe").Once() - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - ch := args.Get(1).(chan<- Head) + return sub + } + t.Run("rpc closed head channel", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + ch := make(chan Head) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { close(ch) - }).Return(sub, nil).Once() - rpc.On("SetAliveLoopSub", sub).Once() + }).Return((<-chan Head)(ch), newSub(t), nil).Once() + rpc.On("SetAliveLoopSub", mock.Anything).Once() + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) node := newDialedNode(t, testNodeOpts{ lggr: lggr, @@ -380,10 +384,10 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { const finalityDepth = 10 const expectedBlock = 990 rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - ch := args.Get(1).(chan<- Head) + ch := make(chan Head) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { go writeHeads(t, ch, head{BlockNumber: blockNumber - 1}, head{BlockNumber: blockNumber}, head{BlockNumber: blockNumber - 1}) - }).Return(sub, nil).Once() + }).Return((<-chan Head)(ch), sub, nil).Once() rpc.On("SetAliveLoopSub", sub).Once() name := "node-" + rand.Str(5) node := newDialedNode(t, testNodeOpts{ @@ -403,18 +407,13 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { return float64(expectedBlock) == m.Gauge.GetValue() }) }) - t.Run("Logs warning if failed to get finalized block", func(t *testing.T) { + t.Run("If fails to subscribe to latest finalized blocks, transitions to unreachable ", func(t *testing.T) { t.Parallel() rpc := newMockNodeClient[types.ID, Head](t) - rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() - rpc.On("LatestFinalizedBlock", mock.Anything).Return(newMockHead(t), errors.New("failed to get finalized block")) - sub := mocks.NewSubscription(t) - sub.On("Err").Return((<-chan error)(nil)) - sub.On("Unsubscribe").Once() - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil).Once() - rpc.On("SetAliveLoopSub", sub).Once() + expectedError := errors.New("failed to subscribe to finalized heads") + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return(nil, mocks.NewSubscription(t), expectedError).Once() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) - node := newDialedNode(t, testNodeOpts{ + node := newSubscribedNode(t, testNodeOpts{ config: testNodeConfig{ finalizedBlockPollInterval: tests.TestInterval, }, @@ -425,26 +424,31 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { lggr: lggr, }) defer func() { assert.NoError(t, node.close()) }() + // disconnects all on transfer to unreachable or outOfSync + rpc.On("DisconnectAll").Once() + // might be called in unreachable loop + rpc.On("Dial", mock.Anything).Return(errors.New("failed to 
dial")).Maybe() node.declareAlive() - tests.AssertLogEventually(t, observedLogs, "Failed to fetch latest finalized block") + tests.AssertLogEventually(t, observedLogs, "Failed to subscribe to finalized heads") + tests.AssertEventually(t, func() bool { + return nodeStateUnreachable == node.State() + }) }) t.Run("Logs warning if latest finalized block is not valid", func(t *testing.T) { t.Parallel() rpc := newMockNodeClient[types.ID, Head](t) + ch := make(chan Head, 1) head := newMockHead(t) head.On("IsValid").Return(false) - rpc.On("LatestFinalizedBlock", mock.Anything).Return(head, nil) + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Run(func(args mock.Arguments) { + ch <- head + }).Return((<-chan Head)(ch), newSub(t), nil).Once() rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() - sub := mocks.NewSubscription(t) - sub.On("Err").Return((<-chan error)(nil)) - sub.On("Unsubscribe").Once() - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil).Once() - rpc.On("SetAliveLoopSub", sub).Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), newSub(t), nil).Once() + rpc.On("SetAliveLoopSub", mock.Anything).Once() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ - config: testNodeConfig{ - finalizedBlockPollInterval: tests.TestInterval, - }, + config: testNodeConfig{}, chainConfig: clientMocks.ChainConfig{ IsFinalityTagEnabled: true, }, @@ -455,29 +459,17 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { node.declareAlive() tests.AssertLogEventually(t, observedLogs, "Latest finalized block is not valid") }) - t.Run("If finality tag and finalized block polling are enabled updates latest finalized block metric", func(t *testing.T) { + t.Run("On new finalized block updates corresponding metric", func(t *testing.T) { t.Parallel() rpc := newMockNodeClient[types.ID, Head](t) const expectedBlock = 1101 const finalityDepth = 10 - rpc.On("LatestFinalizedBlock", mock.Anything).Return(head{BlockNumber: expectedBlock - 1}.ToMockHead(t), nil).Once() - rpc.On("LatestFinalizedBlock", mock.Anything).Return(head{BlockNumber: expectedBlock}.ToMockHead(t), nil) - sub := mocks.NewSubscription(t) - sub.On("Err").Return((<-chan error)(nil)) - sub.On("Unsubscribe").Once() - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - ch := args.Get(1).(chan<- Head) - // ensure that "calculated" finalized head is larger than actual, to ensure we are correctly setting - // the metric - go writeHeads(t, ch, head{BlockNumber: expectedBlock*2 + finalityDepth}) - }).Return(sub, nil).Once() + ch := make(chan Head) + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return((<-chan Head)(ch), newSub(t), nil).Once() rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() - rpc.On("SetAliveLoopSub", sub).Once() name := "node-" + rand.Str(5) - node := newDialedNode(t, testNodeOpts{ - config: testNodeConfig{ - finalizedBlockPollInterval: tests.TestInterval, - }, + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{}, chainConfig: clientMocks.ChainConfig{ FinalityDepthVal: finalityDepth, IsFinalityTagEnabled: true, @@ -488,6 +480,12 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() node.declareAlive() + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + writeHeads(t, ch, head{BlockNumber: expectedBlock - 1}, head{BlockNumber: expectedBlock}, head{BlockNumber: 
expectedBlock - 1}) + }() tests.AssertEventually(t, func() bool { metric, err := promPoolRPCNodeHighestFinalizedBlock.GetMetricWithLabelValues(big.NewInt(1).String(), name) require.NoError(t, err) @@ -496,6 +494,123 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { return float64(expectedBlock) == m.Gauge.GetValue() }) }) + t.Run("If finalized heads channel is closed, transitions to unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + ch := make(chan Head) + close(ch) + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return((<-chan Head)(ch), newSub(t), nil).Once() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newSubscribedNode(t, testNodeOpts{ + chainConfig: clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + // disconnects all on transfer to unreachable or outOfSync + rpc.On("DisconnectAll").Once() + // might be called in unreachable loop + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + node.declareAlive() + tests.AssertLogEventually(t, observedLogs, "Finalized heads subscription channel unexpectedly closed") + tests.AssertEventually(t, func() bool { + return nodeStateUnreachable == node.State() + }) + }) + t.Run("when no new finalized heads received for threshold, transitions to out of sync", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + ch := make(chan Head, 1) + ch <- head{BlockNumber: 10}.ToMockHead(t) + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return((<-chan Head)(ch), newSub(t), nil).Once() + lggr, observed := logger.TestObserved(t, zap.DebugLevel) + noNewFinalizedHeadsThreshold := tests.TestInterval + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{}, + chainConfig: clientMocks.ChainConfig{ + NoNewFinalizedHeadsThresholdVal: noNewFinalizedHeadsThreshold, + IsFinalityTagEnabled: true, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + // tries to redial in outOfSync + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Run(func(_ mock.Arguments) { + assert.Equal(t, nodeStateOutOfSync, node.State()) + }).Once() + // disconnects all on transfer to unreachable or outOfSync + rpc.On("DisconnectAll").Maybe() + // might be called in unreachable loop + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + node.declareAlive() + tests.AssertLogEventually(t, observed, fmt.Sprintf("RPC's finalized state is out of sync; no new finalized heads received for %s (last finalized head received was 10)", noNewFinalizedHeadsThreshold)) + tests.AssertEventually(t, func() bool { + // right after outOfSync we'll transfer to unreachable due to returned error on Dial + // we check that we were in out of sync state on first Dial call + return node.State() == nodeStateUnreachable + }) + }) + t.Run("when no new finalized heads received for threshold but we are the last live node, forcibly stays alive", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return(make(<-chan Head), newSub(t), nil).Once() + lggr, observed := logger.TestObserved(t, zap.DebugLevel) 
+ noNewFinalizedHeadsThreshold := tests.TestInterval + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{}, + chainConfig: clientMocks.ChainConfig{ + NoNewFinalizedHeadsThresholdVal: noNewFinalizedHeadsThreshold, + IsFinalityTagEnabled: true, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + poolInfo := newMockPoolChainInfoProvider(t) + poolInfo.On("LatestChainInfo").Return(1, ChainInfo{ + BlockNumber: 20, + TotalDifficulty: big.NewInt(10), + }).Once() + node.SetPoolChainInfoProvider(poolInfo) + node.declareAlive() + tests.AssertLogEventually(t, observed, fmt.Sprintf("RPC's finalized state is out of sync; %s %s", msgCannotDisable, msgDegradedState)) + assert.Equal(t, nodeStateAlive, node.State()) + }) + t.Run("If finalized subscription returns an error, transitions to unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + rpc.On("DisconnectAll").Once() + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + sub := mocks.NewSubscription(t) + errCh := make(chan error, 1) + errCh <- errors.New("subscription failed") + sub.On("Err").Return((<-chan error)(errCh)) + sub.On("Unsubscribe").Once() + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return((<-chan Head)(nil), sub, nil).Once() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newSubscribedNode(t, testNodeOpts{ + chainConfig: clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + // disconnects all on transfer to unreachable or outOfSync + // might be called in unreachable loop + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + node.declareAlive() + tests.AssertLogEventually(t, observedLogs, "Finalized heads subscription was terminated") + tests.AssertEventually(t, func() bool { + return nodeStateUnreachable == node.State() + }) + }) } type head struct { @@ -525,9 +640,10 @@ func writeHeads(t *testing.T, ch chan<- Head, heads ...head) { func setupRPCForAliveLoop(t *testing.T, rpc *mockNodeClient[types.ID, Head]) { rpc.On("Dial", mock.Anything).Return(nil).Maybe() aliveSubscription := mocks.NewSubscription(t) - aliveSubscription.On("Err").Return((<-chan error)(nil)).Maybe() + aliveSubscription.On("Err").Return(nil).Maybe() aliveSubscription.On("Unsubscribe").Maybe() - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(aliveSubscription, nil).Maybe() + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), aliveSubscription, nil).Maybe() + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return(make(<-chan Head), aliveSubscription, nil).Maybe() rpc.On("SetAliveLoopSub", mock.Anything).Maybe() rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Maybe() } @@ -544,22 +660,18 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { return node } - stubIsOutOfSync := func(num int64, td *big.Int) bool { - return false - } - t.Run("returns on closed", func(t *testing.T) { t.Parallel() node := newTestNode(t, testNodeOpts{}) node.setState(nodeStateClosed) node.wg.Add(1) - node.outOfSyncLoop(stubIsOutOfSync) + node.outOfSyncLoop(syncStatusNotInSyncWithPool) }) t.Run("on old blocks stays outOfSync and returns on close", func(t *testing.T) { t.Parallel() rpc := newMockNodeClient[types.ID, Head](t) nodeChainID := types.RandomID() - lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + lggr := logger.Test(t) node := newAliveNode(t, 
testNodeOpts{ rpc: rpc, chainID: nodeChainID, @@ -569,21 +681,27 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{BlockNumber: 0}, ChainInfo{BlockNumber: 13}).Once() outOfSyncSubscription := mocks.NewSubscription(t) outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) outOfSyncSubscription.On("Unsubscribe").Once() heads := []head{{BlockNumber: 7}, {BlockNumber: 11}, {BlockNumber: 13}} - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - ch := args.Get(1).(chan<- Head) - go writeHeads(t, ch, heads...) - }).Return(outOfSyncSubscription, nil).Once() + ch := make(chan Head) + var wg sync.WaitGroup + wg.Add(1) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { + go func() { + defer wg.Done() + writeHeads(t, ch, heads...) + }() + }).Return((<-chan Head)(ch), outOfSyncSubscription, nil).Once() + rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() - node.declareOutOfSync(func(num int64, td *big.Int) bool { - return true - }) - tests.AssertLogCountEventually(t, observedLogs, msgReceivedBlock, len(heads)) + node.declareOutOfSync(syncStatusNoNewHead) + // wait until all heads are consumed + wg.Wait() assert.Equal(t, nodeStateOutOfSync, node.State()) }) t.Run("if initial dial fails, transitions to unreachable", func(t *testing.T) { @@ -597,7 +715,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { expectedError := errors.New("failed to dial rpc") // might be called again in unreachable loop, so no need to set once rpc.On("Dial", mock.Anything).Return(expectedError) - node.declareOutOfSync(stubIsOutOfSync) + node.declareOutOfSync(syncStatusNoNewHead) tests.AssertEventually(t, func() bool { return node.State() == nodeStateUnreachable }) @@ -617,7 +735,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { expectedError := errors.New("failed to get chain ID") // might be called multiple times rpc.On("ChainID", mock.Anything).Return(types.NewIDFromInt(0), expectedError) - node.declareOutOfSync(stubIsOutOfSync) + node.declareOutOfSync(syncStatusNoNewHead) tests.AssertEventually(t, func() bool { return node.State() == nodeStateUnreachable }) @@ -637,7 +755,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Twice() // might be called multiple times rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) - node.declareOutOfSync(stubIsOutOfSync) + node.declareOutOfSync(syncStatusNoNewHead) tests.AssertEventually(t, func() bool { return node.State() == nodeStateInvalidChainID }) @@ -657,7 +775,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) // might be called multiple times rpc.On("IsSyncing", mock.Anything).Return(true, nil) - node.declareOutOfSync(stubIsOutOfSync) + node.declareOutOfSync(syncStatusNoNewHead) tests.AssertEventually(t, func() bool { return node.State() == nodeStateSyncing }) @@ -680,7 +798,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() // might be called multiple times rpc.On("IsSyncing", mock.Anything).Return(false, errors.New("failed to check syncing")) - node.declareOutOfSync(stubIsOutOfSync) + node.declareOutOfSync(syncStatusNoNewHead) tests.AssertEventually(t, func() bool { return node.State() 
== nodeStateUnreachable }) @@ -698,9 +816,9 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() expectedError := errors.New("failed to subscribe") - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(nil, expectedError) + rpc.On("SubscribeToHeads", mock.Anything).Return(nil, nil, expectedError).Once() rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() - node.declareOutOfSync(stubIsOutOfSync) + node.declareOutOfSync(syncStatusNoNewHead) tests.AssertEventually(t, func() bool { return node.State() == nodeStateUnreachable }) @@ -719,15 +837,15 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() - + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() sub := mocks.NewSubscription(t) errChan := make(chan error, 1) errChan <- errors.New("subscription was terminate") sub.On("Err").Return((<-chan error)(errChan)) sub.On("Unsubscribe").Once() - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil).Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() - node.declareOutOfSync(stubIsOutOfSync) + node.declareOutOfSync(syncStatusNoNewHead) tests.AssertLogEventually(t, observedLogs, "Subscription was terminated") tests.AssertEventually(t, func() bool { return node.State() == nodeStateUnreachable @@ -747,22 +865,22 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) sub.On("Unsubscribe").Once() - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - ch := args.Get(1).(chan<- Head) + ch := make(chan Head) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { close(ch) - }).Return(sub, nil).Once() + }).Return((<-chan Head)(ch), sub, nil).Once() rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() - node.declareOutOfSync(stubIsOutOfSync) + node.declareOutOfSync(syncStatusNoNewHead) tests.AssertLogEventually(t, observedLogs, "Subscription channel unexpectedly closed") tests.AssertEventually(t, func() bool { return node.State() == nodeStateUnreachable }) }) - t.Run("becomes alive if it receives a newer head", func(t *testing.T) { t.Parallel() rpc := newMockNodeClient[types.ID, Head](t) @@ -782,17 +900,14 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) outOfSyncSubscription.On("Unsubscribe").Once() const highestBlock = 1000 - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - ch := args.Get(1).(chan<- Head) - go writeHeads(t, ch, head{BlockNumber: highestBlock - 1}, head{BlockNumber: highestBlock}) - }).Return(outOfSyncSubscription, nil).Once() + ch := make(chan Head) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { + go writeHeads(t, ch, head{BlockNumber: highestBlock - 1}, head{BlockNumber: highestBlock}, head{BlockNumber: highestBlock + 1}) + }).Return((<-chan Head)(ch), 
outOfSyncSubscription, nil).Once() rpc.On("GetInterceptedChainInfo").Return(ChainInfo{BlockNumber: highestBlock}, ChainInfo{BlockNumber: highestBlock}) - setupRPCForAliveLoop(t, rpc) - node.declareOutOfSync(func(num int64, td *big.Int) bool { - return num < highestBlock - }) + node.declareOutOfSync(syncStatusNoNewHead) tests.AssertLogEventually(t, observedLogs, msgReceivedBlock) tests.AssertLogEventually(t, observedLogs, msgInSync) tests.AssertEventually(t, func() bool { @@ -819,7 +934,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { TotalDifficulty: big.NewInt(200), }) node.SetPoolChainInfoProvider(poolInfo) - rpc.On("GetInterceptedChainInfo").Return(ChainInfo{BlockNumber: 0}, ChainInfo{BlockNumber: 0}) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}) rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() @@ -827,16 +942,225 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { outOfSyncSubscription := mocks.NewSubscription(t) outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) outOfSyncSubscription.On("Unsubscribe").Once() - rpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(outOfSyncSubscription, nil).Once() - + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), outOfSyncSubscription, nil).Once() setupRPCForAliveLoop(t, rpc) - node.declareOutOfSync(stubIsOutOfSync) + node.declareOutOfSync(syncStatusNoNewHead) tests.AssertLogEventually(t, observedLogs, "RPC endpoint is still out of sync, but there are no other available nodes. This RPC node will be forcibly moved back into the live pool in a degraded state") tests.AssertEventually(t, func() bool { return node.State() == nodeStateAlive }) }) + t.Run("Stays out-of-sync if received new head, but lags behind pool", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newAliveNode(t, testNodeOpts{ + chainConfig: clientMocks.ChainConfig{ + NoNewHeadsThresholdVal: tests.TestInterval, + }, + config: testNodeConfig{ + syncThreshold: 1, + selectionMode: NodeSelectionModeHighestHead, + }, + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + poolInfo := newMockPoolChainInfoProvider(t) + const highestBlock = 20 + poolInfo.On("LatestChainInfo").Return(1, ChainInfo{ + BlockNumber: highestBlock * 2, + TotalDifficulty: big.NewInt(200), + }) + node.SetPoolChainInfoProvider(poolInfo) + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{BlockNumber: highestBlock}) + + rpc.On("Dial", mock.Anything).Return(nil).Once() + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + + outOfSyncSubscription := mocks.NewSubscription(t) + outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) + outOfSyncSubscription.On("Unsubscribe").Once() + ch := make(chan Head) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { + go writeHeads(t, ch, head{BlockNumber: highestBlock - 1}, head{BlockNumber: highestBlock}, head{BlockNumber: highestBlock + 1}) + }).Return((<-chan Head)(ch), outOfSyncSubscription, nil).Once() + + node.declareOutOfSync(syncStatusNoNewHead) + tests.AssertLogEventually(t, observedLogs, msgReceivedBlock) + tests.AssertLogEventually(t, observedLogs, "No new heads received for") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateOutOfSync + }) + }) + + // 
creates RPC mock with all calls necessary to create heads subscription that won't produce any events + newRPCWithNoOpHeads := func(t *testing.T, chainID types.ID) *mockNodeClient[types.ID, Head] { + rpc := newMockNodeClient[types.ID, Head](t) + rpc.On("Dial", mock.Anything).Return(nil).Once() + rpc.On("ChainID", mock.Anything).Return(chainID, nil).Once() + sub := mocks.NewSubscription(t) + sub.On("Err").Return((<-chan error)(nil)) + sub.On("Unsubscribe").Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() + return rpc + } + + t.Run("if fails to subscribe to finalized, becomes unreachable", func(t *testing.T) { + t.Parallel() + nodeChainID := types.RandomID() + rpc := newRPCWithNoOpHeads(t, nodeChainID) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + chainConfig: clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + }, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return((<-chan Head)(nil), nil, errors.New("failed to subscribe")).Once() + // unreachable + rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() + + node.declareOutOfSync(syncStatusNoNewHead) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("on subscription termination becomes unreachable", func(t *testing.T) { + t.Parallel() + nodeChainID := types.RandomID() + rpc := newRPCWithNoOpHeads(t, nodeChainID) + lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + chainConfig: clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + }, + }) + defer func() { assert.NoError(t, node.close()) }() + + sub := mocks.NewSubscription(t) + errChan := make(chan error, 1) + errChan <- errors.New("subscription was terminate") + sub.On("Err").Return((<-chan error)(errChan)) + sub.On("Unsubscribe").Once() + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + // unreachable + rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() + node.declareOutOfSync(syncStatusNoNewHead) + tests.AssertLogEventually(t, observedLogs, "Finalized head subscription was terminated") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("becomes unreachable if head channel is closed", func(t *testing.T) { + t.Parallel() + nodeChainID := types.RandomID() + rpc := newRPCWithNoOpHeads(t, nodeChainID) + lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + chainConfig: clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + }, + }) + defer func() { assert.NoError(t, node.close()) }() + + sub := mocks.NewSubscription(t) + sub.On("Err").Return((<-chan error)(nil)) + sub.On("Unsubscribe").Once() + ch := make(chan Head) + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Run(func(args mock.Arguments) { + close(ch) + }).Return((<-chan Head)(ch), sub, nil).Once() + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() + // unreachable + rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() + node.declareOutOfSync(syncStatusNoNewHead) + tests.AssertLogEventually(t, observedLogs, "Finalized heads subscription channel unexpectedly 
closed") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("becomes alive on new finalized block", func(t *testing.T) { + t.Parallel() + nodeChainID := types.RandomID() + rpc := newRPCWithNoOpHeads(t, nodeChainID) + lggr := logger.Test(t) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + chainConfig: clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + NoNewFinalizedHeadsThresholdVal: tests.TestInterval, + }, + }) + defer func() { assert.NoError(t, node.close()) }() + + const highestBlock = 13 + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{FinalizedBlockNumber: highestBlock}).Once() + + outOfSyncSubscription := mocks.NewSubscription(t) + outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) + outOfSyncSubscription.On("Unsubscribe").Once() + ch := make(chan Head) + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return((<-chan Head)(ch), outOfSyncSubscription, nil).Once() + + setupRPCForAliveLoop(t, rpc) + + node.declareOutOfSync(syncStatusNoNewFinalizedHead) + heads := []head{{BlockNumber: highestBlock - 1}, {BlockNumber: highestBlock}} + writeHeads(t, ch, heads...) + assert.Equal(t, nodeStateOutOfSync, node.State()) + writeHeads(t, ch, head{BlockNumber: highestBlock + 1}) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateAlive + }) + }) + t.Run("adds finalized block is not increasing flag, if there is no new finalized heads for too long", func(t *testing.T) { + t.Parallel() + nodeChainID := types.RandomID() + rpc := newRPCWithNoOpHeads(t, nodeChainID) + lggr, observed := logger.TestObserved(t, zap.DebugLevel) + const noNewFinalizedHeads = tests.TestInterval + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + chainConfig: clientMocks.ChainConfig{ + IsFinalityTagEnabled: true, + NoNewFinalizedHeadsThresholdVal: noNewFinalizedHeads, + }, + }) + defer func() { assert.NoError(t, node.close()) }() + + const highestBlock = 13 + rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{FinalizedBlockNumber: highestBlock}).Once() + + outOfSyncSubscription := mocks.NewSubscription(t) + outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) + outOfSyncSubscription.On("Unsubscribe").Once() + ch := make(chan Head) + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return((<-chan Head)(ch), outOfSyncSubscription, nil).Once() + + node.declareOutOfSync(syncStatusNotInSyncWithPool) + heads := []head{{BlockNumber: highestBlock - 1}, {BlockNumber: highestBlock}} + writeHeads(t, ch, heads...) + assert.Equal(t, nodeStateOutOfSync, node.State()) + tests.AssertLogEventually(t, observed, fmt.Sprintf("No new finalized heads received for %s. 
Node stays "+ + "out-of-sync due to sync issues: NotInSyncWithRPCPool,NoNewFinalizedHead", noNewFinalizedHeads)) + }) } func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { @@ -1296,11 +1620,11 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }) } -func TestUnit_NodeLifecycle_syncStatus(t *testing.T) { +func TestUnit_NodeLifecycle_outOfSyncWithPool(t *testing.T) { t.Parallel() t.Run("skip if nLiveNodes is not configured", func(t *testing.T) { node := newTestNode(t, testNodeOpts{}) - outOfSync, liveNodes := node.syncStatus(0, nil) + outOfSync, liveNodes := node.isOutOfSyncWithPool(ChainInfo{}) assert.Equal(t, false, outOfSync) assert.Equal(t, 0, liveNodes) }) @@ -1308,7 +1632,7 @@ func TestUnit_NodeLifecycle_syncStatus(t *testing.T) { node := newTestNode(t, testNodeOpts{}) poolInfo := newMockPoolChainInfoProvider(t) node.SetPoolChainInfoProvider(poolInfo) - outOfSync, liveNodes := node.syncStatus(0, nil) + outOfSync, liveNodes := node.isOutOfSyncWithPool(ChainInfo{}) assert.Equal(t, false, outOfSync) assert.Equal(t, 0, liveNodes) }) @@ -1320,7 +1644,7 @@ func TestUnit_NodeLifecycle_syncStatus(t *testing.T) { poolInfo.On("LatestChainInfo").Return(1, ChainInfo{}).Once() node.SetPoolChainInfoProvider(poolInfo) assert.Panics(t, func() { - _, _ = node.syncStatus(0, nil) + _, _ = node.isOutOfSyncWithPool(ChainInfo{}) }) }) t.Run("block height selection mode", func(t *testing.T) { @@ -1371,7 +1695,7 @@ func TestUnit_NodeLifecycle_syncStatus(t *testing.T) { for _, td := range []int64{totalDifficulty - syncThreshold - 1, totalDifficulty - syncThreshold, totalDifficulty, totalDifficulty + 1} { for _, testCase := range testCases { t.Run(fmt.Sprintf("%s: SelectionModeVal: %s: total difficulty: %d", testCase.name, selectionMode, td), func(t *testing.T) { - outOfSync, liveNodes := node.syncStatus(testCase.blockNumber, big.NewInt(td)) + outOfSync, liveNodes := node.isOutOfSyncWithPool(ChainInfo{BlockNumber: testCase.blockNumber, TotalDifficulty: big.NewInt(td)}) assert.Equal(t, nodesNum, liveNodes) assert.Equal(t, testCase.outOfSync, outOfSync) }) @@ -1427,7 +1751,7 @@ func TestUnit_NodeLifecycle_syncStatus(t *testing.T) { for _, hb := range []int64{highestBlock - syncThreshold - 1, highestBlock - syncThreshold, highestBlock, highestBlock + 1} { for _, testCase := range testCases { t.Run(fmt.Sprintf("%s: SelectionModeVal: %s: highest block: %d", testCase.name, NodeSelectionModeTotalDifficulty, hb), func(t *testing.T) { - outOfSync, liveNodes := node.syncStatus(hb, big.NewInt(testCase.totalDifficulty)) + outOfSync, liveNodes := node.isOutOfSyncWithPool(ChainInfo{BlockNumber: hb, TotalDifficulty: big.NewInt(testCase.totalDifficulty)}) assert.Equal(t, nodesNum, liveNodes) assert.Equal(t, testCase.outOfSync, outOfSync) }) diff --git a/common/client/types.go b/common/client/types.go index b07f57eb8fb..c9b6a3580eb 100644 --- a/common/client/types.go +++ b/common/client/types.go @@ -68,7 +68,7 @@ type NodeClient[ SetAliveLoopSub(types.Subscription) UnsubscribeAllExceptAliveLoop() IsSyncing(ctx context.Context) (bool, error) - LatestFinalizedBlock(ctx context.Context) (HEAD, error) + SubscribeToFinalizedHeads(_ context.Context) (<-chan HEAD, types.Subscription, error) // GetInterceptedChainInfo - returns latest and highest observed by application layer ChainInfo. // latest ChainInfo is the most recent value received within a NodeClient's current lifecycle between Dial and DisconnectAll. // highestUserObservations ChainInfo is the highest ChainInfo observed excluding health checks calls. 
@@ -151,7 +151,9 @@ type connection[ ] interface { ChainID(ctx context.Context) (CHAIN_ID, error) Dial(ctx context.Context) error - SubscribeNewHead(ctx context.Context, channel chan<- HEAD) (types.Subscription, error) + SubscribeToHeads(ctx context.Context) (ch <-chan HEAD, sub types.Subscription, err error) + // TODO: remove as part of merge with BCI-2875 + SubscribeNewHead(ctx context.Context, channel chan<- HEAD) (s types.Subscription, err error) } // PoolChainInfoProvider - provides aggregation of nodes pool ChainInfo diff --git a/core/chains/evm/client/chain_client_test.go b/core/chains/evm/client/chain_client_test.go index 33955c16451..a0b89cabbc0 100644 --- a/core/chains/evm/client/chain_client_test.go +++ b/core/chains/evm/client/chain_client_test.go @@ -751,7 +751,7 @@ func newMockRpc(t *testing.T) *mocks.RPCClient { mockRpc.On("Close").Return(nil).Once() mockRpc.On("ChainID", mock.Anything).Return(testutils.FixtureChainID, nil).Once() // node does not always manage to fully setup aliveLoop, so we have to make calls optional to avoid flakes - mockRpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(client.NewMockSubscription(), nil).Maybe() + mockRpc.On("SubscribeToHeads", mock.Anything).Return(nil, client.NewMockSubscription(), nil).Maybe() mockRpc.On("SetAliveLoopSub", mock.Anything).Return().Maybe() return mockRpc } diff --git a/core/chains/evm/client/config_builder.go b/core/chains/evm/client/config_builder.go index 19e0f14fd67..fa702bac111 100644 --- a/core/chains/evm/client/config_builder.go +++ b/core/chains/evm/client/config_builder.go @@ -41,6 +41,8 @@ func NewClientConfigs( finalizedBlockOffset *uint32, enforceRepeatableRead *bool, deathDeclarationDelay time.Duration, + noNewFinalizedHeadsThreshold time.Duration, + finalizedBlockPollInterval time.Duration, ) (commonclient.ChainConfig, evmconfig.NodePool, []*toml.Node, error) { nodes, err := parseNodeConfigs(nodeCfgs) @@ -48,24 +50,26 @@ func NewClientConfigs( return nil, nil, nil, err } nodePool := toml.NodePool{ - SelectionMode: selectionMode, - LeaseDuration: commonconfig.MustNewDuration(leaseDuration), - PollFailureThreshold: pollFailureThreshold, - PollInterval: commonconfig.MustNewDuration(pollInterval), - SyncThreshold: syncThreshold, - NodeIsSyncingEnabled: nodeIsSyncingEnabled, - EnforceRepeatableRead: enforceRepeatableRead, - DeathDeclarationDelay: commonconfig.MustNewDuration(deathDeclarationDelay), + SelectionMode: selectionMode, + LeaseDuration: commonconfig.MustNewDuration(leaseDuration), + PollFailureThreshold: pollFailureThreshold, + PollInterval: commonconfig.MustNewDuration(pollInterval), + SyncThreshold: syncThreshold, + NodeIsSyncingEnabled: nodeIsSyncingEnabled, + EnforceRepeatableRead: enforceRepeatableRead, + DeathDeclarationDelay: commonconfig.MustNewDuration(deathDeclarationDelay), + FinalizedBlockPollInterval: commonconfig.MustNewDuration(finalizedBlockPollInterval), } nodePoolCfg := &evmconfig.NodePoolConfig{C: nodePool} chainConfig := &evmconfig.EVMConfig{ C: &toml.EVMConfig{ Chain: toml.Chain{ - ChainType: chaintype.NewChainTypeConfig(chainType), - FinalityDepth: finalityDepth, - FinalityTagEnabled: finalityTagEnabled, - NoNewHeadsThreshold: commonconfig.MustNewDuration(noNewHeadsThreshold), - FinalizedBlockOffset: finalizedBlockOffset, + ChainType: chaintype.NewChainTypeConfig(chainType), + FinalityDepth: finalityDepth, + FinalityTagEnabled: finalityTagEnabled, + NoNewHeadsThreshold: commonconfig.MustNewDuration(noNewHeadsThreshold), + FinalizedBlockOffset: finalizedBlockOffset, + 
NoNewFinalizedHeadsThreshold: commonconfig.MustNewDuration(noNewFinalizedHeadsThreshold), }, }, } diff --git a/core/chains/evm/client/config_builder_test.go b/core/chains/evm/client/config_builder_test.go index 7c08bf18c1d..403c6c2d619 100644 --- a/core/chains/evm/client/config_builder_test.go +++ b/core/chains/evm/client/config_builder_test.go @@ -26,6 +26,7 @@ func TestClientConfigBuilder(t *testing.T) { finalizedBlockOffset := ptr[uint32](16) enforceRepeatableRead := ptr(true) deathDeclarationDelay := time.Second * 3 + noNewFinalizedBlocksThreshold := time.Second nodeConfigs := []client.NodeConfig{ { Name: ptr("foo"), @@ -38,7 +39,7 @@ func TestClientConfigBuilder(t *testing.T) { noNewHeadsThreshold := time.Second chainCfg, nodePool, nodes, err := client.NewClientConfigs(selectionMode, leaseDuration, chainTypeStr, nodeConfigs, pollFailureThreshold, pollInterval, syncThreshold, nodeIsSyncingEnabled, noNewHeadsThreshold, finalityDepth, - finalityTagEnabled, finalizedBlockOffset, enforceRepeatableRead, deathDeclarationDelay) + finalityTagEnabled, finalizedBlockOffset, enforceRepeatableRead, deathDeclarationDelay, noNewFinalizedBlocksThreshold, pollInterval) require.NoError(t, err) // Validate node pool configs @@ -50,6 +51,7 @@ func TestClientConfigBuilder(t *testing.T) { require.Equal(t, *nodeIsSyncingEnabled, nodePool.NodeIsSyncingEnabled()) require.Equal(t, *enforceRepeatableRead, nodePool.EnforceRepeatableRead()) require.Equal(t, deathDeclarationDelay, nodePool.DeathDeclarationDelay()) + require.Equal(t, pollInterval, nodePool.FinalizedBlockPollInterval()) // Validate node configs require.Equal(t, *nodeConfigs[0].Name, *nodes[0].Name) @@ -61,6 +63,7 @@ func TestClientConfigBuilder(t *testing.T) { require.Equal(t, *finalityDepth, chainCfg.FinalityDepth()) require.Equal(t, *finalityTagEnabled, chainCfg.FinalityTagEnabled()) require.Equal(t, *finalizedBlockOffset, chainCfg.FinalizedBlockOffset()) + require.Equal(t, noNewFinalizedBlocksThreshold, chainCfg.NoNewFinalizedHeadsThreshold()) // let combiler tell us, when we do not have sufficient data to create evm client _ = client.NewEvmClient(nodePool, chainCfg, nil, logger.Test(t), big.NewInt(10), nodes, chaintype.ChainType(chainTypeStr)) diff --git a/core/chains/evm/client/evm_client.go b/core/chains/evm/client/evm_client.go index fd7fa5868a4..c2373ee775f 100644 --- a/core/chains/evm/client/evm_client.go +++ b/core/chains/evm/client/evm_client.go @@ -20,13 +20,13 @@ func NewEvmClient(cfg evmconfig.NodePool, chainCfg commonclient.ChainConfig, cli for i, node := range nodes { if node.SendOnly != nil && *node.SendOnly { rpc := NewRPCClient(lggr, empty, (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, - commonclient.Secondary) + commonclient.Secondary, cfg.FinalizedBlockPollInterval()) sendonly := commonclient.NewSendOnlyNode(lggr, (url.URL)(*node.HTTPURL), *node.Name, chainID, rpc) sendonlys = append(sendonlys, sendonly) } else { rpc := NewRPCClient(lggr, (url.URL)(*node.WSURL), (*url.URL)(node.HTTPURL), *node.Name, int32(i), - chainID, commonclient.Primary) + chainID, commonclient.Primary, cfg.FinalizedBlockPollInterval()) primaryNode := commonclient.NewNode(cfg, chainCfg, lggr, (url.URL)(*node.WSURL), (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, *node.Order, rpc, "EVM") diff --git a/core/chains/evm/client/evm_client_test.go b/core/chains/evm/client/evm_client_test.go index 9ad25f96025..bdfcf426744 100644 --- a/core/chains/evm/client/evm_client_test.go +++ b/core/chains/evm/client/evm_client_test.go @@ -27,6 +27,8 @@ func 
TestNewEvmClient(t *testing.T) { finalizedBlockOffset := ptr[uint32](16) enforceRepeatableRead := ptr(true) deathDeclarationDelay := time.Second * 3 + noNewFinalizedBlocksThreshold := time.Second * 5 + finalizedBlockPollInterval := time.Second * 4 nodeConfigs := []client.NodeConfig{ { Name: ptr("foo"), @@ -38,7 +40,7 @@ func TestNewEvmClient(t *testing.T) { finalityTagEnabled := ptr(true) chainCfg, nodePool, nodes, err := client.NewClientConfigs(selectionMode, leaseDuration, chainTypeStr, nodeConfigs, pollFailureThreshold, pollInterval, syncThreshold, nodeIsSyncingEnabled, noNewHeadsThreshold, finalityDepth, - finalityTagEnabled, finalizedBlockOffset, enforceRepeatableRead, deathDeclarationDelay) + finalityTagEnabled, finalizedBlockOffset, enforceRepeatableRead, deathDeclarationDelay, noNewFinalizedBlocksThreshold, finalizedBlockPollInterval) require.NoError(t, err) client := client.NewEvmClient(nodePool, chainCfg, nil, logger.Test(t), testutils.FixtureChainID, nodes, chaintype.ChainType(chainTypeStr)) diff --git a/core/chains/evm/client/helpers_test.go b/core/chains/evm/client/helpers_test.go index e1017a5564f..a2a55e17918 100644 --- a/core/chains/evm/client/helpers_test.go +++ b/core/chains/evm/client/helpers_test.go @@ -140,7 +140,7 @@ func NewChainClientWithTestNode( } lggr := logger.Test(t) - rpc := NewRPCClient(lggr, *parsed, rpcHTTPURL, "eth-primary-rpc-0", id, chainID, commonclient.Primary) + rpc := NewRPCClient(lggr, *parsed, rpcHTTPURL, "eth-primary-rpc-0", id, chainID, commonclient.Primary, 0) n := commonclient.NewNode[*big.Int, *evmtypes.Head, RPCClient]( nodeCfg, clientMocks.ChainConfig{NoNewHeadsThresholdVal: noNewHeadsThreshold}, lggr, *parsed, rpcHTTPURL, "eth-primary-node-0", id, chainID, 1, rpc, "EVM") @@ -152,7 +152,7 @@ func NewChainClientWithTestNode( return nil, pkgerrors.Errorf("sendonly ethereum rpc url scheme must be http(s): %s", u.String()) } var empty url.URL - rpc := NewRPCClient(lggr, empty, &sendonlyRPCURLs[i], fmt.Sprintf("eth-sendonly-rpc-%d", i), id, chainID, commonclient.Secondary) + rpc := NewRPCClient(lggr, empty, &sendonlyRPCURLs[i], fmt.Sprintf("eth-sendonly-rpc-%d", i), id, chainID, commonclient.Secondary, 0) s := commonclient.NewSendOnlyNode[*big.Int, RPCClient]( lggr, u, fmt.Sprintf("eth-sendonly-%d", i), chainID, rpc) sendonlys = append(sendonlys, s) diff --git a/core/chains/evm/client/mocks/rpc_client.go b/core/chains/evm/client/mocks/rpc_client.go index fa866af29e0..06f79efd551 100644 --- a/core/chains/evm/client/mocks/rpc_client.go +++ b/core/chains/evm/client/mocks/rpc_client.go @@ -1883,8 +1883,8 @@ func (_c *RPCClient_SubscribeNewHead_Call) Run(run func(ctx context.Context, cha return _c } -func (_c *RPCClient_SubscribeNewHead_Call) Return(_a0 commontypes.Subscription, _a1 error) *RPCClient_SubscribeNewHead_Call { - _c.Call.Return(_a0, _a1) +func (_c *RPCClient_SubscribeNewHead_Call) Return(s commontypes.Subscription, err error) *RPCClient_SubscribeNewHead_Call { + _c.Call.Return(s, err) return _c } @@ -1893,6 +1893,140 @@ func (_c *RPCClient_SubscribeNewHead_Call) RunAndReturn(run func(context.Context return _c } +// SubscribeToFinalizedHeads provides a mock function with given fields: _a0 +func (_m *RPCClient) SubscribeToFinalizedHeads(_a0 context.Context) (<-chan *types.Head, commontypes.Subscription, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for SubscribeToFinalizedHeads") + } + + var r0 <-chan *types.Head + var r1 commontypes.Subscription + var r2 error + if rf, ok := 
ret.Get(0).(func(context.Context) (<-chan *types.Head, commontypes.Subscription, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) <-chan *types.Head); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *types.Head) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) commontypes.Subscription); ok { + r1 = rf(_a0) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(commontypes.Subscription) + } + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(_a0) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// RPCClient_SubscribeToFinalizedHeads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeToFinalizedHeads' +type RPCClient_SubscribeToFinalizedHeads_Call struct { + *mock.Call +} + +// SubscribeToFinalizedHeads is a helper method to define mock.On call +// - _a0 context.Context +func (_e *RPCClient_Expecter) SubscribeToFinalizedHeads(_a0 interface{}) *RPCClient_SubscribeToFinalizedHeads_Call { + return &RPCClient_SubscribeToFinalizedHeads_Call{Call: _e.mock.On("SubscribeToFinalizedHeads", _a0)} +} + +func (_c *RPCClient_SubscribeToFinalizedHeads_Call) Run(run func(_a0 context.Context)) *RPCClient_SubscribeToFinalizedHeads_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *RPCClient_SubscribeToFinalizedHeads_Call) Return(_a0 <-chan *types.Head, _a1 commontypes.Subscription, _a2 error) *RPCClient_SubscribeToFinalizedHeads_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *RPCClient_SubscribeToFinalizedHeads_Call) RunAndReturn(run func(context.Context) (<-chan *types.Head, commontypes.Subscription, error)) *RPCClient_SubscribeToFinalizedHeads_Call { + _c.Call.Return(run) + return _c +} + +// SubscribeToHeads provides a mock function with given fields: ctx +func (_m *RPCClient) SubscribeToHeads(ctx context.Context) (<-chan *types.Head, commontypes.Subscription, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SubscribeToHeads") + } + + var r0 <-chan *types.Head + var r1 commontypes.Subscription + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (<-chan *types.Head, commontypes.Subscription, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) <-chan *types.Head); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *types.Head) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) commontypes.Subscription); ok { + r1 = rf(ctx) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(commontypes.Subscription) + } + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// RPCClient_SubscribeToHeads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeToHeads' +type RPCClient_SubscribeToHeads_Call struct { + *mock.Call +} + +// SubscribeToHeads is a helper method to define mock.On call +// - ctx context.Context +func (_e *RPCClient_Expecter) SubscribeToHeads(ctx interface{}) *RPCClient_SubscribeToHeads_Call { + return &RPCClient_SubscribeToHeads_Call{Call: _e.mock.On("SubscribeToHeads", ctx)} +} + +func (_c *RPCClient_SubscribeToHeads_Call) Run(run func(ctx context.Context)) *RPCClient_SubscribeToHeads_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + 
+func (_c *RPCClient_SubscribeToHeads_Call) Return(ch <-chan *types.Head, sub commontypes.Subscription, err error) *RPCClient_SubscribeToHeads_Call { + _c.Call.Return(ch, sub, err) + return _c +} + +func (_c *RPCClient_SubscribeToHeads_Call) RunAndReturn(run func(context.Context) (<-chan *types.Head, commontypes.Subscription, error)) *RPCClient_SubscribeToHeads_Call { + _c.Call.Return(run) + return _c +} + // SubscribersCount provides a mock function with given fields: func (_m *RPCClient) SubscribersCount() int32 { ret := _m.Called() diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index 9ab5fd135b4..1a0023227af 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -2,6 +2,7 @@ package client import ( "context" + "errors" "fmt" "math/big" "net/url" @@ -102,6 +103,8 @@ type RPCClient interface { GetInterceptedChainInfo() (latest, highestUserObservations commonclient.ChainInfo) } +const rpcSubscriptionMethodNewHeads = "newHeads" + type rawclient struct { rpc *rpc.Client geth *ethclient.Client @@ -109,11 +112,12 @@ type rawclient struct { } type rpcClient struct { - rpcLog logger.SugaredLogger - name string - id int32 - chainID *big.Int - tier commonclient.NodeTier + rpcLog logger.SugaredLogger + name string + id int32 + chainID *big.Int + tier commonclient.NodeTier + finalizedBlockPollInterval time.Duration ws rawclient http *rawclient @@ -147,6 +151,7 @@ func NewRPCClient( id int32, chainID *big.Int, tier commonclient.NodeTier, + finalizedBlockPollInterval time.Duration, ) RPCClient { r := new(rpcClient) r.name = name @@ -154,6 +159,7 @@ func NewRPCClient( r.chainID = chainID r.tier = tier r.ws.uri = wsuri + r.finalizedBlockPollInterval = finalizedBlockPollInterval if httpuri != nil { r.http = &rawclient{uri: *httpuri} } @@ -403,6 +409,7 @@ func (r *rpcClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) err return err } +// TODO: Full transition from SubscribeNewHead to SubscribeToHeads is done in BCI-2875 func (r *rpcClient) SubscribeNewHead(ctx context.Context, channel chan<- *evmtypes.Head) (_ commontypes.Subscription, err error) { ctx, cancel, chStopInFlight, ws, _ := r.acquireQueryCtx(ctx) defer cancel() @@ -434,6 +441,54 @@ func (r *rpcClient) SubscribeNewHead(ctx context.Context, channel chan<- *evmtyp return subForwarder, nil } +func (r *rpcClient) SubscribeToHeads(ctx context.Context) (ch <-chan *evmtypes.Head, sub commontypes.Subscription, err error) { + ctx, cancel, chStopInFlight, ws, _ := r.acquireQueryCtx(ctx) + defer cancel() + + args := []interface{}{rpcSubscriptionMethodNewHeads} + start := time.Now() + lggr := r.newRqLggr().With("args", args) + + lggr.Debug("RPC call: evmclient.Client#EthSubscribe") + defer func() { + duration := time.Since(start) + r.logResult(lggr, err, duration, r.getRPCDomain(), "EthSubscribe") + err = r.wrapWS(err) + }() + + channel := make(chan *evmtypes.Head) + forwarder := newSubForwarder(channel, func(head *evmtypes.Head) *evmtypes.Head { + head.EVMChainID = ubig.New(r.chainID) + r.onNewHead(ctx, chStopInFlight, head) + return head + }, r.wrapRPCClientError) + + err = forwarder.start(ws.rpc.EthSubscribe(ctx, forwarder.srcCh, args...)) + if err != nil { + return nil, nil, err + } + + err = r.registerSub(forwarder, chStopInFlight) + if err != nil { + return nil, nil, err + } + + return channel, forwarder, err +} + +func (r *rpcClient) SubscribeToFinalizedHeads(_ context.Context) (<-chan *evmtypes.Head, commontypes.Subscription, error) { + interval := 
r.finalizedBlockPollInterval + if interval == 0 { + return nil, nil, errors.New("FinalizedBlockPollInterval is 0") + } + timeout := interval + poller, channel := commonclient.NewPoller[*evmtypes.Head](interval, r.LatestFinalizedBlock, timeout, r.rpcLog) + if err := poller.Start(); err != nil { + return nil, nil, err + } + return channel, &poller, nil +} + // GethClient wrappers func (r *rpcClient) TransactionReceipt(ctx context.Context, txHash common.Hash) (receipt *evmtypes.Receipt, err error) { diff --git a/core/chains/evm/client/rpc_client_test.go b/core/chains/evm/client/rpc_client_test.go index 682c4352457..9b21aedbea8 100644 --- a/core/chains/evm/client/rpc_client_test.go +++ b/core/chains/evm/client/rpc_client_test.go @@ -56,7 +56,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { server := testutils.NewWSServer(t, chainId, serverCallBack) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0) defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) // set to default values @@ -106,7 +106,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { server := testutils.NewWSServer(t, chainId, serverCallBack) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0) defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) ch := make(chan *evmtypes.Head) @@ -129,7 +129,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { t.Run("Block's chain ID matched configured", func(t *testing.T) { server := testutils.NewWSServer(t, chainId, serverCallBack) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0) defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) ch := make(chan *evmtypes.Head) @@ -146,7 +146,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { }) wsURL := server.WSURL() observedLggr, observed := logger.TestObserved(t, zap.DebugLevel) - rpc := client.NewRPCClient(observedLggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(observedLggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0) require.NoError(t, rpc.Dial(ctx)) server.Close() _, err := rpc.SubscribeNewHead(ctx, make(chan *evmtypes.Head)) @@ -156,7 +156,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { t.Run("Subscription error is properly wrapper", func(t *testing.T) { server := testutils.NewWSServer(t, chainId, serverCallBack) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0) defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) sub, err := rpc.SubscribeNewHead(ctx, make(chan *evmtypes.Head)) @@ -184,7 +184,7 @@ func TestRPCClient_SubscribeFilterLogs(t *testing.T) { }) wsURL := server.WSURL() observedLggr, observed := logger.TestObserved(t, zap.DebugLevel) - rpc := client.NewRPCClient(observedLggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(observedLggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0) require.NoError(t, rpc.Dial(ctx)) server.Close() _, err := rpc.SubscribeFilterLogs(ctx, ethereum.FilterQuery{}, make(chan types.Log)) @@ 
-201,7 +201,7 @@ func TestRPCClient_SubscribeFilterLogs(t *testing.T) { return resp }) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary, 0) defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) sub, err := rpc.SubscribeFilterLogs(ctx, ethereum.FilterQuery{}, make(chan types.Log)) @@ -250,7 +250,7 @@ func TestRPCClient_LatestFinalizedBlock(t *testing.T) { } server := createRPCServer() - rpc := client.NewRPCClient(lggr, *server.URL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(lggr, *server.URL, nil, "rpc", 1, chainId, commonclient.Primary, 0) require.NoError(t, rpc.Dial(ctx)) defer rpc.Close() server.Head = &evmtypes.Head{Number: 128} diff --git a/core/chains/evm/config/chain_scoped.go b/core/chains/evm/config/chain_scoped.go index db598e3e82b..b9b19cdc2c0 100644 --- a/core/chains/evm/config/chain_scoped.go +++ b/core/chains/evm/config/chain_scoped.go @@ -183,3 +183,7 @@ func (e *EVMConfig) LogPrunePageSize() uint32 { func (e *EVMConfig) FinalizedBlockOffset() uint32 { return *e.C.FinalizedBlockOffset } + +func (e *EVMConfig) NoNewFinalizedHeadsThreshold() time.Duration { + return e.C.NoNewFinalizedHeadsThreshold.Duration() +} diff --git a/core/chains/evm/config/config.go b/core/chains/evm/config/config.go index ea0d52f5705..b0a5772f739 100644 --- a/core/chains/evm/config/config.go +++ b/core/chains/evm/config/config.go @@ -46,6 +46,7 @@ type EVM interface { RPCDefaultBatchSize() uint32 NodeNoNewHeadsThreshold() time.Duration FinalizedBlockOffset() uint32 + NoNewFinalizedHeadsThreshold() time.Duration IsEnabled() bool TOMLString() (string, error) diff --git a/core/chains/evm/config/toml/config.go b/core/chains/evm/config/toml/config.go index 3e35bb4b55c..2cb29d97696 100644 --- a/core/chains/evm/config/toml/config.go +++ b/core/chains/evm/config/toml/config.go @@ -338,27 +338,28 @@ func (c *EVMConfig) TOMLString() (string, error) { } type Chain struct { - AutoCreateKey *bool - BlockBackfillDepth *uint32 - BlockBackfillSkip *bool - ChainType *chaintype.ChainTypeConfig - FinalityDepth *uint32 - FinalityTagEnabled *bool - FlagsContractAddress *types.EIP55Address - LinkContractAddress *types.EIP55Address - LogBackfillBatchSize *uint32 - LogPollInterval *commonconfig.Duration - LogKeepBlocksDepth *uint32 - LogPrunePageSize *uint32 - BackupLogPollerBlockDelay *uint64 - MinIncomingConfirmations *uint32 - MinContractPayment *commonassets.Link - NonceAutoSync *bool - NoNewHeadsThreshold *commonconfig.Duration - OperatorFactoryAddress *types.EIP55Address - RPCDefaultBatchSize *uint32 - RPCBlockQueryDelay *uint16 - FinalizedBlockOffset *uint32 + AutoCreateKey *bool + BlockBackfillDepth *uint32 + BlockBackfillSkip *bool + ChainType *chaintype.ChainTypeConfig + FinalityDepth *uint32 + FinalityTagEnabled *bool + FlagsContractAddress *types.EIP55Address + LinkContractAddress *types.EIP55Address + LogBackfillBatchSize *uint32 + LogPollInterval *commonconfig.Duration + LogKeepBlocksDepth *uint32 + LogPrunePageSize *uint32 + BackupLogPollerBlockDelay *uint64 + MinIncomingConfirmations *uint32 + MinContractPayment *commonassets.Link + NonceAutoSync *bool + NoNewHeadsThreshold *commonconfig.Duration + OperatorFactoryAddress *types.EIP55Address + RPCDefaultBatchSize *uint32 + RPCBlockQueryDelay *uint16 + FinalizedBlockOffset *uint32 + NoNewFinalizedHeadsThreshold *commonconfig.Duration Transactions Transactions `toml:",omitempty"` 
BalanceMonitor BalanceMonitor `toml:",omitempty"` diff --git a/core/chains/evm/config/toml/defaults.go b/core/chains/evm/config/toml/defaults.go index 38eef40bf76..c3f087da8c5 100644 --- a/core/chains/evm/config/toml/defaults.go +++ b/core/chains/evm/config/toml/defaults.go @@ -165,6 +165,10 @@ func (c *Chain) SetFrom(f *Chain) { c.FinalizedBlockOffset = v } + if v := f.NoNewFinalizedHeadsThreshold; v != nil { + c.NoNewFinalizedHeadsThreshold = v + } + c.Transactions.setFrom(&f.Transactions) c.BalanceMonitor.setFrom(&f.BalanceMonitor) c.GasEstimator.setFrom(&f.GasEstimator) diff --git a/core/chains/evm/config/toml/defaults/Avalanche_Fuji.toml b/core/chains/evm/config/toml/defaults/Avalanche_Fuji.toml index d7cbad8157c..882a91f1acc 100644 --- a/core/chains/evm/config/toml/defaults/Avalanche_Fuji.toml +++ b/core/chains/evm/config/toml/defaults/Avalanche_Fuji.toml @@ -6,6 +6,7 @@ MinIncomingConfirmations = 1 NoNewHeadsThreshold = '30s' OCR.ContractConfirmations = 1 RPCBlockQueryDelay = 2 +NoNewFinalizedHeadsThreshold = '1m' [GasEstimator] PriceDefault = '25 gwei' diff --git a/core/chains/evm/config/toml/defaults/Avalanche_Mainnet.toml b/core/chains/evm/config/toml/defaults/Avalanche_Mainnet.toml index 95d4bf75460..78d3bbba77a 100644 --- a/core/chains/evm/config/toml/defaults/Avalanche_Mainnet.toml +++ b/core/chains/evm/config/toml/defaults/Avalanche_Mainnet.toml @@ -6,6 +6,7 @@ MinIncomingConfirmations = 1 NoNewHeadsThreshold = '30s' OCR.ContractConfirmations = 1 RPCBlockQueryDelay = 2 +NoNewFinalizedHeadsThreshold = '1m' [GasEstimator] PriceDefault = '25 gwei' diff --git a/core/chains/evm/config/toml/defaults/BSC_Mainnet.toml b/core/chains/evm/config/toml/defaults/BSC_Mainnet.toml index 384a798e32a..1b248a8c451 100644 --- a/core/chains/evm/config/toml/defaults/BSC_Mainnet.toml +++ b/core/chains/evm/config/toml/defaults/BSC_Mainnet.toml @@ -6,6 +6,7 @@ LinkContractAddress = '0x404460C6A5EdE2D891e8297795264fDe62ADBB75' LogPollInterval = '3s' NoNewHeadsThreshold = '30s' RPCBlockQueryDelay = 2 +NoNewFinalizedHeadsThreshold = '45s' [GasEstimator] PriceDefault = '5 gwei' diff --git a/core/chains/evm/config/toml/defaults/BSC_Testnet.toml b/core/chains/evm/config/toml/defaults/BSC_Testnet.toml index 364bae0c9f1..252f90accdd 100644 --- a/core/chains/evm/config/toml/defaults/BSC_Testnet.toml +++ b/core/chains/evm/config/toml/defaults/BSC_Testnet.toml @@ -6,6 +6,7 @@ LinkContractAddress = '0x84b9B910527Ad5C03A9Ca831909E21e236EA7b06' LogPollInterval = '3s' NoNewHeadsThreshold = '30s' RPCBlockQueryDelay = 2 +NoNewFinalizedHeadsThreshold = '40s' [GasEstimator] PriceDefault = '5 gwei' diff --git a/core/chains/evm/config/toml/defaults/Base_Mainnet.toml b/core/chains/evm/config/toml/defaults/Base_Mainnet.toml index 314c12f8c54..f0896fba414 100644 --- a/core/chains/evm/config/toml/defaults/Base_Mainnet.toml +++ b/core/chains/evm/config/toml/defaults/Base_Mainnet.toml @@ -4,6 +4,7 @@ FinalityDepth = 200 LogPollInterval = '2s' NoNewHeadsThreshold = '40s' MinIncomingConfirmations = 1 +NoNewFinalizedHeadsThreshold = '15m' [GasEstimator] EIP1559DynamicFees = true diff --git a/core/chains/evm/config/toml/defaults/Base_Sepolia.toml b/core/chains/evm/config/toml/defaults/Base_Sepolia.toml index 6458dda87f7..1fc0b51f1f3 100644 --- a/core/chains/evm/config/toml/defaults/Base_Sepolia.toml +++ b/core/chains/evm/config/toml/defaults/Base_Sepolia.toml @@ -4,6 +4,7 @@ FinalityDepth = 200 LogPollInterval = '2s' NoNewHeadsThreshold = '40s' MinIncomingConfirmations = 1 +NoNewFinalizedHeadsThreshold = '12m' [GasEstimator] 
EIP1559DynamicFees = true diff --git a/core/chains/evm/config/toml/defaults/Celo_Mainnet.toml b/core/chains/evm/config/toml/defaults/Celo_Mainnet.toml index b48cb25b325..a4948620370 100644 --- a/core/chains/evm/config/toml/defaults/Celo_Mainnet.toml +++ b/core/chains/evm/config/toml/defaults/Celo_Mainnet.toml @@ -5,6 +5,7 @@ LogPollInterval = '5s' MinIncomingConfirmations = 1 NoNewHeadsThreshold = '1m' OCR.ContractConfirmations = 1 +NoNewFinalizedHeadsThreshold = '1m' [GasEstimator] PriceDefault = '5 gwei' diff --git a/core/chains/evm/config/toml/defaults/Celo_Testnet.toml b/core/chains/evm/config/toml/defaults/Celo_Testnet.toml index d3f595baac6..eb43f080b7d 100644 --- a/core/chains/evm/config/toml/defaults/Celo_Testnet.toml +++ b/core/chains/evm/config/toml/defaults/Celo_Testnet.toml @@ -5,6 +5,7 @@ LogPollInterval = '5s' MinIncomingConfirmations = 1 NoNewHeadsThreshold = '1m' OCR.ContractConfirmations = 1 +NoNewFinalizedHeadsThreshold = '1m' [GasEstimator] PriceDefault = '5 gwei' diff --git a/core/chains/evm/config/toml/defaults/Ethereum_Mainnet.toml b/core/chains/evm/config/toml/defaults/Ethereum_Mainnet.toml index 2e65cce6330..20bb0d8e72a 100644 --- a/core/chains/evm/config/toml/defaults/Ethereum_Mainnet.toml +++ b/core/chains/evm/config/toml/defaults/Ethereum_Mainnet.toml @@ -2,6 +2,7 @@ ChainID = '1' LinkContractAddress = '0x514910771AF9Ca656af840dff83E8264EcF986CA' MinContractPayment = '0.1 link' OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' +NoNewFinalizedHeadsThreshold = '9m' [GasEstimator] EIP1559DynamicFees = true diff --git a/core/chains/evm/config/toml/defaults/Gnosis_Chiado.toml b/core/chains/evm/config/toml/defaults/Gnosis_Chiado.toml index 1b14da2b540..379377a2266 100644 --- a/core/chains/evm/config/toml/defaults/Gnosis_Chiado.toml +++ b/core/chains/evm/config/toml/defaults/Gnosis_Chiado.toml @@ -3,6 +3,7 @@ ChainID = '10200' FinalityDepth = 100 ChainType = 'gnosis' LogPollInterval = '5s' +NoNewFinalizedHeadsThreshold = '2m' [GasEstimator] EIP1559DynamicFees = true diff --git a/core/chains/evm/config/toml/defaults/Gnosis_Mainnet.toml b/core/chains/evm/config/toml/defaults/Gnosis_Mainnet.toml index 587f0083b70..628646364f5 100644 --- a/core/chains/evm/config/toml/defaults/Gnosis_Mainnet.toml +++ b/core/chains/evm/config/toml/defaults/Gnosis_Mainnet.toml @@ -9,6 +9,7 @@ ChainID = '100' ChainType = 'gnosis' LinkContractAddress = '0xE2e73A1c69ecF83F464EFCE6A5be353a37cA09b2' LogPollInterval = '5s' +NoNewFinalizedHeadsThreshold = '2m' [GasEstimator] PriceDefault = '1 gwei' diff --git a/core/chains/evm/config/toml/defaults/Optimism_Mainnet.toml b/core/chains/evm/config/toml/defaults/Optimism_Mainnet.toml index fd4dd9f32f0..3510aef7047 100644 --- a/core/chains/evm/config/toml/defaults/Optimism_Mainnet.toml +++ b/core/chains/evm/config/toml/defaults/Optimism_Mainnet.toml @@ -5,6 +5,7 @@ LinkContractAddress = '0x350a791Bfc2C21F9Ed5d10980Dad2e2638ffa7f6' LogPollInterval = '2s' NoNewHeadsThreshold = '40s' MinIncomingConfirmations = 1 +NoNewFinalizedHeadsThreshold = '13m' [GasEstimator] EIP1559DynamicFees = true diff --git a/core/chains/evm/config/toml/defaults/Optimism_Sepolia.toml b/core/chains/evm/config/toml/defaults/Optimism_Sepolia.toml index 116ae9d680b..8da575a5936 100644 --- a/core/chains/evm/config/toml/defaults/Optimism_Sepolia.toml +++ b/core/chains/evm/config/toml/defaults/Optimism_Sepolia.toml @@ -4,6 +4,7 @@ FinalityDepth = 200 LogPollInterval = '2s' NoNewHeadsThreshold = '40s' MinIncomingConfirmations = 1 +NoNewFinalizedHeadsThreshold = '15m' 
[GasEstimator] EIP1559DynamicFees = true diff --git a/core/chains/evm/config/toml/defaults/Polygon_Amoy.toml b/core/chains/evm/config/toml/defaults/Polygon_Amoy.toml index 6a1687fec48..77438343e29 100644 --- a/core/chains/evm/config/toml/defaults/Polygon_Amoy.toml +++ b/core/chains/evm/config/toml/defaults/Polygon_Amoy.toml @@ -5,6 +5,7 @@ MinIncomingConfirmations = 5 NoNewHeadsThreshold = '30s' RPCBlockQueryDelay = 10 RPCDefaultBatchSize = 100 +NoNewFinalizedHeadsThreshold = '12m' [Transactions] MaxQueued = 5000 diff --git a/core/chains/evm/config/toml/defaults/Polygon_Mainnet.toml b/core/chains/evm/config/toml/defaults/Polygon_Mainnet.toml index 50057a6893a..2a520563302 100644 --- a/core/chains/evm/config/toml/defaults/Polygon_Mainnet.toml +++ b/core/chains/evm/config/toml/defaults/Polygon_Mainnet.toml @@ -9,6 +9,7 @@ NoNewHeadsThreshold = '30s' # Must be set to something large here because Polygon has so many re-orgs that otherwise we are constantly refetching RPCBlockQueryDelay = 10 RPCDefaultBatchSize = 100 +NoNewFinalizedHeadsThreshold = '6m' [Transactions] # Matic nodes under high mempool pressure are liable to drop txes, we need to ensure we keep sending them diff --git a/core/chains/evm/config/toml/defaults/WeMix_Mainnet.toml b/core/chains/evm/config/toml/defaults/WeMix_Mainnet.toml index 35cd4a90a2f..7fcbd18890b 100644 --- a/core/chains/evm/config/toml/defaults/WeMix_Mainnet.toml +++ b/core/chains/evm/config/toml/defaults/WeMix_Mainnet.toml @@ -5,6 +5,7 @@ MinIncomingConfirmations = 1 # WeMix emits a block every 1 second, regardless of transactions LogPollInterval = '3s' NoNewHeadsThreshold = '30s' +NoNewFinalizedHeadsThreshold = '40s' [OCR] ContractConfirmations = 1 diff --git a/core/chains/evm/config/toml/defaults/WeMix_Testnet.toml b/core/chains/evm/config/toml/defaults/WeMix_Testnet.toml index 417718d87eb..83c483d0348 100644 --- a/core/chains/evm/config/toml/defaults/WeMix_Testnet.toml +++ b/core/chains/evm/config/toml/defaults/WeMix_Testnet.toml @@ -5,6 +5,7 @@ MinIncomingConfirmations = 1 # WeMix emits a block every 1 second, regardless of transactions LogPollInterval = '3s' NoNewHeadsThreshold = '30s' +NoNewFinalizedHeadsThreshold = '40s' [OCR] ContractConfirmations = 1 diff --git a/core/chains/evm/config/toml/defaults/fallback.toml b/core/chains/evm/config/toml/defaults/fallback.toml index a11e646e08b..a47e56bc918 100644 --- a/core/chains/evm/config/toml/defaults/fallback.toml +++ b/core/chains/evm/config/toml/defaults/fallback.toml @@ -15,6 +15,7 @@ NoNewHeadsThreshold = '3m' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0' [Transactions] ForwardersEnabled = false diff --git a/core/config/docs/chains-evm.toml b/core/config/docs/chains-evm.toml index 38c8cb8354f..460f6f6500a 100644 --- a/core/config/docs/chains-evm.toml +++ b/core/config/docs/chains-evm.toml @@ -97,6 +97,11 @@ RPCBlockQueryDelay = 1 # Default # Block 64 will be treated as finalized by CL Node only when chain's latest finalized block is 65. As chain finalizes blocks in batches of 32, # CL Node has to wait for a whole new batch to be finalized to treat block 64 as finalized. FinalizedBlockOffset = 0 # Default +# NoNewFinalizedHeadsThreshold controls how long to wait for a new finalized block before `NodePool` marks RPC endpoints as +# out-of-sync. Only applicable if `FinalityTagEnabled=true`. +# +# Set to zero to disable.
+NoNewFinalizedHeadsThreshold = '0' # Default [EVM.Transactions] # ForwardersEnabled enables or disables sending transactions through forwarder contracts. diff --git a/core/services/chainlink/config_test.go b/core/services/chainlink/config_test.go index c8cd5ec4790..ba182b8f606 100644 --- a/core/services/chainlink/config_test.go +++ b/core/services/chainlink/config_test.go @@ -553,19 +553,20 @@ func TestConfig_Marshal(t *testing.T) { }, }, - LinkContractAddress: mustAddress("0x538aAaB4ea120b2bC2fe5D296852D948F07D849e"), - LogBackfillBatchSize: ptr[uint32](17), - LogPollInterval: &minute, - LogKeepBlocksDepth: ptr[uint32](100000), - LogPrunePageSize: ptr[uint32](0), - BackupLogPollerBlockDelay: ptr[uint64](532), - MinContractPayment: commonassets.NewLinkFromJuels(math.MaxInt64), - MinIncomingConfirmations: ptr[uint32](13), - NonceAutoSync: ptr(true), - NoNewHeadsThreshold: &minute, - OperatorFactoryAddress: mustAddress("0xa5B85635Be42F21f94F28034B7DA440EeFF0F418"), - RPCDefaultBatchSize: ptr[uint32](17), - RPCBlockQueryDelay: ptr[uint16](10), + LinkContractAddress: mustAddress("0x538aAaB4ea120b2bC2fe5D296852D948F07D849e"), + LogBackfillBatchSize: ptr[uint32](17), + LogPollInterval: &minute, + LogKeepBlocksDepth: ptr[uint32](100000), + LogPrunePageSize: ptr[uint32](0), + BackupLogPollerBlockDelay: ptr[uint64](532), + MinContractPayment: commonassets.NewLinkFromJuels(math.MaxInt64), + MinIncomingConfirmations: ptr[uint32](13), + NonceAutoSync: ptr(true), + NoNewHeadsThreshold: &minute, + OperatorFactoryAddress: mustAddress("0xa5B85635Be42F21f94F28034B7DA440EeFF0F418"), + RPCDefaultBatchSize: ptr[uint32](17), + RPCBlockQueryDelay: ptr[uint16](10), + NoNewFinalizedHeadsThreshold: &hour, Transactions: evmcfg.Transactions{ MaxInFlight: ptr[uint32](19), @@ -996,6 +997,7 @@ OperatorFactoryAddress = '0xa5B85635Be42F21f94F28034B7DA440EeFF0F418' RPCDefaultBatchSize = 17 RPCBlockQueryDelay = 10 FinalizedBlockOffset = 16 +NoNewFinalizedHeadsThreshold = '1h0m0s' [EVM.Transactions] ForwardersEnabled = true diff --git a/core/services/chainlink/testdata/config-full.toml b/core/services/chainlink/testdata/config-full.toml index 78f52805dfe..21d68c23ada 100644 --- a/core/services/chainlink/testdata/config-full.toml +++ b/core/services/chainlink/testdata/config-full.toml @@ -290,6 +290,7 @@ OperatorFactoryAddress = '0xa5B85635Be42F21f94F28034B7DA440EeFF0F418' RPCDefaultBatchSize = 17 RPCBlockQueryDelay = 10 FinalizedBlockOffset = 16 +NoNewFinalizedHeadsThreshold = '1h0m0s' [EVM.Transactions] ForwardersEnabled = true diff --git a/core/services/chainlink/testdata/config-multi-chain-effective.toml b/core/services/chainlink/testdata/config-multi-chain-effective.toml index 61c5e3fa266..c56e755d360 100644 --- a/core/services/chainlink/testdata/config-multi-chain-effective.toml +++ b/core/services/chainlink/testdata/config-multi-chain-effective.toml @@ -277,6 +277,7 @@ OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 12 +NoNewFinalizedHeadsThreshold = '9m0s' [EVM.Transactions] ForwardersEnabled = false @@ -376,6 +377,7 @@ OperatorFactoryAddress = '0x8007e24251b1D2Fc518Eb843A701d9cD21fe0aA3' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [EVM.Transactions] ForwardersEnabled = false @@ -469,6 +471,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 10 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '6m0s' [EVM.Transactions] 
ForwardersEnabled = false diff --git a/core/web/resolver/testdata/config-full.toml b/core/web/resolver/testdata/config-full.toml index 3e083bd1844..1672eb1b41d 100644 --- a/core/web/resolver/testdata/config-full.toml +++ b/core/web/resolver/testdata/config-full.toml @@ -290,6 +290,7 @@ OperatorFactoryAddress = '0xa5B85635Be42F21f94F28034B7DA440EeFF0F418' RPCDefaultBatchSize = 17 RPCBlockQueryDelay = 10 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '15m0s' [EVM.Transactions] ForwardersEnabled = true diff --git a/core/web/resolver/testdata/config-multi-chain-effective.toml b/core/web/resolver/testdata/config-multi-chain-effective.toml index f391804b7cd..0e12af9a7e4 100644 --- a/core/web/resolver/testdata/config-multi-chain-effective.toml +++ b/core/web/resolver/testdata/config-multi-chain-effective.toml @@ -277,6 +277,7 @@ OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '9m0s' [EVM.Transactions] ForwardersEnabled = false @@ -376,6 +377,7 @@ OperatorFactoryAddress = '0x8007e24251b1D2Fc518Eb843A701d9cD21fe0aA3' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [EVM.Transactions] ForwardersEnabled = false @@ -469,6 +471,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 10 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '6m0s' [EVM.Transactions] ForwardersEnabled = false diff --git a/docs/CONFIG.md b/docs/CONFIG.md index 7a4d3ca62ca..0d670b3515b 100644 --- a/docs/CONFIG.md +++ b/docs/CONFIG.md @@ -1786,6 +1786,7 @@ OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '9m0s' [Transactions] ForwardersEnabled = false @@ -1879,6 +1880,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -1972,6 +1974,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -2065,6 +2068,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -2159,6 +2163,7 @@ NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '13m0s' [Transactions] ForwardersEnabled = false @@ -2252,6 +2257,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -2345,6 +2351,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -2439,6 +2446,7 @@ OperatorFactoryAddress = '0x8007e24251b1D2Fc518Eb843A701d9cD21fe0aA3' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -2532,6 +2540,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 2 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '45s' [Transactions] ForwardersEnabled 
= false @@ -2624,6 +2633,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -2716,6 +2726,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -2809,6 +2820,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 2 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '40s' [Transactions] ForwardersEnabled = false @@ -2903,6 +2915,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '2m0s' [Transactions] ForwardersEnabled = false @@ -2996,6 +3009,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 2 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -3089,6 +3103,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 10 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '6m0s' [Transactions] ForwardersEnabled = false @@ -3182,6 +3197,7 @@ NoNewHeadsThreshold = '12m0s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 15 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -3275,6 +3291,7 @@ NoNewHeadsThreshold = '6m0s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 15 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -3368,6 +3385,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 2 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -3461,6 +3479,7 @@ NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -3554,6 +3573,7 @@ NoNewHeadsThreshold = '1m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -3647,6 +3667,7 @@ NoNewHeadsThreshold = '1m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -3740,6 +3761,7 @@ NoNewHeadsThreshold = '1m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -3834,6 +3856,7 @@ NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -3927,6 +3950,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -4019,6 +4043,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -4112,6 +4137,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -4205,6 +4231,7 @@ NoNewHeadsThreshold = '6m0s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 
15 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -4298,6 +4325,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '40s' [Transactions] ForwardersEnabled = false @@ -4391,6 +4419,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '40s' [Transactions] ForwardersEnabled = false @@ -4483,6 +4512,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -4576,6 +4606,7 @@ NoNewHeadsThreshold = '12m0s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -4669,6 +4700,7 @@ NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -4762,6 +4794,7 @@ NoNewHeadsThreshold = '12m0s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -4855,6 +4888,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 2 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -4947,6 +4981,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -5040,6 +5075,7 @@ NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '15m0s' [Transactions] ForwardersEnabled = false @@ -5133,6 +5169,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '2m0s' [Transactions] ForwardersEnabled = false @@ -5227,6 +5264,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -5320,6 +5358,7 @@ NoNewHeadsThreshold = '1m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '1m0s' [Transactions] ForwardersEnabled = false @@ -5413,6 +5452,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 2 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '1m0s' [Transactions] ForwardersEnabled = false @@ -5506,6 +5546,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 2 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '1m0s' [Transactions] ForwardersEnabled = false @@ -5599,6 +5640,7 @@ NoNewHeadsThreshold = '1m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '1m0s' [Transactions] ForwardersEnabled = false @@ -5691,6 +5733,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -5783,6 +5826,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = 
false @@ -5875,6 +5919,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -5968,6 +6013,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -6061,6 +6107,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 10 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -6153,6 +6200,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 100 RPCBlockQueryDelay = 10 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '12m0s' [Transactions] ForwardersEnabled = false @@ -6246,6 +6294,7 @@ NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -6339,6 +6388,7 @@ NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '12m0s' [Transactions] ForwardersEnabled = false @@ -6433,6 +6483,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -6527,6 +6578,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -6620,6 +6672,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -6713,6 +6766,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -6806,6 +6860,7 @@ NoNewHeadsThreshold = '0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -6899,6 +6954,7 @@ NoNewHeadsThreshold = '3m0s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -6992,6 +7048,7 @@ NoNewHeadsThreshold = '40s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '15m0s' [Transactions] ForwardersEnabled = false @@ -7085,6 +7142,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -7178,6 +7236,7 @@ NoNewHeadsThreshold = '30s' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '0s' [Transactions] ForwardersEnabled = false @@ -7438,6 +7497,15 @@ The latest finalized block on chain is 64, so block 63 is the latest finalized f Block 64 will be treated as finalized by CL Node only when chain's latest finalized block is 65. As chain finalizes blocks in batches of 32, CL Node has to wait for a whole new batch to be finalized to treat block 64 as finalized. 
+### NoNewFinalizedHeadsThreshold +```toml +NoNewFinalizedHeadsThreshold = '0' # Default +``` +NoNewFinalizedHeadsThreshold controls how long to wait for a new finalized block before `NodePool` marks RPC endpoints as +out-of-sync. Only applicable if `FinalityTagEnabled=true`. + +Set to zero to disable. + ## EVM.Transactions ```toml [EVM.Transactions] diff --git a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar index 327e84c51bb..56ce1ea7ba8 100644 --- a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar +++ b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar @@ -333,6 +333,7 @@ OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '9m0s' [EVM.Transactions] ForwardersEnabled = false diff --git a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar index 724b59e52d3..e534c67a2f3 100644 --- a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar +++ b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar @@ -333,6 +333,7 @@ OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '9m0s' [EVM.Transactions] ForwardersEnabled = false diff --git a/testdata/scripts/node/validate/disk-based-logging.txtar b/testdata/scripts/node/validate/disk-based-logging.txtar index e0eefcba85b..29bc189e561 100644 --- a/testdata/scripts/node/validate/disk-based-logging.txtar +++ b/testdata/scripts/node/validate/disk-based-logging.txtar @@ -333,6 +333,7 @@ OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '9m0s' [EVM.Transactions] ForwardersEnabled = false diff --git a/testdata/scripts/node/validate/invalid.txtar b/testdata/scripts/node/validate/invalid.txtar index 1955e919da3..60c42c7c399 100644 --- a/testdata/scripts/node/validate/invalid.txtar +++ b/testdata/scripts/node/validate/invalid.txtar @@ -323,6 +323,7 @@ OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '9m0s' [EVM.Transactions] ForwardersEnabled = false diff --git a/testdata/scripts/node/validate/valid.txtar b/testdata/scripts/node/validate/valid.txtar index 3ba20f6f9d6..719bb8bcc47 100644 --- a/testdata/scripts/node/validate/valid.txtar +++ b/testdata/scripts/node/validate/valid.txtar @@ -330,6 +330,7 @@ OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' RPCDefaultBatchSize = 250 RPCBlockQueryDelay = 1 FinalizedBlockOffset = 0 +NoNewFinalizedHeadsThreshold = '9m0s' [EVM.Transactions] ForwardersEnabled = false
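
For orientation, below is a minimal, self-contained Go sketch of the behaviour that the `NoNewFinalizedHeadsThreshold` docs above describe: if no new finalized head arrives within the configured threshold, the RPC endpoint is treated as out-of-sync, and a zero threshold disables the check. The type and method names (`finalizedHeadWatcher`, `OnFinalizedHead`, `OutOfSync`) are hypothetical illustrations, not the `common/client/node_lifecycle.go` implementation.

```go
// Illustrative sketch only: tracks when the last finalized head was seen and
// reports out-of-sync once the threshold elapses. A zero threshold disables
// the check, mirroring the documented default NoNewFinalizedHeadsThreshold = '0'.
package main

import (
	"fmt"
	"sync"
	"time"
)

type finalizedHeadWatcher struct {
	mu        sync.Mutex
	threshold time.Duration // NoNewFinalizedHeadsThreshold; 0 disables the check
	lastSeen  time.Time     // when the most recent finalized head was observed
}

func newFinalizedHeadWatcher(threshold time.Duration) *finalizedHeadWatcher {
	return &finalizedHeadWatcher{threshold: threshold, lastSeen: time.Now()}
}

// OnFinalizedHead records that the RPC delivered a new finalized head.
func (w *finalizedHeadWatcher) OnFinalizedHead() {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.lastSeen = time.Now()
}

// OutOfSync reports whether the threshold elapsed without a new finalized head.
func (w *finalizedHeadWatcher) OutOfSync() bool {
	if w.threshold == 0 {
		return false // check disabled
	}
	w.mu.Lock()
	defer w.mu.Unlock()
	return time.Since(w.lastSeen) > w.threshold
}

func main() {
	w := newFinalizedHeadWatcher(50 * time.Millisecond)
	w.OnFinalizedHead()
	time.Sleep(80 * time.Millisecond)          // no finalized heads for longer than the threshold
	fmt.Println("out of sync:", w.OutOfSync()) // prints: out of sync: true
}
```

In the actual node lifecycle this health check runs alongside the existing `NoNewHeadsThreshold` liveness checks and only applies when `FinalityTagEnabled = true`; the sketch omits that wiring and the subscription plumbing.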