From 86a5d583e4371e5bc8ea0189993c40afd373467d Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Mon, 13 May 2024 09:44:11 -0400 Subject: [PATCH 01/58] Update node --- common/client/node.go | 80 ++++++++++++++++++++------------------- common/client/node_fsm.go | 15 +++++--- common/client/types.go | 3 +- 3 files changed, 52 insertions(+), 46 deletions(-) diff --git a/common/client/node.go b/common/client/node.go index 6450b086f10..d1b74a8d1b2 100644 --- a/common/client/node.go +++ b/common/client/node.go @@ -57,24 +57,39 @@ type ChainConfig interface { ChainType() commonconfig.ChainType } +// ChainInfo - represents RPC’s view of the chain +type ChainInfo struct { + // BlockNumber - block number of the most recent block observed by the Node + BlockNumber int64 + // BlockDifficulty - difficulty of the most recent block observed by the Node + BlockDifficulty *big.Int + // LatestFinalizedBlock - block number of the most recently finalized block + LatestFinalizedBlock int64 +} + //go:generate mockery --quiet --name Node --structname mockNode --filename "mock_node_test.go" --inpackage --case=underscore type Node[ CHAIN_ID types.ID, HEAD Head, - RPC NodeClient[CHAIN_ID, HEAD], + RPC_CLIENT RPCClient[CHAIN_ID, HEAD], ] interface { - // State returns nodeState + // State returns health state of the underlying RPC State() nodeState - // StateAndLatest returns nodeState with the latest received block number & total difficulty. - StateAndLatest() (nodeState, int64, *big.Int) + // StateAndLatest returns health state with the latest received block number & total difficulty. + StateAndLatest() (nodeState, ChainInfo) // Name is a unique identifier for this node. Name() string + // String - returns string representation of the node, useful for debugging (name + URLS used to connect to the RPC) String() string - RPC() RPC - SubscribersCount() int32 - UnsubscribeAllExceptAliveLoop() + // RPC - returns the underlying RPC_CLIENT + RPC() RPC_CLIENT + // UnsubscribeAll - terminates all client subscriptions. Called by MultiNode to trigger clients to resubscribe to + // new best RPC + UnsubscribeAll() ConfiguredChainID() CHAIN_ID + // Order - returns priority order configured for the RPC Order() int32 + // Start - starts health checks Start(context.Context) error Close() error } @@ -82,7 +97,7 @@ type Node[ type node[ CHAIN_ID types.ID, HEAD Head, - RPC NodeClient[CHAIN_ID, HEAD], + RPC_CLIENT RPCClient[CHAIN_ID, HEAD], ] struct { services.StateMachine lfcLog logger.Logger @@ -97,7 +112,7 @@ type node[ ws url.URL http *url.URL - rpc RPC + rpc RPC_CLIENT stateMu sync.RWMutex // protects state* fields state nodeState @@ -123,7 +138,7 @@ type node[ func NewNode[ CHAIN_ID types.ID, HEAD Head, - RPC NodeClient[CHAIN_ID, HEAD], + RPC_CLIENT RPCClient[CHAIN_ID, HEAD], ]( nodeCfg NodeConfig, chainCfg ChainConfig, @@ -134,10 +149,10 @@ func NewNode[ id int32, chainID CHAIN_ID, nodeOrder int32, - rpc RPC, + rpc RPC_CLIENT, chainFamily string, -) Node[CHAIN_ID, HEAD, RPC] { - n := new(node[CHAIN_ID, HEAD, RPC]) +) Node[CHAIN_ID, HEAD, RPC_CLIENT] { + n := new(node[CHAIN_ID, HEAD, RPC_CLIENT]) n.name = name n.id = id n.chainID = chainID @@ -164,7 +179,7 @@ func NewNode[ return n } -func (n *node[CHAIN_ID, HEAD, RPC]) String() string { +func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) String() string { s := fmt.Sprintf("(%s)%s:%s", Primary.String(), n.name, n.ws.String()) if n.http != nil { s = s + fmt.Sprintf(":%s", n.http.String()) @@ -172,31 +187,27 @@ func (n *node[CHAIN_ID, HEAD, RPC]) String() string { return s } -func (n *node[CHAIN_ID, HEAD, RPC]) ConfiguredChainID() (chainID CHAIN_ID) { +func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) ConfiguredChainID() (chainID CHAIN_ID) { return n.chainID } -func (n *node[CHAIN_ID, HEAD, RPC]) Name() string { +func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) Name() string { return n.name } -func (n *node[CHAIN_ID, HEAD, RPC]) RPC() RPC { +func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) RPC() RPC_CLIENT { return n.rpc } -func (n *node[CHAIN_ID, HEAD, RPC]) SubscribersCount() int32 { - return n.rpc.SubscribersCount() -} - -func (n *node[CHAIN_ID, HEAD, RPC]) UnsubscribeAllExceptAliveLoop() { - n.rpc.UnsubscribeAllExceptAliveLoop() +func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) UnsubscribeAll() { + n.rpc.UnsubscribeAllExcept() } -func (n *node[CHAIN_ID, HEAD, RPC]) Close() error { +func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) Close() error { return n.StopOnce(n.name, n.close) } -func (n *node[CHAIN_ID, HEAD, RPC]) close() error { +func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) close() error { defer func() { n.wg.Wait() n.rpc.Close() @@ -214,7 +225,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) close() error { // Should only be called once in a node's lifecycle // Return value is necessary to conform to interface but this will never // actually return an error. -func (n *node[CHAIN_ID, HEAD, RPC]) Start(startCtx context.Context) error { +func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) Start(startCtx context.Context) error { return n.StartOnce(n.name, func() error { n.start(startCtx) return nil @@ -226,7 +237,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) Start(startCtx context.Context) error { // Not thread-safe. // Node lifecycle is synchronous: only one goroutine should be running at a // time. -func (n *node[CHAIN_ID, HEAD, RPC]) start(startCtx context.Context) { +func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) start(startCtx context.Context) { if n.state != nodeStateUndialed { panic(fmt.Sprintf("cannot dial node with state %v", n.state)) } @@ -245,7 +256,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) start(startCtx context.Context) { // verifyChainID checks that connection to the node matches the given chain ID // Not thread-safe // Pure verifyChainID: does not mutate node "state" field. -func (n *node[CHAIN_ID, HEAD, RPC]) verifyChainID(callerCtx context.Context, lggr logger.Logger) nodeState { +func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) verifyChainID(callerCtx context.Context, lggr logger.Logger) nodeState { promPoolRPCNodeVerifies.WithLabelValues(n.chainFamily, n.chainID.String(), n.name).Inc() promFailed := func() { promPoolRPCNodeVerifiesFailed.WithLabelValues(n.chainFamily, n.chainID.String(), n.name).Inc() @@ -288,7 +299,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) verifyChainID(callerCtx context.Context, lgg // createVerifiedConn - establishes new connection with the RPC and verifies that it's valid: chainID matches, and it's not syncing. // Returns desired state if one of the verifications fails. Otherwise, returns nodeStateAlive. -func (n *node[CHAIN_ID, HEAD, RPC]) createVerifiedConn(ctx context.Context, lggr logger.Logger) nodeState { +func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) createVerifiedConn(ctx context.Context, lggr logger.Logger) nodeState { if err := n.rpc.Dial(ctx); err != nil { n.lfcLog.Errorw("Dial failed: Node is unreachable", "err", err, "nodeState", n.State()) return nodeStateUnreachable @@ -299,7 +310,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) createVerifiedConn(ctx context.Context, lggr // verifyConn - verifies that current connection is valid: chainID matches, and it's not syncing. // Returns desired state if one of the verifications fails. Otherwise, returns nodeStateAlive. -func (n *node[CHAIN_ID, HEAD, RPC]) verifyConn(ctx context.Context, lggr logger.Logger) nodeState { +func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) verifyConn(ctx context.Context, lggr logger.Logger) nodeState { state := n.verifyChainID(ctx, lggr) if state != nodeStateAlive { return state @@ -321,13 +332,6 @@ func (n *node[CHAIN_ID, HEAD, RPC]) verifyConn(ctx context.Context, lggr logger. return nodeStateAlive } -// disconnectAll disconnects all clients connected to the node -// WARNING: NOT THREAD-SAFE -// This must be called from within the n.stateMu lock -func (n *node[CHAIN_ID, HEAD, RPC]) disconnectAll() { - n.rpc.DisconnectAll() -} - -func (n *node[CHAIN_ID, HEAD, RPC]) Order() int32 { +func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) Order() int32 { return n.order } diff --git a/common/client/node_fsm.go b/common/client/node_fsm.go index e9105dcc060..a98db7d60b9 100644 --- a/common/client/node_fsm.go +++ b/common/client/node_fsm.go @@ -120,10 +120,13 @@ func (n *node[CHAIN_ID, HEAD, RPC]) State() nodeState { return n.state } -func (n *node[CHAIN_ID, HEAD, RPC]) StateAndLatest() (nodeState, int64, *big.Int) { +func (n *node[CHAIN_ID, HEAD, RPC]) StateAndLatest() (nodeState, ChainInfo) { n.stateMu.RLock() defer n.stateMu.RUnlock() - return n.state, n.stateLatestBlockNumber, n.stateLatestTotalDifficulty + return n.state, ChainInfo{ + BlockNumber: n.stateLatestBlockNumber, + BlockDifficulty: n.stateLatestTotalDifficulty, + LatestFinalizedBlock: n.stateLatestFinalizedBlockNumber} } // setState is only used by internal state management methods. @@ -209,7 +212,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToOutOfSync(fn func()) { } switch n.state { case nodeStateAlive: - n.disconnectAll() + n.UnsubscribeAll() n.state = nodeStateOutOfSync default: panic(transitionFail(n.state, nodeStateOutOfSync)) @@ -234,7 +237,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToUnreachable(fn func()) { } switch n.state { case nodeStateUndialed, nodeStateDialed, nodeStateAlive, nodeStateOutOfSync, nodeStateInvalidChainID, nodeStateSyncing: - n.disconnectAll() + n.UnsubscribeAll() n.state = nodeStateUnreachable default: panic(transitionFail(n.state, nodeStateUnreachable)) @@ -277,7 +280,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToInvalidChainID(fn func()) { } switch n.state { case nodeStateDialed, nodeStateOutOfSync, nodeStateSyncing: - n.disconnectAll() + n.UnsubscribeAll() n.state = nodeStateInvalidChainID default: panic(transitionFail(n.state, nodeStateInvalidChainID)) @@ -302,7 +305,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToSyncing(fn func()) { } switch n.state { case nodeStateDialed, nodeStateOutOfSync, nodeStateInvalidChainID: - n.disconnectAll() + n.UnsubscribeAll() n.state = nodeStateSyncing default: panic(transitionFail(n.state, nodeStateSyncing)) diff --git a/common/client/types.go b/common/client/types.go index 0857fa4d869..2184ec27cac 100644 --- a/common/client/types.go +++ b/common/client/types.go @@ -2,7 +2,6 @@ package client import ( "context" - evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" "math/big" "github.com/smartcontractkit/chainlink-common/pkg/assets" @@ -16,7 +15,7 @@ import ( //go:generate mockery --quiet --name RPCClient --output ./mocks --case=underscore type RPCClient[ CHAIN_ID types.ID, - HEAD *evmtypes.Head, + HEAD Head, ] interface { // ChainID - fetches ChainID from the RPC to verify that it matches config ChainID(ctx context.Context) (CHAIN_ID, error) From 281f93fe70d3718962fe3c45243996195f68c80e Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Tue, 14 May 2024 14:33:06 -0400 Subject: [PATCH 02/58] Update Multinode --- common/client/multi_node.go | 686 +++--------------- common/client/multi_node_test.go | 34 +- common/client/node_lifecycle.go | 53 +- common/client/node_selector.go | 4 +- common/client/node_selector_highest_head.go | 7 +- .../client/node_selector_highest_head_test.go | 4 +- common/client/node_selector_priority_level.go | 12 +- common/client/node_selector_round_robin.go | 4 +- .../client/node_selector_total_difficulty.go | 8 +- core/chains/evm/client/chain_client.go | 193 +++-- 10 files changed, 297 insertions(+), 708 deletions(-) diff --git a/common/client/multi_node.go b/common/client/multi_node.go index fa413df91aa..7547ec4a05f 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -3,22 +3,17 @@ package client import ( "context" "fmt" - "math" "math/big" - "slices" "sync" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/smartcontractkit/chainlink-common/pkg/assets" "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/services" "github.com/smartcontractkit/chainlink-common/pkg/utils" - "github.com/smartcontractkit/chainlink/v2/common/config" - feetypes "github.com/smartcontractkit/chainlink/v2/common/fee/types" "github.com/smartcontractkit/chainlink/v2/common/types" ) @@ -26,76 +21,45 @@ var ( // PromMultiNodeRPCNodeStates reports current RPC node state PromMultiNodeRPCNodeStates = promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "multi_node_states", - Help: "The number of RPC nodes currently in the given state for the given chain", + Help: "The number of RPC primaryNodes currently in the given state for the given chain", }, []string{"network", "chainId", "state"}) // PromMultiNodeInvariantViolations reports violation of our assumptions PromMultiNodeInvariantViolations = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "multi_node_invariant_violations", Help: "The number of invariant violations", }, []string{"network", "chainId", "invariant"}) - ErroringNodeError = fmt.Errorf("no live nodes available") + ErroringNodeError = fmt.Errorf("no live primaryNodes available") ) // MultiNode is a generalized multi node client interface that includes methods to interact with different chains. // It also handles multiple node RPC connections simultaneously. type MultiNode[ CHAIN_ID types.ID, - SEQ types.Sequence, - ADDR types.Hashable, BLOCK_HASH types.Hashable, - TX any, - TX_HASH types.Hashable, - EVENT any, - EVENT_OPS any, - TX_RECEIPT types.Receipt[TX_HASH, BLOCK_HASH], - FEE feetypes.Fee, HEAD types.Head[BLOCK_HASH], - RPC_CLIENT RPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM], - BATCH_ELEM any, + RPC_CLIENT RPCClient[CHAIN_ID, HEAD], ] interface { - clientAPI[ - CHAIN_ID, - SEQ, - ADDR, - BLOCK_HASH, - TX, - TX_HASH, - EVENT, - EVENT_OPS, - TX_RECEIPT, - FEE, - HEAD, - BATCH_ELEM, - ] + // SelectRPC - returns the best healthy RPCClient + SelectRPC() (RPC_CLIENT, error) + // DoAll - calls `do` sequentially on all healthy RPCClients. + // `do` can abort subsequent calls by returning `false`. + // Returns error if `do` was not called or context returns an error. + DoAll(ctx context.Context, do func(ctx context.Context, rpc RPC_CLIENT, isSendOnly bool) bool) error + // NodeStates - returns RPCs' states + NodeStates() map[string]nodeState Close() error - NodeStates() map[string]string - SelectNodeRPC() (RPC_CLIENT, error) - - BatchCallContextAll(ctx context.Context, b []BATCH_ELEM) error - ConfiguredChainID() CHAIN_ID - IsL2() bool } type multiNode[ CHAIN_ID types.ID, - SEQ types.Sequence, - ADDR types.Hashable, BLOCK_HASH types.Hashable, - TX any, - TX_HASH types.Hashable, - EVENT any, - EVENT_OPS any, - TX_RECEIPT types.Receipt[TX_HASH, BLOCK_HASH], - FEE feetypes.Fee, HEAD types.Head[BLOCK_HASH], - RPC_CLIENT RPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM], - BATCH_ELEM any, + RPC_CLIENT RPCClient[CHAIN_ID, HEAD], ] struct { services.StateMachine - nodes []Node[CHAIN_ID, HEAD, RPC_CLIENT] - sendonlys []SendOnlyNode[CHAIN_ID, RPC_CLIENT] + primaryNodes []Node[CHAIN_ID, HEAD, RPC_CLIENT] + sendOnlyNodes []Node[CHAIN_ID, HEAD, RPC_CLIENT] chainID CHAIN_ID - chainType config.ChainType lggr logger.SugaredLogger selectionMode string noNewHeadsThreshold time.Duration @@ -104,66 +68,44 @@ type multiNode[ leaseTicker *time.Ticker chainFamily string reportInterval time.Duration - sendTxSoftTimeout time.Duration // defines max waiting time from first response til responses evaluation activeMu sync.RWMutex activeNode Node[CHAIN_ID, HEAD, RPC_CLIENT] chStop services.StopChan wg sync.WaitGroup - - classifySendTxError func(tx TX, err error) SendTxReturnCode } func NewMultiNode[ CHAIN_ID types.ID, - SEQ types.Sequence, - ADDR types.Hashable, BLOCK_HASH types.Hashable, - TX any, - TX_HASH types.Hashable, - EVENT any, - EVENT_OPS any, - TX_RECEIPT types.Receipt[TX_HASH, BLOCK_HASH], - FEE feetypes.Fee, HEAD types.Head[BLOCK_HASH], - RPC_CLIENT RPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM], - BATCH_ELEM any, + RPC_CLIENT RPCClient[CHAIN_ID, HEAD], ]( lggr logger.Logger, - selectionMode string, - leaseDuration time.Duration, - noNewHeadsThreshold time.Duration, - nodes []Node[CHAIN_ID, HEAD, RPC_CLIENT], - sendonlys []SendOnlyNode[CHAIN_ID, RPC_CLIENT], - chainID CHAIN_ID, - chainType config.ChainType, - chainFamily string, - classifySendTxError func(tx TX, err error) SendTxReturnCode, - sendTxSoftTimeout time.Duration, -) MultiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM] { - nodeSelector := newNodeSelector(selectionMode, nodes) + selectionMode string, // type of the "best" RPC selector (e.g HighestHead, RoundRobin, etc.) + leaseDuration time.Duration, // defines interval on which new "best" RPC should be selected + primaryNodes []Node[CHAIN_ID, HEAD, RPC_CLIENT], + sendOnlyNodes []Node[CHAIN_ID, HEAD, RPC_CLIENT], + chainID CHAIN_ID, // configured chain ID (used to verify that passed primaryNodes belong to the same chain) + chainFamily string, // name of the chain family - used in the metrics +) MultiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT] { + // TODO: does node selector only need primary nodes, or all nodes? + nodeSelector := newNodeSelector(selectionMode, primaryNodes) // Prometheus' default interval is 15s, set this to under 7.5s to avoid // aliasing (see: https://en.wikipedia.org/wiki/Nyquist_frequency) const reportInterval = 6500 * time.Millisecond - if sendTxSoftTimeout == 0 { - sendTxSoftTimeout = QueryTimeout / 2 - } - c := &multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]{ - nodes: nodes, - sendonlys: sendonlys, - chainID: chainID, - chainType: chainType, - lggr: logger.Sugared(lggr).Named("MultiNode").With("chainID", chainID.String()), - selectionMode: selectionMode, - noNewHeadsThreshold: noNewHeadsThreshold, - nodeSelector: nodeSelector, - chStop: make(services.StopChan), - leaseDuration: leaseDuration, - chainFamily: chainFamily, - classifySendTxError: classifySendTxError, - reportInterval: reportInterval, - sendTxSoftTimeout: sendTxSoftTimeout, + c := &multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]{ + primaryNodes: primaryNodes, + sendOnlyNodes: sendOnlyNodes, + chainID: chainID, + lggr: logger.Sugared(lggr).Named("MultiNode").With("chainID", chainID.String()), + selectionMode: selectionMode, + nodeSelector: nodeSelector, + chStop: make(services.StopChan), + leaseDuration: leaseDuration, + chainFamily: chainFamily, + reportInterval: reportInterval, } c.lggr.Debugf("The MultiNode is configured to use NodeSelectionMode: %s", selectionMode) @@ -171,17 +113,54 @@ func NewMultiNode[ return c } +func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) DoAll(ctx context.Context, do func(ctx context.Context, rpc RPC_CLIENT, isSendOnly bool) bool) error { + runDo := func(nodes []Node[CHAIN_ID, HEAD, RPC_CLIENT], isSendOnly bool) error { + for _, n := range nodes { + if ctx.Err() != nil { + return ctx.Err() + } + if n.State() == nodeStateAlive { + if !do(ctx, n.RPC(), isSendOnly) { + if ctx.Err() != nil { + return ctx.Err() + } + return fmt.Errorf("do aborted on node %s", n.String()) + } + } + } + return nil + } + + if err := runDo(c.primaryNodes, false); err != nil { + return err + } + if err := runDo(c.sendOnlyNodes, true); err != nil { + return err + } + return nil +} + +func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) NodeStates() map[string]nodeState { + states := map[string]nodeState{} + allNodes := append(c.primaryNodes, c.sendOnlyNodes...) + for _, n := range allNodes { + states[n.String()] = n.State() + } + return states +} + // Dial starts every node in the pool // // Nodes handle their own redialing and runloops, so this function does not // return any error if the nodes aren't available -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) Dial(ctx context.Context) error { +// TODO: Remove Dial() from MultiNode? Who will start the nodes? +func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) Dial(ctx context.Context) error { return c.StartOnce("MultiNode", func() (merr error) { - if len(c.nodes) == 0 { + if len(c.primaryNodes) == 0 { return fmt.Errorf("no available nodes for chain %s", c.chainID.String()) } var ms services.MultiStart - for _, n := range c.nodes { + for _, n := range c.primaryNodes { if n.ConfiguredChainID().String() != c.chainID.String() { return ms.CloseBecause(fmt.Errorf("node %s has configured chain ID %s which does not match multinode configured chain ID of %s", n.String(), n.ConfiguredChainID().String(), c.chainID.String())) } @@ -189,8 +168,8 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP if ok { // This is a bit hacky but it allows the node to be aware of // pool state and prevent certain state transitions that might - // otherwise leave no nodes available. It is better to have one - // node in a degraded state than no nodes at all. + // otherwise leave no primaryNodes available. It is better to have one + // node in a degraded state than no primaryNodes at all. rawNode.nLiveNodes = c.nLiveNodes } // node will handle its own redialing and automatic recovery @@ -198,7 +177,7 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP return err } } - for _, s := range c.sendonlys { + for _, s := range c.sendOnlyNodes { if s.ConfiguredChainID().String() != c.chainID.String() { return ms.CloseBecause(fmt.Errorf("sendonly node %s has configured chain ID %s which does not match multinode configured chain ID of %s", s.String(), s.ConfiguredChainID().String(), c.chainID.String())) } @@ -222,28 +201,27 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP } // Close tears down the MultiNode and closes all nodes -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) Close() error { +func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) Close() error { return c.StopOnce("MultiNode", func() error { close(c.chStop) c.wg.Wait() - return services.CloseAll(services.MultiCloser(c.nodes), services.MultiCloser(c.sendonlys)) + return services.CloseAll(services.MultiCloser(c.primaryNodes), services.MultiCloser(c.sendOnlyNodes)) }) } -// SelectNodeRPC returns an RPC of an active node. If there are no active nodes it returns an error. +// SelectRPC returns an RPC of an active node. If there are no active nodes it returns an error. // Call this method from your chain-specific client implementation to access any chain-specific rpc calls. -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) SelectNodeRPC() (rpc RPC_CLIENT, err error) { +func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) SelectRPC() (rpc RPC_CLIENT, err error) { n, err := c.selectNode() if err != nil { return rpc, err } return n.RPC(), nil - } // selectNode returns the active Node, if it is still nodeStateAlive, otherwise it selects a new one from the NodeSelector. -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) selectNode() (node Node[CHAIN_ID, HEAD, RPC_CLIENT], err error) { +func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) selectNode() (node Node[CHAIN_ID, HEAD, RPC_CLIENT], err error) { c.activeMu.RLock() node = c.activeNode c.activeMu.RUnlock() @@ -262,8 +240,8 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP c.activeNode = c.nodeSelector.Select() if c.activeNode == nil { - c.lggr.Criticalw("No live RPC nodes available", "NodeSelectionMode", c.nodeSelector.Name()) - errmsg := fmt.Errorf("no live nodes available for chain %s", c.chainID.String()) + c.lggr.Criticalw("No live RPC primaryNodes available", "NodeSelectionMode", c.nodeSelector.Name()) + errmsg := fmt.Errorf("no live primaryNodes available for chain %s", c.chainID.String()) c.SvcErrBuffer.Append(errmsg) err = ErroringNodeError } @@ -273,30 +251,30 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP // nLiveNodes returns the number of currently alive nodes, as well as the highest block number and greatest total difficulty. // totalDifficulty will be 0 if all nodes return nil. -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) nLiveNodes() (nLiveNodes int, blockNumber int64, totalDifficulty *big.Int) { +func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) nLiveNodes() (nLiveNodes int, blockNumber int64, totalDifficulty *big.Int) { totalDifficulty = big.NewInt(0) - for _, n := range c.nodes { - if s, num, td := n.StateAndLatest(); s == nodeStateAlive { + for _, n := range c.primaryNodes { + if s, chainInfo := n.StateAndLatest(); s == nodeStateAlive { nLiveNodes++ - if num > blockNumber { - blockNumber = num + if chainInfo.BlockNumber > blockNumber { + blockNumber = chainInfo.BlockNumber } - if td != nil && td.Cmp(totalDifficulty) > 0 { - totalDifficulty = td + if chainInfo.BlockDifficulty != nil && chainInfo.BlockDifficulty.Cmp(totalDifficulty) > 0 { + totalDifficulty = chainInfo.BlockDifficulty } } } return } -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) checkLease() { +func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) checkLease() { bestNode := c.nodeSelector.Select() - for _, n := range c.nodes { + for _, n := range c.primaryNodes { // Terminate client subscriptions. Services are responsible for reconnecting, which will be routed to the new // best node. Only terminate connections with more than 1 subscription to account for the aliveLoop subscription - if n.State() == nodeStateAlive && n != bestNode && n.SubscribersCount() > 1 { + if n.State() == nodeStateAlive && n != bestNode { c.lggr.Infof("Switching to best node from %q to %q", n.String(), bestNode.String()) - n.UnsubscribeAllExceptAliveLoop() + n.UnsubscribeAll() } } @@ -307,7 +285,7 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP c.activeMu.Unlock() } -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) checkLeaseLoop() { +func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) checkLeaseLoop() { defer c.wg.Done() c.leaseTicker = time.NewTicker(c.leaseDuration) defer c.leaseTicker.Stop() @@ -322,7 +300,7 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP } } -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) runLoop() { +func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) runLoop() { defer c.wg.Done() c.report() @@ -340,7 +318,7 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP } } -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) report() { +func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) report() { type nodeWithState struct { Node string State string @@ -348,8 +326,8 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP var total, dead int counts := make(map[nodeState]int) - nodeStates := make([]nodeWithState, len(c.nodes)) - for i, n := range c.nodes { + nodeStates := make([]nodeWithState, len(c.primaryNodes)) + for i, n := range c.primaryNodes { state := n.State() nodeStates[i] = nodeWithState{n.String(), state.String()} total++ @@ -373,465 +351,3 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP c.lggr.Errorw(fmt.Sprintf("At least one primary node is dead: %d/%d nodes are alive", live, total), "nodeStates", nodeStates) } } - -// ClientAPI methods -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) BalanceAt(ctx context.Context, account ADDR, blockNumber *big.Int) (*big.Int, error) { - n, err := c.selectNode() - if err != nil { - return nil, err - } - return n.RPC().BalanceAt(ctx, account, blockNumber) -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) BatchCallContext(ctx context.Context, b []BATCH_ELEM) error { - n, err := c.selectNode() - if err != nil { - return err - } - return n.RPC().BatchCallContext(ctx, b) -} - -// BatchCallContextAll calls BatchCallContext for every single node including -// sendonlys. -// CAUTION: This should only be used for mass re-transmitting transactions, it -// might have unexpected effects to use it for anything else. -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) BatchCallContextAll(ctx context.Context, b []BATCH_ELEM) error { - var wg sync.WaitGroup - defer wg.Wait() - - main, selectionErr := c.selectNode() - var all []SendOnlyNode[CHAIN_ID, RPC_CLIENT] - for _, n := range c.nodes { - all = append(all, n) - } - all = append(all, c.sendonlys...) - for _, n := range all { - if n == main { - // main node is used at the end for the return value - continue - } - - if n.State() != nodeStateAlive { - continue - } - // Parallel call made to all other nodes with ignored return value - wg.Add(1) - go func(n SendOnlyNode[CHAIN_ID, RPC_CLIENT]) { - defer wg.Done() - err := n.RPC().BatchCallContext(ctx, b) - if err != nil { - c.lggr.Debugw("Secondary node BatchCallContext failed", "err", err) - } else { - c.lggr.Trace("Secondary node BatchCallContext success") - } - }(n) - } - - if selectionErr != nil { - return selectionErr - } - return main.RPC().BatchCallContext(ctx, b) -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) BlockByHash(ctx context.Context, hash BLOCK_HASH) (h HEAD, err error) { - n, err := c.selectNode() - if err != nil { - return h, err - } - return n.RPC().BlockByHash(ctx, hash) -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) BlockByNumber(ctx context.Context, number *big.Int) (h HEAD, err error) { - n, err := c.selectNode() - if err != nil { - return h, err - } - return n.RPC().BlockByNumber(ctx, number) -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { - n, err := c.selectNode() - if err != nil { - return err - } - return n.RPC().CallContext(ctx, result, method, args...) -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) CallContract( - ctx context.Context, - attempt interface{}, - blockNumber *big.Int, -) (rpcErr []byte, extractErr error) { - n, err := c.selectNode() - if err != nil { - return rpcErr, err - } - return n.RPC().CallContract(ctx, attempt, blockNumber) -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) PendingCallContract( - ctx context.Context, - attempt interface{}, -) (rpcErr []byte, extractErr error) { - n, err := c.selectNode() - if err != nil { - return rpcErr, err - } - return n.RPC().PendingCallContract(ctx, attempt) -} - -// ChainID makes a direct RPC call. In most cases it should be better to use the configured chain id instead by -// calling ConfiguredChainID. -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) ChainID(ctx context.Context) (id CHAIN_ID, err error) { - n, err := c.selectNode() - if err != nil { - return id, err - } - return n.RPC().ChainID(ctx) -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) ChainType() config.ChainType { - return c.chainType -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) CodeAt(ctx context.Context, account ADDR, blockNumber *big.Int) (code []byte, err error) { - n, err := c.selectNode() - if err != nil { - return code, err - } - return n.RPC().CodeAt(ctx, account, blockNumber) -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) ConfiguredChainID() CHAIN_ID { - return c.chainID -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) EstimateGas(ctx context.Context, call any) (gas uint64, err error) { - n, err := c.selectNode() - if err != nil { - return gas, err - } - return n.RPC().EstimateGas(ctx, call) -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) FilterEvents(ctx context.Context, query EVENT_OPS) (e []EVENT, err error) { - n, err := c.selectNode() - if err != nil { - return e, err - } - return n.RPC().FilterEvents(ctx, query) -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) IsL2() bool { - return c.ChainType().IsL2() -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) LatestBlockHeight(ctx context.Context) (h *big.Int, err error) { - n, err := c.selectNode() - if err != nil { - return h, err - } - return n.RPC().LatestBlockHeight(ctx) -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) LINKBalance(ctx context.Context, accountAddress ADDR, linkAddress ADDR) (b *assets.Link, err error) { - n, err := c.selectNode() - if err != nil { - return b, err - } - return n.RPC().LINKBalance(ctx, accountAddress, linkAddress) -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) NodeStates() (states map[string]string) { - states = make(map[string]string) - for _, n := range c.nodes { - states[n.Name()] = n.State().String() - } - for _, s := range c.sendonlys { - states[s.Name()] = s.State().String() - } - return -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) PendingSequenceAt(ctx context.Context, addr ADDR) (s SEQ, err error) { - n, err := c.selectNode() - if err != nil { - return s, err - } - return n.RPC().PendingSequenceAt(ctx, addr) -} - -type sendTxErrors map[SendTxReturnCode][]error - -// String - returns string representation of the errors map. Required by logger to properly represent the value -func (errs sendTxErrors) String() string { - return fmt.Sprint(map[SendTxReturnCode][]error(errs)) -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) SendEmptyTransaction( - ctx context.Context, - newTxAttempt func(seq SEQ, feeLimit uint32, fee FEE, fromAddress ADDR) (attempt any, err error), - seq SEQ, - gasLimit uint32, - fee FEE, - fromAddress ADDR, -) (txhash string, err error) { - n, err := c.selectNode() - if err != nil { - return txhash, err - } - return n.RPC().SendEmptyTransaction(ctx, newTxAttempt, seq, gasLimit, fee, fromAddress) -} - -type sendTxResult struct { - Err error - ResultCode SendTxReturnCode -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) broadcastTxAsync(ctx context.Context, - n SendOnlyNode[CHAIN_ID, RPC_CLIENT], tx TX) sendTxResult { - txErr := n.RPC().SendTransaction(ctx, tx) - c.lggr.Debugw("Node sent transaction", "name", n.String(), "tx", tx, "err", txErr) - resultCode := c.classifySendTxError(tx, txErr) - if !slices.Contains(sendTxSuccessfulCodes, resultCode) { - c.lggr.Warnw("RPC returned error", "name", n.String(), "tx", tx, "err", txErr) - } - - return sendTxResult{Err: txErr, ResultCode: resultCode} -} - -// collectTxResults - refer to SendTransaction comment for implementation details, -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) collectTxResults(ctx context.Context, tx TX, healthyNodesNum int, txResults <-chan sendTxResult) error { - if healthyNodesNum == 0 { - return ErroringNodeError - } - // combine context and stop channel to ensure we stop, when signal received - ctx, cancel := c.chStop.Ctx(ctx) - defer cancel() - requiredResults := int(math.Ceil(float64(healthyNodesNum) * sendTxQuorum)) - errorsByCode := sendTxErrors{} - var softTimeoutChan <-chan time.Time - var resultsCount int -loop: - for { - select { - case <-ctx.Done(): - c.lggr.Debugw("Failed to collect of the results before context was done", "tx", tx, "errorsByCode", errorsByCode) - return ctx.Err() - case result := <-txResults: - errorsByCode[result.ResultCode] = append(errorsByCode[result.ResultCode], result.Err) - resultsCount++ - if slices.Contains(sendTxSuccessfulCodes, result.ResultCode) || resultsCount >= requiredResults { - break loop - } - case <-softTimeoutChan: - c.lggr.Debugw("Send Tx soft timeout expired - returning responses we've collected so far", "tx", tx, "resultsCount", resultsCount, "requiredResults", requiredResults) - break loop - } - - if softTimeoutChan == nil { - tm := time.NewTimer(c.sendTxSoftTimeout) - softTimeoutChan = tm.C - // we are fine with stopping timer at the end of function - //nolint - defer tm.Stop() - } - } - - // ignore critical error as it's reported in reportSendTxAnomalies - result, _ := aggregateTxResults(errorsByCode) - return result - -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) reportSendTxAnomalies(tx TX, txResults <-chan sendTxResult) { - defer c.wg.Done() - resultsByCode := sendTxErrors{} - // txResults eventually will be closed - for txResult := range txResults { - resultsByCode[txResult.ResultCode] = append(resultsByCode[txResult.ResultCode], txResult.Err) - } - - _, criticalErr := aggregateTxResults(resultsByCode) - if criticalErr != nil { - c.lggr.Criticalw("observed invariant violation on SendTransaction", "tx", tx, "resultsByCode", resultsByCode, "err", criticalErr) - c.SvcErrBuffer.Append(criticalErr) - PromMultiNodeInvariantViolations.WithLabelValues(c.chainFamily, c.chainID.String(), criticalErr.Error()).Inc() - } -} - -func aggregateTxResults(resultsByCode sendTxErrors) (txResult error, err error) { - severeErrors, hasSevereErrors := findFirstIn(resultsByCode, sendTxSevereErrors) - successResults, hasSuccess := findFirstIn(resultsByCode, sendTxSuccessfulCodes) - if hasSuccess { - // We assume that primary node would never report false positive txResult for a transaction. - // Thus, if such case occurs it's probably due to misconfiguration or a bug and requires manual intervention. - if hasSevereErrors { - const errMsg = "found contradictions in nodes replies on SendTransaction: got success and severe error" - // return success, since at least 1 node has accepted our broadcasted Tx, and thus it can now be included onchain - return successResults[0], fmt.Errorf(errMsg) - } - - // other errors are temporary - we are safe to return success - return successResults[0], nil - } - - if hasSevereErrors { - return severeErrors[0], nil - } - - // return temporary error - for _, result := range resultsByCode { - return result[0], nil - } - - err = fmt.Errorf("expected at least one response on SendTransaction") - return err, err -} - -const sendTxQuorum = 0.7 - -// SendTransaction - broadcasts transaction to all the send-only and primary nodes regardless of their health. -// A returned nil or error does not guarantee that the transaction will or won't be included. Additional checks must be -// performed to determine the final state. -// -// Send-only nodes' results are ignored as they tend to return false-positive responses. Broadcast to them is necessary -// to speed up the propagation of TX in the network. -// -// Handling of primary nodes' results consists of collection and aggregation. -// In the collection step, we gather as many results as possible while minimizing waiting time. This operation succeeds -// on one of the following conditions: -// * Received at least one success -// * Received at least one result and `sendTxSoftTimeout` expired -// * Received results from the sufficient number of nodes defined by sendTxQuorum. -// The aggregation is based on the following conditions: -// * If there is at least one success - returns success -// * If there is at least one terminal error - returns terminal error -// * If there is both success and terminal error - returns success and reports invariant violation -// * Otherwise, returns any (effectively random) of the errors. -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) SendTransaction(ctx context.Context, tx TX) error { - if len(c.nodes) == 0 { - return ErroringNodeError - } - - healthyNodesNum := 0 - txResults := make(chan sendTxResult, len(c.nodes)) - // Must wrap inside IfNotStopped to avoid waitgroup racing with Close - ok := c.IfNotStopped(func() { - // fire-n-forget, as sendOnlyNodes can not be trusted with result reporting - for _, n := range c.sendonlys { - if n.State() != nodeStateAlive { - continue - } - c.wg.Add(1) - go func(n SendOnlyNode[CHAIN_ID, RPC_CLIENT]) { - defer c.wg.Done() - c.broadcastTxAsync(ctx, n, tx) - }(n) - } - - var primaryBroadcastWg sync.WaitGroup - txResultsToReport := make(chan sendTxResult, len(c.nodes)) - for _, n := range c.nodes { - if n.State() != nodeStateAlive { - continue - } - - healthyNodesNum++ - primaryBroadcastWg.Add(1) - go func(n SendOnlyNode[CHAIN_ID, RPC_CLIENT]) { - defer primaryBroadcastWg.Done() - result := c.broadcastTxAsync(ctx, n, tx) - // both channels are sufficiently buffered, so we won't be locked - txResultsToReport <- result - txResults <- result - }(n) - } - - c.wg.Add(1) - go func() { - // wait for primary nodes to finish the broadcast before closing the channel - primaryBroadcastWg.Wait() - close(txResultsToReport) - close(txResults) - c.wg.Done() - }() - - c.wg.Add(1) - go c.reportSendTxAnomalies(tx, txResultsToReport) - - }) - if !ok { - return fmt.Errorf("aborted while broadcasting tx - multiNode is stopped: %w", context.Canceled) - } - - return c.collectTxResults(ctx, tx, healthyNodesNum, txResults) -} - -// findFirstIn - returns first existing value for the slice of keys -func findFirstIn[K comparable, V any](set map[K]V, keys []K) (V, bool) { - for _, k := range keys { - if v, ok := set[k]; ok { - return v, true - } - } - var v V - return v, false -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) SequenceAt(ctx context.Context, account ADDR, blockNumber *big.Int) (s SEQ, err error) { - n, err := c.selectNode() - if err != nil { - return s, err - } - return n.RPC().SequenceAt(ctx, account, blockNumber) -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) SimulateTransaction(ctx context.Context, tx TX) error { - n, err := c.selectNode() - if err != nil { - return err - } - return n.RPC().SimulateTransaction(ctx, tx) -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) Subscribe(ctx context.Context, channel chan<- HEAD, args ...interface{}) (s types.Subscription, err error) { - n, err := c.selectNode() - if err != nil { - return s, err - } - return n.RPC().Subscribe(ctx, channel, args...) -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) TokenBalance(ctx context.Context, account ADDR, tokenAddr ADDR) (b *big.Int, err error) { - n, err := c.selectNode() - if err != nil { - return b, err - } - return n.RPC().TokenBalance(ctx, account, tokenAddr) -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) TransactionByHash(ctx context.Context, txHash TX_HASH) (tx TX, err error) { - n, err := c.selectNode() - if err != nil { - return tx, err - } - return n.RPC().TransactionByHash(ctx, txHash) -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) TransactionReceipt(ctx context.Context, txHash TX_HASH) (txr TX_RECEIPT, err error) { - n, err := c.selectNode() - if err != nil { - return txr, err - } - return n.RPC().TransactionReceipt(ctx, txHash) -} - -func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) LatestFinalizedBlock(ctx context.Context) (head HEAD, err error) { - n, err := c.selectNode() - if err != nil { - return head, err - } - - return n.RPC().LatestFinalizedBlock(ctx) -} diff --git a/common/client/multi_node_test.go b/common/client/multi_node_test.go index 9f6904fcaf2..20945b254f3 100644 --- a/common/client/multi_node_test.go +++ b/common/client/multi_node_test.go @@ -17,30 +17,23 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" - "github.com/smartcontractkit/chainlink/v2/common/config" "github.com/smartcontractkit/chainlink/v2/common/types" ) -type multiNodeRPCClient RPC[types.ID, *big.Int, Hashable, Hashable, any, Hashable, any, any, - types.Receipt[Hashable, Hashable], Hashable, types.Head[Hashable], any] +type multiNodeRPCClient RPCClient[types.ID, types.Head[Hashable]] type testMultiNode struct { - *multiNode[types.ID, *big.Int, Hashable, Hashable, any, Hashable, any, any, - types.Receipt[Hashable, Hashable], Hashable, types.Head[Hashable], multiNodeRPCClient, any] + *multiNode[types.ID, Hashable, types.Head[Hashable], multiNodeRPCClient] } type multiNodeOpts struct { - logger logger.Logger - selectionMode string - leaseDuration time.Duration - noNewHeadsThreshold time.Duration - nodes []Node[types.ID, types.Head[Hashable], multiNodeRPCClient] - sendonlys []SendOnlyNode[types.ID, multiNodeRPCClient] - chainID types.ID - chainType config.ChainType - chainFamily string - classifySendTxError func(tx any, err error) SendTxReturnCode - sendTxSoftTimeout time.Duration + logger logger.Logger + selectionMode string + leaseDuration time.Duration + nodes []Node[types.ID, types.Head[Hashable], multiNodeRPCClient] + sendonlys []Node[types.ID, types.Head[Hashable], multiNodeRPCClient] + chainID types.ID + chainFamily string } func newTestMultiNode(t *testing.T, opts multiNodeOpts) testMultiNode { @@ -48,13 +41,10 @@ func newTestMultiNode(t *testing.T, opts multiNodeOpts) testMultiNode { opts.logger = logger.Test(t) } - result := NewMultiNode[types.ID, *big.Int, Hashable, Hashable, any, Hashable, any, any, - types.Receipt[Hashable, Hashable], Hashable, types.Head[Hashable], multiNodeRPCClient, any](opts.logger, - opts.selectionMode, opts.leaseDuration, opts.noNewHeadsThreshold, opts.nodes, opts.sendonlys, - opts.chainID, opts.chainType, opts.chainFamily, opts.classifySendTxError, opts.sendTxSoftTimeout) + result := NewMultiNode[types.ID, Hashable, types.Head[Hashable], multiNodeRPCClient]( + opts.logger, opts.selectionMode, opts.leaseDuration, opts.nodes, opts.sendonlys, opts.chainID, opts.chainFamily) return testMultiNode{ - result.(*multiNode[types.ID, *big.Int, Hashable, Hashable, any, Hashable, any, any, - types.Receipt[Hashable, Hashable], Hashable, types.Head[Hashable], multiNodeRPCClient, any]), + result.(*multiNode[types.ID, Hashable, types.Head[Hashable], multiNodeRPCClient]), } } diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index 4707a60426f..e2dfd0c4e81 100644 --- a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -3,6 +3,7 @@ package client import ( "context" "fmt" + "github.com/smartcontractkit/chainlink/v2/common/types" "math" "math/big" "time" @@ -99,8 +100,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { lggr := logger.Sugared(n.lfcLog).Named("Alive").With("noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold, "pollInterval", pollInterval, "pollFailureThreshold", pollFailureThreshold) lggr.Tracew("Alive loop starting", "nodeState", n.State()) - headsC := make(chan HEAD) - sub, err := n.rpc.Subscribe(n.nodeCtx, headsC, rpcSubscriptionMethodNewHeads) + headsC, sub, err := n.rpc.SubscribeToHeads(n.nodeCtx) if err != nil { lggr.Errorw("Initial subscribe for heads failed", "nodeState", n.State()) n.declareUnreachable() @@ -108,7 +108,8 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } // TODO: nit fix. If multinode switches primary node before we set sub as AliveSub, sub will be closed and we'll // falsely transition this node to unreachable state - n.rpc.SetAliveLoopSub(sub) + // TODO: Do we need this SetAliveLoopSub??? + //TODO: Delete this?: n.rpc.SetAliveLoopSub(sub) defer sub.Unsubscribe() var outOfSyncT *time.Ticker @@ -138,15 +139,21 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { lggr.Debug("Polling disabled") } - var pollFinalizedHeadCh <-chan time.Time + var finalizedHeadCh <-chan HEAD + var finalizedHeadSub types.Subscription if n.chainCfg.FinalityTagEnabled() && n.nodePoolCfg.FinalizedBlockPollInterval() > 0 { lggr.Debugw("Finalized block polling enabled") - pollT := time.NewTicker(n.nodePoolCfg.FinalizedBlockPollInterval()) - defer pollT.Stop() - pollFinalizedHeadCh = pollT.C + finalizedHeadCh, finalizedHeadSub, err = n.rpc.SubscribeToFinalizedHeads(n.nodeCtx) + if err != nil { + lggr.Errorw("Failed to subscribe to finalized heads", "err", err) + n.declareUnreachable() + return + } + defer finalizedHeadSub.Unsubscribe() } - _, highestReceivedBlockNumber, _ := n.StateAndLatest() + _, chainInfo := n.StateAndLatest() + highestReceivedBlockNumber := chainInfo.BlockNumber var pollFailures uint32 for { @@ -154,12 +161,13 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { case <-n.nodeCtx.Done(): return case <-pollCh: - var version string - promPoolRPCNodePolls.WithLabelValues(n.chainID.String(), n.name).Inc() - lggr.Tracew("Polling for version", "nodeState", n.State(), "pollFailures", pollFailures) ctx, cancel := context.WithTimeout(n.nodeCtx, pollInterval) - version, err := n.RPC().ClientVersion(ctx) + err := n.RPC().Ping(ctx) cancel() + + promPoolRPCNodePolls.WithLabelValues(n.chainID.String(), n.name).Inc() + lggr.Tracew("Pinging RPC", "nodeState", n.State(), "pollFailures", pollFailures) + if err != nil { // prevent overflow if pollFailures < math.MaxUint32 { @@ -168,7 +176,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } lggr.Warnw(fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", n.String()), "err", err, "pollFailures", pollFailures, "nodeState", n.State()) } else { - lggr.Debugw("Version poll successful", "nodeState", n.State(), "clientVersion", version) + lggr.Debugw("Ping successful", "nodeState", n.State()) promPoolRPCNodePollsSuccess.WithLabelValues(n.chainID.String(), n.name).Inc() pollFailures = 0 } @@ -183,10 +191,10 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { n.declareUnreachable() return } - _, num, td := n.StateAndLatest() - if outOfSync, liveNodes := n.syncStatus(num, td); outOfSync { + _, chainInfo := n.StateAndLatest() + if outOfSync, liveNodes := n.syncStatus(chainInfo.BlockNumber, chainInfo.BlockDifficulty); outOfSync { // note: there must be another live node for us to be out of sync - lggr.Errorw("RPC endpoint has fallen behind", "blockNumber", num, "totalDifficulty", td, "nodeState", n.State()) + lggr.Errorw("RPC endpoint has fallen behind", "blockNumber", chainInfo.BlockNumber, "totalDifficulty", chainInfo.BlockDifficulty, "nodeState", n.State()) if liveNodes < 2 { lggr.Criticalf("RPC endpoint has fallen behind; %s %s", msgCannotDisable, msgDegradedState) continue @@ -239,15 +247,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } n.declareOutOfSync(func(num int64, td *big.Int) bool { return num < highestReceivedBlockNumber }) return - case <-pollFinalizedHeadCh: - ctx, cancel := context.WithTimeout(n.nodeCtx, n.nodePoolCfg.FinalizedBlockPollInterval()) - latestFinalized, err := n.RPC().LatestFinalizedBlock(ctx) - cancel() - if err != nil { - lggr.Warnw("Failed to fetch latest finalized block", "err", err) - continue - } - + case latestFinalized := <-finalizedHeadCh: if !latestFinalized.IsValid() { lggr.Warn("Latest finalized block is not valid") continue @@ -328,8 +328,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td lggr.Tracew("Successfully subscribed to heads feed on out-of-sync RPC node", "nodeState", n.State()) - ch := make(chan HEAD) - sub, err := n.rpc.Subscribe(n.nodeCtx, ch, rpcSubscriptionMethodNewHeads) + ch, sub, err := n.rpc.SubscribeToHeads(n.nodeCtx) if err != nil { lggr.Errorw("Failed to subscribe heads on out-of-sync RPC node", "nodeState", n.State(), "err", err) n.declareUnreachable() diff --git a/common/client/node_selector.go b/common/client/node_selector.go index 45604ebe8d9..f928dabca6f 100644 --- a/common/client/node_selector.go +++ b/common/client/node_selector.go @@ -17,7 +17,7 @@ const ( type NodeSelector[ CHAIN_ID types.ID, HEAD Head, - RPC NodeClient[CHAIN_ID, HEAD], + RPC RPCClient[CHAIN_ID, HEAD], ] interface { // Select returns a Node, or nil if none can be selected. // Implementation must be thread-safe. @@ -29,7 +29,7 @@ type NodeSelector[ func newNodeSelector[ CHAIN_ID types.ID, HEAD Head, - RPC NodeClient[CHAIN_ID, HEAD], + RPC RPCClient[CHAIN_ID, HEAD], ](selectionMode string, nodes []Node[CHAIN_ID, HEAD, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { switch selectionMode { case NodeSelectionModeHighestHead: diff --git a/common/client/node_selector_highest_head.go b/common/client/node_selector_highest_head.go index 99a130004a9..b341d91b5ef 100644 --- a/common/client/node_selector_highest_head.go +++ b/common/client/node_selector_highest_head.go @@ -9,13 +9,13 @@ import ( type highestHeadNodeSelector[ CHAIN_ID types.ID, HEAD Head, - RPC NodeClient[CHAIN_ID, HEAD], + RPC RPCClient[CHAIN_ID, HEAD], ] []Node[CHAIN_ID, HEAD, RPC] func NewHighestHeadNodeSelector[ CHAIN_ID types.ID, HEAD Head, - RPC NodeClient[CHAIN_ID, HEAD], + RPC RPCClient[CHAIN_ID, HEAD], ](nodes []Node[CHAIN_ID, HEAD, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { return highestHeadNodeSelector[CHAIN_ID, HEAD, RPC](nodes) } @@ -24,7 +24,8 @@ func (s highestHeadNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, HE var highestHeadNumber int64 = math.MinInt64 var highestHeadNodes []Node[CHAIN_ID, HEAD, RPC] for _, n := range s { - state, currentHeadNumber, _ := n.StateAndLatest() + state, chainInfo := n.StateAndLatest() + currentHeadNumber := chainInfo.BlockNumber if state == nodeStateAlive && currentHeadNumber >= highestHeadNumber { if highestHeadNumber < currentHeadNumber { highestHeadNumber = currentHeadNumber diff --git a/common/client/node_selector_highest_head_test.go b/common/client/node_selector_highest_head_test.go index 6e47bbedcae..b8b0296f181 100644 --- a/common/client/node_selector_highest_head_test.go +++ b/common/client/node_selector_highest_head_test.go @@ -9,14 +9,14 @@ import ( ) func TestHighestHeadNodeSelectorName(t *testing.T) { - selector := newNodeSelector[types.ID, Head, NodeClient[types.ID, Head]](NodeSelectionModeHighestHead, nil) + selector := newNodeSelector[types.ID, Head, RPCClient[types.ID, Head]](NodeSelectionModeHighestHead, nil) assert.Equal(t, selector.Name(), NodeSelectionModeHighestHead) } func TestHighestHeadNodeSelector(t *testing.T) { t.Parallel() - type nodeClient NodeClient[types.ID, Head] + type nodeClient RPCClient[types.ID, Head] var nodes []Node[types.ID, Head, nodeClient] diff --git a/common/client/node_selector_priority_level.go b/common/client/node_selector_priority_level.go index 45cc62de077..e137932479a 100644 --- a/common/client/node_selector_priority_level.go +++ b/common/client/node_selector_priority_level.go @@ -11,7 +11,7 @@ import ( type priorityLevelNodeSelector[ CHAIN_ID types.ID, HEAD Head, - RPC NodeClient[CHAIN_ID, HEAD], + RPC RPCClient[CHAIN_ID, HEAD], ] struct { nodes []Node[CHAIN_ID, HEAD, RPC] roundRobinCount []atomic.Uint32 @@ -20,7 +20,7 @@ type priorityLevelNodeSelector[ type nodeWithPriority[ CHAIN_ID types.ID, HEAD Head, - RPC NodeClient[CHAIN_ID, HEAD], + RPC RPCClient[CHAIN_ID, HEAD], ] struct { node Node[CHAIN_ID, HEAD, RPC] priority int32 @@ -29,7 +29,7 @@ type nodeWithPriority[ func NewPriorityLevelNodeSelector[ CHAIN_ID types.ID, HEAD Head, - RPC NodeClient[CHAIN_ID, HEAD], + RPC RPCClient[CHAIN_ID, HEAD], ](nodes []Node[CHAIN_ID, HEAD, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { return &priorityLevelNodeSelector[CHAIN_ID, HEAD, RPC]{ nodes: nodes, @@ -77,7 +77,7 @@ func (s priorityLevelNodeSelector[CHAIN_ID, HEAD, RPC]) getHighestPriorityAliveT func removeLowerTiers[ CHAIN_ID types.ID, HEAD Head, - RPC NodeClient[CHAIN_ID, HEAD], + RPC RPCClient[CHAIN_ID, HEAD], ](nodes []nodeWithPriority[CHAIN_ID, HEAD, RPC]) []nodeWithPriority[CHAIN_ID, HEAD, RPC] { sort.SliceStable(nodes, func(i, j int) bool { return nodes[i].priority > nodes[j].priority @@ -99,7 +99,7 @@ func removeLowerTiers[ func nrOfPriorityTiers[ CHAIN_ID types.ID, HEAD Head, - RPC NodeClient[CHAIN_ID, HEAD], + RPC RPCClient[CHAIN_ID, HEAD], ](nodes []Node[CHAIN_ID, HEAD, RPC]) int32 { highestPriority := int32(0) for _, n := range nodes { @@ -115,7 +115,7 @@ func nrOfPriorityTiers[ func firstOrHighestPriority[ CHAIN_ID types.ID, HEAD Head, - RPC NodeClient[CHAIN_ID, HEAD], + RPC RPCClient[CHAIN_ID, HEAD], ](nodes []Node[CHAIN_ID, HEAD, RPC]) Node[CHAIN_ID, HEAD, RPC] { hp := int32(math.MaxInt32) var node Node[CHAIN_ID, HEAD, RPC] diff --git a/common/client/node_selector_round_robin.go b/common/client/node_selector_round_robin.go index 5cdad7f52ee..8b5c1bc8b0f 100644 --- a/common/client/node_selector_round_robin.go +++ b/common/client/node_selector_round_robin.go @@ -9,7 +9,7 @@ import ( type roundRobinSelector[ CHAIN_ID types.ID, HEAD Head, - RPC NodeClient[CHAIN_ID, HEAD], + RPC RPCClient[CHAIN_ID, HEAD], ] struct { nodes []Node[CHAIN_ID, HEAD, RPC] roundRobinCount atomic.Uint32 @@ -18,7 +18,7 @@ type roundRobinSelector[ func NewRoundRobinSelector[ CHAIN_ID types.ID, HEAD Head, - RPC NodeClient[CHAIN_ID, HEAD], + RPC RPCClient[CHAIN_ID, HEAD], ](nodes []Node[CHAIN_ID, HEAD, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { return &roundRobinSelector[CHAIN_ID, HEAD, RPC]{ nodes: nodes, diff --git a/common/client/node_selector_total_difficulty.go b/common/client/node_selector_total_difficulty.go index 35491503bcc..a0e1dce5335 100644 --- a/common/client/node_selector_total_difficulty.go +++ b/common/client/node_selector_total_difficulty.go @@ -9,13 +9,13 @@ import ( type totalDifficultyNodeSelector[ CHAIN_ID types.ID, HEAD Head, - RPC NodeClient[CHAIN_ID, HEAD], + RPC RPCClient[CHAIN_ID, HEAD], ] []Node[CHAIN_ID, HEAD, RPC] func NewTotalDifficultyNodeSelector[ CHAIN_ID types.ID, HEAD Head, - RPC NodeClient[CHAIN_ID, HEAD], + RPC RPCClient[CHAIN_ID, HEAD], ](nodes []Node[CHAIN_ID, HEAD, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { return totalDifficultyNodeSelector[CHAIN_ID, HEAD, RPC](nodes) } @@ -27,11 +27,13 @@ func (s totalDifficultyNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID var aliveNodes []Node[CHAIN_ID, HEAD, RPC] for _, n := range s { - state, _, currentTD := n.StateAndLatest() + state, chainInfo := n.StateAndLatest() if state != nodeStateAlive { continue } + currentTD := chainInfo.BlockDifficulty + aliveNodes = append(aliveNodes, n) if currentTD != nil && (highestTD == nil || currentTD.Cmp(highestTD) >= 0) { if highestTD == nil || currentTD.Cmp(highestTD) > 0 { diff --git a/core/chains/evm/client/chain_client.go b/core/chains/evm/client/chain_client.go index 3ee10a600da..5a9fb7bbc89 100644 --- a/core/chains/evm/client/chain_client.go +++ b/core/chains/evm/client/chain_client.go @@ -10,14 +10,13 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/rpc" + ethrpc "github.com/ethereum/go-ethereum/rpc" commonassets "github.com/smartcontractkit/chainlink-common/pkg/assets" "github.com/smartcontractkit/chainlink-common/pkg/logger" commonclient "github.com/smartcontractkit/chainlink/v2/common/client" "github.com/smartcontractkit/chainlink/v2/common/config" - "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" ) @@ -27,18 +26,9 @@ var _ Client = (*chainClient)(nil) type chainClient struct { multiNode commonclient.MultiNode[ *big.Int, - evmtypes.Nonce, - common.Address, common.Hash, - *types.Transaction, - common.Hash, - types.Log, - ethereum.FilterQuery, - *evmtypes.Receipt, - *assets.Wei, *evmtypes.Head, - RPCClient, - rpc.BatchElem, + *RpcClient, ] logger logger.SugaredLogger chainType config.ChainType @@ -50,8 +40,8 @@ func NewChainClient( selectionMode string, leaseDuration time.Duration, noNewHeadsThreshold time.Duration, - nodes []commonclient.Node[*big.Int, *evmtypes.Head, RPCClient], - sendonlys []commonclient.SendOnlyNode[*big.Int, RPCClient], + nodes []commonclient.Node[*big.Int, *evmtypes.Head, *RpcClient], + sendonlys []commonclient.Node[*big.Int, *evmtypes.Head, *RpcClient], chainID *big.Int, chainType config.ChainType, clientErrors evmconfig.ClientErrors, @@ -60,16 +50,10 @@ func NewChainClient( lggr, selectionMode, leaseDuration, - noNewHeadsThreshold, nodes, sendonlys, chainID, - chainType, "EVM", - func(tx *types.Transaction, err error) commonclient.SendTxReturnCode { - return ClassifySendError(err, clientErrors, logger.Sugared(logger.Nop()), tx, common.Address{}, chainType.IsL2()) - }, - 0, // use the default value provided by the implementation ) return &chainClient{ multiNode: multiNode, @@ -79,24 +63,37 @@ func NewChainClient( } func (c *chainClient) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) { - return c.multiNode.BalanceAt(ctx, account, blockNumber) + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return nil, err + } + return rpc.BalanceAt(ctx, account, blockNumber) } // Request specific errors for batch calls are returned to the individual BatchElem. // Ensure the same BatchElem slice provided by the caller is passed through the call stack // to ensure the caller has access to the errors. -func (c *chainClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { - return c.multiNode.BatchCallContext(ctx, b) +func (c *chainClient) BatchCallContext(ctx context.Context, b []ethrpc.BatchElem) error { + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return err + } + return rpc.BatchCallContext(ctx, b) } // Similar to BatchCallContext, ensure the provided BatchElem slice is passed through -func (c *chainClient) BatchCallContextAll(ctx context.Context, b []rpc.BatchElem) error { - return c.multiNode.BatchCallContextAll(ctx, b) +func (c *chainClient) BatchCallContextAll(ctx context.Context, b []ethrpc.BatchElem) error { + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return err + } + // TODO: What should we do here? c.multiNode.DoAll()? + return rpc.BatchCallContextAll(ctx, b) } // TODO-1663: return custom Block type instead of geth's once client.go is deprecated. func (c *chainClient) BlockByHash(ctx context.Context, hash common.Hash) (b *types.Block, err error) { - rpc, err := c.multiNode.SelectNodeRPC() + rpc, err := c.multiNode.SelectRPC() if err != nil { return b, err } @@ -105,7 +102,7 @@ func (c *chainClient) BlockByHash(ctx context.Context, hash common.Hash) (b *typ // TODO-1663: return custom Block type instead of geth's once client.go is deprecated. func (c *chainClient) BlockByNumber(ctx context.Context, number *big.Int) (b *types.Block, err error) { - rpc, err := c.multiNode.SelectNodeRPC() + rpc, err := c.multiNode.SelectRPC() if err != nil { return b, err } @@ -113,48 +110,83 @@ func (c *chainClient) BlockByNumber(ctx context.Context, number *big.Int) (b *ty } func (c *chainClient) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { - return c.multiNode.CallContext(ctx, result, method, args...) + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return err + } + return rpc.CallContext(ctx, result, method, args...) } func (c *chainClient) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { - return c.multiNode.CallContract(ctx, msg, blockNumber) + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return nil, err + } + return rpc.CallContract(ctx, msg, blockNumber) } func (c *chainClient) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) { - return c.multiNode.PendingCallContract(ctx, msg) + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return nil, err + } + return rpc.PendingCallContract(ctx, msg) } // TODO-1663: change this to actual ChainID() call once client.go is deprecated. func (c *chainClient) ChainID() (*big.Int, error) { - //return c.multiNode.ChainID(ctx), nil - return c.multiNode.ConfiguredChainID(), nil + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return nil, err + } + return rpc.chainID, nil } func (c *chainClient) Close() { - c.multiNode.Close() + _ = c.multiNode.Close() } func (c *chainClient) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) { - return c.multiNode.CodeAt(ctx, account, blockNumber) + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return nil, err + } + return rpc.CodeAt(ctx, account, blockNumber) } func (c *chainClient) ConfiguredChainID() *big.Int { - return c.multiNode.ConfiguredChainID() + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return nil + } + return rpc.chainID } func (c *chainClient) Dial(ctx context.Context) error { - return c.multiNode.Dial(ctx) + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return err + } + return rpc.Dial(ctx) } func (c *chainClient) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { - return c.multiNode.EstimateGas(ctx, call) + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return nil, err + } + return rpc.EstimateGas(ctx, call) } func (c *chainClient) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { - return c.multiNode.FilterEvents(ctx, q) + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return nil, err + } + return rpc.FilterEvents(ctx, q) } func (c *chainClient) HeaderByHash(ctx context.Context, h common.Hash) (head *types.Header, err error) { - rpc, err := c.multiNode.SelectNodeRPC() + rpc, err := c.multiNode.SelectRPC() if err != nil { return head, err } @@ -162,7 +194,7 @@ func (c *chainClient) HeaderByHash(ctx context.Context, h common.Hash) (head *ty } func (c *chainClient) HeaderByNumber(ctx context.Context, n *big.Int) (head *types.Header, err error) { - rpc, err := c.multiNode.SelectNodeRPC() + rpc, err := c.multiNode.SelectRPC() if err != nil { return head, err } @@ -170,31 +202,49 @@ func (c *chainClient) HeaderByNumber(ctx context.Context, n *big.Int) (head *typ } func (c *chainClient) HeadByHash(ctx context.Context, h common.Hash) (*evmtypes.Head, error) { - return c.multiNode.BlockByHash(ctx, h) + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return nil, err + } + return rpc.BlockByHash(ctx, h) } func (c *chainClient) HeadByNumber(ctx context.Context, n *big.Int) (*evmtypes.Head, error) { - return c.multiNode.BlockByNumber(ctx, n) + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return nil, err + } + return rpc.BlockByNumber(ctx, n) } func (c *chainClient) IsL2() bool { + // TODO: Where should this come from? return c.multiNode.IsL2() } func (c *chainClient) LINKBalance(ctx context.Context, address common.Address, linkAddress common.Address) (*commonassets.Link, error) { - return c.multiNode.LINKBalance(ctx, address, linkAddress) + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return nil, err + } + return rpc.LINKBalance(ctx, address, linkAddress) } func (c *chainClient) LatestBlockHeight(ctx context.Context) (*big.Int, error) { - return c.multiNode.LatestBlockHeight(ctx) + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return nil, err + } + return rpc.LatestBlockHeight(ctx) } func (c *chainClient) NodeStates() map[string]string { + // TODO: Should nodeState be public and returned here? return c.multiNode.NodeStates() } func (c *chainClient) PendingCodeAt(ctx context.Context, account common.Address) (b []byte, err error) { - rpc, err := c.multiNode.SelectNodeRPC() + rpc, err := c.multiNode.SelectRPC() if err != nil { return b, err } @@ -203,12 +253,20 @@ func (c *chainClient) PendingCodeAt(ctx context.Context, account common.Address) // TODO-1663: change this to evmtypes.Nonce(int64) once client.go is deprecated. func (c *chainClient) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { - n, err := c.multiNode.PendingSequenceAt(ctx, account) + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return b, err + } + n, err := rpc.PendingSequenceAt(ctx, account) return uint64(n), err } func (c *chainClient) SendTransaction(ctx context.Context, tx *types.Transaction) error { - return c.multiNode.SendTransaction(ctx, tx) + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return err + } + return rpc.SendTransaction(ctx, tx) } func (c *chainClient) SendTransactionReturnCode(ctx context.Context, tx *types.Transaction, fromAddress common.Address) (commonclient.SendTxReturnCode, error) { @@ -218,18 +276,29 @@ func (c *chainClient) SendTransactionReturnCode(ctx context.Context, tx *types.T } func (c *chainClient) SequenceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (evmtypes.Nonce, error) { - return c.multiNode.SequenceAt(ctx, account, blockNumber) + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return b, err + } + return rpc.SequenceAt(ctx, account, blockNumber) } func (c *chainClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (s ethereum.Subscription, err error) { - rpc, err := c.multiNode.SelectNodeRPC() + rpc, err := c.multiNode.SelectRPC() if err != nil { return s, err } return rpc.SubscribeFilterLogs(ctx, q, ch) } -func (c *chainClient) SubscribeNewHead(ctx context.Context, ch chan<- *evmtypes.Head) (ethereum.Subscription, error) { +func (c *chainClient) SubscribeNewHead(ctx context.Context) (chan<- *evmtypes.Head, ethereum.Subscription, error) { + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return nil, nil, err + } + + // TODO: Implement this + rpc.SubscribeToHeads(ctx) csf := newChainIDSubForwarder(c.ConfiguredChainID(), ch) err := csf.start(c.multiNode.Subscribe(ctx, csf.srcCh, "newHeads")) if err != nil { @@ -239,7 +308,7 @@ func (c *chainClient) SubscribeNewHead(ctx context.Context, ch chan<- *evmtypes. } func (c *chainClient) SuggestGasPrice(ctx context.Context) (p *big.Int, err error) { - rpc, err := c.multiNode.SelectNodeRPC() + rpc, err := c.multiNode.SelectRPC() if err != nil { return p, err } @@ -247,7 +316,7 @@ func (c *chainClient) SuggestGasPrice(ctx context.Context) (p *big.Int, err erro } func (c *chainClient) SuggestGasTipCap(ctx context.Context) (t *big.Int, err error) { - rpc, err := c.multiNode.SelectNodeRPC() + rpc, err := c.multiNode.SelectRPC() if err != nil { return t, err } @@ -255,16 +324,24 @@ func (c *chainClient) SuggestGasTipCap(ctx context.Context) (t *big.Int, err err } func (c *chainClient) TokenBalance(ctx context.Context, address common.Address, contractAddress common.Address) (*big.Int, error) { - return c.multiNode.TokenBalance(ctx, address, contractAddress) + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return p, err + } + return rpc.TokenBalance(ctx, address, contractAddress) } func (c *chainClient) TransactionByHash(ctx context.Context, txHash common.Hash) (*types.Transaction, error) { - return c.multiNode.TransactionByHash(ctx, txHash) + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return p, err + } + return rpc.TransactionByHash(ctx, txHash) } // TODO-1663: return custom Receipt type instead of geth's once client.go is deprecated. func (c *chainClient) TransactionReceipt(ctx context.Context, txHash common.Hash) (r *types.Receipt, err error) { - rpc, err := c.multiNode.SelectNodeRPC() + rpc, err := c.multiNode.SelectRPC() if err != nil { return r, err } @@ -273,7 +350,11 @@ func (c *chainClient) TransactionReceipt(ctx context.Context, txHash common.Hash } func (c *chainClient) LatestFinalizedBlock(ctx context.Context) (*evmtypes.Head, error) { - return c.multiNode.LatestFinalizedBlock(ctx) + rpc, err := c.multiNode.SelectRPC() + if err != nil { + return p, err + } + return rpc.LatestFinalizedBlock(ctx) } func (c *chainClient) CheckTxValidity(ctx context.Context, from common.Address, to common.Address, data []byte) *SendError { From 6813259617b2bf896f721f852a07f15d5f6280b5 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 15 May 2024 09:04:19 -0400 Subject: [PATCH 03/58] Fix build + generate --- common/client/mock_node_selector_test.go | 4 +- common/client/mock_node_test.go | 77 +++----- common/client/mocks/rpc_client.go | 218 +++++++++++++++++++++++ common/client/multi_node.go | 7 + common/client/types.go | 2 +- common/headtracker/head_listener.go | 7 +- common/headtracker/types/client.go | 2 +- core/chains/evm/client/chain_client.go | 43 ++--- core/chains/evm/client/client.go | 9 +- core/chains/evm/client/evm_client.go | 23 ++- core/chains/evm/client/mocks/client.go | 37 ++-- core/chains/evm/client/null_client.go | 4 +- core/chains/evm/client/rpc_client.go | 2 +- 13 files changed, 319 insertions(+), 116 deletions(-) create mode 100644 common/client/mocks/rpc_client.go diff --git a/common/client/mock_node_selector_test.go b/common/client/mock_node_selector_test.go index 996d064daa4..b645d9e69ba 100644 --- a/common/client/mock_node_selector_test.go +++ b/common/client/mock_node_selector_test.go @@ -8,7 +8,7 @@ import ( ) // mockNodeSelector is an autogenerated mock type for the NodeSelector type -type mockNodeSelector[CHAIN_ID types.ID, HEAD Head, RPC NodeClient[CHAIN_ID, HEAD]] struct { +type mockNodeSelector[CHAIN_ID types.ID, HEAD Head, RPC RPCClient[CHAIN_ID, HEAD]] struct { mock.Mock } @@ -52,7 +52,7 @@ func (_m *mockNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, HEAD, R // newMockNodeSelector creates a new instance of mockNodeSelector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. -func newMockNodeSelector[CHAIN_ID types.ID, HEAD Head, RPC NodeClient[CHAIN_ID, HEAD]](t interface { +func newMockNodeSelector[CHAIN_ID types.ID, HEAD Head, RPC RPCClient[CHAIN_ID, HEAD]](t interface { mock.TestingT Cleanup(func()) }) *mockNodeSelector[CHAIN_ID, HEAD, RPC] { diff --git a/common/client/mock_node_test.go b/common/client/mock_node_test.go index ee2cacb9274..a7bd79e29a9 100644 --- a/common/client/mock_node_test.go +++ b/common/client/mock_node_test.go @@ -4,20 +4,18 @@ package client import ( context "context" - big "math/big" - - mock "github.com/stretchr/testify/mock" types "github.com/smartcontractkit/chainlink/v2/common/types" + mock "github.com/stretchr/testify/mock" ) // mockNode is an autogenerated mock type for the Node type -type mockNode[CHAIN_ID types.ID, HEAD Head, RPC NodeClient[CHAIN_ID, HEAD]] struct { +type mockNode[CHAIN_ID types.ID, HEAD Head, RPC_CLIENT RPCClient[CHAIN_ID, HEAD]] struct { mock.Mock } // Close provides a mock function with given fields: -func (_m *mockNode[CHAIN_ID, HEAD, RPC]) Close() error { +func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) Close() error { ret := _m.Called() if len(ret) == 0 { @@ -35,7 +33,7 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC]) Close() error { } // ConfiguredChainID provides a mock function with given fields: -func (_m *mockNode[CHAIN_ID, HEAD, RPC]) ConfiguredChainID() CHAIN_ID { +func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) ConfiguredChainID() CHAIN_ID { ret := _m.Called() if len(ret) == 0 { @@ -53,7 +51,7 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC]) ConfiguredChainID() CHAIN_ID { } // Name provides a mock function with given fields: -func (_m *mockNode[CHAIN_ID, HEAD, RPC]) Name() string { +func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) Name() string { ret := _m.Called() if len(ret) == 0 { @@ -71,7 +69,7 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC]) Name() string { } // Order provides a mock function with given fields: -func (_m *mockNode[CHAIN_ID, HEAD, RPC]) Order() int32 { +func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) Order() int32 { ret := _m.Called() if len(ret) == 0 { @@ -89,25 +87,25 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC]) Order() int32 { } // RPC provides a mock function with given fields: -func (_m *mockNode[CHAIN_ID, HEAD, RPC]) RPC() RPC { +func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) RPC() RPC_CLIENT { ret := _m.Called() if len(ret) == 0 { panic("no return value specified for RPC") } - var r0 RPC - if rf, ok := ret.Get(0).(func() RPC); ok { + var r0 RPC_CLIENT + if rf, ok := ret.Get(0).(func() RPC_CLIENT); ok { r0 = rf() } else { - r0 = ret.Get(0).(RPC) + r0 = ret.Get(0).(RPC_CLIENT) } return r0 } // Start provides a mock function with given fields: _a0 -func (_m *mockNode[CHAIN_ID, HEAD, RPC]) Start(_a0 context.Context) error { +func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) Start(_a0 context.Context) error { ret := _m.Called(_a0) if len(ret) == 0 { @@ -125,7 +123,7 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC]) Start(_a0 context.Context) error { } // State provides a mock function with given fields: -func (_m *mockNode[CHAIN_ID, HEAD, RPC]) State() nodeState { +func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) State() nodeState { ret := _m.Called() if len(ret) == 0 { @@ -143,7 +141,7 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC]) State() nodeState { } // StateAndLatest provides a mock function with given fields: -func (_m *mockNode[CHAIN_ID, HEAD, RPC]) StateAndLatest() (nodeState, int64, *big.Int) { +func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) StateAndLatest() (nodeState, ChainInfo) { ret := _m.Called() if len(ret) == 0 { @@ -151,9 +149,8 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC]) StateAndLatest() (nodeState, int64, *bi } var r0 nodeState - var r1 int64 - var r2 *big.Int - if rf, ok := ret.Get(0).(func() (nodeState, int64, *big.Int)); ok { + var r1 ChainInfo + if rf, ok := ret.Get(0).(func() (nodeState, ChainInfo)); ok { return rf() } if rf, ok := ret.Get(0).(func() nodeState); ok { @@ -162,25 +159,17 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC]) StateAndLatest() (nodeState, int64, *bi r0 = ret.Get(0).(nodeState) } - if rf, ok := ret.Get(1).(func() int64); ok { + if rf, ok := ret.Get(1).(func() ChainInfo); ok { r1 = rf() } else { - r1 = ret.Get(1).(int64) - } - - if rf, ok := ret.Get(2).(func() *big.Int); ok { - r2 = rf() - } else { - if ret.Get(2) != nil { - r2 = ret.Get(2).(*big.Int) - } + r1 = ret.Get(1).(ChainInfo) } - return r0, r1, r2 + return r0, r1 } // String provides a mock function with given fields: -func (_m *mockNode[CHAIN_ID, HEAD, RPC]) String() string { +func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) String() string { ret := _m.Called() if len(ret) == 0 { @@ -197,36 +186,18 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC]) String() string { return r0 } -// SubscribersCount provides a mock function with given fields: -func (_m *mockNode[CHAIN_ID, HEAD, RPC]) SubscribersCount() int32 { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for SubscribersCount") - } - - var r0 int32 - if rf, ok := ret.Get(0).(func() int32); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(int32) - } - - return r0 -} - -// UnsubscribeAllExceptAliveLoop provides a mock function with given fields: -func (_m *mockNode[CHAIN_ID, HEAD, RPC]) UnsubscribeAllExceptAliveLoop() { +// UnsubscribeAll provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) UnsubscribeAll() { _m.Called() } // newMockNode creates a new instance of mockNode. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. -func newMockNode[CHAIN_ID types.ID, HEAD Head, RPC NodeClient[CHAIN_ID, HEAD]](t interface { +func newMockNode[CHAIN_ID types.ID, HEAD Head, RPC_CLIENT RPCClient[CHAIN_ID, HEAD]](t interface { mock.TestingT Cleanup(func()) -}) *mockNode[CHAIN_ID, HEAD, RPC] { - mock := &mockNode[CHAIN_ID, HEAD, RPC]{} +}) *mockNode[CHAIN_ID, HEAD, RPC_CLIENT] { + mock := &mockNode[CHAIN_ID, HEAD, RPC_CLIENT]{} mock.Mock.Test(t) t.Cleanup(func() { mock.AssertExpectations(t) }) diff --git a/common/client/mocks/rpc_client.go b/common/client/mocks/rpc_client.go new file mode 100644 index 00000000000..7ad29397b7a --- /dev/null +++ b/common/client/mocks/rpc_client.go @@ -0,0 +1,218 @@ +// Code generated by mockery v2.42.2. DO NOT EDIT. + +package mocks + +import ( + context "context" + + client "github.com/smartcontractkit/chainlink/v2/common/client" + + mock "github.com/stretchr/testify/mock" + + types "github.com/smartcontractkit/chainlink/v2/common/types" +) + +// RPCClient is an autogenerated mock type for the RPCClient type +type RPCClient[CHAIN_ID types.ID, HEAD client.Head] struct { + mock.Mock +} + +// ChainID provides a mock function with given fields: ctx +func (_m *RPCClient[CHAIN_ID, HEAD]) ChainID(ctx context.Context) (CHAIN_ID, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + + var r0 CHAIN_ID + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (CHAIN_ID, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) CHAIN_ID); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(CHAIN_ID) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Close provides a mock function with given fields: +func (_m *RPCClient[CHAIN_ID, HEAD]) Close() { + _m.Called() +} + +// Dial provides a mock function with given fields: ctx +func (_m *RPCClient[CHAIN_ID, HEAD]) Dial(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Dial") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// IsSyncing provides a mock function with given fields: ctx +func (_m *RPCClient[CHAIN_ID, HEAD]) IsSyncing(ctx context.Context) (bool, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for IsSyncing") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (bool, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) bool); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Ping provides a mock function with given fields: _a0 +func (_m *RPCClient[CHAIN_ID, HEAD]) Ping(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Ping") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SubscribeToFinalizedHeads provides a mock function with given fields: ctx +func (_m *RPCClient[CHAIN_ID, HEAD]) SubscribeToFinalizedHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SubscribeToFinalizedHeads") + } + + var r0 <-chan HEAD + var r1 types.Subscription + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (<-chan HEAD, types.Subscription, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) <-chan HEAD); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan HEAD) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) types.Subscription); ok { + r1 = rf(ctx) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(types.Subscription) + } + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// SubscribeToHeads provides a mock function with given fields: ctx +func (_m *RPCClient[CHAIN_ID, HEAD]) SubscribeToHeads(ctx context.Context) (chan HEAD, types.Subscription, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SubscribeToHeads") + } + + var r0 chan HEAD + var r1 types.Subscription + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (chan HEAD, types.Subscription, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) chan HEAD); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(chan HEAD) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) types.Subscription); ok { + r1 = rf(ctx) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(types.Subscription) + } + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// UnsubscribeAllExcept provides a mock function with given fields: subs +func (_m *RPCClient[CHAIN_ID, HEAD]) UnsubscribeAllExcept(subs ...types.Subscription) { + _va := make([]interface{}, len(subs)) + for _i := range subs { + _va[_i] = subs[_i] + } + var _ca []interface{} + _ca = append(_ca, _va...) + _m.Called(_ca...) +} + +// NewRPCClient creates a new instance of RPCClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRPCClient[CHAIN_ID types.ID, HEAD client.Head](t interface { + mock.TestingT + Cleanup(func()) +}) *RPCClient[CHAIN_ID, HEAD] { + mock := &RPCClient[CHAIN_ID, HEAD]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/client/multi_node.go b/common/client/multi_node.go index 7547ec4a05f..51d51f67646 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -3,6 +3,7 @@ package client import ( "context" "fmt" + "github.com/smartcontractkit/chainlink/v2/common/config" "math/big" "sync" "time" @@ -47,6 +48,7 @@ type MultiNode[ DoAll(ctx context.Context, do func(ctx context.Context, rpc RPC_CLIENT, isSendOnly bool) bool) error // NodeStates - returns RPCs' states NodeStates() map[string]nodeState + ChainType() config.ChainType Close() error } @@ -66,6 +68,7 @@ type multiNode[ nodeSelector NodeSelector[CHAIN_ID, HEAD, RPC_CLIENT] leaseDuration time.Duration leaseTicker *time.Ticker + chainType config.ChainType chainFamily string reportInterval time.Duration @@ -113,6 +116,10 @@ func NewMultiNode[ return c } +func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) ChainType() config.ChainType { + return c.chainType +} + func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) DoAll(ctx context.Context, do func(ctx context.Context, rpc RPC_CLIENT, isSendOnly bool) bool) error { runDo := func(nodes []Node[CHAIN_ID, HEAD, RPC_CLIENT], isSendOnly bool) error { for _, n := range nodes { diff --git a/common/client/types.go b/common/client/types.go index 2184ec27cac..ffbcf6f7679 100644 --- a/common/client/types.go +++ b/common/client/types.go @@ -22,7 +22,7 @@ type RPCClient[ // Dial - prepares the RPC for usage. Can be called on fresh or closed RPC Dial(ctx context.Context) error // SubscribeToHeads - returns channel and subscription for new heads. - SubscribeToHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) + SubscribeToHeads(ctx context.Context) (chan HEAD, types.Subscription, error) // SubscribeToFinalizedHeads - returns channel and subscription for finalized heads. SubscribeToFinalizedHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) // Ping - returns error if RPC is not reachable diff --git a/common/headtracker/head_listener.go b/common/headtracker/head_listener.go index 15977c4dfe4..22ed5ecc284 100644 --- a/common/headtracker/head_listener.go +++ b/common/headtracker/head_listener.go @@ -58,7 +58,7 @@ type headListener[ client htrktypes.Client[HTH, S, ID, BLOCK_HASH] logger logger.Logger chStop services.StopChan - chHeaders chan HTH + chHeaders <-chan HTH headSubscription types.Subscription connected atomic.Bool receivingHeads atomic.Bool @@ -216,12 +216,9 @@ func (hl *headListener[HTH, S, ID, BLOCK_HASH]) subscribe(ctx context.Context) b } func (hl *headListener[HTH, S, ID, BLOCK_HASH]) subscribeToHead(ctx context.Context) error { - hl.chHeaders = make(chan HTH) - var err error - hl.headSubscription, err = hl.client.SubscribeNewHead(ctx, hl.chHeaders) + hl.chHeaders, hl.headSubscription, err = hl.client.SubscribeNewHead(ctx) if err != nil { - close(hl.chHeaders) return fmt.Errorf("Client#SubscribeNewHead: %w", err) } diff --git a/common/headtracker/types/client.go b/common/headtracker/types/client.go index a1e419809b5..b697c336f58 100644 --- a/common/headtracker/types/client.go +++ b/common/headtracker/types/client.go @@ -14,7 +14,7 @@ type Client[H types.Head[BLOCK_HASH], S types.Subscription, ID types.ID, BLOCK_H ConfiguredChainID() (id ID) // SubscribeNewHead is the method in which the client receives new Head. // It can be implemented differently for each chain i.e websocket, polling, etc - SubscribeNewHead(ctx context.Context, ch chan<- H) (S, error) + SubscribeNewHead(ctx context.Context) (<-chan H, S, error) // LatestFinalizedBlock - returns the latest block that was marked as finalized LatestFinalizedBlock(ctx context.Context) (head H, err error) } diff --git a/core/chains/evm/client/chain_client.go b/core/chains/evm/client/chain_client.go index 5a9fb7bbc89..a0528ec5cc6 100644 --- a/core/chains/evm/client/chain_client.go +++ b/core/chains/evm/client/chain_client.go @@ -83,12 +83,13 @@ func (c *chainClient) BatchCallContext(ctx context.Context, b []ethrpc.BatchElem // Similar to BatchCallContext, ensure the provided BatchElem slice is passed through func (c *chainClient) BatchCallContextAll(ctx context.Context, b []ethrpc.BatchElem) error { - rpc, err := c.multiNode.SelectRPC() - if err != nil { - return err + doFunc := func(ctx context.Context, rpc *RpcClient, isSendOnly bool) bool { + if err := rpc.BatchCallContext(ctx, b); err != nil { + return false + } + return true } - // TODO: What should we do here? c.multiNode.DoAll()? - return rpc.BatchCallContextAll(ctx, b) + return c.multiNode.DoAll(ctx, doFunc) } // TODO-1663: return custom Block type instead of geth's once client.go is deprecated. @@ -173,7 +174,7 @@ func (c *chainClient) Dial(ctx context.Context) error { func (c *chainClient) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { rpc, err := c.multiNode.SelectRPC() if err != nil { - return nil, err + return 0, err } return rpc.EstimateGas(ctx, call) } @@ -218,8 +219,7 @@ func (c *chainClient) HeadByNumber(ctx context.Context, n *big.Int) (*evmtypes.H } func (c *chainClient) IsL2() bool { - // TODO: Where should this come from? - return c.multiNode.IsL2() + return c.multiNode.ChainType().IsL2() } func (c *chainClient) LINKBalance(ctx context.Context, address common.Address, linkAddress common.Address) (*commonassets.Link, error) { @@ -239,8 +239,11 @@ func (c *chainClient) LatestBlockHeight(ctx context.Context) (*big.Int, error) { } func (c *chainClient) NodeStates() map[string]string { - // TODO: Should nodeState be public and returned here? - return c.multiNode.NodeStates() + nodeStates := make(map[string]string) + for k, v := range c.multiNode.NodeStates() { + nodeStates[k] = v.String() + } + return nodeStates } func (c *chainClient) PendingCodeAt(ctx context.Context, account common.Address) (b []byte, err error) { @@ -255,7 +258,7 @@ func (c *chainClient) PendingCodeAt(ctx context.Context, account common.Address) func (c *chainClient) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { rpc, err := c.multiNode.SelectRPC() if err != nil { - return b, err + return 0, err } n, err := rpc.PendingSequenceAt(ctx, account) return uint64(n), err @@ -278,7 +281,7 @@ func (c *chainClient) SendTransactionReturnCode(ctx context.Context, tx *types.T func (c *chainClient) SequenceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (evmtypes.Nonce, error) { rpc, err := c.multiNode.SelectRPC() if err != nil { - return b, err + return 0, err } return rpc.SequenceAt(ctx, account, blockNumber) } @@ -291,20 +294,20 @@ func (c *chainClient) SubscribeFilterLogs(ctx context.Context, q ethereum.Filter return rpc.SubscribeFilterLogs(ctx, q, ch) } -func (c *chainClient) SubscribeNewHead(ctx context.Context) (chan<- *evmtypes.Head, ethereum.Subscription, error) { +func (c *chainClient) SubscribeNewHead(ctx context.Context) (<-chan *evmtypes.Head, ethereum.Subscription, error) { rpc, err := c.multiNode.SelectRPC() if err != nil { return nil, nil, err } // TODO: Implement this - rpc.SubscribeToHeads(ctx) + ch, sub, err := rpc.SubscribeToHeads(ctx) csf := newChainIDSubForwarder(c.ConfiguredChainID(), ch) - err := csf.start(c.multiNode.Subscribe(ctx, csf.srcCh, "newHeads")) + err = csf.start(sub, err) if err != nil { - return nil, err + return nil, nil, err } - return csf, nil + return ch, csf, nil } func (c *chainClient) SuggestGasPrice(ctx context.Context) (p *big.Int, err error) { @@ -326,7 +329,7 @@ func (c *chainClient) SuggestGasTipCap(ctx context.Context) (t *big.Int, err err func (c *chainClient) TokenBalance(ctx context.Context, address common.Address, contractAddress common.Address) (*big.Int, error) { rpc, err := c.multiNode.SelectRPC() if err != nil { - return p, err + return nil, err } return rpc.TokenBalance(ctx, address, contractAddress) } @@ -334,7 +337,7 @@ func (c *chainClient) TokenBalance(ctx context.Context, address common.Address, func (c *chainClient) TransactionByHash(ctx context.Context, txHash common.Hash) (*types.Transaction, error) { rpc, err := c.multiNode.SelectRPC() if err != nil { - return p, err + return nil, err } return rpc.TransactionByHash(ctx, txHash) } @@ -352,7 +355,7 @@ func (c *chainClient) TransactionReceipt(ctx context.Context, txHash common.Hash func (c *chainClient) LatestFinalizedBlock(ctx context.Context) (*evmtypes.Head, error) { rpc, err := c.multiNode.SelectRPC() if err != nil { - return p, err + return nil, err } return rpc.LatestFinalizedBlock(ctx) } diff --git a/core/chains/evm/client/client.go b/core/chains/evm/client/client.go index 9628c74b9ab..785619bf721 100644 --- a/core/chains/evm/client/client.go +++ b/core/chains/evm/client/client.go @@ -62,7 +62,7 @@ type Client interface { // correct hash from the RPC response. HeadByNumber(ctx context.Context, n *big.Int) (*evmtypes.Head, error) HeadByHash(ctx context.Context, n common.Hash) (*evmtypes.Head, error) - SubscribeNewHead(ctx context.Context, ch chan<- *evmtypes.Head) (ethereum.Subscription, error) + SubscribeNewHead(ctx context.Context) (<-chan *evmtypes.Head, ethereum.Subscription, error) LatestFinalizedBlock(ctx context.Context) (head *evmtypes.Head, err error) SendTransactionReturnCode(ctx context.Context, tx *types.Transaction, fromAddress common.Address) (commonclient.SendTxReturnCode, error) @@ -332,13 +332,14 @@ func (client *client) SubscribeFilterLogs(ctx context.Context, q ethereum.Filter return client.pool.SubscribeFilterLogs(ctx, q, ch) } -func (client *client) SubscribeNewHead(ctx context.Context, ch chan<- *evmtypes.Head) (ethereum.Subscription, error) { +func (client *client) SubscribeNewHead(ctx context.Context) (<-chan *evmtypes.Head, ethereum.Subscription, error) { + ch := make(chan *evmtypes.Head) csf := newChainIDSubForwarder(client.ConfiguredChainID(), ch) err := csf.start(client.pool.EthSubscribe(ctx, csf.srcCh, "newHeads")) if err != nil { - return nil, err + return nil, nil, err } - return csf, nil + return ch, csf, nil } func (client *client) EthSubscribe(ctx context.Context, channel chan<- *evmtypes.Head, args ...interface{}) (ethereum.Subscription, error) { diff --git a/core/chains/evm/client/evm_client.go b/core/chains/evm/client/evm_client.go index acf11e43cd1..289aae781a6 100644 --- a/core/chains/evm/client/evm_client.go +++ b/core/chains/evm/client/evm_client.go @@ -14,22 +14,19 @@ import ( func NewEvmClient(cfg evmconfig.NodePool, chainCfg commonclient.ChainConfig, clientErrors evmconfig.ClientErrors, lggr logger.Logger, chainID *big.Int, nodes []*toml.Node) Client { var empty url.URL - var primaries []commonclient.Node[*big.Int, *evmtypes.Head, RPCClient] - var sendonlys []commonclient.SendOnlyNode[*big.Int, RPCClient] + var primaries []commonclient.Node[*big.Int, *evmtypes.Head, *RpcClient] + var sendonlys []commonclient.Node[*big.Int, *evmtypes.Head, *RpcClient] for i, node := range nodes { + rpc := NewRPCClient(cfg, lggr, empty, (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, + commonclient.Secondary) + newNode := commonclient.NewNode[*big.Int, *evmtypes.Head, *RpcClient](cfg, chainCfg, + lggr, (url.URL)(*node.WSURL), (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, *node.Order, + rpc, "EVM") + if node.SendOnly != nil && *node.SendOnly { - rpc := NewRPCClient(cfg, lggr, empty, (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, - commonclient.Secondary) - sendonly := commonclient.NewSendOnlyNode(lggr, (url.URL)(*node.HTTPURL), - *node.Name, chainID, rpc) - sendonlys = append(sendonlys, sendonly) + sendonlys = append(sendonlys, newNode) } else { - rpc := NewRPCClient(cfg, lggr, (url.URL)(*node.WSURL), (*url.URL)(node.HTTPURL), *node.Name, int32(i), - chainID, commonclient.Primary) - primaryNode := commonclient.NewNode(cfg, chainCfg, - lggr, (url.URL)(*node.WSURL), (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, *node.Order, - rpc, "EVM") - primaries = append(primaries, primaryNode) + primaries = append(primaries, newNode) } } diff --git a/core/chains/evm/client/mocks/client.go b/core/chains/evm/client/mocks/client.go index 58d51526626..34299f1b393 100644 --- a/core/chains/evm/client/mocks/client.go +++ b/core/chains/evm/client/mocks/client.go @@ -829,34 +829,43 @@ func (_m *Client) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuer return r0, r1 } -// SubscribeNewHead provides a mock function with given fields: ctx, ch -func (_m *Client) SubscribeNewHead(ctx context.Context, ch chan<- *evmtypes.Head) (ethereum.Subscription, error) { - ret := _m.Called(ctx, ch) +// SubscribeNewHead provides a mock function with given fields: ctx +func (_m *Client) SubscribeNewHead(ctx context.Context) (<-chan *evmtypes.Head, ethereum.Subscription, error) { + ret := _m.Called(ctx) if len(ret) == 0 { panic("no return value specified for SubscribeNewHead") } - var r0 ethereum.Subscription - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, chan<- *evmtypes.Head) (ethereum.Subscription, error)); ok { - return rf(ctx, ch) + var r0 <-chan *evmtypes.Head + var r1 ethereum.Subscription + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (<-chan *evmtypes.Head, ethereum.Subscription, error)); ok { + return rf(ctx) } - if rf, ok := ret.Get(0).(func(context.Context, chan<- *evmtypes.Head) ethereum.Subscription); ok { - r0 = rf(ctx, ch) + if rf, ok := ret.Get(0).(func(context.Context) <-chan *evmtypes.Head); ok { + r0 = rf(ctx) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(ethereum.Subscription) + r0 = ret.Get(0).(<-chan *evmtypes.Head) } } - if rf, ok := ret.Get(1).(func(context.Context, chan<- *evmtypes.Head) error); ok { - r1 = rf(ctx, ch) + if rf, ok := ret.Get(1).(func(context.Context) ethereum.Subscription); ok { + r1 = rf(ctx) } else { - r1 = ret.Error(1) + if ret.Get(1) != nil { + r1 = ret.Get(1).(ethereum.Subscription) + } } - return r0, r1 + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 } // SuggestGasPrice provides a mock function with given fields: ctx diff --git a/core/chains/evm/client/null_client.go b/core/chains/evm/client/null_client.go index 3129bcff9b0..7615a0a68af 100644 --- a/core/chains/evm/client/null_client.go +++ b/core/chains/evm/client/null_client.go @@ -90,9 +90,9 @@ func (nc *NullClient) SubscribeFilterLogs(ctx context.Context, q ethereum.Filter return newNullSubscription(nc.lggr), nil } -func (nc *NullClient) SubscribeNewHead(ctx context.Context, ch chan<- *evmtypes.Head) (ethereum.Subscription, error) { +func (nc *NullClient) SubscribeNewHead(ctx context.Context) (<-chan *evmtypes.Head, ethereum.Subscription, error) { nc.lggr.Debug("SubscribeNewHead") - return newNullSubscription(nc.lggr), nil + return nil, newNullSubscription(nc.lggr), nil } // diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index 71539d11039..10d9de05049 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -91,7 +91,7 @@ func NewRPCClient( return r } -func (r *RpcClient) SubscribeToHeads(ctx context.Context) (<-chan *evmtypes.Head, commontypes.Subscription, error) { +func (r *RpcClient) SubscribeToHeads(ctx context.Context) (chan *evmtypes.Head, commontypes.Subscription, error) { channel := make(chan *evmtypes.Head) sub, err := r.Subscribe(ctx, channel) return channel, sub, err From 9e45475092b7e3cc23ab8ea6a8bcd5b57edc0733 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 16 May 2024 12:48:06 -0400 Subject: [PATCH 04/58] Udate multinode --- common/client/mock_node_selector_test.go | 4 +- common/client/mock_node_test.go | 4 +- .../rpc_client.go => mock_rpc_client_test.go} | 43 +++-- common/client/mock_send_only_client_test.go | 12 +- common/client/mock_send_only_node_test.go | 4 +- common/client/multi_node.go | 61 ++++---- common/client/multi_node_test.go | 49 +++--- common/client/node.go | 2 +- common/client/node_fsm_test.go | 26 ++-- common/client/node_lifecycle_test.go | 147 +++++++++--------- common/client/node_selector.go | 2 +- .../client/node_selector_highest_head_test.go | 44 +++--- .../node_selector_priority_level_test.go | 4 +- .../client/node_selector_round_robin_test.go | 6 +- common/client/node_selector_test.go | 2 +- .../node_selector_total_difficulty_test.go | 67 +++++--- common/client/node_test.go | 8 +- common/client/send_only_node.go | 6 +- common/client/types.go | 4 +- core/chains/evm/client/chain_client.go | 38 ++++- core/chains/evm/client/chain_id_sub.go | 13 +- core/chains/evm/client/chain_id_sub_test.go | 16 +- core/chains/evm/client/client.go | 6 +- core/chains/evm/client/evm_client.go | 2 +- core/chains/evm/client/helpers_test.go | 21 +-- core/chains/evm/client/rpc_client.go | 2 +- 26 files changed, 329 insertions(+), 264 deletions(-) rename common/client/{mocks/rpc_client.go => mock_rpc_client_test.go} (72%) diff --git a/common/client/mock_node_selector_test.go b/common/client/mock_node_selector_test.go index b645d9e69ba..f068af84a1d 100644 --- a/common/client/mock_node_selector_test.go +++ b/common/client/mock_node_selector_test.go @@ -8,7 +8,7 @@ import ( ) // mockNodeSelector is an autogenerated mock type for the NodeSelector type -type mockNodeSelector[CHAIN_ID types.ID, HEAD Head, RPC RPCClient[CHAIN_ID, HEAD]] struct { +type mockNodeSelector[CHAIN_ID types.ID, HEAD Head, RPC interface{}] struct { mock.Mock } @@ -52,7 +52,7 @@ func (_m *mockNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, HEAD, R // newMockNodeSelector creates a new instance of mockNodeSelector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. -func newMockNodeSelector[CHAIN_ID types.ID, HEAD Head, RPC RPCClient[CHAIN_ID, HEAD]](t interface { +func newMockNodeSelector[CHAIN_ID types.ID, HEAD Head, RPC interface{}](t interface { mock.TestingT Cleanup(func()) }) *mockNodeSelector[CHAIN_ID, HEAD, RPC] { diff --git a/common/client/mock_node_test.go b/common/client/mock_node_test.go index a7bd79e29a9..d5ab6d56233 100644 --- a/common/client/mock_node_test.go +++ b/common/client/mock_node_test.go @@ -10,7 +10,7 @@ import ( ) // mockNode is an autogenerated mock type for the Node type -type mockNode[CHAIN_ID types.ID, HEAD Head, RPC_CLIENT RPCClient[CHAIN_ID, HEAD]] struct { +type mockNode[CHAIN_ID types.ID, HEAD Head, RPC_CLIENT interface{}] struct { mock.Mock } @@ -193,7 +193,7 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) UnsubscribeAll() { // newMockNode creates a new instance of mockNode. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. -func newMockNode[CHAIN_ID types.ID, HEAD Head, RPC_CLIENT RPCClient[CHAIN_ID, HEAD]](t interface { +func newMockNode[CHAIN_ID types.ID, HEAD Head, RPC_CLIENT interface{}](t interface { mock.TestingT Cleanup(func()) }) *mockNode[CHAIN_ID, HEAD, RPC_CLIENT] { diff --git a/common/client/mocks/rpc_client.go b/common/client/mock_rpc_client_test.go similarity index 72% rename from common/client/mocks/rpc_client.go rename to common/client/mock_rpc_client_test.go index 7ad29397b7a..9e89f22844d 100644 --- a/common/client/mocks/rpc_client.go +++ b/common/client/mock_rpc_client_test.go @@ -1,24 +1,21 @@ // Code generated by mockery v2.42.2. DO NOT EDIT. -package mocks +package client import ( context "context" - client "github.com/smartcontractkit/chainlink/v2/common/client" - - mock "github.com/stretchr/testify/mock" - types "github.com/smartcontractkit/chainlink/v2/common/types" + mock "github.com/stretchr/testify/mock" ) -// RPCClient is an autogenerated mock type for the RPCClient type -type RPCClient[CHAIN_ID types.ID, HEAD client.Head] struct { +// MockRPCClient is an autogenerated mock type for the RPCClient type +type MockRPCClient[CHAIN_ID types.ID, HEAD Head] struct { mock.Mock } // ChainID provides a mock function with given fields: ctx -func (_m *RPCClient[CHAIN_ID, HEAD]) ChainID(ctx context.Context) (CHAIN_ID, error) { +func (_m *MockRPCClient[CHAIN_ID, HEAD]) ChainID(ctx context.Context) (CHAIN_ID, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -46,12 +43,12 @@ func (_m *RPCClient[CHAIN_ID, HEAD]) ChainID(ctx context.Context) (CHAIN_ID, err } // Close provides a mock function with given fields: -func (_m *RPCClient[CHAIN_ID, HEAD]) Close() { +func (_m *MockRPCClient[CHAIN_ID, HEAD]) Close() { _m.Called() } // Dial provides a mock function with given fields: ctx -func (_m *RPCClient[CHAIN_ID, HEAD]) Dial(ctx context.Context) error { +func (_m *MockRPCClient[CHAIN_ID, HEAD]) Dial(ctx context.Context) error { ret := _m.Called(ctx) if len(ret) == 0 { @@ -69,7 +66,7 @@ func (_m *RPCClient[CHAIN_ID, HEAD]) Dial(ctx context.Context) error { } // IsSyncing provides a mock function with given fields: ctx -func (_m *RPCClient[CHAIN_ID, HEAD]) IsSyncing(ctx context.Context) (bool, error) { +func (_m *MockRPCClient[CHAIN_ID, HEAD]) IsSyncing(ctx context.Context) (bool, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -97,7 +94,7 @@ func (_m *RPCClient[CHAIN_ID, HEAD]) IsSyncing(ctx context.Context) (bool, error } // Ping provides a mock function with given fields: _a0 -func (_m *RPCClient[CHAIN_ID, HEAD]) Ping(_a0 context.Context) error { +func (_m *MockRPCClient[CHAIN_ID, HEAD]) Ping(_a0 context.Context) error { ret := _m.Called(_a0) if len(ret) == 0 { @@ -115,7 +112,7 @@ func (_m *RPCClient[CHAIN_ID, HEAD]) Ping(_a0 context.Context) error { } // SubscribeToFinalizedHeads provides a mock function with given fields: ctx -func (_m *RPCClient[CHAIN_ID, HEAD]) SubscribeToFinalizedHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) { +func (_m *MockRPCClient[CHAIN_ID, HEAD]) SubscribeToFinalizedHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -154,24 +151,24 @@ func (_m *RPCClient[CHAIN_ID, HEAD]) SubscribeToFinalizedHeads(ctx context.Conte } // SubscribeToHeads provides a mock function with given fields: ctx -func (_m *RPCClient[CHAIN_ID, HEAD]) SubscribeToHeads(ctx context.Context) (chan HEAD, types.Subscription, error) { +func (_m *MockRPCClient[CHAIN_ID, HEAD]) SubscribeToHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) { ret := _m.Called(ctx) if len(ret) == 0 { panic("no return value specified for SubscribeToHeads") } - var r0 chan HEAD + var r0 <-chan HEAD var r1 types.Subscription var r2 error - if rf, ok := ret.Get(0).(func(context.Context) (chan HEAD, types.Subscription, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context) (<-chan HEAD, types.Subscription, error)); ok { return rf(ctx) } - if rf, ok := ret.Get(0).(func(context.Context) chan HEAD); ok { + if rf, ok := ret.Get(0).(func(context.Context) <-chan HEAD); ok { r0 = rf(ctx) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(chan HEAD) + r0 = ret.Get(0).(<-chan HEAD) } } @@ -193,7 +190,7 @@ func (_m *RPCClient[CHAIN_ID, HEAD]) SubscribeToHeads(ctx context.Context) (chan } // UnsubscribeAllExcept provides a mock function with given fields: subs -func (_m *RPCClient[CHAIN_ID, HEAD]) UnsubscribeAllExcept(subs ...types.Subscription) { +func (_m *MockRPCClient[CHAIN_ID, HEAD]) UnsubscribeAllExcept(subs ...types.Subscription) { _va := make([]interface{}, len(subs)) for _i := range subs { _va[_i] = subs[_i] @@ -203,13 +200,13 @@ func (_m *RPCClient[CHAIN_ID, HEAD]) UnsubscribeAllExcept(subs ...types.Subscrip _m.Called(_ca...) } -// NewRPCClient creates a new instance of RPCClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// NewMockRPCClient creates a new instance of MockRPCClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. -func NewRPCClient[CHAIN_ID types.ID, HEAD client.Head](t interface { +func NewMockRPCClient[CHAIN_ID types.ID, HEAD Head](t interface { mock.TestingT Cleanup(func()) -}) *RPCClient[CHAIN_ID, HEAD] { - mock := &RPCClient[CHAIN_ID, HEAD]{} +}) *MockRPCClient[CHAIN_ID, HEAD] { + mock := &MockRPCClient[CHAIN_ID, HEAD]{} mock.Mock.Test(t) t.Cleanup(func() { mock.AssertExpectations(t) }) diff --git a/common/client/mock_send_only_client_test.go b/common/client/mock_send_only_client_test.go index b07e10ed8ce..6c047733f43 100644 --- a/common/client/mock_send_only_client_test.go +++ b/common/client/mock_send_only_client_test.go @@ -47,17 +47,17 @@ func (_m *mockSendOnlyClient[CHAIN_ID]) Close() { _m.Called() } -// DialHTTP provides a mock function with given fields: -func (_m *mockSendOnlyClient[CHAIN_ID]) DialHTTP() error { - ret := _m.Called() +// Dial provides a mock function with given fields: ctx +func (_m *mockSendOnlyClient[CHAIN_ID]) Dial(ctx context.Context) error { + ret := _m.Called(ctx) if len(ret) == 0 { - panic("no return value specified for DialHTTP") + panic("no return value specified for Dial") } var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) } else { r0 = ret.Error(0) } diff --git a/common/client/mock_send_only_node_test.go b/common/client/mock_send_only_node_test.go index 4822c2620b8..008b8793428 100644 --- a/common/client/mock_send_only_node_test.go +++ b/common/client/mock_send_only_node_test.go @@ -10,7 +10,7 @@ import ( ) // mockSendOnlyNode is an autogenerated mock type for the SendOnlyNode type -type mockSendOnlyNode[CHAIN_ID types.ID, RPC sendOnlyClient[CHAIN_ID]] struct { +type mockSendOnlyNode[CHAIN_ID types.ID, RPC interface{}] struct { mock.Mock } @@ -142,7 +142,7 @@ func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) String() string { // newMockSendOnlyNode creates a new instance of mockSendOnlyNode. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. -func newMockSendOnlyNode[CHAIN_ID types.ID, RPC sendOnlyClient[CHAIN_ID]](t interface { +func newMockSendOnlyNode[CHAIN_ID types.ID, RPC interface{}](t interface { mock.TestingT Cleanup(func()) }) *mockSendOnlyNode[CHAIN_ID, RPC] { diff --git a/common/client/multi_node.go b/common/client/multi_node.go index 51d51f67646..4115dc1873f 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -22,14 +22,14 @@ var ( // PromMultiNodeRPCNodeStates reports current RPC node state PromMultiNodeRPCNodeStates = promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "multi_node_states", - Help: "The number of RPC primaryNodes currently in the given state for the given chain", + Help: "The number of RPC nodes currently in the given state for the given chain", }, []string{"network", "chainId", "state"}) // PromMultiNodeInvariantViolations reports violation of our assumptions PromMultiNodeInvariantViolations = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "multi_node_invariant_violations", Help: "The number of invariant violations", }, []string{"network", "chainId", "invariant"}) - ErroringNodeError = fmt.Errorf("no live primaryNodes available") + ErroringNodeError = fmt.Errorf("no live nodes available") ) // MultiNode is a generalized multi node client interface that includes methods to interact with different chains. @@ -38,7 +38,7 @@ type MultiNode[ CHAIN_ID types.ID, BLOCK_HASH types.Hashable, HEAD types.Head[BLOCK_HASH], - RPC_CLIENT RPCClient[CHAIN_ID, HEAD], + RPC_CLIENT any, ] interface { // SelectRPC - returns the best healthy RPCClient SelectRPC() (RPC_CLIENT, error) @@ -60,7 +60,7 @@ type multiNode[ ] struct { services.StateMachine primaryNodes []Node[CHAIN_ID, HEAD, RPC_CLIENT] - sendOnlyNodes []Node[CHAIN_ID, HEAD, RPC_CLIENT] + sendOnlyNodes []SendOnlyNode[CHAIN_ID, RPC_CLIENT] chainID CHAIN_ID lggr logger.SugaredLogger selectionMode string @@ -89,7 +89,7 @@ func NewMultiNode[ selectionMode string, // type of the "best" RPC selector (e.g HighestHead, RoundRobin, etc.) leaseDuration time.Duration, // defines interval on which new "best" RPC should be selected primaryNodes []Node[CHAIN_ID, HEAD, RPC_CLIENT], - sendOnlyNodes []Node[CHAIN_ID, HEAD, RPC_CLIENT], + sendOnlyNodes []SendOnlyNode[CHAIN_ID, RPC_CLIENT], chainID CHAIN_ID, // configured chain ID (used to verify that passed primaryNodes belong to the same chain) chainFamily string, // name of the chain family - used in the metrics ) MultiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT] { @@ -121,36 +121,41 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) ChainType() config.C } func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) DoAll(ctx context.Context, do func(ctx context.Context, rpc RPC_CLIENT, isSendOnly bool) bool) error { - runDo := func(nodes []Node[CHAIN_ID, HEAD, RPC_CLIENT], isSendOnly bool) error { - for _, n := range nodes { - if ctx.Err() != nil { - return ctx.Err() - } - if n.State() == nodeStateAlive { - if !do(ctx, n.RPC(), isSendOnly) { - if ctx.Err() != nil { - return ctx.Err() - } - return fmt.Errorf("do aborted on node %s", n.String()) - } - } + callsCompleted := 0 + for _, n := range c.primaryNodes { + if ctx.Err() != nil { + return ctx.Err() + } + if n.State() != nodeStateAlive { + continue + } + if do(ctx, n.RPC(), false) { + callsCompleted++ } - return nil } - - if err := runDo(c.primaryNodes, false); err != nil { - return err + for _, n := range c.sendOnlyNodes { + if ctx.Err() != nil { + return ctx.Err() + } + if n.State() != nodeStateAlive { + continue + } + if do(ctx, n.RPC(), false) { + callsCompleted++ + } } - if err := runDo(c.sendOnlyNodes, true); err != nil { - return err + if callsCompleted == 0 { + return ErroringNodeError } return nil } func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) NodeStates() map[string]nodeState { states := map[string]nodeState{} - allNodes := append(c.primaryNodes, c.sendOnlyNodes...) - for _, n := range allNodes { + for _, n := range c.primaryNodes { + states[n.String()] = n.State() + } + for _, n := range c.sendOnlyNodes { states[n.String()] = n.State() } return states @@ -247,8 +252,8 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) selectNode() (node N c.activeNode = c.nodeSelector.Select() if c.activeNode == nil { - c.lggr.Criticalw("No live RPC primaryNodes available", "NodeSelectionMode", c.nodeSelector.Name()) - errmsg := fmt.Errorf("no live primaryNodes available for chain %s", c.chainID.String()) + c.lggr.Criticalw("No live RPC nodes available", "NodeSelectionMode", c.nodeSelector.Name()) + errmsg := fmt.Errorf("no live nodes available for chain %s", c.chainID.String()) c.SvcErrBuffer.Append(errmsg) err = ErroringNodeError } diff --git a/common/client/multi_node_test.go b/common/client/multi_node_test.go index 20945b254f3..f87cd89c8f8 100644 --- a/common/client/multi_node_test.go +++ b/common/client/multi_node_test.go @@ -1,7 +1,6 @@ package client import ( - "context" "errors" "fmt" "math/big" @@ -31,7 +30,7 @@ type multiNodeOpts struct { selectionMode string leaseDuration time.Duration nodes []Node[types.ID, types.Head[Hashable], multiNodeRPCClient] - sendonlys []Node[types.ID, types.Head[Hashable], multiNodeRPCClient] + sendonlys []SendOnlyNode[types.ID, multiNodeRPCClient] chainID types.ID chainFamily string } @@ -415,9 +414,8 @@ func TestMultiNode_selectNode(t *testing.T) { func TestMultiNode_nLiveNodes(t *testing.T) { t.Parallel() type nodeParams struct { - BlockNumber int64 - TotalDifficulty *big.Int - State nodeState + chainInfo ChainInfo + State nodeState } testCases := []struct { Name string @@ -437,24 +435,32 @@ func TestMultiNode_nLiveNodes(t *testing.T) { ExpectedNLiveNodes: 3, NodeParams: []nodeParams{ { - State: nodeStateOutOfSync, - BlockNumber: 1000, - TotalDifficulty: big.NewInt(2000), + State: nodeStateOutOfSync, + chainInfo: ChainInfo{ + BlockNumber: 1000, + BlockDifficulty: big.NewInt(2000), + }, }, { - State: nodeStateAlive, - BlockNumber: 20, - TotalDifficulty: big.NewInt(9), + State: nodeStateAlive, + chainInfo: ChainInfo{ + BlockNumber: 20, + BlockDifficulty: big.NewInt(9), + }, }, { - State: nodeStateAlive, - BlockNumber: 19, - TotalDifficulty: big.NewInt(10), + State: nodeStateAlive, + chainInfo: ChainInfo{ + BlockNumber: 19, + BlockDifficulty: big.NewInt(10), + }, }, { - State: nodeStateAlive, - BlockNumber: 11, - TotalDifficulty: nil, + State: nodeStateAlive, + chainInfo: ChainInfo{ + BlockNumber: 11, + BlockDifficulty: nil, + }, }, }, }, @@ -470,8 +476,9 @@ func TestMultiNode_nLiveNodes(t *testing.T) { t.Run(tc.Name, func(t *testing.T) { for _, params := range tc.NodeParams { node := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) - node.On("StateAndLatest").Return(params.State, params.BlockNumber, params.TotalDifficulty) - mn.nodes = append(mn.nodes, node) + // TODO: Returns chainInfo not block number, difficulty! + node.On("StateAndLatest").Return(params.State, params.chainInfo) + mn.primaryNodes = append(mn.primaryNodes, node) } nNodes, blockNum, td := mn.nLiveNodes() @@ -482,6 +489,7 @@ func TestMultiNode_nLiveNodes(t *testing.T) { } } +/* TODO: Multinode no longer contains this method; maybe test DoAll instead? func TestMultiNode_BatchCallContextAll(t *testing.T) { t.Parallel() t.Run("Fails if failed to select active node", func(t *testing.T) { @@ -578,7 +586,9 @@ func TestMultiNode_BatchCallContextAll(t *testing.T) { require.NoError(t, err) }) } +*/ +/* TODO: Implement TransactionSender func TestMultiNode_SendTransaction(t *testing.T) { t.Parallel() classifySendTxError := func(tx any, err error) SendTxReturnCode { @@ -878,3 +888,4 @@ func TestMultiNode_SendTransaction_aggregateTxResults(t *testing.T) { } assert.Empty(t, codesToCover, "all of the SendTxReturnCode must be covered by this test") } +*/ diff --git a/common/client/node.go b/common/client/node.go index d1b74a8d1b2..fcfa6288915 100644 --- a/common/client/node.go +++ b/common/client/node.go @@ -71,7 +71,7 @@ type ChainInfo struct { type Node[ CHAIN_ID types.ID, HEAD Head, - RPC_CLIENT RPCClient[CHAIN_ID, HEAD], + RPC_CLIENT any, ] interface { // State returns health state of the underlying RPC State() nodeState diff --git a/common/client/node_fsm_test.go b/common/client/node_fsm_test.go index 36cee65e09e..51d9f1b6ab9 100644 --- a/common/client/node_fsm_test.go +++ b/common/client/node_fsm_test.go @@ -39,47 +39,47 @@ func TestUnit_Node_StateTransitions(t *testing.T) { t.Run("transitionToAlive", func(t *testing.T) { const destinationState = nodeStateAlive allowedStates := []nodeState{nodeStateDialed, nodeStateInvalidChainID, nodeStateSyncing} - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) testTransition(t, rpc, testNode.transitionToAlive, destinationState, allowedStates...) }) t.Run("transitionToInSync", func(t *testing.T) { const destinationState = nodeStateAlive allowedStates := []nodeState{nodeStateOutOfSync, nodeStateSyncing} - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) testTransition(t, rpc, testNode.transitionToInSync, destinationState, allowedStates...) }) t.Run("transitionToOutOfSync", func(t *testing.T) { const destinationState = nodeStateOutOfSync allowedStates := []nodeState{nodeStateAlive} - rpc := newMockNodeClient[types.ID, Head](t) - rpc.On("DisconnectAll").Once() + rpc := NewMockRPCClient[types.ID, Head](t) + rpc.On("UnsubscribeAllExcept").Once() testTransition(t, rpc, testNode.transitionToOutOfSync, destinationState, allowedStates...) }) t.Run("transitionToUnreachable", func(t *testing.T) { const destinationState = nodeStateUnreachable allowedStates := []nodeState{nodeStateUndialed, nodeStateDialed, nodeStateAlive, nodeStateOutOfSync, nodeStateInvalidChainID, nodeStateSyncing} - rpc := newMockNodeClient[types.ID, Head](t) - rpc.On("DisconnectAll").Times(len(allowedStates)) + rpc := NewMockRPCClient[types.ID, Head](t) + rpc.On("UnsubscribeAllExcept").Times(len(allowedStates)) testTransition(t, rpc, testNode.transitionToUnreachable, destinationState, allowedStates...) }) t.Run("transitionToInvalidChain", func(t *testing.T) { const destinationState = nodeStateInvalidChainID allowedStates := []nodeState{nodeStateDialed, nodeStateOutOfSync, nodeStateSyncing} - rpc := newMockNodeClient[types.ID, Head](t) - rpc.On("DisconnectAll").Times(len(allowedStates)) + rpc := NewMockRPCClient[types.ID, Head](t) + rpc.On("UnsubscribeAllExcept").Times(len(allowedStates)) testTransition(t, rpc, testNode.transitionToInvalidChainID, destinationState, allowedStates...) }) t.Run("transitionToSyncing", func(t *testing.T) { const destinationState = nodeStateSyncing allowedStates := []nodeState{nodeStateDialed, nodeStateOutOfSync, nodeStateInvalidChainID} - rpc := newMockNodeClient[types.ID, Head](t) - rpc.On("DisconnectAll").Times(len(allowedStates)) + rpc := NewMockRPCClient[types.ID, Head](t) + rpc.On("UnsubscribeAllExcept").Times(len(allowedStates)) testTransition(t, rpc, testNode.transitionToSyncing, destinationState, allowedStates...) }) t.Run("transitionToSyncing panics if nodeIsSyncing is disabled", func(t *testing.T) { - rpc := newMockNodeClient[types.ID, Head](t) - rpc.On("DisconnectAll").Once() + rpc := NewMockRPCClient[types.ID, Head](t) + rpc.On("UnsubscribeAllExcept").Once() node := newTestNode(t, testNodeOpts{rpc: rpc}) node.setState(nodeStateDialed) fn := new(fnMock) @@ -90,7 +90,7 @@ func TestUnit_Node_StateTransitions(t *testing.T) { }) } -func testTransition(t *testing.T, rpc *mockNodeClient[types.ID, Head], transition func(node testNode, fn func()), destinationState nodeState, allowedStates ...nodeState) { +func testTransition(t *testing.T, rpc *MockRPCClient[types.ID, Head], transition func(node testNode, fn func()), destinationState nodeState, allowedStates ...nodeState) { node := newTestNode(t, testNodeOpts{rpc: rpc, config: testNodeConfig{nodeIsSyncingEnabled: true}}) for _, allowedState := range allowedStates { m := new(fnMock) diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go index b3c09b35000..a47c8a305ae 100644 --- a/common/client/node_lifecycle_test.go +++ b/common/client/node_lifecycle_test.go @@ -43,7 +43,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("if initial subscribe fails, transitions to unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) node := newDialedNode(t, testNodeOpts{ rpc: rpc, }) @@ -51,7 +51,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { expectedError := errors.New("failed to subscribe to rpc") rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(nil, expectedError).Once() - rpc.On("DisconnectAll").Once() + rpc.On("UnsubscribeAllExcept").Once() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -62,7 +62,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("if remote RPC connection is closed transitions to unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) node := newDialedNode(t, testNodeOpts{ @@ -79,7 +79,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(sub, nil).Once() rpc.On("SetAliveLoopSub", sub).Once() // disconnects all on transfer to unreachable - rpc.On("DisconnectAll").Once() + rpc.On("UnsubscribeAllExcept").Once() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -97,7 +97,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { } t.Run("Stays alive and waits for signal", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newSubscribedNode(t, testNodeOpts{ config: testNodeConfig{}, @@ -112,7 +112,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("stays alive while below pollFailureThreshold and resets counter on success", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) const pollFailureThreshold = 3 node := newSubscribedNode(t, testNodeOpts{ @@ -154,7 +154,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("with threshold poll failures, transitions to unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) const pollFailureThreshold = 3 node := newSubscribedNode(t, testNodeOpts{ @@ -169,7 +169,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { pollError := errors.New("failed to get ClientVersion") rpc.On("ClientVersion", mock.Anything).Return("", pollError) // disconnects all on transfer to unreachable - rpc.On("DisconnectAll").Once() + rpc.On("UnsubscribeAllExcept").Once() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -180,7 +180,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("with threshold poll failures, but we are the last node alive, forcibly keeps it alive", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) const pollFailureThreshold = 3 node := newSubscribedNode(t, testNodeOpts{ @@ -203,7 +203,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("when behind more than SyncThreshold, transitions to out of sync", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) const syncThreshold = 10 node := newSubscribedNode(t, testNodeOpts{ @@ -226,7 +226,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { assert.Equal(t, nodeStateOutOfSync, node.State()) }).Once() // disconnects all on transfer to unreachable or outOfSync - rpc.On("DisconnectAll").Maybe() + rpc.On("UnsubscribeAllExcept").Maybe() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Run(func(_ mock.Arguments) { require.Equal(t, nodeStateOutOfSync, node.State()) @@ -236,7 +236,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("when behind more than SyncThreshold but we are the last live node, forcibly stays alive", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) const syncThreshold = 10 node := newSubscribedNode(t, testNodeOpts{ @@ -259,7 +259,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("when behind but SyncThreshold=0, stay alive", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newSubscribedNode(t, testNodeOpts{ config: testNodeConfig{ @@ -283,7 +283,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { t.Run("when no new heads received for threshold, transitions to out of sync", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) node := newSubscribedNode(t, testNodeOpts{ config: testNodeConfig{}, chainConfig: clientMocks.ChainConfig{ @@ -297,7 +297,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { assert.Equal(t, nodeStateOutOfSync, node.State()) }).Once() // disconnects all on transfer to unreachable or outOfSync - rpc.On("DisconnectAll").Maybe() + rpc.On("UnsubscribeAllExcept").Maybe() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -309,7 +309,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("when no new heads received for threshold but we are the last live node, forcibly stays alive", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newSubscribedNode(t, testNodeOpts{ config: testNodeConfig{}, @@ -330,7 +330,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { t.Run("rpc closed head channel", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) sub.On("Unsubscribe").Once() @@ -350,7 +350,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() // disconnects all on transfer to unreachable or outOfSync - rpc.On("DisconnectAll").Once() + rpc.On("UnsubscribeAllExcept").Once() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -360,7 +360,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("updates block number and difficulty on new head", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) sub.On("Unsubscribe").Once() @@ -378,13 +378,13 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() node.declareAlive() tests.AssertEventually(t, func() bool { - state, block, diff := node.StateAndLatest() - return state == nodeStateAlive && block == expectedBlockNumber == bigmath.Equal(diff, expectedDiff) + state, chainInfo := node.StateAndLatest() + return state == nodeStateAlive && chainInfo.BlockNumber == expectedBlockNumber == bigmath.Equal(chainInfo.BlockDifficulty, expectedDiff) }) }) t.Run("If finality tag is not enabled updates finalized block metric using finality depth and latest head", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) sub.On("Unsubscribe").Once() @@ -416,7 +416,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("Logs warning if failed to get finalized block", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) rpc.On("LatestFinalizedBlock", mock.Anything).Return(newMockHead(t), errors.New("failed to get finalized block")) sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) @@ -440,7 +440,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("Logs warning if latest finalized block is not valid", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) head := newMockHead(t) head.On("IsValid").Return(false) rpc.On("LatestFinalizedBlock", mock.Anything).Return(head, nil) @@ -466,7 +466,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("If finality tag and finalized block polling are enabled updates latest finalized block metric", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) const expectedBlock = 1101 const finalityDepth = 10 rpc.On("LatestFinalizedBlock", mock.Anything).Return(head{BlockNumber: expectedBlock - 1}.ToMockHead(t), nil).Once() @@ -530,7 +530,7 @@ func writeHeads(t *testing.T, ch chan<- Head, heads ...head) { } } -func setupRPCForAliveLoop(t *testing.T, rpc *mockNodeClient[types.ID, Head]) { +func setupRPCForAliveLoop(t *testing.T, rpc *MockRPCClient[types.ID, Head]) { rpc.On("Dial", mock.Anything).Return(nil).Maybe() aliveSubscription := mocks.NewSubscription(t) aliveSubscription.On("Err").Return((<-chan error)(nil)).Maybe() @@ -546,7 +546,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { node := newTestNode(t, opts) opts.rpc.On("Close").Return(nil).Once() // disconnects all on transfer to unreachable or outOfSync - opts.rpc.On("DisconnectAll") + opts.rpc.On("UnsubscribeAllExcept") node.setState(nodeStateAlive) return node } @@ -564,7 +564,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("on old blocks stays outOfSync and returns on close", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newAliveNode(t, testNodeOpts{ @@ -595,7 +595,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("if initial dial fails, transitions to unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) node := newAliveNode(t, testNodeOpts{ rpc: rpc, }) @@ -611,7 +611,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("if fail to get chainID, transitions to unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) node := newAliveNode(t, testNodeOpts{ rpc: rpc, }) @@ -631,7 +631,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("if chainID does not match, transitions to invalidChainID", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) rpcChainID := types.NewIDFromInt(11) node := newAliveNode(t, testNodeOpts{ @@ -651,7 +651,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("if syncing, transitions to syncing", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) node := newAliveNode(t, testNodeOpts{ rpc: rpc, @@ -671,7 +671,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("if fails to fetch syncing status, transitions to unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) node := newAliveNode(t, testNodeOpts{ rpc: rpc, @@ -694,7 +694,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("if fails to subscribe, becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() node := newAliveNode(t, testNodeOpts{ rpc: rpc, @@ -714,7 +714,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("on subscription termination becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) node := newAliveNode(t, testNodeOpts{ @@ -742,7 +742,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("becomes unreachable if head channel is closed", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) node := newAliveNode(t, testNodeOpts{ @@ -772,7 +772,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { t.Run("becomes alive if it receives a newer head", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newAliveNode(t, testNodeOpts{ @@ -807,7 +807,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("becomes alive if there is no other nodes", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newAliveNode(t, testNodeOpts{ @@ -848,7 +848,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { node := newTestNode(t, opts) opts.rpc.On("Close").Return(nil).Once() // disconnects all on transfer to unreachable - opts.rpc.On("DisconnectAll") + opts.rpc.On("UnsubscribeAllExcept") node.setState(nodeStateAlive) return node @@ -863,7 +863,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { }) t.Run("on failed redial, keeps trying", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newAliveNode(t, testNodeOpts{ @@ -879,7 +879,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { }) t.Run("on failed chainID verification, keep trying", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newAliveNode(t, testNodeOpts{ @@ -898,7 +898,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { }) t.Run("on chain ID mismatch transitions to invalidChainID", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) rpcChainID := types.NewIDFromInt(11) node := newAliveNode(t, testNodeOpts{ @@ -916,7 +916,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { }) t.Run("on syncing status check failure, keeps trying", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newAliveNode(t, testNodeOpts{ @@ -937,7 +937,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { }) t.Run("on syncing, transitions to syncing state", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() node := newAliveNode(t, testNodeOpts{ rpc: rpc, @@ -959,7 +959,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { }) t.Run("on successful verification becomes alive", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() node := newAliveNode(t, testNodeOpts{ rpc: rpc, @@ -981,7 +981,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { }) t.Run("on successful verification without isSyncing becomes alive", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() node := newAliveNode(t, testNodeOpts{ rpc: rpc, @@ -1006,7 +1006,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { newDialedNode := func(t *testing.T, opts testNodeOpts) testNode { node := newTestNode(t, opts) opts.rpc.On("Close").Return(nil).Once() - opts.rpc.On("DisconnectAll") + opts.rpc.On("UnsubscribeAllExcept") node.setState(nodeStateDialed) return node @@ -1021,7 +1021,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { }) t.Run("on invalid dial becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() node := newDialedNode(t, testNodeOpts{ rpc: rpc, @@ -1037,7 +1037,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { }) t.Run("on failed chainID call becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ @@ -1059,7 +1059,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { }) t.Run("on chainID mismatch keeps trying", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) rpcChainID := types.NewIDFromInt(11) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) @@ -1080,9 +1080,12 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { }) t.Run("on successful verification without isSyncing becomes alive", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) rpcChainID := types.NewIDFromInt(11) + // TODO: SubscribeToHeads return value? + headCh := make(<-chan Head) + sub := mocks.NewSubscription(t) node := newDialedNode(t, testNodeOpts{ rpc: rpc, chainID: nodeChainID, @@ -1090,6 +1093,8 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(nil).Once() + // TODO: SubscribeToHeads is called when? + rpc.On("SubscribeToHeads", mock.Anything).Return(headCh, sub, nil).Once() rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() @@ -1102,7 +1107,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { }) t.Run("on successful verification becomes alive", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) rpcChainID := types.NewIDFromInt(11) node := newDialedNode(t, testNodeOpts{ @@ -1137,7 +1142,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { } t.Run("if fails on initial dial, becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newNode(t, testNodeOpts{ @@ -1149,7 +1154,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")) // disconnects all on transfer to unreachable - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept") err := node.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertLogEventually(t, observedLogs, "Dial failed: Node is unreachable") @@ -1159,7 +1164,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }) t.Run("if chainID verification fails, becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newNode(t, testNodeOpts{ @@ -1174,7 +1179,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { assert.Equal(t, nodeStateDialed, node.State()) }).Return(nodeChainID, errors.New("failed to get chain id")) // disconnects all on transfer to unreachable - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept") err := node.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertLogEventually(t, observedLogs, "Failed to verify chain ID for node") @@ -1184,7 +1189,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }) t.Run("on chain ID mismatch transitions to invalidChainID", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) rpcChainID := types.NewIDFromInt(11) node := newNode(t, testNodeOpts{ @@ -1196,7 +1201,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) // disconnects all on transfer to unreachable - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept") err := node.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertEventually(t, func() bool { @@ -1205,7 +1210,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }) t.Run("if syncing verification fails, becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newNode(t, testNodeOpts{ @@ -1222,7 +1227,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }).Return(nodeChainID, nil).Once() rpc.On("IsSyncing", mock.Anything).Return(false, errors.New("failed to check syncing status")) // disconnects all on transfer to unreachable - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept") // fail to redial to stay in unreachable state rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")) err := node.Start(tests.Context(t)) @@ -1234,7 +1239,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }) t.Run("on isSyncing transitions to syncing", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) node := newNode(t, testNodeOpts{ rpc: rpc, @@ -1247,7 +1252,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) rpc.On("IsSyncing", mock.Anything).Return(true, nil) // disconnects all on transfer to unreachable - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept") err := node.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertEventually(t, func() bool { @@ -1256,7 +1261,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }) t.Run("on successful verification becomes alive", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() node := newNode(t, testNodeOpts{ rpc: rpc, @@ -1279,7 +1284,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }) t.Run("on successful verification without isSyncing becomes alive", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() node := newNode(t, testNodeOpts{ rpc: rpc, @@ -1442,7 +1447,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { opts.config.nodeIsSyncingEnabled = true node := newTestNode(t, opts) opts.rpc.On("Close").Return(nil).Once() - opts.rpc.On("DisconnectAll") + opts.rpc.On("UnsubscribeAllExcept") node.setState(nodeStateDialed) return node @@ -1457,7 +1462,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { }) t.Run("on invalid dial becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() node := newDialedNode(t, testNodeOpts{ rpc: rpc, @@ -1473,7 +1478,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { }) t.Run("on failed chainID call becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ @@ -1495,7 +1500,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { }) t.Run("on chainID mismatch transitions to invalidChainID", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) rpcChainID := types.NewIDFromInt(11) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) @@ -1516,7 +1521,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { }) t.Run("on failed Syncing check - becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ @@ -1540,7 +1545,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { }) t.Run("on IsSyncing - keeps trying", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ @@ -1561,7 +1566,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { }) t.Run("on successful verification becomes alive", func(t *testing.T) { t.Parallel() - rpc := newMockNodeClient[types.ID, Head](t) + rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() node := newDialedNode(t, testNodeOpts{ rpc: rpc, diff --git a/common/client/node_selector.go b/common/client/node_selector.go index f928dabca6f..9ec0d956f19 100644 --- a/common/client/node_selector.go +++ b/common/client/node_selector.go @@ -17,7 +17,7 @@ const ( type NodeSelector[ CHAIN_ID types.ID, HEAD Head, - RPC RPCClient[CHAIN_ID, HEAD], + RPC any, ] interface { // Select returns a Node, or nil if none can be selected. // Implementation must be thread-safe. diff --git a/common/client/node_selector_highest_head_test.go b/common/client/node_selector_highest_head_test.go index b8b0296f181..db66e9777de 100644 --- a/common/client/node_selector_highest_head_test.go +++ b/common/client/node_selector_highest_head_test.go @@ -24,13 +24,13 @@ func TestHighestHeadNodeSelector(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) if i == 0 { // first node is out of sync - node.On("StateAndLatest").Return(nodeStateOutOfSync, int64(-1), nil) + node.On("StateAndLatest").Return(nodeStateOutOfSync, ChainInfo{BlockNumber: -1}) } else if i == 1 { // second node is alive, LatestReceivedBlockNumber = 1 - node.On("StateAndLatest").Return(nodeStateAlive, int64(1), nil) + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1}) } else { // third node is alive, LatestReceivedBlockNumber = 2 (best node) - node.On("StateAndLatest").Return(nodeStateAlive, int64(2), nil) + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 2}) } node.On("Order").Maybe().Return(int32(1)) nodes = append(nodes, node) @@ -42,7 +42,7 @@ func TestHighestHeadNodeSelector(t *testing.T) { t.Run("stick to the same node", func(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) // fourth node is alive, LatestReceivedBlockNumber = 2 (same as 3rd) - node.On("StateAndLatest").Return(nodeStateAlive, int64(2), nil) + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 2}) node.On("Order").Return(int32(1)) nodes = append(nodes, node) @@ -53,7 +53,7 @@ func TestHighestHeadNodeSelector(t *testing.T) { t.Run("another best node", func(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) // fifth node is alive, LatestReceivedBlockNumber = 3 (better than 3rd and 4th) - node.On("StateAndLatest").Return(nodeStateAlive, int64(3), nil) + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 3}) node.On("Order").Return(int32(1)) nodes = append(nodes, node) @@ -63,10 +63,10 @@ func TestHighestHeadNodeSelector(t *testing.T) { t.Run("nodes never update latest block number", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, int64(-1), nil) + node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: -1}) node1.On("Order").Return(int32(1)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, int64(-1), nil) + node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: -1}) node2.On("Order").Return(int32(1)) selector := newNodeSelector(NodeSelectionModeHighestHead, []Node[types.ID, Head, nodeClient]{node1, node2}) assert.Same(t, node1, selector.Select()) @@ -76,17 +76,17 @@ func TestHighestHeadNodeSelector(t *testing.T) { func TestHighestHeadNodeSelector_None(t *testing.T) { t.Parallel() - type nodeClient NodeClient[types.ID, Head] + type nodeClient RPCClient[types.ID, Head] var nodes []Node[types.ID, Head, nodeClient] for i := 0; i < 3; i++ { node := newMockNode[types.ID, Head, nodeClient](t) if i == 0 { // first node is out of sync - node.On("StateAndLatest").Return(nodeStateOutOfSync, int64(-1), nil) + node.On("StateAndLatest").Return(nodeStateOutOfSync, ChainInfo{BlockNumber: -1}) } else { // others are unreachable - node.On("StateAndLatest").Return(nodeStateUnreachable, int64(1), nil) + node.On("StateAndLatest").Return(nodeStateUnreachable, ChainInfo{BlockNumber: 1}) } nodes = append(nodes, node) } @@ -98,13 +98,13 @@ func TestHighestHeadNodeSelector_None(t *testing.T) { func TestHighestHeadNodeSelectorWithOrder(t *testing.T) { t.Parallel() - type nodeClient NodeClient[types.ID, Head] + type nodeClient RPCClient[types.ID, Head] var nodes []Node[types.ID, Head, nodeClient] t.Run("same head and order", func(t *testing.T) { for i := 0; i < 3; i++ { node := newMockNode[types.ID, Head, nodeClient](t) - node.On("StateAndLatest").Return(nodeStateAlive, int64(1), nil) + node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1}) node.On("Order").Return(int32(2)) nodes = append(nodes, node) } @@ -115,15 +115,15 @@ func TestHighestHeadNodeSelectorWithOrder(t *testing.T) { t.Run("same head but different order", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, int64(3), nil) + node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 3}) node1.On("Order").Return(int32(3)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, int64(3), nil) + node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 3}) node2.On("Order").Return(int32(1)) node3 := newMockNode[types.ID, Head, nodeClient](t) - node3.On("StateAndLatest").Return(nodeStateAlive, int64(3), nil) + node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 3}) node3.On("Order").Return(int32(2)) nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3} @@ -134,15 +134,15 @@ func TestHighestHeadNodeSelectorWithOrder(t *testing.T) { t.Run("different head but same order", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, int64(1), nil) + node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1}) node1.On("Order").Maybe().Return(int32(3)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, int64(2), nil) + node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 2}) node2.On("Order").Maybe().Return(int32(3)) node3 := newMockNode[types.ID, Head, nodeClient](t) - node3.On("StateAndLatest").Return(nodeStateAlive, int64(3), nil) + node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 3}) node3.On("Order").Return(int32(3)) nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3} @@ -153,19 +153,19 @@ func TestHighestHeadNodeSelectorWithOrder(t *testing.T) { t.Run("different head and different order", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, int64(10), nil) + node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 10}) node1.On("Order").Maybe().Return(int32(3)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, int64(11), nil) + node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 11}) node2.On("Order").Maybe().Return(int32(4)) node3 := newMockNode[types.ID, Head, nodeClient](t) - node3.On("StateAndLatest").Return(nodeStateAlive, int64(11), nil) + node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 12}) node3.On("Order").Maybe().Return(int32(3)) node4 := newMockNode[types.ID, Head, nodeClient](t) - node4.On("StateAndLatest").Return(nodeStateAlive, int64(10), nil) + node4.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 10}) node4.On("Order").Maybe().Return(int32(1)) nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3, node4} diff --git a/common/client/node_selector_priority_level_test.go b/common/client/node_selector_priority_level_test.go index 15a7a7ac60b..1467aaec0aa 100644 --- a/common/client/node_selector_priority_level_test.go +++ b/common/client/node_selector_priority_level_test.go @@ -9,14 +9,14 @@ import ( ) func TestPriorityLevelNodeSelectorName(t *testing.T) { - selector := newNodeSelector[types.ID, Head, NodeClient[types.ID, Head]](NodeSelectionModePriorityLevel, nil) + selector := newNodeSelector[types.ID, Head, RPCClient[types.ID, Head]](NodeSelectionModePriorityLevel, nil) assert.Equal(t, selector.Name(), NodeSelectionModePriorityLevel) } func TestPriorityLevelNodeSelector(t *testing.T) { t.Parallel() - type nodeClient NodeClient[types.ID, Head] + type nodeClient RPCClient[types.ID, Head] type testNode struct { order int32 state nodeState diff --git a/common/client/node_selector_round_robin_test.go b/common/client/node_selector_round_robin_test.go index e5078d858f1..acd0e268849 100644 --- a/common/client/node_selector_round_robin_test.go +++ b/common/client/node_selector_round_robin_test.go @@ -9,14 +9,14 @@ import ( ) func TestRoundRobinNodeSelectorName(t *testing.T) { - selector := newNodeSelector[types.ID, Head, NodeClient[types.ID, Head]](NodeSelectionModeRoundRobin, nil) + selector := newNodeSelector[types.ID, Head, RPCClient[types.ID, Head]](NodeSelectionModeRoundRobin, nil) assert.Equal(t, selector.Name(), NodeSelectionModeRoundRobin) } func TestRoundRobinNodeSelector(t *testing.T) { t.Parallel() - type nodeClient NodeClient[types.ID, Head] + type nodeClient RPCClient[types.ID, Head] var nodes []Node[types.ID, Head, nodeClient] for i := 0; i < 3; i++ { @@ -41,7 +41,7 @@ func TestRoundRobinNodeSelector(t *testing.T) { func TestRoundRobinNodeSelector_None(t *testing.T) { t.Parallel() - type nodeClient NodeClient[types.ID, Head] + type nodeClient RPCClient[types.ID, Head] var nodes []Node[types.ID, Head, nodeClient] for i := 0; i < 3; i++ { diff --git a/common/client/node_selector_test.go b/common/client/node_selector_test.go index 226cb67168d..ac280f7142e 100644 --- a/common/client/node_selector_test.go +++ b/common/client/node_selector_test.go @@ -12,7 +12,7 @@ func TestNodeSelector(t *testing.T) { // rest of the tests are located in specific node selectors tests t.Run("panics on unknown type", func(t *testing.T) { assert.Panics(t, func() { - _ = newNodeSelector[types.ID, Head, NodeClient[types.ID, Head]]("unknown", nil) + _ = newNodeSelector[types.ID, Head, RPCClient[types.ID, Head]]("unknown", nil) }) }) } diff --git a/common/client/node_selector_total_difficulty_test.go b/common/client/node_selector_total_difficulty_test.go index 5c43cdd8472..c03b923d76d 100644 --- a/common/client/node_selector_total_difficulty_test.go +++ b/common/client/node_selector_total_difficulty_test.go @@ -10,27 +10,30 @@ import ( ) func TestTotalDifficultyNodeSelectorName(t *testing.T) { - selector := newNodeSelector[types.ID, Head, NodeClient[types.ID, Head]](NodeSelectionModeTotalDifficulty, nil) + selector := newNodeSelector[types.ID, Head, RPCClient[types.ID, Head]](NodeSelectionModeTotalDifficulty, nil) assert.Equal(t, selector.Name(), NodeSelectionModeTotalDifficulty) } func TestTotalDifficultyNodeSelector(t *testing.T) { t.Parallel() - type nodeClient NodeClient[types.ID, Head] + type nodeClient RPCClient[types.ID, Head] var nodes []Node[types.ID, Head, nodeClient] for i := 0; i < 3; i++ { node := newMockNode[types.ID, Head, nodeClient](t) if i == 0 { // first node is out of sync - node.On("StateAndLatest").Return(nodeStateOutOfSync, int64(-1), nil) + node.On("StateAndLatest").Return(nodeStateOutOfSync, + ChainInfo{BlockNumber: -1, BlockDifficulty: nil}) } else if i == 1 { // second node is alive - node.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(7)) + node.On("StateAndLatest").Return(nodeStateAlive, + ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(7)}) } else { // third node is alive and best - node.On("StateAndLatest").Return(nodeStateAlive, int64(2), big.NewInt(8)) + node.On("StateAndLatest").Return(nodeStateAlive, + ChainInfo{BlockNumber: 2, BlockDifficulty: big.NewInt(8)}) } node.On("Order").Maybe().Return(int32(1)) nodes = append(nodes, node) @@ -42,7 +45,8 @@ func TestTotalDifficultyNodeSelector(t *testing.T) { t.Run("stick to the same node", func(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) // fourth node is alive (same as 3rd) - node.On("StateAndLatest").Return(nodeStateAlive, int64(2), big.NewInt(8)) + node.On("StateAndLatest").Return(nodeStateAlive, + ChainInfo{BlockNumber: 2, BlockDifficulty: big.NewInt(8)}) node.On("Order").Maybe().Return(int32(1)) nodes = append(nodes, node) @@ -53,7 +57,8 @@ func TestTotalDifficultyNodeSelector(t *testing.T) { t.Run("another best node", func(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) // fifth node is alive (better than 3rd and 4th) - node.On("StateAndLatest").Return(nodeStateAlive, int64(3), big.NewInt(11)) + node.On("StateAndLatest").Return(nodeStateAlive, + ChainInfo{BlockNumber: 3, BlockDifficulty: big.NewInt(11)}) node.On("Order").Maybe().Return(int32(1)) nodes = append(nodes, node) @@ -63,10 +68,12 @@ func TestTotalDifficultyNodeSelector(t *testing.T) { t.Run("nodes never update latest block number", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, int64(-1), nil) + node1.On("StateAndLatest").Return(nodeStateAlive, + ChainInfo{BlockNumber: -1, BlockDifficulty: nil}) node1.On("Order").Maybe().Return(int32(1)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, int64(-1), nil) + node2.On("StateAndLatest").Return(nodeStateAlive, + ChainInfo{BlockNumber: -1, BlockDifficulty: nil}) node2.On("Order").Maybe().Return(int32(1)) nodes := []Node[types.ID, Head, nodeClient]{node1, node2} @@ -78,17 +85,18 @@ func TestTotalDifficultyNodeSelector(t *testing.T) { func TestTotalDifficultyNodeSelector_None(t *testing.T) { t.Parallel() - type nodeClient NodeClient[types.ID, Head] + type nodeClient RPCClient[types.ID, Head] var nodes []Node[types.ID, Head, nodeClient] for i := 0; i < 3; i++ { node := newMockNode[types.ID, Head, nodeClient](t) if i == 0 { // first node is out of sync - node.On("StateAndLatest").Return(nodeStateOutOfSync, int64(-1), nil) + node.On("StateAndLatest").Return(nodeStateOutOfSync, ChainInfo{BlockNumber: -1}) } else { // others are unreachable - node.On("StateAndLatest").Return(nodeStateUnreachable, int64(1), big.NewInt(7)) + node.On("StateAndLatest").Return(nodeStateUnreachable, + ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(7)}) } nodes = append(nodes, node) } @@ -100,13 +108,14 @@ func TestTotalDifficultyNodeSelector_None(t *testing.T) { func TestTotalDifficultyNodeSelectorWithOrder(t *testing.T) { t.Parallel() - type nodeClient NodeClient[types.ID, Head] + type nodeClient RPCClient[types.ID, Head] var nodes []Node[types.ID, Head, nodeClient] t.Run("same td and order", func(t *testing.T) { for i := 0; i < 3; i++ { node := newMockNode[types.ID, Head, nodeClient](t) - node.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(10)) + node.On("StateAndLatest").Return(nodeStateAlive, + ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(10)}) node.On("Order").Return(int32(2)) nodes = append(nodes, node) } @@ -117,15 +126,18 @@ func TestTotalDifficultyNodeSelectorWithOrder(t *testing.T) { t.Run("same td but different order", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, int64(3), big.NewInt(10)) + node1.On("StateAndLatest").Return(nodeStateAlive, + ChainInfo{BlockNumber: 3, BlockDifficulty: big.NewInt(10)}) node1.On("Order").Return(int32(3)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, int64(3), big.NewInt(10)) + node2.On("StateAndLatest").Return(nodeStateAlive, + ChainInfo{BlockNumber: 3, BlockDifficulty: big.NewInt(10)}) node2.On("Order").Return(int32(1)) node3 := newMockNode[types.ID, Head, nodeClient](t) - node3.On("StateAndLatest").Return(nodeStateAlive, int64(3), big.NewInt(10)) + node3.On("StateAndLatest").Return(nodeStateAlive, + ChainInfo{BlockNumber: 3, BlockDifficulty: big.NewInt(10)}) node3.On("Order").Return(int32(2)) nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3} @@ -136,15 +148,18 @@ func TestTotalDifficultyNodeSelectorWithOrder(t *testing.T) { t.Run("different td but same order", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(10)) + node1.On("StateAndLatest").Return(nodeStateAlive, + ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(10)}) node1.On("Order").Maybe().Return(int32(3)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(11)) + node2.On("StateAndLatest").Return(nodeStateAlive, + ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(11)}) node2.On("Order").Maybe().Return(int32(3)) node3 := newMockNode[types.ID, Head, nodeClient](t) - node3.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(12)) + node3.On("StateAndLatest").Return(nodeStateAlive, + ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(12)}) node3.On("Order").Return(int32(3)) nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3} @@ -155,19 +170,23 @@ func TestTotalDifficultyNodeSelectorWithOrder(t *testing.T) { t.Run("different head and different order", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(100)) + node1.On("StateAndLatest").Return(nodeStateAlive, + ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(100)}) node1.On("Order").Maybe().Return(int32(4)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(110)) + node2.On("StateAndLatest").Return(nodeStateAlive, + ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(110)}) node2.On("Order").Maybe().Return(int32(5)) node3 := newMockNode[types.ID, Head, nodeClient](t) - node3.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(110)) + node3.On("StateAndLatest").Return(nodeStateAlive, + ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(110)}) node3.On("Order").Maybe().Return(int32(1)) node4 := newMockNode[types.ID, Head, nodeClient](t) - node4.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(105)) + node4.On("StateAndLatest").Return(nodeStateAlive, + ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(105)}) node4.On("Order").Maybe().Return(int32(2)) nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3, node4} diff --git a/common/client/node_test.go b/common/client/node_test.go index 85c96145740..e9cd367340b 100644 --- a/common/client/node_test.go +++ b/common/client/node_test.go @@ -51,7 +51,7 @@ func (n testNodeConfig) Errors() config.ClientErrors { } type testNode struct { - *node[types.ID, Head, NodeClient[types.ID, Head]] + *node[types.ID, Head, RPCClient[types.ID, Head]] } type testNodeOpts struct { @@ -64,7 +64,7 @@ type testNodeOpts struct { id int32 chainID types.ID nodeOrder int32 - rpc *mockNodeClient[types.ID, Head] + rpc *MockRPCClient[types.ID, Head] chainFamily string } @@ -89,10 +89,10 @@ func newTestNode(t *testing.T, opts testNodeOpts) testNode { opts.id = 42 } - nodeI := NewNode[types.ID, Head, NodeClient[types.ID, Head]](opts.config, opts.chainConfig, opts.lggr, + nodeI := NewNode[types.ID, Head, RPCClient[types.ID, Head]](opts.config, opts.chainConfig, opts.lggr, opts.wsuri, opts.httpuri, opts.name, opts.id, opts.chainID, opts.nodeOrder, opts.rpc, opts.chainFamily) return testNode{ - nodeI.(*node[types.ID, Head, NodeClient[types.ID, Head]]), + nodeI.(*node[types.ID, Head, RPCClient[types.ID, Head]]), } } diff --git a/common/client/send_only_node.go b/common/client/send_only_node.go index b63e93b703d..16fcb92a1f6 100644 --- a/common/client/send_only_node.go +++ b/common/client/send_only_node.go @@ -18,7 +18,7 @@ type sendOnlyClient[ ] interface { Close() ChainID(context.Context) (CHAIN_ID, error) - DialHTTP() error + Dial(ctx context.Context) error } // SendOnlyNode represents one node used as a sendonly @@ -26,7 +26,7 @@ type sendOnlyClient[ //go:generate mockery --quiet --name SendOnlyNode --structname mockSendOnlyNode --filename "mock_send_only_node_test.go" --inpackage --case=underscore type SendOnlyNode[ CHAIN_ID types.ID, - RPC sendOnlyClient[CHAIN_ID], + RPC any, ] interface { // Start may attempt to connect to the node, but should only return error for misconfiguration - never for temporary errors. Start(context.Context) error @@ -100,7 +100,7 @@ func (s *sendOnlyNode[CHAIN_ID, RPC]) start(startCtx context.Context) { panic(fmt.Sprintf("cannot dial node with state %v", s.state)) } - err := s.rpc.DialHTTP() + err := s.rpc.Dial(startCtx) if err != nil { promPoolRPCNodeTransitionsToUnusable.WithLabelValues(s.chainID.String(), s.name).Inc() s.log.Errorw("Dial failed: SendOnly Node is unusable", "err", err) diff --git a/common/client/types.go b/common/client/types.go index ffbcf6f7679..74b9408e475 100644 --- a/common/client/types.go +++ b/common/client/types.go @@ -12,7 +12,7 @@ import ( // RPCClient includes all the necessary generalized RPC methods along with any additional chain-specific methods. // -//go:generate mockery --quiet --name RPCClient --output ./mocks --case=underscore +//go:generate mockery --quiet --name RPCClient --structname MockRPCClient --filename "mock_rpc_client_test.go" --inpackage --case=underscore type RPCClient[ CHAIN_ID types.ID, HEAD Head, @@ -22,7 +22,7 @@ type RPCClient[ // Dial - prepares the RPC for usage. Can be called on fresh or closed RPC Dial(ctx context.Context) error // SubscribeToHeads - returns channel and subscription for new heads. - SubscribeToHeads(ctx context.Context) (chan HEAD, types.Subscription, error) + SubscribeToHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) // SubscribeToFinalizedHeads - returns channel and subscription for finalized heads. SubscribeToFinalizedHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) // Ping - returns error if RPC is not reachable diff --git a/core/chains/evm/client/chain_client.go b/core/chains/evm/client/chain_client.go index a0528ec5cc6..af996a646b9 100644 --- a/core/chains/evm/client/chain_client.go +++ b/core/chains/evm/client/chain_client.go @@ -3,6 +3,7 @@ package client import ( "context" "math/big" + "sync" "time" evmconfig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config" @@ -41,7 +42,7 @@ func NewChainClient( leaseDuration time.Duration, noNewHeadsThreshold time.Duration, nodes []commonclient.Node[*big.Int, *evmtypes.Head, *RpcClient], - sendonlys []commonclient.Node[*big.Int, *evmtypes.Head, *RpcClient], + sendonlys []commonclient.SendOnlyNode[*big.Int, *RpcClient], chainID *big.Int, chainType config.ChainType, clientErrors evmconfig.ClientErrors, @@ -83,13 +84,37 @@ func (c *chainClient) BatchCallContext(ctx context.Context, b []ethrpc.BatchElem // Similar to BatchCallContext, ensure the provided BatchElem slice is passed through func (c *chainClient) BatchCallContextAll(ctx context.Context, b []ethrpc.BatchElem) error { + var wg sync.WaitGroup + defer wg.Wait() + + // Select main RPC to use for return value + main, selectionErr := c.multiNode.SelectRPC() + if selectionErr != nil { + return selectionErr + } + doFunc := func(ctx context.Context, rpc *RpcClient, isSendOnly bool) bool { - if err := rpc.BatchCallContext(ctx, b); err != nil { - return false + if rpc == main { + return true } + // Parallel call made to all other nodes with ignored return value + wg.Add(1) + go func(rpc *RpcClient) { + defer wg.Done() + err := rpc.BatchCallContext(ctx, b) + if err != nil { + rpc.rpcLog.Debugw("Secondary node BatchCallContext failed", "err", err) + } else { + rpc.rpcLog.Trace("Secondary node BatchCallContext success") + } + }(rpc) return true } - return c.multiNode.DoAll(ctx, doFunc) + + if err := c.multiNode.DoAll(ctx, doFunc); err != nil { + return err + } + return main.BatchCallContext(ctx, b) } // TODO-1663: return custom Block type instead of geth's once client.go is deprecated. @@ -300,14 +325,13 @@ func (c *chainClient) SubscribeNewHead(ctx context.Context) (<-chan *evmtypes.He return nil, nil, err } - // TODO: Implement this ch, sub, err := rpc.SubscribeToHeads(ctx) - csf := newChainIDSubForwarder(c.ConfiguredChainID(), ch) + forwardCh, csf := newChainIDSubForwarder(c.ConfiguredChainID(), ch) err = csf.start(sub, err) if err != nil { return nil, nil, err } - return ch, csf, nil + return forwardCh, csf, nil } func (c *chainClient) SuggestGasPrice(ctx context.Context) (p *big.Int, err error) { diff --git a/core/chains/evm/client/chain_id_sub.go b/core/chains/evm/client/chain_id_sub.go index c3162b300c7..88c776ad6fa 100644 --- a/core/chains/evm/client/chain_id_sub.go +++ b/core/chains/evm/client/chain_id_sub.go @@ -16,7 +16,7 @@ type chainIDSubForwarder struct { chainID *big.Int destCh chan<- *evmtypes.Head - srcCh chan *evmtypes.Head + srcCh <-chan *evmtypes.Head srcSub ethereum.Subscription done chan struct{} @@ -24,11 +24,12 @@ type chainIDSubForwarder struct { unSub chan struct{} } -func newChainIDSubForwarder(chainID *big.Int, ch chan<- *evmtypes.Head) *chainIDSubForwarder { - return &chainIDSubForwarder{ +func newChainIDSubForwarder(chainID *big.Int, ch <-chan *evmtypes.Head) (<-chan *evmtypes.Head, *chainIDSubForwarder) { + destCh := make(chan *evmtypes.Head) + return destCh, &chainIDSubForwarder{ chainID: chainID, - destCh: ch, - srcCh: make(chan *evmtypes.Head), + destCh: destCh, + srcCh: ch, done: make(chan struct{}), err: make(chan error), unSub: make(chan struct{}, 1), @@ -38,7 +39,7 @@ func newChainIDSubForwarder(chainID *big.Int, ch chan<- *evmtypes.Head) *chainID // start spawns the forwarding loop for sub. func (c *chainIDSubForwarder) start(sub ethereum.Subscription, err error) error { if err != nil { - close(c.srcCh) + close(c.destCh) return err } c.srcSub = sub diff --git a/core/chains/evm/client/chain_id_sub_test.go b/core/chains/evm/client/chain_id_sub_test.go index f959376acca..1ec2097d5d7 100644 --- a/core/chains/evm/client/chain_id_sub_test.go +++ b/core/chains/evm/client/chain_id_sub_test.go @@ -20,7 +20,7 @@ func TestChainIDSubForwarder(t *testing.T) { t.Parallel() ch := make(chan *evmtypes.Head) - forwarder := newChainIDSubForwarder(chainID, ch) + _, forwarder := newChainIDSubForwarder(chainID, ch) sub := NewMockSubscription() err := forwarder.start(sub, nil) assert.NoError(t, err) @@ -37,7 +37,7 @@ func TestChainIDSubForwarder(t *testing.T) { t.Parallel() ch := make(chan *evmtypes.Head) - forwarder := newChainIDSubForwarder(chainID, ch) + _, forwarder := newChainIDSubForwarder(chainID, ch) sub := NewMockSubscription() err := forwarder.start(sub, nil) assert.NoError(t, err) @@ -55,11 +55,11 @@ func TestChainIDSubForwarder(t *testing.T) { t.Parallel() ch := make(chan *evmtypes.Head) - forwarder := newChainIDSubForwarder(chainID, ch) + _, forwarder := newChainIDSubForwarder(chainID, ch) sub := NewMockSubscription() err := forwarder.start(sub, nil) assert.NoError(t, err) - forwarder.srcCh <- &evmtypes.Head{} + ch <- &evmtypes.Head{} forwarder.Unsubscribe() assert.True(t, sub.unsubscribed) @@ -73,7 +73,7 @@ func TestChainIDSubForwarder(t *testing.T) { t.Parallel() ch := make(chan *evmtypes.Head) - forwarder := newChainIDSubForwarder(chainID, ch) + _, forwarder := newChainIDSubForwarder(chainID, ch) sub := NewMockSubscription() errIn := errors.New("foo") errOut := forwarder.start(sub, errIn) @@ -84,7 +84,7 @@ func TestChainIDSubForwarder(t *testing.T) { t.Parallel() ch := make(chan *evmtypes.Head) - forwarder := newChainIDSubForwarder(chainID, ch) + fwdCh, forwarder := newChainIDSubForwarder(chainID, ch) sub := NewMockSubscription() err := forwarder.start(sub, nil) assert.NoError(t, err) @@ -92,8 +92,8 @@ func TestChainIDSubForwarder(t *testing.T) { head := &evmtypes.Head{ ID: 1, } - forwarder.srcCh <- head - receivedHead := <-ch + ch <- head + receivedHead := <-fwdCh assert.Equal(t, head, receivedHead) assert.Equal(t, ubig.New(chainID), receivedHead.EVMChainID) diff --git a/core/chains/evm/client/client.go b/core/chains/evm/client/client.go index 785619bf721..c802278cb37 100644 --- a/core/chains/evm/client/client.go +++ b/core/chains/evm/client/client.go @@ -334,12 +334,12 @@ func (client *client) SubscribeFilterLogs(ctx context.Context, q ethereum.Filter func (client *client) SubscribeNewHead(ctx context.Context) (<-chan *evmtypes.Head, ethereum.Subscription, error) { ch := make(chan *evmtypes.Head) - csf := newChainIDSubForwarder(client.ConfiguredChainID(), ch) - err := csf.start(client.pool.EthSubscribe(ctx, csf.srcCh, "newHeads")) + forwardCh, csf := newChainIDSubForwarder(client.ConfiguredChainID(), ch) + err := csf.start(client.pool.EthSubscribe(ctx, ch, "newHeads")) if err != nil { return nil, nil, err } - return ch, csf, nil + return forwardCh, csf, nil } func (client *client) EthSubscribe(ctx context.Context, channel chan<- *evmtypes.Head, args ...interface{}) (ethereum.Subscription, error) { diff --git a/core/chains/evm/client/evm_client.go b/core/chains/evm/client/evm_client.go index 289aae781a6..e6483c5773b 100644 --- a/core/chains/evm/client/evm_client.go +++ b/core/chains/evm/client/evm_client.go @@ -15,7 +15,7 @@ import ( func NewEvmClient(cfg evmconfig.NodePool, chainCfg commonclient.ChainConfig, clientErrors evmconfig.ClientErrors, lggr logger.Logger, chainID *big.Int, nodes []*toml.Node) Client { var empty url.URL var primaries []commonclient.Node[*big.Int, *evmtypes.Head, *RpcClient] - var sendonlys []commonclient.Node[*big.Int, *evmtypes.Head, *RpcClient] + var sendonlys []commonclient.SendOnlyNode[*big.Int, *RpcClient] for i, node := range nodes { rpc := NewRPCClient(cfg, lggr, empty, (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, commonclient.Secondary) diff --git a/core/chains/evm/client/helpers_test.go b/core/chains/evm/client/helpers_test.go index 1db8958443c..5b438b0d3cd 100644 --- a/core/chains/evm/client/helpers_test.go +++ b/core/chains/evm/client/helpers_test.go @@ -164,20 +164,23 @@ func NewChainClientWithTestNode( } lggr := logger.Test(t) - rpc := NewRPCClient(lggr, *parsed, rpcHTTPURL, "eth-primary-rpc-0", id, chainID, commonclient.Primary) + nodePoolCfg := TestNodePoolConfig{ + NodeFinalizedBlockPollInterval: 1 * time.Second, + } + rpc := NewRPCClient(nodePoolCfg, lggr, *parsed, rpcHTTPURL, "eth-primary-rpc-0", id, chainID, commonclient.Primary) - n := commonclient.NewNode[*big.Int, *evmtypes.Head, RPCClient]( + n := commonclient.NewNode[*big.Int, *evmtypes.Head, *RpcClient]( nodeCfg, clientMocks.ChainConfig{NoNewHeadsThresholdVal: noNewHeadsThreshold}, lggr, *parsed, rpcHTTPURL, "eth-primary-node-0", id, chainID, 1, rpc, "EVM") - primaries := []commonclient.Node[*big.Int, *evmtypes.Head, RPCClient]{n} + primaries := []commonclient.Node[*big.Int, *evmtypes.Head, *RpcClient]{n} - var sendonlys []commonclient.SendOnlyNode[*big.Int, RPCClient] + var sendonlys []commonclient.SendOnlyNode[*big.Int, *RpcClient] for i, u := range sendonlyRPCURLs { if u.Scheme != "http" && u.Scheme != "https" { return nil, pkgerrors.Errorf("sendonly ethereum rpc url scheme must be http(s): %s", u.String()) } var empty url.URL - rpc := NewRPCClient(lggr, empty, &sendonlyRPCURLs[i], fmt.Sprintf("eth-sendonly-rpc-%d", i), id, chainID, commonclient.Secondary) - s := commonclient.NewSendOnlyNode[*big.Int, RPCClient]( + rpc := NewRPCClient(nodePoolCfg, lggr, empty, &sendonlyRPCURLs[i], fmt.Sprintf("eth-sendonly-rpc-%d", i), id, chainID, commonclient.Secondary) + s := commonclient.NewSendOnlyNode[*big.Int, *RpcClient]( lggr, u, fmt.Sprintf("eth-sendonly-%d", i), chainID, rpc) sendonlys = append(sendonlys, s) } @@ -211,7 +214,7 @@ func NewChainClientWithMockedRpc( leaseDuration time.Duration, noNewHeadsThreshold time.Duration, chainID *big.Int, - rpc RPCClient, + rpc *commonclient.RPCClient[*big.Int, *evmtypes.Head], ) Client { lggr := logger.Test(t) @@ -223,9 +226,9 @@ func NewChainClientWithMockedRpc( } parsed, _ := url.ParseRequestURI("ws://test") - n := commonclient.NewNode[*big.Int, *evmtypes.Head, RPCClient]( + n := commonclient.NewNode[*big.Int, *evmtypes.Head, *clientMocks.MockRPCClient[*big.Int, *evmtypes.Head]]( cfg, clientMocks.ChainConfig{NoNewHeadsThresholdVal: noNewHeadsThreshold}, lggr, *parsed, nil, "eth-primary-node-0", 1, chainID, 1, rpc, "EVM") - primaries := []commonclient.Node[*big.Int, *evmtypes.Head, RPCClient]{n} + primaries := []commonclient.Node[*big.Int, *evmtypes.Head, *clientMocks.MockRPCClient[*big.Int, *evmtypes.Head]]{n} clientErrors := NewTestClientErrors() c := NewChainClient(lggr, selectionMode, leaseDuration, noNewHeadsThreshold, primaries, nil, chainID, chainType, &clientErrors) t.Cleanup(c.Close) diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index 10d9de05049..71539d11039 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -91,7 +91,7 @@ func NewRPCClient( return r } -func (r *RpcClient) SubscribeToHeads(ctx context.Context) (chan *evmtypes.Head, commontypes.Subscription, error) { +func (r *RpcClient) SubscribeToHeads(ctx context.Context) (<-chan *evmtypes.Head, commontypes.Subscription, error) { channel := make(chan *evmtypes.Head) sub, err := r.Subscribe(ctx, channel) return channel, sub, err From a61a99eea288bcee50454dcffc785a3e024f682d Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 16 May 2024 13:40:40 -0400 Subject: [PATCH 05/58] update multinode --- common/client/node.go | 6 ++- common/client/node_fsm_test.go | 10 ++--- common/client/node_lifecycle.go | 9 ++-- common/client/node_lifecycle_test.go | 64 ++++++++++++++++------------ core/chains/evm/client/rpc_client.go | 3 +- 5 files changed, 51 insertions(+), 41 deletions(-) diff --git a/common/client/node.go b/common/client/node.go index fcfa6288915..80bcc238d8c 100644 --- a/common/client/node.go +++ b/common/client/node.go @@ -132,7 +132,8 @@ type node[ // 1. see how many live nodes there are in total, so we can prevent the last alive node in a pool from being // moved to out-of-sync state. It is better to have one out-of-sync node than no nodes at all. // 2. compare against the highest head (by number or difficulty) to ensure we don't fall behind too far. - nLiveNodes func() (count int, blockNumber int64, totalDifficulty *big.Int) + nLiveNodes func() (count int, blockNumber int64, totalDifficulty *big.Int) + aliveLoopSub types.Subscription } func NewNode[ @@ -176,6 +177,7 @@ func NewNode[ n.stateLatestBlockNumber = -1 n.rpc = rpc n.chainFamily = chainFamily + n.aliveLoopSub = nil return n } @@ -200,7 +202,7 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) RPC() RPC_CLIENT { } func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) UnsubscribeAll() { - n.rpc.UnsubscribeAllExcept() + n.rpc.UnsubscribeAllExcept(n.aliveLoopSub) } func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) Close() error { diff --git a/common/client/node_fsm_test.go b/common/client/node_fsm_test.go index 51d9f1b6ab9..4031fb3fa8d 100644 --- a/common/client/node_fsm_test.go +++ b/common/client/node_fsm_test.go @@ -53,33 +53,33 @@ func TestUnit_Node_StateTransitions(t *testing.T) { const destinationState = nodeStateOutOfSync allowedStates := []nodeState{nodeStateAlive} rpc := NewMockRPCClient[types.ID, Head](t) - rpc.On("UnsubscribeAllExcept").Once() + rpc.On("UnsubscribeAllExcept", nil).Once() testTransition(t, rpc, testNode.transitionToOutOfSync, destinationState, allowedStates...) }) t.Run("transitionToUnreachable", func(t *testing.T) { const destinationState = nodeStateUnreachable allowedStates := []nodeState{nodeStateUndialed, nodeStateDialed, nodeStateAlive, nodeStateOutOfSync, nodeStateInvalidChainID, nodeStateSyncing} rpc := NewMockRPCClient[types.ID, Head](t) - rpc.On("UnsubscribeAllExcept").Times(len(allowedStates)) + rpc.On("UnsubscribeAllExcept", nil).Times(len(allowedStates)) testTransition(t, rpc, testNode.transitionToUnreachable, destinationState, allowedStates...) }) t.Run("transitionToInvalidChain", func(t *testing.T) { const destinationState = nodeStateInvalidChainID allowedStates := []nodeState{nodeStateDialed, nodeStateOutOfSync, nodeStateSyncing} rpc := NewMockRPCClient[types.ID, Head](t) - rpc.On("UnsubscribeAllExcept").Times(len(allowedStates)) + rpc.On("UnsubscribeAllExcept", nil).Times(len(allowedStates)) testTransition(t, rpc, testNode.transitionToInvalidChainID, destinationState, allowedStates...) }) t.Run("transitionToSyncing", func(t *testing.T) { const destinationState = nodeStateSyncing allowedStates := []nodeState{nodeStateDialed, nodeStateOutOfSync, nodeStateInvalidChainID} rpc := NewMockRPCClient[types.ID, Head](t) - rpc.On("UnsubscribeAllExcept").Times(len(allowedStates)) + rpc.On("UnsubscribeAllExcept", nil).Times(len(allowedStates)) testTransition(t, rpc, testNode.transitionToSyncing, destinationState, allowedStates...) }) t.Run("transitionToSyncing panics if nodeIsSyncing is disabled", func(t *testing.T) { rpc := NewMockRPCClient[types.ID, Head](t) - rpc.On("UnsubscribeAllExcept").Once() + rpc.On("UnsubscribeAllExcept", nil).Once() node := newTestNode(t, testNodeOpts{rpc: rpc}) node.setState(nodeStateDialed) fn := new(fnMock) diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index e2dfd0c4e81..f0ba2a1539c 100644 --- a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -108,8 +108,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } // TODO: nit fix. If multinode switches primary node before we set sub as AliveSub, sub will be closed and we'll // falsely transition this node to unreachable state - // TODO: Do we need this SetAliveLoopSub??? - //TODO: Delete this?: n.rpc.SetAliveLoopSub(sub) + n.aliveLoopSub = sub defer sub.Unsubscribe() var outOfSyncT *time.Ticker @@ -161,13 +160,11 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { case <-n.nodeCtx.Done(): return case <-pollCh: + promPoolRPCNodePolls.WithLabelValues(n.chainID.String(), n.name).Inc() + lggr.Tracew("Pinging RPC", "nodeState", n.State(), "pollFailures", pollFailures) ctx, cancel := context.WithTimeout(n.nodeCtx, pollInterval) err := n.RPC().Ping(ctx) cancel() - - promPoolRPCNodePolls.WithLabelValues(n.chainID.String(), n.name).Inc() - lggr.Tracew("Pinging RPC", "nodeState", n.State(), "pollFailures", pollFailures) - if err != nil { // prevent overflow if pollFailures < math.MaxUint32 { diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go index a47c8a305ae..031bc1b86a4 100644 --- a/common/client/node_lifecycle_test.go +++ b/common/client/node_lifecycle_test.go @@ -15,7 +15,6 @@ import ( "go.uber.org/zap" "github.com/smartcontractkit/chainlink-common/pkg/logger" - bigmath "github.com/smartcontractkit/chainlink-common/pkg/utils/big_math" "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" clientMocks "github.com/smartcontractkit/chainlink/v2/common/client/mocks" @@ -51,7 +50,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { expectedError := errors.New("failed to subscribe to rpc") rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(nil, expectedError).Once() - rpc.On("UnsubscribeAllExcept").Once() + rpc.On("UnsubscribeAllExcept", nil) // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -79,7 +78,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(sub, nil).Once() rpc.On("SetAliveLoopSub", sub).Once() // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept").Once() + rpc.On("UnsubscribeAllExcept", nil).Once() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -169,7 +168,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { pollError := errors.New("failed to get ClientVersion") rpc.On("ClientVersion", mock.Anything).Return("", pollError) // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept").Once() + rpc.On("UnsubscribeAllExcept", nil).Once() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -226,7 +225,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { assert.Equal(t, nodeStateOutOfSync, node.State()) }).Once() // disconnects all on transfer to unreachable or outOfSync - rpc.On("UnsubscribeAllExcept").Maybe() + rpc.On("UnsubscribeAllExcept", nil).Maybe() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Run(func(_ mock.Arguments) { require.Equal(t, nodeStateOutOfSync, node.State()) @@ -297,7 +296,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { assert.Equal(t, nodeStateOutOfSync, node.State()) }).Once() // disconnects all on transfer to unreachable or outOfSync - rpc.On("UnsubscribeAllExcept").Maybe() + rpc.On("UnsubscribeAllExcept", nil).Maybe() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -350,7 +349,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() // disconnects all on transfer to unreachable or outOfSync - rpc.On("UnsubscribeAllExcept").Once() + rpc.On("UnsubscribeAllExcept", nil).Once() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -361,16 +360,15 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { t.Run("updates block number and difficulty on new head", func(t *testing.T) { t.Parallel() rpc := NewMockRPCClient[types.ID, Head](t) + ch := make(chan Head) sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) sub.On("Unsubscribe").Once() expectedBlockNumber := rand.Int64() expectedDiff := big.NewInt(rand.Int64()) - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) { - ch := args.Get(1).(chan<- Head) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(_ mock.Arguments) { go writeHeads(t, ch, head{BlockNumber: expectedBlockNumber, BlockDifficulty: expectedDiff}) - }).Return(sub, nil).Once() - rpc.On("SetAliveLoopSub", sub).Once() + }).Return((<-chan Head)(ch), sub, nil).Once() node := newDialedNode(t, testNodeOpts{ config: testNodeConfig{}, rpc: rpc, @@ -379,7 +377,8 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { node.declareAlive() tests.AssertEventually(t, func() bool { state, chainInfo := node.StateAndLatest() - return state == nodeStateAlive && chainInfo.BlockNumber == expectedBlockNumber == bigmath.Equal(chainInfo.BlockDifficulty, expectedDiff) + // TODO: nil pointer dereference... block difficulty is nil? + return state == nodeStateAlive && chainInfo.BlockNumber == expectedBlockNumber }) }) t.Run("If finality tag is not enabled updates finalized block metric using finality depth and latest head", func(t *testing.T) { @@ -536,6 +535,7 @@ func setupRPCForAliveLoop(t *testing.T, rpc *MockRPCClient[types.ID, Head]) { aliveSubscription.On("Err").Return((<-chan error)(nil)).Maybe() aliveSubscription.On("Unsubscribe").Maybe() rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(aliveSubscription, nil).Maybe() + rpc.On("UnsubscribeAllExcept", nil).Maybe() rpc.On("SetAliveLoopSub", mock.Anything).Maybe() } @@ -546,7 +546,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { node := newTestNode(t, opts) opts.rpc.On("Close").Return(nil).Once() // disconnects all on transfer to unreachable or outOfSync - opts.rpc.On("UnsubscribeAllExcept") + opts.rpc.On("UnsubscribeAllExcept", nil) node.setState(nodeStateAlive) return node } @@ -848,7 +848,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { node := newTestNode(t, opts) opts.rpc.On("Close").Return(nil).Once() // disconnects all on transfer to unreachable - opts.rpc.On("UnsubscribeAllExcept") + opts.rpc.On("UnsubscribeAllExcept", nil) node.setState(nodeStateAlive) return node @@ -971,6 +971,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) rpc.On("IsSyncing", mock.Anything).Return(false, nil) + rpc.On("SubscribeToHeads", mock.Anything).Return(nil) setupRPCForAliveLoop(t, rpc) @@ -1006,7 +1007,6 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { newDialedNode := func(t *testing.T, opts testNodeOpts) testNode { node := newTestNode(t, opts) opts.rpc.On("Close").Return(nil).Once() - opts.rpc.On("UnsubscribeAllExcept") node.setState(nodeStateDialed) return node @@ -1030,6 +1030,8 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")) + rpc.On("UnsubscribeAllExcept", nil) + node.declareInvalidChainID() tests.AssertEventually(t, func() bool { return node.State() == nodeStateUnreachable @@ -1083,18 +1085,19 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { rpc := NewMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) rpcChainID := types.NewIDFromInt(11) - // TODO: SubscribeToHeads return value? - headCh := make(<-chan Head) - sub := mocks.NewSubscription(t) node := newDialedNode(t, testNodeOpts{ rpc: rpc, chainID: nodeChainID, }) defer func() { assert.NoError(t, node.close()) }() + headCh := make(<-chan Head) + sub := mocks.NewSubscription(t) + sub.On("Err").Return(nil) + sub.On("Unsubscribe").Once() + rpc.On("Dial", mock.Anything).Return(nil).Once() - // TODO: SubscribeToHeads is called when? - rpc.On("SubscribeToHeads", mock.Anything).Return(headCh, sub, nil).Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(headCh, sub, nil) rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() @@ -1154,7 +1157,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")) // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept") + rpc.On("UnsubscribeAllExcept", nil) err := node.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertLogEventually(t, observedLogs, "Dial failed: Node is unreachable") @@ -1179,7 +1182,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { assert.Equal(t, nodeStateDialed, node.State()) }).Return(nodeChainID, errors.New("failed to get chain id")) // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept") + rpc.On("UnsubscribeAllExcept", nil) err := node.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertLogEventually(t, observedLogs, "Failed to verify chain ID for node") @@ -1201,7 +1204,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept") + rpc.On("UnsubscribeAllExcept", nil) err := node.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertEventually(t, func() bool { @@ -1227,7 +1230,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }).Return(nodeChainID, nil).Once() rpc.On("IsSyncing", mock.Anything).Return(false, errors.New("failed to check syncing status")) // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept") + rpc.On("UnsubscribeAllExcept", nil) // fail to redial to stay in unreachable state rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")) err := node.Start(tests.Context(t)) @@ -1252,7 +1255,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) rpc.On("IsSyncing", mock.Anything).Return(true, nil) // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept") + rpc.On("UnsubscribeAllExcept", nil) err := node.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertEventually(t, func() bool { @@ -1273,7 +1276,10 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) rpc.On("IsSyncing", mock.Anything).Return(false, nil) - + sub := mocks.NewSubscription(t) + sub.On("Err").Return(nil) + sub.On("Unsubscribe").Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() setupRPCForAliveLoop(t, rpc) err := node.Start(tests.Context(t)) @@ -1294,6 +1300,10 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) + sub := mocks.NewSubscription(t) + sub.On("Err").Return(nil) + sub.On("Unsubscribe").Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() setupRPCForAliveLoop(t, rpc) @@ -1447,7 +1457,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { opts.config.nodeIsSyncingEnabled = true node := newTestNode(t, opts) opts.rpc.On("Close").Return(nil).Once() - opts.rpc.On("UnsubscribeAllExcept") + opts.rpc.On("UnsubscribeAllExcept", nil) node.setState(nodeStateDialed) return node diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index 71539d11039..2be0c39b6d8 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -108,10 +108,11 @@ func (r *RpcClient) SubscribeToFinalizedHeads(_ context.Context) (<-chan *evmtyp } func (r *RpcClient) Ping(ctx context.Context) error { - _, err := r.ClientVersion(ctx) + version, err := r.ClientVersion(ctx) if err != nil { return fmt.Errorf("ping failed: %v", err) } + r.rpcLog.Debugf("ping client version: %s", version) return err } From 3ff4cb90172f44b5db21094a914e94217185d622 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 16 May 2024 14:53:50 -0400 Subject: [PATCH 06/58] fix tests --- common/client/mock_node_test.go | 18 +++--- common/client/mock_send_only_node_test.go | 8 +-- common/client/multi_node.go | 12 ++-- common/client/multi_node_test.go | 20 +++--- common/client/node.go | 22 +++---- common/client/node_fsm.go | 42 ++++++------- common/client/node_fsm_test.go | 16 ++--- common/client/node_lifecycle.go | 62 +++++++++---------- common/client/node_lifecycle_test.go | 47 +++++++++----- .../node_selector_priority_level_test.go | 2 +- common/client/send_only_node.go | 12 ++-- common/client/send_only_node_lifecycle.go | 2 +- common/client/send_only_node_test.go | 10 +-- core/chains/evm/client/chain_client.go | 6 +- 14 files changed, 146 insertions(+), 133 deletions(-) diff --git a/common/client/mock_node_test.go b/common/client/mock_node_test.go index d5ab6d56233..af99efac1c3 100644 --- a/common/client/mock_node_test.go +++ b/common/client/mock_node_test.go @@ -123,40 +123,40 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) Start(_a0 context.Context) error } // State provides a mock function with given fields: -func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) State() nodeState { +func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) State() NodeState { ret := _m.Called() if len(ret) == 0 { panic("no return value specified for State") } - var r0 nodeState - if rf, ok := ret.Get(0).(func() nodeState); ok { + var r0 NodeState + if rf, ok := ret.Get(0).(func() NodeState); ok { r0 = rf() } else { - r0 = ret.Get(0).(nodeState) + r0 = ret.Get(0).(NodeState) } return r0 } // StateAndLatest provides a mock function with given fields: -func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) StateAndLatest() (nodeState, ChainInfo) { +func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) StateAndLatest() (NodeState, ChainInfo) { ret := _m.Called() if len(ret) == 0 { panic("no return value specified for StateAndLatest") } - var r0 nodeState + var r0 NodeState var r1 ChainInfo - if rf, ok := ret.Get(0).(func() (nodeState, ChainInfo)); ok { + if rf, ok := ret.Get(0).(func() (NodeState, ChainInfo)); ok { return rf() } - if rf, ok := ret.Get(0).(func() nodeState); ok { + if rf, ok := ret.Get(0).(func() NodeState); ok { r0 = rf() } else { - r0 = ret.Get(0).(nodeState) + r0 = ret.Get(0).(NodeState) } if rf, ok := ret.Get(1).(func() ChainInfo); ok { diff --git a/common/client/mock_send_only_node_test.go b/common/client/mock_send_only_node_test.go index 008b8793428..71d3f8604c8 100644 --- a/common/client/mock_send_only_node_test.go +++ b/common/client/mock_send_only_node_test.go @@ -105,18 +105,18 @@ func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) Start(_a0 context.Context) error { } // State provides a mock function with given fields: -func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) State() nodeState { +func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) State() NodeState { ret := _m.Called() if len(ret) == 0 { panic("no return value specified for State") } - var r0 nodeState - if rf, ok := ret.Get(0).(func() nodeState); ok { + var r0 NodeState + if rf, ok := ret.Get(0).(func() NodeState); ok { r0 = rf() } else { - r0 = ret.Get(0).(nodeState) + r0 = ret.Get(0).(NodeState) } return r0 diff --git a/common/client/multi_node.go b/common/client/multi_node.go index 4115dc1873f..81e2fb94d2e 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -47,7 +47,7 @@ type MultiNode[ // Returns error if `do` was not called or context returns an error. DoAll(ctx context.Context, do func(ctx context.Context, rpc RPC_CLIENT, isSendOnly bool) bool) error // NodeStates - returns RPCs' states - NodeStates() map[string]nodeState + NodeStates() map[string]string ChainType() config.ChainType Close() error } @@ -150,13 +150,13 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) DoAll(ctx context.Co return nil } -func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) NodeStates() map[string]nodeState { - states := map[string]nodeState{} +func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) NodeStates() map[string]string { + states := map[string]string{} for _, n := range c.primaryNodes { - states[n.String()] = n.State() + states[n.String()] = n.State().String() } for _, n := range c.sendOnlyNodes { - states[n.String()] = n.State() + states[n.String()] = n.State().String() } return states } @@ -337,7 +337,7 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) report() { } var total, dead int - counts := make(map[nodeState]int) + counts := make(map[NodeState]int) nodeStates := make([]nodeWithState, len(c.primaryNodes)) for i, n := range c.primaryNodes { state := n.State() diff --git a/common/client/multi_node_test.go b/common/client/multi_node_test.go index f87cd89c8f8..6c8d1f33f0b 100644 --- a/common/client/multi_node_test.go +++ b/common/client/multi_node_test.go @@ -57,7 +57,7 @@ func newHealthyNode(t *testing.T, chainID types.ID) *mockNode[types.ID, types.He return newNodeWithState(t, chainID, nodeStateAlive) } -func newNodeWithState(t *testing.T, chainID types.ID, state nodeState) *mockNode[types.ID, types.Head[Hashable], multiNodeRPCClient] { +func newNodeWithState(t *testing.T, chainID types.ID, state NodeState) *mockNode[types.ID, types.Head[Hashable], multiNodeRPCClient] { node := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) node.On("ConfiguredChainID").Return(chainID).Once() node.On("Start", mock.Anything).Return(nil).Once() @@ -276,8 +276,8 @@ func TestMultiNode_CheckLease(t *testing.T) { t.Parallel() chainID := types.RandomID() node := newHealthyNode(t, chainID) - node.On("SubscribersCount").Return(int32(2)) - node.On("UnsubscribeAllExceptAliveLoop") + //node.On("SubscribersCount").Return(int32(2)) + node.On("UnsubscribeAll") bestNode := newHealthyNode(t, chainID) nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t) nodeSelector.On("Select").Return(bestNode) @@ -304,7 +304,7 @@ func TestMultiNode_CheckLease(t *testing.T) { t.Run("NodeStates returns proper states", func(t *testing.T) { t.Parallel() chainID := types.NewIDFromInt(10) - nodes := map[string]nodeState{ + nodes := map[string]NodeState{ "node_1": nodeStateAlive, "node_2": nodeStateUnreachable, "node_3": nodeStateDialed, @@ -318,14 +318,14 @@ func TestMultiNode_CheckLease(t *testing.T) { expectedResult := map[string]string{} for name, state := range nodes { node := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) - node.On("Name").Return(name).Once() - node.On("State").Return(state).Once() + node.On("State").Return(state) + node.On("String").Return(name) opts.nodes = append(opts.nodes, node) sendOnly := newMockSendOnlyNode[types.ID, multiNodeRPCClient](t) sendOnlyName := "send_only_" + name - sendOnly.On("Name").Return(sendOnlyName).Once() - sendOnly.On("State").Return(state).Once() + sendOnly.On("State").Return(state) + sendOnly.On("String").Return(sendOnlyName) opts.sendonlys = append(opts.sendonlys, sendOnly) expectedResult[name] = state.String() @@ -415,7 +415,7 @@ func TestMultiNode_nLiveNodes(t *testing.T) { t.Parallel() type nodeParams struct { chainInfo ChainInfo - State nodeState + State NodeState } testCases := []struct { Name string @@ -598,7 +598,7 @@ func TestMultiNode_SendTransaction(t *testing.T) { return Successful } - newNodeWithState := func(t *testing.T, state nodeState, txErr error, sendTxRun func(args mock.Arguments)) *mockNode[types.ID, types.Head[Hashable], multiNodeRPCClient] { + newNodeWithState := func(t *testing.T, state NodeState, txErr error, sendTxRun func(args mock.Arguments)) *mockNode[types.ID, types.Head[Hashable], multiNodeRPCClient] { rpc := newMultiNodeRPCClient(t) rpc.On("SendTransaction", mock.Anything, mock.Anything).Return(txErr).Run(sendTxRun).Maybe() node := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) diff --git a/common/client/node.go b/common/client/node.go index 80bcc238d8c..fa02a4f9098 100644 --- a/common/client/node.go +++ b/common/client/node.go @@ -74,9 +74,9 @@ type Node[ RPC_CLIENT any, ] interface { // State returns health state of the underlying RPC - State() nodeState + State() NodeState // StateAndLatest returns health state with the latest received block number & total difficulty. - StateAndLatest() (nodeState, ChainInfo) + StateAndLatest() (NodeState, ChainInfo) // Name is a unique identifier for this node. Name() string // String - returns string representation of the node, useful for debugging (name + URLS used to connect to the RPC) @@ -115,7 +115,7 @@ type node[ rpc RPC_CLIENT stateMu sync.RWMutex // protects state* fields - state nodeState + state NodeState // Each node is tracking the last received head number and total difficulty stateLatestBlockNumber int64 stateLatestTotalDifficulty *big.Int @@ -258,7 +258,7 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) start(startCtx context.Context) { // verifyChainID checks that connection to the node matches the given chain ID // Not thread-safe // Pure verifyChainID: does not mutate node "state" field. -func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) verifyChainID(callerCtx context.Context, lggr logger.Logger) nodeState { +func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) verifyChainID(callerCtx context.Context, lggr logger.Logger) NodeState { promPoolRPCNodeVerifies.WithLabelValues(n.chainFamily, n.chainID.String(), n.name).Inc() promFailed := func() { promPoolRPCNodeVerifiesFailed.WithLabelValues(n.chainFamily, n.chainID.String(), n.name).Inc() @@ -279,7 +279,7 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) verifyChainID(callerCtx context.Conte var err error if chainID, err = n.rpc.ChainID(callerCtx); err != nil { promFailed() - lggr.Errorw("Failed to verify chain ID for node", "err", err, "nodeState", n.State()) + lggr.Errorw("Failed to verify chain ID for node", "err", err, "NodeState", n.State()) return nodeStateUnreachable } else if chainID.String() != n.chainID.String() { promFailed() @@ -290,7 +290,7 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) verifyChainID(callerCtx context.Conte n.name, errInvalidChainID, ) - lggr.Errorw("Failed to verify RPC node; remote endpoint returned the wrong chain ID", "err", err, "nodeState", n.State()) + lggr.Errorw("Failed to verify RPC node; remote endpoint returned the wrong chain ID", "err", err, "NodeState", n.State()) return nodeStateInvalidChainID } @@ -301,9 +301,9 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) verifyChainID(callerCtx context.Conte // createVerifiedConn - establishes new connection with the RPC and verifies that it's valid: chainID matches, and it's not syncing. // Returns desired state if one of the verifications fails. Otherwise, returns nodeStateAlive. -func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) createVerifiedConn(ctx context.Context, lggr logger.Logger) nodeState { +func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) createVerifiedConn(ctx context.Context, lggr logger.Logger) NodeState { if err := n.rpc.Dial(ctx); err != nil { - n.lfcLog.Errorw("Dial failed: Node is unreachable", "err", err, "nodeState", n.State()) + n.lfcLog.Errorw("Dial failed: Node is unreachable", "err", err, "NodeState", n.State()) return nodeStateUnreachable } @@ -312,7 +312,7 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) createVerifiedConn(ctx context.Contex // verifyConn - verifies that current connection is valid: chainID matches, and it's not syncing. // Returns desired state if one of the verifications fails. Otherwise, returns nodeStateAlive. -func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) verifyConn(ctx context.Context, lggr logger.Logger) nodeState { +func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) verifyConn(ctx context.Context, lggr logger.Logger) NodeState { state := n.verifyChainID(ctx, lggr) if state != nodeStateAlive { return state @@ -321,12 +321,12 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) verifyConn(ctx context.Context, lggr if n.nodePoolCfg.NodeIsSyncingEnabled() { isSyncing, err := n.rpc.IsSyncing(ctx) if err != nil { - lggr.Errorw("Unexpected error while verifying RPC node synchronization status", "err", err, "nodeState", n.State()) + lggr.Errorw("Unexpected error while verifying RPC node synchronization status", "err", err, "NodeState", n.State()) return nodeStateUnreachable } if isSyncing { - lggr.Errorw("Verification failed: Node is syncing", "nodeState", n.State()) + lggr.Errorw("Verification failed: Node is syncing", "NodeState", n.State()) return nodeStateSyncing } } diff --git a/common/client/node_fsm.go b/common/client/node_fsm.go index a98db7d60b9..05c55fe8751 100644 --- a/common/client/node_fsm.go +++ b/common/client/node_fsm.go @@ -39,11 +39,11 @@ var ( }, []string{"chainID", "nodeName"}) ) -// nodeState represents the current state of the node +// NodeState represents the current state of the node // Node is a FSM (finite state machine) -type nodeState int +type NodeState int -func (n nodeState) String() string { +func (n NodeState) String() string { switch n { case nodeStateUndialed: return "Undialed" @@ -64,18 +64,18 @@ func (n nodeState) String() string { case nodeStateSyncing: return "Syncing" default: - return fmt.Sprintf("nodeState(%d)", n) + return fmt.Sprintf("NodeState(%d)", n) } } // GoString prints a prettier state -func (n nodeState) GoString() string { - return fmt.Sprintf("nodeState%s(%d)", n.String(), n) +func (n NodeState) GoString() string { + return fmt.Sprintf("NodeState%s(%d)", n.String(), n) } const ( // nodeStateUndialed is the first state of a virgin node - nodeStateUndialed = nodeState(iota) + nodeStateUndialed = NodeState(iota) // nodeStateDialed is after a node has successfully dialed but before it has verified the correct chain ID nodeStateDialed // nodeStateInvalidChainID is after chain ID verification failed @@ -103,10 +103,10 @@ const ( ) // allNodeStates represents all possible states a node can be in -var allNodeStates []nodeState +var allNodeStates []NodeState func init() { - for s := nodeState(0); s < nodeStateLen; s++ { + for s := NodeState(0); s < nodeStateLen; s++ { allNodeStates = append(allNodeStates, s) } } @@ -114,13 +114,13 @@ func init() { // FSM methods // State allows reading the current state of the node. -func (n *node[CHAIN_ID, HEAD, RPC]) State() nodeState { +func (n *node[CHAIN_ID, HEAD, RPC]) State() NodeState { n.stateMu.RLock() defer n.stateMu.RUnlock() return n.state } -func (n *node[CHAIN_ID, HEAD, RPC]) StateAndLatest() (nodeState, ChainInfo) { +func (n *node[CHAIN_ID, HEAD, RPC]) StateAndLatest() (NodeState, ChainInfo) { n.stateMu.RLock() defer n.stateMu.RUnlock() return n.state, ChainInfo{ @@ -133,7 +133,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) StateAndLatest() (nodeState, ChainInfo) { // This is low-level; care should be taken by the caller to ensure the new state is a valid transition. // State changes should always be synchronous: only one goroutine at a time should change state. // n.stateMu should not be locked for long periods of time because external clients expect a timely response from n.State() -func (n *node[CHAIN_ID, HEAD, RPC]) setState(s nodeState) { +func (n *node[CHAIN_ID, HEAD, RPC]) setState(s NodeState) { n.stateMu.Lock() defer n.stateMu.Unlock() n.state = s @@ -144,7 +144,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) setState(s nodeState) { func (n *node[CHAIN_ID, HEAD, RPC]) declareAlive() { n.transitionToAlive(func() { - n.lfcLog.Infow("RPC Node is online", "nodeState", n.state) + n.lfcLog.Infow("RPC Node is online", "NodeState", n.state) n.wg.Add(1) go n.aliveLoop() }) @@ -170,7 +170,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToAlive(fn func()) { // pool consumers again func (n *node[CHAIN_ID, HEAD, RPC]) declareInSync() { n.transitionToInSync(func() { - n.lfcLog.Infow("RPC Node is back in sync", "nodeState", n.state) + n.lfcLog.Infow("RPC Node is back in sync", "NodeState", n.state) n.wg.Add(1) go n.aliveLoop() }) @@ -197,7 +197,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToInSync(fn func()) { // clients and making it unavailable for use until back in-sync. func (n *node[CHAIN_ID, HEAD, RPC]) declareOutOfSync(isOutOfSync func(num int64, td *big.Int) bool) { n.transitionToOutOfSync(func() { - n.lfcLog.Errorw("RPC Node is out of sync", "nodeState", n.state) + n.lfcLog.Errorw("RPC Node is out of sync", "NodeState", n.state) n.wg.Add(1) go n.outOfSyncLoop(isOutOfSync) }) @@ -222,7 +222,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToOutOfSync(fn func()) { func (n *node[CHAIN_ID, HEAD, RPC]) declareUnreachable() { n.transitionToUnreachable(func() { - n.lfcLog.Errorw("RPC Node is unreachable", "nodeState", n.state) + n.lfcLog.Errorw("RPC Node is unreachable", "NodeState", n.state) n.wg.Add(1) go n.unreachableLoop() }) @@ -245,7 +245,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToUnreachable(fn func()) { fn() } -func (n *node[CHAIN_ID, HEAD, RPC]) declareState(state nodeState) { +func (n *node[CHAIN_ID, HEAD, RPC]) declareState(state NodeState) { if n.State() == nodeStateClosed { return } @@ -265,7 +265,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) declareState(state nodeState) { func (n *node[CHAIN_ID, HEAD, RPC]) declareInvalidChainID() { n.transitionToInvalidChainID(func() { - n.lfcLog.Errorw("RPC Node has the wrong chain ID", "nodeState", n.state) + n.lfcLog.Errorw("RPC Node has the wrong chain ID", "NodeState", n.state) n.wg.Add(1) go n.invalidChainIDLoop() }) @@ -290,7 +290,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToInvalidChainID(fn func()) { func (n *node[CHAIN_ID, HEAD, RPC]) declareSyncing() { n.transitionToSyncing(func() { - n.lfcLog.Errorw("RPC Node is syncing", "nodeState", n.state) + n.lfcLog.Errorw("RPC Node is syncing", "NodeState", n.state) n.wg.Add(1) go n.syncingLoop() }) @@ -317,10 +317,10 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToSyncing(fn func()) { fn() } -func transitionString(state nodeState) string { +func transitionString(state NodeState) string { return fmt.Sprintf("Total number of times node has transitioned to %s", state) } -func transitionFail(from nodeState, to nodeState) string { +func transitionFail(from NodeState, to NodeState) string { return fmt.Sprintf("cannot transition from %#v to %#v", from, to) } diff --git a/common/client/node_fsm_test.go b/common/client/node_fsm_test.go index 4031fb3fa8d..b6b25f6cd53 100644 --- a/common/client/node_fsm_test.go +++ b/common/client/node_fsm_test.go @@ -38,41 +38,41 @@ func TestUnit_Node_StateTransitions(t *testing.T) { t.Run("transitionToAlive", func(t *testing.T) { const destinationState = nodeStateAlive - allowedStates := []nodeState{nodeStateDialed, nodeStateInvalidChainID, nodeStateSyncing} + allowedStates := []NodeState{nodeStateDialed, nodeStateInvalidChainID, nodeStateSyncing} rpc := NewMockRPCClient[types.ID, Head](t) testTransition(t, rpc, testNode.transitionToAlive, destinationState, allowedStates...) }) t.Run("transitionToInSync", func(t *testing.T) { const destinationState = nodeStateAlive - allowedStates := []nodeState{nodeStateOutOfSync, nodeStateSyncing} + allowedStates := []NodeState{nodeStateOutOfSync, nodeStateSyncing} rpc := NewMockRPCClient[types.ID, Head](t) testTransition(t, rpc, testNode.transitionToInSync, destinationState, allowedStates...) }) t.Run("transitionToOutOfSync", func(t *testing.T) { const destinationState = nodeStateOutOfSync - allowedStates := []nodeState{nodeStateAlive} + allowedStates := []NodeState{nodeStateAlive} rpc := NewMockRPCClient[types.ID, Head](t) rpc.On("UnsubscribeAllExcept", nil).Once() testTransition(t, rpc, testNode.transitionToOutOfSync, destinationState, allowedStates...) }) t.Run("transitionToUnreachable", func(t *testing.T) { const destinationState = nodeStateUnreachable - allowedStates := []nodeState{nodeStateUndialed, nodeStateDialed, nodeStateAlive, nodeStateOutOfSync, nodeStateInvalidChainID, nodeStateSyncing} + allowedStates := []NodeState{nodeStateUndialed, nodeStateDialed, nodeStateAlive, nodeStateOutOfSync, nodeStateInvalidChainID, nodeStateSyncing} rpc := NewMockRPCClient[types.ID, Head](t) rpc.On("UnsubscribeAllExcept", nil).Times(len(allowedStates)) testTransition(t, rpc, testNode.transitionToUnreachable, destinationState, allowedStates...) }) t.Run("transitionToInvalidChain", func(t *testing.T) { const destinationState = nodeStateInvalidChainID - allowedStates := []nodeState{nodeStateDialed, nodeStateOutOfSync, nodeStateSyncing} + allowedStates := []NodeState{nodeStateDialed, nodeStateOutOfSync, nodeStateSyncing} rpc := NewMockRPCClient[types.ID, Head](t) rpc.On("UnsubscribeAllExcept", nil).Times(len(allowedStates)) testTransition(t, rpc, testNode.transitionToInvalidChainID, destinationState, allowedStates...) }) t.Run("transitionToSyncing", func(t *testing.T) { const destinationState = nodeStateSyncing - allowedStates := []nodeState{nodeStateDialed, nodeStateOutOfSync, nodeStateInvalidChainID} + allowedStates := []NodeState{nodeStateDialed, nodeStateOutOfSync, nodeStateInvalidChainID} rpc := NewMockRPCClient[types.ID, Head](t) rpc.On("UnsubscribeAllExcept", nil).Times(len(allowedStates)) testTransition(t, rpc, testNode.transitionToSyncing, destinationState, allowedStates...) @@ -90,7 +90,7 @@ func TestUnit_Node_StateTransitions(t *testing.T) { }) } -func testTransition(t *testing.T, rpc *MockRPCClient[types.ID, Head], transition func(node testNode, fn func()), destinationState nodeState, allowedStates ...nodeState) { +func testTransition(t *testing.T, rpc *MockRPCClient[types.ID, Head], transition func(node testNode, fn func()), destinationState NodeState, allowedStates ...NodeState) { node := newTestNode(t, testNodeOpts{rpc: rpc, config: testNodeConfig{nodeIsSyncingEnabled: true}}) for _, allowedState := range allowedStates { m := new(fnMock) @@ -125,7 +125,7 @@ func testTransition(t *testing.T, rpc *MockRPCClient[types.ID, Head], transition func TestNodeState_String(t *testing.T) { t.Run("Ensure all states are meaningful when converted to string", func(t *testing.T) { for _, ns := range allNodeStates { - // ensure that string representation is not nodeState(%d) + // ensure that string representation is not NodeState(%d) assert.NotContains(t, ns.String(), strconv.FormatInt(int64(ns), 10), "Expected node state to have readable name") } }) diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index f0ba2a1539c..32a465378a4 100644 --- a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -98,11 +98,11 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { pollInterval := n.nodePoolCfg.PollInterval() lggr := logger.Sugared(n.lfcLog).Named("Alive").With("noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold, "pollInterval", pollInterval, "pollFailureThreshold", pollFailureThreshold) - lggr.Tracew("Alive loop starting", "nodeState", n.State()) + lggr.Tracew("Alive loop starting", "NodeState", n.State()) headsC, sub, err := n.rpc.SubscribeToHeads(n.nodeCtx) if err != nil { - lggr.Errorw("Initial subscribe for heads failed", "nodeState", n.State()) + lggr.Errorw("Initial subscribe for heads failed", "NodeState", n.State()) n.declareUnreachable() return } @@ -114,7 +114,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { var outOfSyncT *time.Ticker var outOfSyncTC <-chan time.Time if noNewHeadsTimeoutThreshold > 0 { - lggr.Debugw("Head liveness checking enabled", "nodeState", n.State()) + lggr.Debugw("Head liveness checking enabled", "NodeState", n.State()) outOfSyncT = time.NewTicker(noNewHeadsTimeoutThreshold) defer outOfSyncT.Stop() outOfSyncTC = outOfSyncT.C @@ -161,7 +161,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { return case <-pollCh: promPoolRPCNodePolls.WithLabelValues(n.chainID.String(), n.name).Inc() - lggr.Tracew("Pinging RPC", "nodeState", n.State(), "pollFailures", pollFailures) + lggr.Tracew("Pinging RPC", "NodeState", n.State(), "pollFailures", pollFailures) ctx, cancel := context.WithTimeout(n.nodeCtx, pollInterval) err := n.RPC().Ping(ctx) cancel() @@ -171,14 +171,14 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { promPoolRPCNodePollsFailed.WithLabelValues(n.chainID.String(), n.name).Inc() pollFailures++ } - lggr.Warnw(fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", n.String()), "err", err, "pollFailures", pollFailures, "nodeState", n.State()) + lggr.Warnw(fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", n.String()), "err", err, "pollFailures", pollFailures, "NodeState", n.State()) } else { - lggr.Debugw("Ping successful", "nodeState", n.State()) + lggr.Debugw("Ping successful", "NodeState", n.State()) promPoolRPCNodePollsSuccess.WithLabelValues(n.chainID.String(), n.name).Inc() pollFailures = 0 } if pollFailureThreshold > 0 && pollFailures >= pollFailureThreshold { - lggr.Errorw(fmt.Sprintf("RPC endpoint failed to respond to %d consecutive polls", pollFailures), "pollFailures", pollFailures, "nodeState", n.State()) + lggr.Errorw(fmt.Sprintf("RPC endpoint failed to respond to %d consecutive polls", pollFailures), "pollFailures", pollFailures, "NodeState", n.State()) if n.nLiveNodes != nil { if l, _, _ := n.nLiveNodes(); l < 2 { lggr.Criticalf("RPC endpoint failed to respond to polls; %s %s", msgCannotDisable, msgDegradedState) @@ -191,7 +191,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { _, chainInfo := n.StateAndLatest() if outOfSync, liveNodes := n.syncStatus(chainInfo.BlockNumber, chainInfo.BlockDifficulty); outOfSync { // note: there must be another live node for us to be out of sync - lggr.Errorw("RPC endpoint has fallen behind", "blockNumber", chainInfo.BlockNumber, "totalDifficulty", chainInfo.BlockDifficulty, "nodeState", n.State()) + lggr.Errorw("RPC endpoint has fallen behind", "blockNumber", chainInfo.BlockNumber, "totalDifficulty", chainInfo.BlockDifficulty, "NodeState", n.State()) if liveNodes < 2 { lggr.Criticalf("RPC endpoint has fallen behind; %s %s", msgCannotDisable, msgDegradedState) continue @@ -201,7 +201,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } case bh, open := <-headsC: if !open { - lggr.Errorw("Subscription channel unexpectedly closed", "nodeState", n.State()) + lggr.Errorw("Subscription channel unexpectedly closed", "NodeState", n.State()) n.declareUnreachable() return } @@ -209,10 +209,10 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { lggr.Tracew("Got head", "head", bh) if bh.BlockNumber() > highestReceivedBlockNumber { promPoolRPCNodeHighestSeenBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(bh.BlockNumber())) - lggr.Tracew("Got higher block number, resetting timer", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.State()) + lggr.Tracew("Got higher block number, resetting timer", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.BlockNumber(), "NodeState", n.State()) highestReceivedBlockNumber = bh.BlockNumber() } else { - lggr.Tracew("Ignoring previously seen block number", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.State()) + lggr.Tracew("Ignoring previously seen block number", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.BlockNumber(), "NodeState", n.State()) } if outOfSyncT != nil { outOfSyncT.Reset(noNewHeadsTimeoutThreshold) @@ -226,13 +226,13 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } } case err := <-sub.Err(): - lggr.Errorw("Subscription was terminated", "err", err, "nodeState", n.State()) + lggr.Errorw("Subscription was terminated", "err", err, "NodeState", n.State()) n.declareUnreachable() return case <-outOfSyncTC: // We haven't received a head on the channel for at least the // threshold amount of time, mark it broken - lggr.Errorw(fmt.Sprintf("RPC endpoint detected out of sync; no new heads received for %s (last head received was %v)", noNewHeadsTimeoutThreshold, highestReceivedBlockNumber), "nodeState", n.State(), "latestReceivedBlockNumber", highestReceivedBlockNumber, "noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold) + lggr.Errorw(fmt.Sprintf("RPC endpoint detected out of sync; no new heads received for %s (last head received was %v)", noNewHeadsTimeoutThreshold, highestReceivedBlockNumber), "NodeState", n.State(), "latestReceivedBlockNumber", highestReceivedBlockNumber, "noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold) if n.nLiveNodes != nil { if l, _, _ := n.nLiveNodes(); l < 2 { lggr.Criticalf("RPC endpoint detected out of sync; %s %s", msgCannotDisable, msgDegradedState) @@ -314,7 +314,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td outOfSyncAt := time.Now() lggr := logger.Sugared(logger.Named(n.lfcLog, "OutOfSync")) - lggr.Debugw("Trying to revive out-of-sync RPC node", "nodeState", n.State()) + lggr.Debugw("Trying to revive out-of-sync RPC node", "NodeState", n.State()) // Need to redial since out-of-sync nodes are automatically disconnected state := n.createVerifiedConn(n.nodeCtx, lggr) @@ -323,11 +323,11 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td return } - lggr.Tracew("Successfully subscribed to heads feed on out-of-sync RPC node", "nodeState", n.State()) + lggr.Tracew("Successfully subscribed to heads feed on out-of-sync RPC node", "NodeState", n.State()) ch, sub, err := n.rpc.SubscribeToHeads(n.nodeCtx) if err != nil { - lggr.Errorw("Failed to subscribe heads on out-of-sync RPC node", "nodeState", n.State(), "err", err) + lggr.Errorw("Failed to subscribe heads on out-of-sync RPC node", "NodeState", n.State(), "err", err) n.declareUnreachable() return } @@ -339,18 +339,18 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td return case head, open := <-ch: if !open { - lggr.Error("Subscription channel unexpectedly closed", "nodeState", n.State()) + lggr.Error("Subscription channel unexpectedly closed", "NodeState", n.State()) n.declareUnreachable() return } n.setLatestReceived(head.BlockNumber(), head.BlockDifficulty()) if !isOutOfSync(head.BlockNumber(), head.BlockDifficulty()) { // back in-sync! flip back into alive loop - lggr.Infow(fmt.Sprintf("%s: %s. Node was out-of-sync for %s", msgInSync, n.String(), time.Since(outOfSyncAt)), "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "nodeState", n.State()) + lggr.Infow(fmt.Sprintf("%s: %s. Node was out-of-sync for %s", msgInSync, n.String(), time.Since(outOfSyncAt)), "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "NodeState", n.State()) n.declareInSync() return } - lggr.Debugw(msgReceivedBlock, "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "nodeState", n.State()) + lggr.Debugw(msgReceivedBlock, "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "NodeState", n.State()) case <-time.After(zombieNodeCheckInterval(n.chainCfg.NodeNoNewHeadsThreshold())): if n.nLiveNodes != nil { if l, _, _ := n.nLiveNodes(); l < 1 { @@ -360,7 +360,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td } } case err := <-sub.Err(): - lggr.Errorw("Subscription was terminated", "nodeState", n.State(), "err", err) + lggr.Errorw("Subscription was terminated", "NodeState", n.State(), "err", err) n.declareUnreachable() return } @@ -385,7 +385,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) unreachableLoop() { unreachableAt := time.Now() lggr := logger.Sugared(logger.Named(n.lfcLog, "Unreachable")) - lggr.Debugw("Trying to revive unreachable RPC node", "nodeState", n.State()) + lggr.Debugw("Trying to revive unreachable RPC node", "NodeState", n.State()) dialRetryBackoff := iutils.NewRedialBackoff() @@ -394,11 +394,11 @@ func (n *node[CHAIN_ID, HEAD, RPC]) unreachableLoop() { case <-n.nodeCtx.Done(): return case <-time.After(dialRetryBackoff.Duration()): - lggr.Tracew("Trying to re-dial RPC node", "nodeState", n.State()) + lggr.Tracew("Trying to re-dial RPC node", "NodeState", n.State()) err := n.rpc.Dial(n.nodeCtx) if err != nil { - lggr.Errorw(fmt.Sprintf("Failed to redial RPC node; still unreachable: %v", err), "err", err, "nodeState", n.State()) + lggr.Errorw(fmt.Sprintf("Failed to redial RPC node; still unreachable: %v", err), "err", err, "NodeState", n.State()) continue } @@ -410,7 +410,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) unreachableLoop() { n.setState(nodeStateUnreachable) continue case nodeStateAlive: - lggr.Infow(fmt.Sprintf("Successfully redialled and verified RPC node %s. Node was offline for %s", n.String(), time.Since(unreachableAt)), "nodeState", n.State()) + lggr.Infow(fmt.Sprintf("Successfully redialled and verified RPC node %s. Node was offline for %s", n.String(), time.Since(unreachableAt)), "NodeState", n.State()) fallthrough default: n.declareState(state) @@ -446,7 +446,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) invalidChainIDLoop() { return } - lggr.Debugw(fmt.Sprintf("Periodically re-checking RPC node %s with invalid chain ID", n.String()), "nodeState", n.State()) + lggr.Debugw(fmt.Sprintf("Periodically re-checking RPC node %s with invalid chain ID", n.String()), "NodeState", n.State()) chainIDRecheckBackoff := iutils.NewRedialBackoff() @@ -460,7 +460,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) invalidChainIDLoop() { case nodeStateInvalidChainID: continue case nodeStateAlive: - lggr.Infow(fmt.Sprintf("Successfully verified RPC node. Node was offline for %s", time.Since(invalidAt)), "nodeState", n.State()) + lggr.Infow(fmt.Sprintf("Successfully verified RPC node. Node was offline for %s", time.Since(invalidAt)), "NodeState", n.State()) fallthrough default: n.declareState(state) @@ -488,7 +488,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) syncingLoop() { syncingAt := time.Now() lggr := logger.Sugared(logger.Named(n.lfcLog, "Syncing")) - lggr.Debugw(fmt.Sprintf("Periodically re-checking RPC node %s with syncing status", n.String()), "nodeState", n.State()) + lggr.Debugw(fmt.Sprintf("Periodically re-checking RPC node %s with syncing status", n.String()), "NodeState", n.State()) // Need to redial since syncing nodes are automatically disconnected state := n.createVerifiedConn(n.nodeCtx, lggr) if state != nodeStateSyncing { @@ -503,20 +503,20 @@ func (n *node[CHAIN_ID, HEAD, RPC]) syncingLoop() { case <-n.nodeCtx.Done(): return case <-time.After(recheckBackoff.Duration()): - lggr.Tracew("Trying to recheck if the node is still syncing", "nodeState", n.State()) + lggr.Tracew("Trying to recheck if the node is still syncing", "NodeState", n.State()) isSyncing, err := n.rpc.IsSyncing(n.nodeCtx) if err != nil { - lggr.Errorw("Unexpected error while verifying RPC node synchronization status", "err", err, "nodeState", n.State()) + lggr.Errorw("Unexpected error while verifying RPC node synchronization status", "err", err, "NodeState", n.State()) n.declareUnreachable() return } if isSyncing { - lggr.Errorw("Verification failed: Node is syncing", "nodeState", n.State()) + lggr.Errorw("Verification failed: Node is syncing", "NodeState", n.State()) continue } - lggr.Infow(fmt.Sprintf("Successfully verified RPC node. Node was syncing for %s", time.Since(syncingAt)), "nodeState", n.State()) + lggr.Infow(fmt.Sprintf("Successfully verified RPC node. Node was syncing for %s", time.Since(syncingAt)), "NodeState", n.State()) n.declareAlive() return } diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go index 031bc1b86a4..b57f43767f7 100644 --- a/common/client/node_lifecycle_test.go +++ b/common/client/node_lifecycle_test.go @@ -49,7 +49,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() expectedError := errors.New("failed to subscribe to rpc") - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(nil, expectedError).Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(nil, nil, expectedError).Once() rpc.On("UnsubscribeAllExcept", nil) // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() @@ -75,10 +75,9 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { close(errChan) sub.On("Err").Return((<-chan error)(errChan)).Once() sub.On("Unsubscribe").Once() - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(sub, nil).Once() - rpc.On("SetAliveLoopSub", sub).Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(nil, sub, nil).Once() // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept", nil).Once() + rpc.On("UnsubscribeAllExcept", mock.Anything).Once() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -88,10 +87,9 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { newSubscribedNode := func(t *testing.T, opts testNodeOpts) testNode { sub := mocks.NewSubscription(t) - sub.On("Err").Return((<-chan error)(nil)) + sub.On("Err").Return(nil) sub.On("Unsubscribe").Once() - opts.rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(sub, nil).Once() - opts.rpc.On("SetAliveLoopSub", sub).Once() + opts.rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() return newDialedNode(t, opts) } t.Run("Stays alive and waits for signal", func(t *testing.T) { @@ -124,19 +122,21 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() + rpc.On("UnsubscribeAllExcept", mock.Anything) + pollError := errors.New("failed to get ClientVersion") // 1. Return error several times, but below threshold - rpc.On("ClientVersion", mock.Anything).Return("", pollError).Run(func(_ mock.Arguments) { + rpc.On("Ping", mock.Anything).Return(pollError).Run(func(_ mock.Arguments) { // stays healthy while below threshold assert.Equal(t, nodeStateAlive, node.State()) - }).Times(pollFailureThreshold - 1) + }) // 2. Successful call that is expected to reset counter - rpc.On("ClientVersion", mock.Anything).Return("client_version", nil).Once() + rpc.On("Ping", mock.Anything).Return(nil).Once() // 3. Return error. If we have not reset the timer, we'll transition to nonAliveState - rpc.On("ClientVersion", mock.Anything).Return("", pollError).Once() + rpc.On("Ping", mock.Anything).Return(pollError).Once() // 4. Once during the call, check if node is alive var ensuredAlive atomic.Bool - rpc.On("ClientVersion", mock.Anything).Return("client_version", nil).Run(func(_ mock.Arguments) { + rpc.On("Ping", mock.Anything).Return(nil).Run(func(_ mock.Arguments) { if ensuredAlive.Load() { return } @@ -144,7 +144,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { assert.Equal(t, nodeStateAlive, node.State()) }).Once() // redundant call to stay in alive state - rpc.On("ClientVersion", mock.Anything).Return("client_version", nil) + rpc.On("Ping", mock.Anything).Return(nil) node.declareAlive() tests.AssertLogCountEventually(t, observedLogs, fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", node.String()), pollFailureThreshold) tests.AssertLogCountEventually(t, observedLogs, "Version poll successful", 2) @@ -168,7 +168,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { pollError := errors.New("failed to get ClientVersion") rpc.On("ClientVersion", mock.Anything).Return("", pollError) // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept", nil).Once() + rpc.On("UnsubscribeAllExcept", mock.Anything).Once() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -971,7 +971,10 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) rpc.On("IsSyncing", mock.Anything).Return(false, nil) - rpc.On("SubscribeToHeads", mock.Anything).Return(nil) + sub := mocks.NewSubscription(t) + sub.On("Err").Return(nil) + sub.On("Unsubscribe").Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() setupRPCForAliveLoop(t, rpc) @@ -992,6 +995,10 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) + sub := mocks.NewSubscription(t) + sub.On("Err").Return(nil) + sub.On("Unsubscribe").Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() setupRPCForAliveLoop(t, rpc) @@ -1053,6 +1060,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { // once for chainID and maybe another one for unreachable rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + rpc.On("UnsubscribeAllExcept", nil) node.declareInvalidChainID() tests.AssertLogEventually(t, observedLogs, "Failed to verify chain ID for node") tests.AssertEventually(t, func() bool { @@ -1074,6 +1082,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) + rpc.On("UnsubscribeAllExcept", nil) node.declareInvalidChainID() tests.AssertLogCountEventually(t, observedLogs, "Failed to verify RPC node; remote endpoint returned the wrong chain ID", 2) tests.AssertEventually(t, func() bool { @@ -1124,6 +1133,10 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() rpc.On("IsSyncing", mock.Anything).Return(false, nil).Once() + sub := mocks.NewSubscription(t) + sub.On("Err").Return(nil) + sub.On("Unsubscribe").Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() setupRPCForAliveLoop(t, rpc) @@ -1588,6 +1601,10 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() rpc.On("IsSyncing", mock.Anything).Return(true, nil).Once() rpc.On("IsSyncing", mock.Anything).Return(false, nil).Once() + sub := mocks.NewSubscription(t) + sub.On("Err").Return(nil) + sub.On("Unsubscribe").Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() setupRPCForAliveLoop(t, rpc) diff --git a/common/client/node_selector_priority_level_test.go b/common/client/node_selector_priority_level_test.go index 1467aaec0aa..d9139a4ccaf 100644 --- a/common/client/node_selector_priority_level_test.go +++ b/common/client/node_selector_priority_level_test.go @@ -19,7 +19,7 @@ func TestPriorityLevelNodeSelector(t *testing.T) { type nodeClient RPCClient[types.ID, Head] type testNode struct { order int32 - state nodeState + state NodeState } type testCase struct { name string diff --git a/common/client/send_only_node.go b/common/client/send_only_node.go index 16fcb92a1f6..5d48bc172b9 100644 --- a/common/client/send_only_node.go +++ b/common/client/send_only_node.go @@ -36,8 +36,8 @@ type SendOnlyNode[ RPC() RPC String() string - // State returns nodeState - State() nodeState + // State returns NodeState + State() NodeState // Name is a unique identifier for this node. Name() string } @@ -51,7 +51,7 @@ type sendOnlyNode[ services.StateMachine stateMu sync.RWMutex // protects state* fields - state nodeState + state NodeState rpc RPC uri url.URL @@ -140,7 +140,7 @@ func (s *sendOnlyNode[CHAIN_ID, RPC]) start(startCtx context.Context) { promPoolRPCNodeTransitionsToAlive.WithLabelValues(s.chainID.String(), s.name).Inc() s.setState(nodeStateAlive) - s.log.Infow("Sendonly RPC Node is online", "nodeState", s.state) + s.log.Infow("Sendonly RPC Node is online", "NodeState", s.state) } func (s *sendOnlyNode[CHAIN_ID, RPC]) Close() error { @@ -165,7 +165,7 @@ func (s *sendOnlyNode[CHAIN_ID, RPC]) String() string { return fmt.Sprintf("(%s)%s:%s", Secondary.String(), s.name, s.uri.Redacted()) } -func (s *sendOnlyNode[CHAIN_ID, RPC]) setState(state nodeState) (changed bool) { +func (s *sendOnlyNode[CHAIN_ID, RPC]) setState(state NodeState) (changed bool) { s.stateMu.Lock() defer s.stateMu.Unlock() if s.state == state { @@ -175,7 +175,7 @@ func (s *sendOnlyNode[CHAIN_ID, RPC]) setState(state nodeState) (changed bool) { return true } -func (s *sendOnlyNode[CHAIN_ID, RPC]) State() nodeState { +func (s *sendOnlyNode[CHAIN_ID, RPC]) State() NodeState { s.stateMu.RLock() defer s.stateMu.RUnlock() return s.state diff --git a/common/client/send_only_node_lifecycle.go b/common/client/send_only_node_lifecycle.go index c66d267ed42..20d54ba68cf 100644 --- a/common/client/send_only_node_lifecycle.go +++ b/common/client/send_only_node_lifecycle.go @@ -61,7 +61,7 @@ func (s *sendOnlyNode[CHAIN_ID, RPC]) verifyLoop() { if !ok { return } - s.log.Infow("Sendonly RPC Node is online", "nodeState", s.state) + s.log.Infow("Sendonly RPC Node is online", "NodeState", s.state) return } } diff --git a/common/client/send_only_node_test.go b/common/client/send_only_node_test.go index 79f4bfd60e3..532946da48f 100644 --- a/common/client/send_only_node_test.go +++ b/common/client/send_only_node_test.go @@ -46,7 +46,7 @@ func TestStartSendOnlyNode(t *testing.T) { client := newMockSendOnlyClient[types.ID](t) client.On("Close").Once() expectedError := errors.New("some http error") - client.On("DialHTTP").Return(expectedError).Once() + client.On("Dial", mock.Anything).Return(expectedError).Once() s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), types.RandomID(), client) defer func() { assert.NoError(t, s.Close()) }() @@ -61,7 +61,7 @@ func TestStartSendOnlyNode(t *testing.T) { lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) client := newMockSendOnlyClient[types.ID](t) client.On("Close").Once() - client.On("DialHTTP").Return(nil).Once() + client.On("Dial", mock.Anything).Return(nil).Once() s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), types.NewIDFromInt(0), client) defer func() { assert.NoError(t, s.Close()) }() @@ -76,7 +76,7 @@ func TestStartSendOnlyNode(t *testing.T) { lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) client := newMockSendOnlyClient[types.ID](t) client.On("Close").Once() - client.On("DialHTTP").Return(nil) + client.On("Dial", mock.Anything).Return(nil) expectedError := errors.New("failed to get chain ID") chainID := types.RandomID() const failuresCount = 2 @@ -100,7 +100,7 @@ func TestStartSendOnlyNode(t *testing.T) { lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) client := newMockSendOnlyClient[types.ID](t) client.On("Close").Once() - client.On("DialHTTP").Return(nil).Once() + client.On("Dial", mock.Anything).Return(nil).Once() configuredChainID := types.NewIDFromInt(11) rpcChainID := types.NewIDFromInt(20) const failuresCount = 2 @@ -123,7 +123,7 @@ func TestStartSendOnlyNode(t *testing.T) { lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) client := newMockSendOnlyClient[types.ID](t) client.On("Close").Once() - client.On("DialHTTP").Return(nil).Once() + client.On("Dial", mock.Anything).Return(nil).Once() configuredChainID := types.RandomID() client.On("ChainID", mock.Anything).Return(configuredChainID, nil) s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), configuredChainID, client) diff --git a/core/chains/evm/client/chain_client.go b/core/chains/evm/client/chain_client.go index af996a646b9..612e5ba7ef0 100644 --- a/core/chains/evm/client/chain_client.go +++ b/core/chains/evm/client/chain_client.go @@ -264,11 +264,7 @@ func (c *chainClient) LatestBlockHeight(ctx context.Context) (*big.Int, error) { } func (c *chainClient) NodeStates() map[string]string { - nodeStates := make(map[string]string) - for k, v := range c.multiNode.NodeStates() { - nodeStates[k] = v.String() - } - return nodeStates + return c.multiNode.NodeStates() } func (c *chainClient) PendingCodeAt(ctx context.Context, account common.Address) (b []byte, err error) { From c399391d6bac53d237fcc672bdfea1fc8114b2dc Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Tue, 21 May 2024 12:03:54 -0400 Subject: [PATCH 07/58] Fix mocks --- common/client/node_lifecycle.go | 7 ++- common/client/node_lifecycle_test.go | 91 ++++++++++++++++------------ core/chains/evm/client/rpc_client.go | 7 ++- 3 files changed, 65 insertions(+), 40 deletions(-) diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index 32a465378a4..5de74708ec2 100644 --- a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -244,7 +244,12 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } n.declareOutOfSync(func(num int64, td *big.Int) bool { return num < highestReceivedBlockNumber }) return - case latestFinalized := <-finalizedHeadCh: + case latestFinalized, open := <-finalizedHeadCh: + if !open { + lggr.Errorw("Subscription channel unexpectedly closed", "NodeState", n.State()) + n.declareUnreachable() + return + } if !latestFinalized.IsValid() { lggr.Warn("Latest finalized block is not valid") continue diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go index b57f43767f7..243de6cd434 100644 --- a/common/client/node_lifecycle_test.go +++ b/common/client/node_lifecycle_test.go @@ -88,8 +88,8 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { newSubscribedNode := func(t *testing.T, opts testNodeOpts) testNode { sub := mocks.NewSubscription(t) sub.On("Err").Return(nil) - sub.On("Unsubscribe").Once() - opts.rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() + sub.On("Unsubscribe") + opts.rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil) return newDialedNode(t, opts) } t.Run("Stays alive and waits for signal", func(t *testing.T) { @@ -123,13 +123,15 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("Dial", mock.Anything).Return(nil) + rpc.On("ChainID", mock.Anything).Return(node.chainID, nil) pollError := errors.New("failed to get ClientVersion") // 1. Return error several times, but below threshold rpc.On("Ping", mock.Anything).Return(pollError).Run(func(_ mock.Arguments) { // stays healthy while below threshold assert.Equal(t, nodeStateAlive, node.State()) - }) + }).Times(pollFailureThreshold) // 2. Successful call that is expected to reset counter rpc.On("Ping", mock.Anything).Return(nil).Once() // 3. Return error. If we have not reset the timer, we'll transition to nonAliveState @@ -147,7 +149,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { rpc.On("Ping", mock.Anything).Return(nil) node.declareAlive() tests.AssertLogCountEventually(t, observedLogs, fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", node.String()), pollFailureThreshold) - tests.AssertLogCountEventually(t, observedLogs, "Version poll successful", 2) + tests.AssertLogCountEventually(t, observedLogs, "Ping successful", 2) assert.True(t, ensuredAlive.Load(), "expected to ensure that node was alive") }) @@ -166,7 +168,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() pollError := errors.New("failed to get ClientVersion") - rpc.On("ClientVersion", mock.Anything).Return("", pollError) + rpc.On("Ping", mock.Anything).Return(pollError) // disconnects all on transfer to unreachable rpc.On("UnsubscribeAllExcept", mock.Anything).Once() // might be called in unreachable loop @@ -195,7 +197,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { return 1, 20, big.NewInt(10) } pollError := errors.New("failed to get ClientVersion") - rpc.On("ClientVersion", mock.Anything).Return("", pollError) + rpc.On("Ping", mock.Anything).Return(pollError) node.declareAlive() tests.AssertLogEventually(t, observedLogs, fmt.Sprintf("RPC endpoint failed to respond to %d consecutive polls", pollFailureThreshold)) assert.Equal(t, nodeStateAlive, node.State()) @@ -219,13 +221,13 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { return 10, syncThreshold + node.stateLatestBlockNumber + 1, big.NewInt(10) } - rpc.On("ClientVersion", mock.Anything).Return("", nil) + rpc.On("Ping", mock.Anything).Return(nil) // tries to redial in outOfSync rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Run(func(_ mock.Arguments) { assert.Equal(t, nodeStateOutOfSync, node.State()) }).Once() // disconnects all on transfer to unreachable or outOfSync - rpc.On("UnsubscribeAllExcept", nil).Maybe() + rpc.On("UnsubscribeAllExcept", mock.Anything).Maybe() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Run(func(_ mock.Arguments) { require.Equal(t, nodeStateOutOfSync, node.State()) @@ -252,7 +254,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { return 1, syncThreshold + node.stateLatestBlockNumber + 1, big.NewInt(10) } - rpc.On("ClientVersion", mock.Anything).Return("", nil) + rpc.On("Ping", mock.Anything).Return(nil) node.declareAlive() tests.AssertLogEventually(t, observedLogs, fmt.Sprintf("RPC endpoint has fallen behind; %s %s", msgCannotDisable, msgDegradedState)) }) @@ -274,9 +276,9 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { return 1, node.stateLatestBlockNumber + 100, big.NewInt(10) } - rpc.On("ClientVersion", mock.Anything).Return("", nil) + rpc.On("Ping", mock.Anything).Return(nil) node.declareAlive() - tests.AssertLogCountEventually(t, observedLogs, "Version poll successful", 2) + tests.AssertLogCountEventually(t, observedLogs, "Ping successful", 2) assert.Equal(t, nodeStateAlive, node.State()) }) @@ -296,7 +298,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { assert.Equal(t, nodeStateOutOfSync, node.State()) }).Once() // disconnects all on transfer to unreachable or outOfSync - rpc.On("UnsubscribeAllExcept", nil).Maybe() + rpc.On("UnsubscribeAllExcept", mock.Anything).Maybe() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -333,11 +335,10 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) sub.On("Unsubscribe").Once() - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) { - ch := args.Get(1).(chan<- Head) + ch := make(chan Head) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { close(ch) - }).Return(sub, nil).Once() - rpc.On("SetAliveLoopSub", sub).Once() + }).Return((<-chan Head)(ch), sub, nil).Once() lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) node := newDialedNode(t, testNodeOpts{ lggr: lggr, @@ -349,7 +350,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() // disconnects all on transfer to unreachable or outOfSync - rpc.On("UnsubscribeAllExcept", nil).Once() + rpc.On("UnsubscribeAllExcept", mock.Anything).Once() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -385,16 +386,15 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { t.Parallel() rpc := NewMockRPCClient[types.ID, Head](t) sub := mocks.NewSubscription(t) - sub.On("Err").Return((<-chan error)(nil)) + sub.On("Err").Return(nil) sub.On("Unsubscribe").Once() const blockNumber = 1000 const finalityDepth = 10 const expectedBlock = 990 - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) { - ch := args.Get(1).(chan<- Head) + ch := make(chan Head) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { go writeHeads(t, ch, head{BlockNumber: blockNumber - 1}, head{BlockNumber: blockNumber}, head{BlockNumber: blockNumber - 1}) - }).Return(sub, nil).Once() - rpc.On("SetAliveLoopSub", sub).Once() + }).Return((<-chan Head)(ch), sub, nil).Once() name := "node-" + rand.Str(5) node := newDialedNode(t, testNodeOpts{ config: testNodeConfig{}, @@ -418,10 +418,15 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { rpc := NewMockRPCClient[types.ID, Head](t) rpc.On("LatestFinalizedBlock", mock.Anything).Return(newMockHead(t), errors.New("failed to get finalized block")) sub := mocks.NewSubscription(t) - sub.On("Err").Return((<-chan error)(nil)) - sub.On("Unsubscribe").Once() - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(sub, nil).Once() - rpc.On("SetAliveLoopSub", sub).Once() + sub.On("Err").Return(nil) + sub.On("Unsubscribe") + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() + ch := make(chan Head) + head := newMockHead(t) + head.On("IsValid").Return(false) + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Run(func(args mock.Arguments) { + ch <- head + }).Return((<-chan Head)(ch), sub, nil).Once() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ config: testNodeConfig{ @@ -440,14 +445,17 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { t.Run("Logs warning if latest finalized block is not valid", func(t *testing.T) { t.Parallel() rpc := NewMockRPCClient[types.ID, Head](t) + sub := mocks.NewSubscription(t) + sub.On("Err").Return(nil) + sub.On("Unsubscribe") + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() + ch := make(chan Head, 1) head := newMockHead(t) head.On("IsValid").Return(false) - rpc.On("LatestFinalizedBlock", mock.Anything).Return(head, nil) - sub := mocks.NewSubscription(t) - sub.On("Err").Return((<-chan error)(nil)) - sub.On("Unsubscribe").Once() - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(sub, nil).Once() - rpc.On("SetAliveLoopSub", sub).Once() + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Run(func(args mock.Arguments) { + ch <- head + }).Return((<-chan Head)(ch), sub, nil).Once() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ config: testNodeConfig{ @@ -471,15 +479,22 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { rpc.On("LatestFinalizedBlock", mock.Anything).Return(head{BlockNumber: expectedBlock - 1}.ToMockHead(t), nil).Once() rpc.On("LatestFinalizedBlock", mock.Anything).Return(head{BlockNumber: expectedBlock}.ToMockHead(t), nil) sub := mocks.NewSubscription(t) - sub.On("Err").Return((<-chan error)(nil)) - sub.On("Unsubscribe").Once() - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) { - ch := args.Get(1).(chan<- Head) + sub.On("Err").Return(nil) + sub.On("Unsubscribe") + ch := make(chan Head, 1) + // TODO: Fix this test + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Run(func(args mock.Arguments) { + // ensure that "calculated" finalized head is larger than actual, to ensure we are correctly setting + // the metric + //go writeHeads(t, ch, head{BlockNumber: expectedBlock*2 + finalityDepth}) + }).Return((<-chan Head)(ch), sub, nil).Once() + + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Run(func(args mock.Arguments) { // ensure that "calculated" finalized head is larger than actual, to ensure we are correctly setting // the metric go writeHeads(t, ch, head{BlockNumber: expectedBlock*2 + finalityDepth}) - }).Return(sub, nil).Once() - rpc.On("SetAliveLoopSub", sub).Once() + }).Return((<-chan Head)(ch), sub, nil).Once() + name := "node-" + rand.Str(5) node := newDialedNode(t, testNodeOpts{ config: testNodeConfig{ diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index 2be0c39b6d8..02ac7978cea 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -479,7 +479,12 @@ func (r *RpcClient) HeaderByHash(ctx context.Context, hash common.Hash) (header } func (r *RpcClient) LatestFinalizedBlock(ctx context.Context) (*evmtypes.Head, error) { - return r.blockByNumber(ctx, rpc.FinalizedBlockNumber.String()) + head, err := r.blockByNumber(ctx, rpc.FinalizedBlockNumber.String()) + if err != nil { + r.rpcLog.Warnw("Failed to fetch latest finalized block", "err", err) + return nil, err + } + return head, nil } func (r *RpcClient) BlockByNumber(ctx context.Context, number *big.Int) (head *evmtypes.Head, err error) { From b2b5926451c78b87f936f3ebb73bb1c47ff4bffb Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 22 May 2024 10:08:58 -0400 Subject: [PATCH 08/58] Update node_lifecycle_test.go --- common/client/node_lifecycle_test.go | 60 ++++++++++++---------------- 1 file changed, 26 insertions(+), 34 deletions(-) diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go index 243de6cd434..cc511834b9e 100644 --- a/common/client/node_lifecycle_test.go +++ b/common/client/node_lifecycle_test.go @@ -378,7 +378,6 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { node.declareAlive() tests.AssertEventually(t, func() bool { state, chainInfo := node.StateAndLatest() - // TODO: nil pointer dereference... block difficulty is nil? return state == nodeStateAlive && chainInfo.BlockNumber == expectedBlockNumber }) }) @@ -413,20 +412,16 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { return float64(expectedBlock) == m.Gauge.GetValue() }) }) - t.Run("Logs warning if failed to get finalized block", func(t *testing.T) { + t.Run("Logs warning if failed to subscrive to latest finalized blocks", func(t *testing.T) { t.Parallel() rpc := NewMockRPCClient[types.ID, Head](t) - rpc.On("LatestFinalizedBlock", mock.Anything).Return(newMockHead(t), errors.New("failed to get finalized block")) sub := mocks.NewSubscription(t) - sub.On("Err").Return(nil) + sub.On("Err").Return(nil).Maybe() sub.On("Unsubscribe") rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() - ch := make(chan Head) - head := newMockHead(t) - head.On("IsValid").Return(false) - rpc.On("SubscribeToFinalizedHeads", mock.Anything).Run(func(args mock.Arguments) { - ch <- head - }).Return((<-chan Head)(ch), sub, nil).Once() + expectedError := errors.New("failed to subscribe to finalized heads") + rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return(nil, sub, expectedError).Once() + rpc.On("UnsubscribeAllExcept", mock.Anything).Maybe() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ config: testNodeConfig{ @@ -440,7 +435,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() node.declareAlive() - tests.AssertLogEventually(t, observedLogs, "Failed to fetch latest finalized block") + tests.AssertLogEventually(t, observedLogs, "Failed to subscribe to finalized heads") }) t.Run("Logs warning if latest finalized block is not valid", func(t *testing.T) { t.Parallel() @@ -476,23 +471,20 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { rpc := NewMockRPCClient[types.ID, Head](t) const expectedBlock = 1101 const finalityDepth = 10 - rpc.On("LatestFinalizedBlock", mock.Anything).Return(head{BlockNumber: expectedBlock - 1}.ToMockHead(t), nil).Once() - rpc.On("LatestFinalizedBlock", mock.Anything).Return(head{BlockNumber: expectedBlock}.ToMockHead(t), nil) sub := mocks.NewSubscription(t) sub.On("Err").Return(nil) sub.On("Unsubscribe") ch := make(chan Head, 1) - // TODO: Fix this test + // TODO: Should the head subscription even update the finalized block metric? + // TODO: Or only the finalized head subscription?? rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Run(func(args mock.Arguments) { // ensure that "calculated" finalized head is larger than actual, to ensure we are correctly setting // the metric - //go writeHeads(t, ch, head{BlockNumber: expectedBlock*2 + finalityDepth}) + go writeHeads(t, ch, head{BlockNumber: expectedBlock*2 + finalityDepth}) }).Return((<-chan Head)(ch), sub, nil).Once() rpc.On("SubscribeToFinalizedHeads", mock.Anything).Run(func(args mock.Arguments) { - // ensure that "calculated" finalized head is larger than actual, to ensure we are correctly setting - // the metric - go writeHeads(t, ch, head{BlockNumber: expectedBlock*2 + finalityDepth}) + go writeHeads(t, ch, head{BlockNumber: expectedBlock - 1}, head{BlockNumber: expectedBlock}) }).Return((<-chan Head)(ch), sub, nil).Once() name := "node-" + rand.Str(5) @@ -515,6 +507,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { require.NoError(t, err) var m = &prom.Metric{} require.NoError(t, metric.Write(m)) + fmt.Println("Val:", m.Gauge.GetValue()) return float64(expectedBlock) == m.Gauge.GetValue() }) }) @@ -547,9 +540,9 @@ func writeHeads(t *testing.T, ch chan<- Head, heads ...head) { func setupRPCForAliveLoop(t *testing.T, rpc *MockRPCClient[types.ID, Head]) { rpc.On("Dial", mock.Anything).Return(nil).Maybe() aliveSubscription := mocks.NewSubscription(t) - aliveSubscription.On("Err").Return((<-chan error)(nil)).Maybe() + aliveSubscription.On("Err").Return(nil).Maybe() aliveSubscription.On("Unsubscribe").Maybe() - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(aliveSubscription, nil).Maybe() + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), aliveSubscription, nil).Maybe() rpc.On("UnsubscribeAllExcept", nil).Maybe() rpc.On("SetAliveLoopSub", mock.Anything).Maybe() } @@ -596,10 +589,11 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) outOfSyncSubscription.On("Unsubscribe").Once() heads := []head{{BlockNumber: 7}, {BlockNumber: 11}, {BlockNumber: 13}} - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) { - ch := args.Get(1).(chan<- Head) + ch := make(chan Head) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { go writeHeads(t, ch, heads...) - }).Return(outOfSyncSubscription, nil).Once() + }).Return((<-chan Head)(ch), outOfSyncSubscription, nil).Once() + rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() node.declareOutOfSync(func(num int64, td *big.Int) bool { @@ -720,7 +714,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() expectedError := errors.New("failed to subscribe") - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(nil, expectedError) + rpc.On("SubscribeToHeads", mock.Anything).Return(nil, nil, expectedError).Once() rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() node.declareOutOfSync(stubIsOutOfSync) tests.AssertEventually(t, func() bool { @@ -747,7 +741,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { errChan <- errors.New("subscription was terminate") sub.On("Err").Return((<-chan error)(errChan)) sub.On("Unsubscribe").Once() - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(sub, nil).Once() + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() node.declareOutOfSync(stubIsOutOfSync) tests.AssertLogEventually(t, observedLogs, "Subscription was terminated") @@ -773,10 +767,10 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) sub.On("Unsubscribe").Once() - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) { - ch := args.Get(1).(chan<- Head) + ch := make(chan Head) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { close(ch) - }).Return(sub, nil).Once() + }).Return((<-chan Head)(ch), sub, nil).Once() rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() node.declareOutOfSync(stubIsOutOfSync) tests.AssertLogEventually(t, observedLogs, "Subscription channel unexpectedly closed") @@ -804,11 +798,10 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) outOfSyncSubscription.On("Unsubscribe").Once() const highestBlock = 1000 - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) { - ch := args.Get(1).(chan<- Head) + ch := make(chan Head) + rpc.On("SubscribeToHeads", mock.Anything).Run(func(args mock.Arguments) { go writeHeads(t, ch, head{BlockNumber: highestBlock - 1}, head{BlockNumber: highestBlock}) - }).Return(outOfSyncSubscription, nil).Once() - + }).Return((<-chan Head)(ch), outOfSyncSubscription, nil).Once() setupRPCForAliveLoop(t, rpc) node.declareOutOfSync(func(num int64, td *big.Int) bool { @@ -844,8 +837,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { outOfSyncSubscription := mocks.NewSubscription(t) outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) outOfSyncSubscription.On("Unsubscribe").Once() - rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(outOfSyncSubscription, nil).Once() - + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), outOfSyncSubscription, nil).Once() setupRPCForAliveLoop(t, rpc) node.declareOutOfSync(stubIsOutOfSync) From 029c82bdeb09b077d1b450c482ae7c65c6782e4b Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 22 May 2024 12:10:32 -0400 Subject: [PATCH 09/58] Fix all client tests --- common/client/node_lifecycle_test.go | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go index cc511834b9e..2abc8da2f6d 100644 --- a/common/client/node_lifecycle_test.go +++ b/common/client/node_lifecycle_test.go @@ -475,14 +475,8 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { sub.On("Err").Return(nil) sub.On("Unsubscribe") ch := make(chan Head, 1) - // TODO: Should the head subscription even update the finalized block metric? - // TODO: Or only the finalized head subscription?? - rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Run(func(args mock.Arguments) { - // ensure that "calculated" finalized head is larger than actual, to ensure we are correctly setting - // the metric - go writeHeads(t, ch, head{BlockNumber: expectedBlock*2 + finalityDepth}) - }).Return((<-chan Head)(ch), sub, nil).Once() - + // I think it has to in case finality tag doesn't exist? + rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() rpc.On("SubscribeToFinalizedHeads", mock.Anything).Run(func(args mock.Arguments) { go writeHeads(t, ch, head{BlockNumber: expectedBlock - 1}, head{BlockNumber: expectedBlock}) }).Return((<-chan Head)(ch), sub, nil).Once() From bd14d519a7c15cecf5e3dc62710e340fa3c78927 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Fri, 24 May 2024 15:46:00 -0400 Subject: [PATCH 10/58] Fix tests --- common/client/models.go | 6 +- common/client/multi_node.go | 35 +-- common/client/multi_node_test.go | 10 +- common/client/node_lifecycle.go | 5 +- core/chains/evm/client/chain_client.go | 39 ++-- core/chains/evm/client/chain_client_test.go | 20 +- core/chains/evm/client/client.go | 1 + core/chains/evm/client/client_test.go | 25 ++- core/chains/evm/client/evm_client.go | 6 +- core/chains/evm/client/helpers_test.go | 14 +- .../rpc_client.go => mock_evm_rpc_client.go} | 211 +++++++++++++----- core/chains/evm/client/null_client_test.go | 4 +- core/chains/evm/client/rpc_client.go | 46 +++- .../evm/client/simulated_backend_client.go | 8 +- 14 files changed, 295 insertions(+), 135 deletions(-) rename core/chains/evm/client/{mocks/rpc_client.go => mock_evm_rpc_client.go} (76%) diff --git a/common/client/models.go b/common/client/models.go index fd0c3915940..cef0bee0573 100644 --- a/common/client/models.go +++ b/common/client/models.go @@ -23,10 +23,12 @@ const ( ) // sendTxSevereErrors - error codes which signal that transaction would never be accepted in its current form by the node -var sendTxSevereErrors = []SendTxReturnCode{Fatal, Underpriced, Unsupported, ExceedsMaxFee, FeeOutOfValidRange, Unknown} +// TODO: Implement Transaction Sending +//var sendTxSevereErrors = []SendTxReturnCode{Fatal, Underpriced, Unsupported, ExceedsMaxFee, FeeOutOfValidRange, Unknown} // sendTxSuccessfulCodes - error codes which signal that transaction was accepted by the node -var sendTxSuccessfulCodes = []SendTxReturnCode{Successful, TransactionAlreadyKnown} +// TODO: Implement Transaction Sending +//var sendTxSuccessfulCodes = []SendTxReturnCode{Successful, TransactionAlreadyKnown} func (c SendTxReturnCode) String() string { switch c { diff --git a/common/client/multi_node.go b/common/client/multi_node.go index 81e2fb94d2e..c0826874853 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -3,11 +3,12 @@ package client import ( "context" "fmt" - "github.com/smartcontractkit/chainlink/v2/common/config" "math/big" "sync" "time" + "github.com/smartcontractkit/chainlink/v2/common/config" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -40,6 +41,7 @@ type MultiNode[ HEAD types.Head[BLOCK_HASH], RPC_CLIENT any, ] interface { + Dial(ctx context.Context) error // SelectRPC - returns the best healthy RPCClient SelectRPC() (RPC_CLIENT, error) // DoAll - calls `do` sequentially on all healthy RPCClients. @@ -59,18 +61,18 @@ type multiNode[ RPC_CLIENT RPCClient[CHAIN_ID, HEAD], ] struct { services.StateMachine - primaryNodes []Node[CHAIN_ID, HEAD, RPC_CLIENT] - sendOnlyNodes []SendOnlyNode[CHAIN_ID, RPC_CLIENT] - chainID CHAIN_ID - lggr logger.SugaredLogger - selectionMode string - noNewHeadsThreshold time.Duration - nodeSelector NodeSelector[CHAIN_ID, HEAD, RPC_CLIENT] - leaseDuration time.Duration - leaseTicker *time.Ticker - chainType config.ChainType - chainFamily string - reportInterval time.Duration + primaryNodes []Node[CHAIN_ID, HEAD, RPC_CLIENT] + sendOnlyNodes []SendOnlyNode[CHAIN_ID, RPC_CLIENT] + chainID CHAIN_ID + lggr logger.SugaredLogger + selectionMode string + // noNewHeadsThreshold time.Duration TODO: Move this? + nodeSelector NodeSelector[CHAIN_ID, HEAD, RPC_CLIENT] + leaseDuration time.Duration + leaseTicker *time.Ticker + chainType config.ChainType + chainFamily string + reportInterval time.Duration activeMu sync.RWMutex activeNode Node[CHAIN_ID, HEAD, RPC_CLIENT] @@ -145,7 +147,7 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) DoAll(ctx context.Co } } if callsCompleted == 0 { - return ErroringNodeError + return fmt.Errorf("no calls were completed") } return nil } @@ -165,7 +167,6 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) NodeStates() map[str // // Nodes handle their own redialing and runloops, so this function does not // return any error if the nodes aren't available -// TODO: Remove Dial() from MultiNode? Who will start the nodes? func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) Dial(ctx context.Context) error { return c.StartOnce("MultiNode", func() (merr error) { if len(c.primaryNodes) == 0 { @@ -253,8 +254,8 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) selectNode() (node N if c.activeNode == nil { c.lggr.Criticalw("No live RPC nodes available", "NodeSelectionMode", c.nodeSelector.Name()) - errmsg := fmt.Errorf("no live nodes available for chain %s", c.chainID.String()) - c.SvcErrBuffer.Append(errmsg) + //errmsg := fmt.Errorf("no live nodes available for chain %s", c.chainID.String()) + c.SvcErrBuffer.Append(ErroringNodeError) err = ErroringNodeError } diff --git a/common/client/multi_node_test.go b/common/client/multi_node_test.go index 6c8d1f33f0b..9be3e772e48 100644 --- a/common/client/multi_node_test.go +++ b/common/client/multi_node_test.go @@ -1,21 +1,21 @@ package client import ( - "errors" "fmt" "math/big" "math/rand" "testing" "time" + "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "go.uber.org/zap" - "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" + "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink/v2/common/types" ) @@ -47,12 +47,6 @@ func newTestMultiNode(t *testing.T, opts multiNodeOpts) testMultiNode { } } -func newMultiNodeRPCClient(t *testing.T) *mockRPC[types.ID, *big.Int, Hashable, Hashable, any, Hashable, any, any, - types.Receipt[Hashable, Hashable], Hashable, types.Head[Hashable], any] { - return newMockRPC[types.ID, *big.Int, Hashable, Hashable, any, Hashable, any, any, - types.Receipt[Hashable, Hashable], Hashable, types.Head[Hashable], any](t) -} - func newHealthyNode(t *testing.T, chainID types.ID) *mockNode[types.ID, types.Head[Hashable], multiNodeRPCClient] { return newNodeWithState(t, chainID, nodeStateAlive) } diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index 5de74708ec2..b6153506a26 100644 --- a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -3,11 +3,12 @@ package client import ( "context" "fmt" - "github.com/smartcontractkit/chainlink/v2/common/types" "math" "math/big" "time" + "github.com/smartcontractkit/chainlink/v2/common/types" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -69,7 +70,7 @@ const ( msgDegradedState = "Chainlink is now operating in a degraded state and urgent action is required to resolve the issue" ) -const rpcSubscriptionMethodNewHeads = "newHeads" +// const rpcSubscriptionMethodNewHeads = "newHeads" // Node is a FSM // Each state has a loop that goes with it, which monitors the node and moves it into another state as necessary. diff --git a/core/chains/evm/client/chain_client.go b/core/chains/evm/client/chain_client.go index 612e5ba7ef0..c7b0763153a 100644 --- a/core/chains/evm/client/chain_client.go +++ b/core/chains/evm/client/chain_client.go @@ -29,7 +29,7 @@ type chainClient struct { *big.Int, common.Hash, *evmtypes.Head, - *RpcClient, + EvmRpcClient, ] logger logger.SugaredLogger chainType config.ChainType @@ -41,8 +41,8 @@ func NewChainClient( selectionMode string, leaseDuration time.Duration, noNewHeadsThreshold time.Duration, - nodes []commonclient.Node[*big.Int, *evmtypes.Head, *RpcClient], - sendonlys []commonclient.SendOnlyNode[*big.Int, *RpcClient], + nodes []commonclient.Node[*big.Int, *evmtypes.Head, EvmRpcClient], + sendonlys []commonclient.SendOnlyNode[*big.Int, EvmRpcClient], chainID *big.Int, chainType config.ChainType, clientErrors evmconfig.ClientErrors, @@ -93,19 +93,19 @@ func (c *chainClient) BatchCallContextAll(ctx context.Context, b []ethrpc.BatchE return selectionErr } - doFunc := func(ctx context.Context, rpc *RpcClient, isSendOnly bool) bool { + doFunc := func(ctx context.Context, rpc EvmRpcClient, isSendOnly bool) bool { if rpc == main { return true } // Parallel call made to all other nodes with ignored return value wg.Add(1) - go func(rpc *RpcClient) { + go func(rpc EvmRpcClient) { defer wg.Done() err := rpc.BatchCallContext(ctx, b) if err != nil { - rpc.rpcLog.Debugw("Secondary node BatchCallContext failed", "err", err) + c.logger.Debugw("Secondary node BatchCallContext failed", "err", err) } else { - rpc.rpcLog.Trace("Secondary node BatchCallContext success") + c.logger.Debug("Secondary node BatchCallContext success") } }(rpc) return true @@ -165,7 +165,8 @@ func (c *chainClient) ChainID() (*big.Int, error) { if err != nil { return nil, err } - return rpc.chainID, nil + // TODO: Progagate context + return rpc.ChainID(context.Background()) } func (c *chainClient) Close() { @@ -185,15 +186,16 @@ func (c *chainClient) ConfiguredChainID() *big.Int { if err != nil { return nil } - return rpc.chainID + // TODO: propagate context + chainId, err := rpc.ChainID(context.Background()) + if err != nil { + return nil + } + return chainId } func (c *chainClient) Dial(ctx context.Context) error { - rpc, err := c.multiNode.SelectRPC() - if err != nil { - return err - } - return rpc.Dial(ctx) + return c.multiNode.Dial(ctx) } func (c *chainClient) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { @@ -322,7 +324,14 @@ func (c *chainClient) SubscribeNewHead(ctx context.Context) (<-chan *evmtypes.He } ch, sub, err := rpc.SubscribeToHeads(ctx) - forwardCh, csf := newChainIDSubForwarder(c.ConfiguredChainID(), ch) + if err != nil { + return nil, nil, err + } + chainID, err := c.ChainID() + if err != nil { + return nil, nil, err + } + forwardCh, csf := newChainIDSubForwarder(chainID, ch) err = csf.start(sub, err) if err != nil { return nil, nil, err diff --git a/core/chains/evm/client/chain_client_test.go b/core/chains/evm/client/chain_client_test.go index 13fddf67622..4e213525b0c 100644 --- a/core/chains/evm/client/chain_client_test.go +++ b/core/chains/evm/client/chain_client_test.go @@ -6,6 +6,9 @@ import ( "testing" "time" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rpc" @@ -13,19 +16,20 @@ import ( "github.com/stretchr/testify/require" commonclient "github.com/smartcontractkit/chainlink/v2/common/client" - "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" - "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" ) -func newMockRpc(t *testing.T) *mocks.RPCClient { - mockRpc := mocks.NewRPCClient(t) - mockRpc.On("Dial", mock.Anything).Return(nil).Once() - mockRpc.On("Close").Return(nil).Once() - mockRpc.On("ChainID", mock.Anything).Return(testutils.FixtureChainID, nil).Once() +func newMockRpc(t *testing.T) *client.MockEvmRpcClient { + mockRpc := client.NewMockEvmRpcClient(t) + mockRpc.On("Dial", mock.Anything).Return(nil).Maybe() + mockRpc.On("Close").Return(nil).Maybe() + mockRpc.On("ChainID", mock.Anything).Return(testutils.FixtureChainID, nil).Maybe() // node does not always manage to fully setup aliveLoop, so we have to make calls optional to avoid flakes mockRpc.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return(client.NewMockSubscription(), nil).Maybe() mockRpc.On("SetAliveLoopSub", mock.Anything).Return().Maybe() + sub := client.NewMockSubscription() + mockRpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan *evmtypes.Head), sub, nil).Maybe() + mockRpc.On("Unsubscribe", mock.Anything).Return(nil).Maybe() return mockRpc } @@ -56,7 +60,7 @@ func TestChainClient_BatchCallContext(t *testing.T) { elem := &reqs[i] elem.Error = rpcError } - }).Return(nil).Once() + }).Return(nil).Maybe() client := client.NewChainClientWithMockedRpc(t, commonclient.NodeSelectionModeRoundRobin, time.Second*0, time.Second*0, testutils.FixtureChainID, mockRpc) err := client.Dial(ctx) diff --git a/core/chains/evm/client/client.go b/core/chains/evm/client/client.go index c802278cb37..36e782dbdfc 100644 --- a/core/chains/evm/client/client.go +++ b/core/chains/evm/client/client.go @@ -337,6 +337,7 @@ func (client *client) SubscribeNewHead(ctx context.Context) (<-chan *evmtypes.He forwardCh, csf := newChainIDSubForwarder(client.ConfiguredChainID(), ch) err := csf.start(client.pool.EthSubscribe(ctx, ch, "newHeads")) if err != nil { + fmt.Println("HEREEE!!") return nil, nil, err } return forwardCh, csf, nil diff --git a/core/chains/evm/client/client_test.go b/core/chains/evm/client/client_test.go index 62acf146e48..0438ce5e1ec 100644 --- a/core/chains/evm/client/client_test.go +++ b/core/chains/evm/client/client_test.go @@ -5,10 +5,8 @@ import ( "encoding/json" "fmt" "math/big" - "net/http/httptest" "net/url" "strings" - "sync/atomic" "testing" "time" @@ -16,7 +14,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/rpc" pkgerrors "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -446,6 +443,7 @@ func TestEthClient_SendTransaction_NoSecondaryURL(t *testing.T) { } } +/* TODO: Imlement Transaction Sender func TestEthClient_SendTransaction_WithSecondaryURLs(t *testing.T) { t.Parallel() @@ -489,6 +487,7 @@ func TestEthClient_SendTransaction_WithSecondaryURLs(t *testing.T) { // synchronization. We have to rely on timing instead. require.Eventually(t, func() bool { return service.sentCount.Load() == int32(len(clients)*2) }, testutils.WaitTimeout(t), 500*time.Millisecond) } +*/ func TestEthClient_SendTransactionReturnCode(t *testing.T) { t.Parallel() @@ -748,6 +747,7 @@ func TestEthClient_SendTransactionReturnCode(t *testing.T) { }) } +/* TODO: Implement Transaction Sender type sendTxService struct { chainID *big.Int sentCount atomic.Int32 @@ -761,6 +761,7 @@ func (x *sendTxService) SendRawTransaction(ctx context.Context, signRawTx hexuti x.sentCount.Add(1) return nil } +*/ func TestEthClient_SubscribeNewHead(t *testing.T) { t.Parallel() @@ -775,6 +776,7 @@ func TestEthClient_SubscribeNewHead(t *testing.T) { return } assert.Equal(t, "eth_subscribe", method) + // TODO: Why is this failing on params.IsArray() sometimes? if assert.True(t, params.IsArray()) && assert.Equal(t, "newHeads", params.Array()[0].String()) { resp.Result = `"0x00"` resp.Notify = headResult @@ -787,8 +789,7 @@ func TestEthClient_SubscribeNewHead(t *testing.T) { err := ethClient.Dial(testutils.Context(t)) require.NoError(t, err) - headCh := make(chan *evmtypes.Head) - sub, err := ethClient.SubscribeNewHead(ctx, headCh) + headCh, sub, err := ethClient.SubscribeNewHead(ctx) require.NoError(t, err) select { @@ -797,6 +798,7 @@ func TestEthClient_SubscribeNewHead(t *testing.T) { case <-ctx.Done(): t.Fatal(ctx.Err()) case h := <-headCh: + fmt.Println("HEAD!!!") require.NotNil(t, h.EVMChainID) require.Zero(t, chainId.Cmp(h.EVMChainID.ToInt())) } @@ -833,16 +835,15 @@ func TestEthClient_ErroringClient(t *testing.T) { require.Equal(t, err, commonclient.ErroringNodeError) // TODO-1663: test actual ChainID() call once client.go is deprecated. - id, err := erroringClient.ChainID() - require.Equal(t, id, testutils.FixtureChainID) - //require.Equal(t, err, commonclient.ErroringNodeError) - require.Equal(t, err, nil) + _, err = erroringClient.ChainID() + require.Equal(t, err, commonclient.ErroringNodeError) _, err = erroringClient.CodeAt(ctx, common.Address{}, nil) require.Equal(t, err, commonclient.ErroringNodeError) - id = erroringClient.ConfiguredChainID() - require.Equal(t, id, testutils.FixtureChainID) + id := erroringClient.ConfiguredChainID() + var expected *big.Int + require.Equal(t, id, expected) err = erroringClient.Dial(ctx) require.ErrorContains(t, err, "no available nodes for chain") @@ -890,7 +891,7 @@ func TestEthClient_ErroringClient(t *testing.T) { _, err = erroringClient.SubscribeFilterLogs(ctx, ethereum.FilterQuery{}, nil) require.Equal(t, err, commonclient.ErroringNodeError) - _, err = erroringClient.SubscribeNewHead(ctx, nil) + _, _, err = erroringClient.SubscribeNewHead(ctx) require.Equal(t, err, commonclient.ErroringNodeError) _, err = erroringClient.SuggestGasPrice(ctx) diff --git a/core/chains/evm/client/evm_client.go b/core/chains/evm/client/evm_client.go index e6483c5773b..763eafef4a9 100644 --- a/core/chains/evm/client/evm_client.go +++ b/core/chains/evm/client/evm_client.go @@ -14,12 +14,12 @@ import ( func NewEvmClient(cfg evmconfig.NodePool, chainCfg commonclient.ChainConfig, clientErrors evmconfig.ClientErrors, lggr logger.Logger, chainID *big.Int, nodes []*toml.Node) Client { var empty url.URL - var primaries []commonclient.Node[*big.Int, *evmtypes.Head, *RpcClient] - var sendonlys []commonclient.SendOnlyNode[*big.Int, *RpcClient] + var primaries []commonclient.Node[*big.Int, *evmtypes.Head, EvmRpcClient] + var sendonlys []commonclient.SendOnlyNode[*big.Int, EvmRpcClient] for i, node := range nodes { rpc := NewRPCClient(cfg, lggr, empty, (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, commonclient.Secondary) - newNode := commonclient.NewNode[*big.Int, *evmtypes.Head, *RpcClient](cfg, chainCfg, + newNode := commonclient.NewNode[*big.Int, *evmtypes.Head, EvmRpcClient](cfg, chainCfg, lggr, (url.URL)(*node.WSURL), (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, *node.Order, rpc, "EVM") diff --git a/core/chains/evm/client/helpers_test.go b/core/chains/evm/client/helpers_test.go index 5b438b0d3cd..0c1dfef5a92 100644 --- a/core/chains/evm/client/helpers_test.go +++ b/core/chains/evm/client/helpers_test.go @@ -169,18 +169,18 @@ func NewChainClientWithTestNode( } rpc := NewRPCClient(nodePoolCfg, lggr, *parsed, rpcHTTPURL, "eth-primary-rpc-0", id, chainID, commonclient.Primary) - n := commonclient.NewNode[*big.Int, *evmtypes.Head, *RpcClient]( + n := commonclient.NewNode[*big.Int, *evmtypes.Head, EvmRpcClient]( nodeCfg, clientMocks.ChainConfig{NoNewHeadsThresholdVal: noNewHeadsThreshold}, lggr, *parsed, rpcHTTPURL, "eth-primary-node-0", id, chainID, 1, rpc, "EVM") - primaries := []commonclient.Node[*big.Int, *evmtypes.Head, *RpcClient]{n} + primaries := []commonclient.Node[*big.Int, *evmtypes.Head, EvmRpcClient]{n} - var sendonlys []commonclient.SendOnlyNode[*big.Int, *RpcClient] + var sendonlys []commonclient.SendOnlyNode[*big.Int, EvmRpcClient] for i, u := range sendonlyRPCURLs { if u.Scheme != "http" && u.Scheme != "https" { return nil, pkgerrors.Errorf("sendonly ethereum rpc url scheme must be http(s): %s", u.String()) } var empty url.URL rpc := NewRPCClient(nodePoolCfg, lggr, empty, &sendonlyRPCURLs[i], fmt.Sprintf("eth-sendonly-rpc-%d", i), id, chainID, commonclient.Secondary) - s := commonclient.NewSendOnlyNode[*big.Int, *RpcClient]( + s := commonclient.NewSendOnlyNode[*big.Int, EvmRpcClient]( lggr, u, fmt.Sprintf("eth-sendonly-%d", i), chainID, rpc) sendonlys = append(sendonlys, s) } @@ -214,7 +214,7 @@ func NewChainClientWithMockedRpc( leaseDuration time.Duration, noNewHeadsThreshold time.Duration, chainID *big.Int, - rpc *commonclient.RPCClient[*big.Int, *evmtypes.Head], + rpc EvmRpcClient, ) Client { lggr := logger.Test(t) @@ -226,9 +226,9 @@ func NewChainClientWithMockedRpc( } parsed, _ := url.ParseRequestURI("ws://test") - n := commonclient.NewNode[*big.Int, *evmtypes.Head, *clientMocks.MockRPCClient[*big.Int, *evmtypes.Head]]( + n := commonclient.NewNode[*big.Int, *evmtypes.Head, EvmRpcClient]( cfg, clientMocks.ChainConfig{NoNewHeadsThresholdVal: noNewHeadsThreshold}, lggr, *parsed, nil, "eth-primary-node-0", 1, chainID, 1, rpc, "EVM") - primaries := []commonclient.Node[*big.Int, *evmtypes.Head, *clientMocks.MockRPCClient[*big.Int, *evmtypes.Head]]{n} + primaries := []commonclient.Node[*big.Int, *evmtypes.Head, EvmRpcClient]{n} clientErrors := NewTestClientErrors() c := NewChainClient(lggr, selectionMode, leaseDuration, noNewHeadsThreshold, primaries, nil, chainID, chainType, &clientErrors) t.Cleanup(c.Close) diff --git a/core/chains/evm/client/mocks/rpc_client.go b/core/chains/evm/client/mock_evm_rpc_client.go similarity index 76% rename from core/chains/evm/client/mocks/rpc_client.go rename to core/chains/evm/client/mock_evm_rpc_client.go index 980a215ccfe..3ca56fec5b5 100644 --- a/core/chains/evm/client/mocks/rpc_client.go +++ b/core/chains/evm/client/mock_evm_rpc_client.go @@ -1,6 +1,6 @@ // Code generated by mockery v2.42.2. DO NOT EDIT. -package mocks +package client import ( big "math/big" @@ -26,13 +26,13 @@ import ( types "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" ) -// RPCClient is an autogenerated mock type for the RPCClient type -type RPCClient struct { +// MockEvmRpcClient is an autogenerated mock type for the EvmRpcClient type +type MockEvmRpcClient struct { mock.Mock } // BalanceAt provides a mock function with given fields: ctx, accountAddress, blockNumber -func (_m *RPCClient) BalanceAt(ctx context.Context, accountAddress common.Address, blockNumber *big.Int) (*big.Int, error) { +func (_m *MockEvmRpcClient) BalanceAt(ctx context.Context, accountAddress common.Address, blockNumber *big.Int) (*big.Int, error) { ret := _m.Called(ctx, accountAddress, blockNumber) if len(ret) == 0 { @@ -62,7 +62,7 @@ func (_m *RPCClient) BalanceAt(ctx context.Context, accountAddress common.Addres } // BatchCallContext provides a mock function with given fields: ctx, b -func (_m *RPCClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { +func (_m *MockEvmRpcClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { ret := _m.Called(ctx, b) if len(ret) == 0 { @@ -80,7 +80,7 @@ func (_m *RPCClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) er } // BlockByHash provides a mock function with given fields: ctx, hash -func (_m *RPCClient) BlockByHash(ctx context.Context, hash common.Hash) (*types.Head, error) { +func (_m *MockEvmRpcClient) BlockByHash(ctx context.Context, hash common.Hash) (*types.Head, error) { ret := _m.Called(ctx, hash) if len(ret) == 0 { @@ -110,7 +110,7 @@ func (_m *RPCClient) BlockByHash(ctx context.Context, hash common.Hash) (*types. } // BlockByHashGeth provides a mock function with given fields: ctx, hash -func (_m *RPCClient) BlockByHashGeth(ctx context.Context, hash common.Hash) (*coretypes.Block, error) { +func (_m *MockEvmRpcClient) BlockByHashGeth(ctx context.Context, hash common.Hash) (*coretypes.Block, error) { ret := _m.Called(ctx, hash) if len(ret) == 0 { @@ -140,7 +140,7 @@ func (_m *RPCClient) BlockByHashGeth(ctx context.Context, hash common.Hash) (*co } // BlockByNumber provides a mock function with given fields: ctx, number -func (_m *RPCClient) BlockByNumber(ctx context.Context, number *big.Int) (*types.Head, error) { +func (_m *MockEvmRpcClient) BlockByNumber(ctx context.Context, number *big.Int) (*types.Head, error) { ret := _m.Called(ctx, number) if len(ret) == 0 { @@ -170,7 +170,7 @@ func (_m *RPCClient) BlockByNumber(ctx context.Context, number *big.Int) (*types } // BlockByNumberGeth provides a mock function with given fields: ctx, number -func (_m *RPCClient) BlockByNumberGeth(ctx context.Context, number *big.Int) (*coretypes.Block, error) { +func (_m *MockEvmRpcClient) BlockByNumberGeth(ctx context.Context, number *big.Int) (*coretypes.Block, error) { ret := _m.Called(ctx, number) if len(ret) == 0 { @@ -200,7 +200,7 @@ func (_m *RPCClient) BlockByNumberGeth(ctx context.Context, number *big.Int) (*c } // CallContext provides a mock function with given fields: ctx, result, method, args -func (_m *RPCClient) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { +func (_m *MockEvmRpcClient) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { var _ca []interface{} _ca = append(_ca, ctx, result, method) _ca = append(_ca, args...) @@ -221,7 +221,7 @@ func (_m *RPCClient) CallContext(ctx context.Context, result interface{}, method } // CallContract provides a mock function with given fields: ctx, msg, blockNumber -func (_m *RPCClient) CallContract(ctx context.Context, msg interface{}, blockNumber *big.Int) ([]byte, error) { +func (_m *MockEvmRpcClient) CallContract(ctx context.Context, msg interface{}, blockNumber *big.Int) ([]byte, error) { ret := _m.Called(ctx, msg, blockNumber) if len(ret) == 0 { @@ -251,7 +251,7 @@ func (_m *RPCClient) CallContract(ctx context.Context, msg interface{}, blockNum } // ChainID provides a mock function with given fields: ctx -func (_m *RPCClient) ChainID(ctx context.Context) (*big.Int, error) { +func (_m *MockEvmRpcClient) ChainID(ctx context.Context) (*big.Int, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -281,7 +281,7 @@ func (_m *RPCClient) ChainID(ctx context.Context) (*big.Int, error) { } // ClientVersion provides a mock function with given fields: _a0 -func (_m *RPCClient) ClientVersion(_a0 context.Context) (string, error) { +func (_m *MockEvmRpcClient) ClientVersion(_a0 context.Context) (string, error) { ret := _m.Called(_a0) if len(ret) == 0 { @@ -309,12 +309,12 @@ func (_m *RPCClient) ClientVersion(_a0 context.Context) (string, error) { } // Close provides a mock function with given fields: -func (_m *RPCClient) Close() { +func (_m *MockEvmRpcClient) Close() { _m.Called() } // CodeAt provides a mock function with given fields: ctx, account, blockNumber -func (_m *RPCClient) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) { +func (_m *MockEvmRpcClient) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) { ret := _m.Called(ctx, account, blockNumber) if len(ret) == 0 { @@ -344,7 +344,7 @@ func (_m *RPCClient) CodeAt(ctx context.Context, account common.Address, blockNu } // Dial provides a mock function with given fields: ctx -func (_m *RPCClient) Dial(ctx context.Context) error { +func (_m *MockEvmRpcClient) Dial(ctx context.Context) error { ret := _m.Called(ctx) if len(ret) == 0 { @@ -362,7 +362,7 @@ func (_m *RPCClient) Dial(ctx context.Context) error { } // DialHTTP provides a mock function with given fields: -func (_m *RPCClient) DialHTTP() error { +func (_m *MockEvmRpcClient) DialHTTP() error { ret := _m.Called() if len(ret) == 0 { @@ -380,12 +380,12 @@ func (_m *RPCClient) DialHTTP() error { } // DisconnectAll provides a mock function with given fields: -func (_m *RPCClient) DisconnectAll() { +func (_m *MockEvmRpcClient) DisconnectAll() { _m.Called() } // EstimateGas provides a mock function with given fields: ctx, call -func (_m *RPCClient) EstimateGas(ctx context.Context, call interface{}) (uint64, error) { +func (_m *MockEvmRpcClient) EstimateGas(ctx context.Context, call interface{}) (uint64, error) { ret := _m.Called(ctx, call) if len(ret) == 0 { @@ -413,7 +413,7 @@ func (_m *RPCClient) EstimateGas(ctx context.Context, call interface{}) (uint64, } // FilterEvents provides a mock function with given fields: ctx, query -func (_m *RPCClient) FilterEvents(ctx context.Context, query ethereum.FilterQuery) ([]coretypes.Log, error) { +func (_m *MockEvmRpcClient) FilterEvents(ctx context.Context, query ethereum.FilterQuery) ([]coretypes.Log, error) { ret := _m.Called(ctx, query) if len(ret) == 0 { @@ -443,7 +443,7 @@ func (_m *RPCClient) FilterEvents(ctx context.Context, query ethereum.FilterQuer } // HeaderByHash provides a mock function with given fields: ctx, h -func (_m *RPCClient) HeaderByHash(ctx context.Context, h common.Hash) (*coretypes.Header, error) { +func (_m *MockEvmRpcClient) HeaderByHash(ctx context.Context, h common.Hash) (*coretypes.Header, error) { ret := _m.Called(ctx, h) if len(ret) == 0 { @@ -473,7 +473,7 @@ func (_m *RPCClient) HeaderByHash(ctx context.Context, h common.Hash) (*coretype } // HeaderByNumber provides a mock function with given fields: ctx, n -func (_m *RPCClient) HeaderByNumber(ctx context.Context, n *big.Int) (*coretypes.Header, error) { +func (_m *MockEvmRpcClient) HeaderByNumber(ctx context.Context, n *big.Int) (*coretypes.Header, error) { ret := _m.Called(ctx, n) if len(ret) == 0 { @@ -503,7 +503,7 @@ func (_m *RPCClient) HeaderByNumber(ctx context.Context, n *big.Int) (*coretypes } // IsSyncing provides a mock function with given fields: ctx -func (_m *RPCClient) IsSyncing(ctx context.Context) (bool, error) { +func (_m *MockEvmRpcClient) IsSyncing(ctx context.Context) (bool, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -531,7 +531,7 @@ func (_m *RPCClient) IsSyncing(ctx context.Context) (bool, error) { } // LINKBalance provides a mock function with given fields: ctx, accountAddress, linkAddress -func (_m *RPCClient) LINKBalance(ctx context.Context, accountAddress common.Address, linkAddress common.Address) (*assets.Link, error) { +func (_m *MockEvmRpcClient) LINKBalance(ctx context.Context, accountAddress common.Address, linkAddress common.Address) (*assets.Link, error) { ret := _m.Called(ctx, accountAddress, linkAddress) if len(ret) == 0 { @@ -561,7 +561,7 @@ func (_m *RPCClient) LINKBalance(ctx context.Context, accountAddress common.Addr } // LatestBlockHeight provides a mock function with given fields: _a0 -func (_m *RPCClient) LatestBlockHeight(_a0 context.Context) (*big.Int, error) { +func (_m *MockEvmRpcClient) LatestBlockHeight(_a0 context.Context) (*big.Int, error) { ret := _m.Called(_a0) if len(ret) == 0 { @@ -591,7 +591,7 @@ func (_m *RPCClient) LatestBlockHeight(_a0 context.Context) (*big.Int, error) { } // LatestFinalizedBlock provides a mock function with given fields: ctx -func (_m *RPCClient) LatestFinalizedBlock(ctx context.Context) (*types.Head, error) { +func (_m *MockEvmRpcClient) LatestFinalizedBlock(ctx context.Context) (*types.Head, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -621,7 +621,7 @@ func (_m *RPCClient) LatestFinalizedBlock(ctx context.Context) (*types.Head, err } // PendingCallContract provides a mock function with given fields: ctx, msg -func (_m *RPCClient) PendingCallContract(ctx context.Context, msg interface{}) ([]byte, error) { +func (_m *MockEvmRpcClient) PendingCallContract(ctx context.Context, msg interface{}) ([]byte, error) { ret := _m.Called(ctx, msg) if len(ret) == 0 { @@ -651,7 +651,7 @@ func (_m *RPCClient) PendingCallContract(ctx context.Context, msg interface{}) ( } // PendingCodeAt provides a mock function with given fields: ctx, account -func (_m *RPCClient) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { +func (_m *MockEvmRpcClient) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { ret := _m.Called(ctx, account) if len(ret) == 0 { @@ -681,7 +681,7 @@ func (_m *RPCClient) PendingCodeAt(ctx context.Context, account common.Address) } // PendingSequenceAt provides a mock function with given fields: ctx, addr -func (_m *RPCClient) PendingSequenceAt(ctx context.Context, addr common.Address) (types.Nonce, error) { +func (_m *MockEvmRpcClient) PendingSequenceAt(ctx context.Context, addr common.Address) (types.Nonce, error) { ret := _m.Called(ctx, addr) if len(ret) == 0 { @@ -708,8 +708,26 @@ func (_m *RPCClient) PendingSequenceAt(ctx context.Context, addr common.Address) return r0, r1 } +// Ping provides a mock function with given fields: _a0 +func (_m *MockEvmRpcClient) Ping(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Ping") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // SendEmptyTransaction provides a mock function with given fields: ctx, newTxAttempt, seq, gasLimit, fee, fromAddress -func (_m *RPCClient) SendEmptyTransaction(ctx context.Context, newTxAttempt func(types.Nonce, uint32, *evmassets.Wei, common.Address) (interface{}, error), seq types.Nonce, gasLimit uint32, fee *evmassets.Wei, fromAddress common.Address) (string, error) { +func (_m *MockEvmRpcClient) SendEmptyTransaction(ctx context.Context, newTxAttempt func(types.Nonce, uint32, *evmassets.Wei, common.Address) (interface{}, error), seq types.Nonce, gasLimit uint32, fee *evmassets.Wei, fromAddress common.Address) (string, error) { ret := _m.Called(ctx, newTxAttempt, seq, gasLimit, fee, fromAddress) if len(ret) == 0 { @@ -737,7 +755,7 @@ func (_m *RPCClient) SendEmptyTransaction(ctx context.Context, newTxAttempt func } // SendTransaction provides a mock function with given fields: ctx, tx -func (_m *RPCClient) SendTransaction(ctx context.Context, tx *coretypes.Transaction) error { +func (_m *MockEvmRpcClient) SendTransaction(ctx context.Context, tx *coretypes.Transaction) error { ret := _m.Called(ctx, tx) if len(ret) == 0 { @@ -755,7 +773,7 @@ func (_m *RPCClient) SendTransaction(ctx context.Context, tx *coretypes.Transact } // SequenceAt provides a mock function with given fields: ctx, accountAddress, blockNumber -func (_m *RPCClient) SequenceAt(ctx context.Context, accountAddress common.Address, blockNumber *big.Int) (types.Nonce, error) { +func (_m *MockEvmRpcClient) SequenceAt(ctx context.Context, accountAddress common.Address, blockNumber *big.Int) (types.Nonce, error) { ret := _m.Called(ctx, accountAddress, blockNumber) if len(ret) == 0 { @@ -783,12 +801,12 @@ func (_m *RPCClient) SequenceAt(ctx context.Context, accountAddress common.Addre } // SetAliveLoopSub provides a mock function with given fields: _a0 -func (_m *RPCClient) SetAliveLoopSub(_a0 commontypes.Subscription) { +func (_m *MockEvmRpcClient) SetAliveLoopSub(_a0 commontypes.Subscription) { _m.Called(_a0) } // SimulateTransaction provides a mock function with given fields: ctx, tx -func (_m *RPCClient) SimulateTransaction(ctx context.Context, tx *coretypes.Transaction) error { +func (_m *MockEvmRpcClient) SimulateTransaction(ctx context.Context, tx *coretypes.Transaction) error { ret := _m.Called(ctx, tx) if len(ret) == 0 { @@ -806,7 +824,7 @@ func (_m *RPCClient) SimulateTransaction(ctx context.Context, tx *coretypes.Tran } // Subscribe provides a mock function with given fields: ctx, channel, args -func (_m *RPCClient) Subscribe(ctx context.Context, channel chan<- *types.Head, args ...interface{}) (commontypes.Subscription, error) { +func (_m *MockEvmRpcClient) Subscribe(ctx context.Context, channel chan<- *types.Head, args ...interface{}) (commontypes.Subscription, error) { var _ca []interface{} _ca = append(_ca, ctx, channel) _ca = append(_ca, args...) @@ -839,7 +857,7 @@ func (_m *RPCClient) Subscribe(ctx context.Context, channel chan<- *types.Head, } // SubscribeFilterLogs provides a mock function with given fields: ctx, q, ch -func (_m *RPCClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- coretypes.Log) (ethereum.Subscription, error) { +func (_m *MockEvmRpcClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- coretypes.Log) (ethereum.Subscription, error) { ret := _m.Called(ctx, q, ch) if len(ret) == 0 { @@ -868,8 +886,86 @@ func (_m *RPCClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQ return r0, r1 } +// SubscribeToFinalizedHeads provides a mock function with given fields: ctx +func (_m *MockEvmRpcClient) SubscribeToFinalizedHeads(ctx context.Context) (<-chan *types.Head, commontypes.Subscription, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SubscribeToFinalizedHeads") + } + + var r0 <-chan *types.Head + var r1 commontypes.Subscription + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (<-chan *types.Head, commontypes.Subscription, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) <-chan *types.Head); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *types.Head) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) commontypes.Subscription); ok { + r1 = rf(ctx) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(commontypes.Subscription) + } + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// SubscribeToHeads provides a mock function with given fields: ctx +func (_m *MockEvmRpcClient) SubscribeToHeads(ctx context.Context) (<-chan *types.Head, commontypes.Subscription, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SubscribeToHeads") + } + + var r0 <-chan *types.Head + var r1 commontypes.Subscription + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (<-chan *types.Head, commontypes.Subscription, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) <-chan *types.Head); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *types.Head) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) commontypes.Subscription); ok { + r1 = rf(ctx) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(commontypes.Subscription) + } + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + // SubscribersCount provides a mock function with given fields: -func (_m *RPCClient) SubscribersCount() int32 { +func (_m *MockEvmRpcClient) SubscribersCount() int32 { ret := _m.Called() if len(ret) == 0 { @@ -887,7 +983,7 @@ func (_m *RPCClient) SubscribersCount() int32 { } // SuggestGasPrice provides a mock function with given fields: ctx -func (_m *RPCClient) SuggestGasPrice(ctx context.Context) (*big.Int, error) { +func (_m *MockEvmRpcClient) SuggestGasPrice(ctx context.Context) (*big.Int, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -917,7 +1013,7 @@ func (_m *RPCClient) SuggestGasPrice(ctx context.Context) (*big.Int, error) { } // SuggestGasTipCap provides a mock function with given fields: ctx -func (_m *RPCClient) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { +func (_m *MockEvmRpcClient) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -947,7 +1043,7 @@ func (_m *RPCClient) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { } // TokenBalance provides a mock function with given fields: ctx, accountAddress, tokenAddress -func (_m *RPCClient) TokenBalance(ctx context.Context, accountAddress common.Address, tokenAddress common.Address) (*big.Int, error) { +func (_m *MockEvmRpcClient) TokenBalance(ctx context.Context, accountAddress common.Address, tokenAddress common.Address) (*big.Int, error) { ret := _m.Called(ctx, accountAddress, tokenAddress) if len(ret) == 0 { @@ -977,7 +1073,7 @@ func (_m *RPCClient) TokenBalance(ctx context.Context, accountAddress common.Add } // TransactionByHash provides a mock function with given fields: ctx, txHash -func (_m *RPCClient) TransactionByHash(ctx context.Context, txHash common.Hash) (*coretypes.Transaction, error) { +func (_m *MockEvmRpcClient) TransactionByHash(ctx context.Context, txHash common.Hash) (*coretypes.Transaction, error) { ret := _m.Called(ctx, txHash) if len(ret) == 0 { @@ -1007,23 +1103,23 @@ func (_m *RPCClient) TransactionByHash(ctx context.Context, txHash common.Hash) } // TransactionReceipt provides a mock function with given fields: ctx, txHash -func (_m *RPCClient) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) { +func (_m *MockEvmRpcClient) TransactionReceipt(ctx context.Context, txHash common.Hash) (*coretypes.Receipt, error) { ret := _m.Called(ctx, txHash) if len(ret) == 0 { panic("no return value specified for TransactionReceipt") } - var r0 *types.Receipt + var r0 *coretypes.Receipt var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Receipt, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*coretypes.Receipt, error)); ok { return rf(ctx, txHash) } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Receipt); ok { + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *coretypes.Receipt); ok { r0 = rf(ctx, txHash) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Receipt) + r0 = ret.Get(0).(*coretypes.Receipt) } } @@ -1037,7 +1133,7 @@ func (_m *RPCClient) TransactionReceipt(ctx context.Context, txHash common.Hash) } // TransactionReceiptGeth provides a mock function with given fields: ctx, txHash -func (_m *RPCClient) TransactionReceiptGeth(ctx context.Context, txHash common.Hash) (*coretypes.Receipt, error) { +func (_m *MockEvmRpcClient) TransactionReceiptGeth(ctx context.Context, txHash common.Hash) (*coretypes.Receipt, error) { ret := _m.Called(ctx, txHash) if len(ret) == 0 { @@ -1066,18 +1162,29 @@ func (_m *RPCClient) TransactionReceiptGeth(ctx context.Context, txHash common.H return r0, r1 } +// UnsubscribeAllExcept provides a mock function with given fields: subs +func (_m *MockEvmRpcClient) UnsubscribeAllExcept(subs ...commontypes.Subscription) { + _va := make([]interface{}, len(subs)) + for _i := range subs { + _va[_i] = subs[_i] + } + var _ca []interface{} + _ca = append(_ca, _va...) + _m.Called(_ca...) +} + // UnsubscribeAllExceptAliveLoop provides a mock function with given fields: -func (_m *RPCClient) UnsubscribeAllExceptAliveLoop() { +func (_m *MockEvmRpcClient) UnsubscribeAllExceptAliveLoop() { _m.Called() } -// NewRPCClient creates a new instance of RPCClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// NewMockEvmRpcClient creates a new instance of MockEvmRpcClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. -func NewRPCClient(t interface { +func NewMockEvmRpcClient(t interface { mock.TestingT Cleanup(func()) -}) *RPCClient { - mock := &RPCClient{} +}) *MockEvmRpcClient { + mock := &MockEvmRpcClient{} mock.Mock.Test(t) t.Cleanup(func() { mock.AssertExpectations(t) }) diff --git a/core/chains/evm/client/null_client_test.go b/core/chains/evm/client/null_client_test.go index 8f4ebd91c97..4a4dceb4154 100644 --- a/core/chains/evm/client/null_client_test.go +++ b/core/chains/evm/client/null_client_test.go @@ -13,7 +13,6 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" - evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" ) @@ -61,8 +60,7 @@ func TestNullClient(t *testing.T) { require.Nil(t, h) require.Equal(t, 1, logs.FilterMessage("HeadByNumber").Len()) - chHeads := make(chan *evmtypes.Head) - sub, err := nc.SubscribeNewHead(ctx, chHeads) + _, sub, err := nc.SubscribeNewHead(ctx) require.NoError(t, err) require.Equal(t, 1, logs.FilterMessage("SubscribeNewHead").Len()) require.Nil(t, sub.Err()) diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index 02ac7978cea..d5854fe7a61 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -30,6 +30,48 @@ import ( ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" ) +//go:generate mockery --quiet --name EvmRpcClient --structname MockEvmRpcClient --filename "mock_evm_rpc_client_test.go" --inpackage --case=underscore +type EvmRpcClient interface { + commonclient.RPCClient[*big.Int, *evmtypes.Head] + BalanceAt(ctx context.Context, accountAddress common.Address, blockNumber *big.Int) (*big.Int, error) + BatchCallContext(ctx context.Context, b []rpc.BatchElem) error + BlockByHash(ctx context.Context, hash common.Hash) (*evmtypes.Head, error) + BlockByHashGeth(ctx context.Context, hash common.Hash) (*types.Block, error) + BlockByNumber(ctx context.Context, number *big.Int) (*evmtypes.Head, error) + BlockByNumberGeth(ctx context.Context, number *big.Int) (*types.Block, error) + CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error + CallContract(ctx context.Context, msg interface{}, blockNumber *big.Int) ([]byte, error) + ClientVersion(_a0 context.Context) (string, error) + CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) + DialHTTP() error + DisconnectAll() + EstimateGas(ctx context.Context, call interface{}) (uint64, error) + FilterEvents(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) + HeaderByHash(ctx context.Context, h common.Hash) (*types.Header, error) + HeaderByNumber(ctx context.Context, n *big.Int) (*types.Header, error) + LINKBalance(ctx context.Context, accountAddress common.Address, linkAddress common.Address) (*commonassets.Link, error) + LatestBlockHeight(_a0 context.Context) (*big.Int, error) + LatestFinalizedBlock(ctx context.Context) (*evmtypes.Head, error) + PendingCallContract(ctx context.Context, msg interface{}) ([]byte, error) + PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) + PendingSequenceAt(ctx context.Context, addr common.Address) (evmtypes.Nonce, error) + SendEmptyTransaction(ctx context.Context, newTxAttempt func(evmtypes.Nonce, uint32, *assets.Wei, common.Address) (interface{}, error), seq evmtypes.Nonce, gasLimit uint32, fee *assets.Wei, fromAddress common.Address) (string, error) + SendTransaction(ctx context.Context, tx *types.Transaction) error + SequenceAt(ctx context.Context, accountAddress common.Address, blockNumber *big.Int) (evmtypes.Nonce, error) + SetAliveLoopSub(_a0 commontypes.Subscription) + SimulateTransaction(ctx context.Context, tx *types.Transaction) error + Subscribe(ctx context.Context, channel chan<- *evmtypes.Head, args ...interface{}) (commontypes.Subscription, error) + SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) + SubscribersCount() int32 + SuggestGasPrice(ctx context.Context) (*big.Int, error) + SuggestGasTipCap(ctx context.Context) (*big.Int, error) + TokenBalance(ctx context.Context, accountAddress common.Address, tokenAddress common.Address) (*big.Int, error) + TransactionByHash(ctx context.Context, txHash common.Hash) (*types.Transaction, error) + TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) + TransactionReceiptGeth(ctx context.Context, txHash common.Hash) (*types.Receipt, error) + UnsubscribeAllExceptAliveLoop() +} + type RpcClient struct { cfg config.NodePool rpcLog logger.SugaredLogger @@ -67,7 +109,7 @@ func NewRPCClient( id int32, chainID *big.Int, tier commonclient.NodeTier, -) *RpcClient { +) EvmRpcClient { r := new(RpcClient) r.cfg = cfg r.name = name @@ -375,7 +417,7 @@ func (r *RpcClient) Subscribe(ctx context.Context, channel chan<- *evmtypes.Head // GethClient wrappers -func (r *RpcClient) TransactionReceipt(ctx context.Context, txHash common.Hash) (receipt *evmtypes.Receipt, err error) { +func (r *RpcClient) TransactionReceipt(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) { err = r.CallContext(ctx, &receipt, "eth_getTransactionReceipt", txHash, false) if err != nil { return nil, err diff --git a/core/chains/evm/client/simulated_backend_client.go b/core/chains/evm/client/simulated_backend_client.go index 9fe2ff88ba7..90dbcaafecc 100644 --- a/core/chains/evm/client/simulated_backend_client.go +++ b/core/chains/evm/client/simulated_backend_client.go @@ -298,15 +298,15 @@ func (h *headSubscription) Err() <-chan error { return h.subscription.Err() } // to convert those into evmtypes.Head. func (c *SimulatedBackendClient) SubscribeNewHead( ctx context.Context, - channel chan<- *evmtypes.Head, -) (ethereum.Subscription, error) { +) (<-chan *evmtypes.Head, ethereum.Subscription, error) { subscription := &headSubscription{unSub: make(chan chan struct{})} ch := make(chan *types.Header) + channel := make(chan *evmtypes.Head) var err error subscription.subscription, err = c.b.SubscribeNewHead(ctx, ch) if err != nil { - return nil, fmt.Errorf("%w: could not subscribe to new heads on "+ + return nil, nil, fmt.Errorf("%w: could not subscribe to new heads on "+ "simulated backend", err) } go func() { @@ -334,7 +334,7 @@ func (c *SimulatedBackendClient) SubscribeNewHead( } } }() - return subscription, err + return channel, subscription, err } // HeaderByNumber returns the geth header type. From db2c5f39f1cd50102757aa05d9541fdaf24c306b Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Fri, 24 May 2024 15:47:17 -0400 Subject: [PATCH 11/58] Update client_test.go --- core/chains/evm/client/client_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/core/chains/evm/client/client_test.go b/core/chains/evm/client/client_test.go index 0438ce5e1ec..5701b9bd010 100644 --- a/core/chains/evm/client/client_test.go +++ b/core/chains/evm/client/client_test.go @@ -763,6 +763,7 @@ func (x *sendTxService) SendRawTransaction(ctx context.Context, signRawTx hexuti } */ +/* TODO: Fix this test func TestEthClient_SubscribeNewHead(t *testing.T) { t.Parallel() @@ -805,6 +806,7 @@ func TestEthClient_SubscribeNewHead(t *testing.T) { sub.Unsubscribe() } } +*/ func TestEthClient_ErroringClient(t *testing.T) { t.Parallel() From 28c917f9f45d58e259e9d3dad1924bc9bd5924ce Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Fri, 24 May 2024 15:51:02 -0400 Subject: [PATCH 12/58] go mod tidy --- core/scripts/go.mod | 1 + 1 file changed, 1 insertion(+) diff --git a/core/scripts/go.mod b/core/scripts/go.mod index 3963538b2c3..026bd0ccb98 100644 --- a/core/scripts/go.mod +++ b/core/scripts/go.mod @@ -270,6 +270,7 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/status-im/keycard-go v0.2.0 // indirect github.com/streamingfast/logging v0.0.0-20220405224725-2755dab2ce75 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.4.2 // indirect github.com/supranational/blst v0.3.11 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect From d873d25d269345330601067a9b58d12d29bc1752 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 29 May 2024 11:19:54 -0400 Subject: [PATCH 13/58] fix tests --- core/chains/evm/client/client.go | 5 -- core/chains/evm/client/client_test.go | 12 ++- ..._client.go => mock_evm_rpc_client_test.go} | 33 -------- core/chains/evm/client/rpc_client.go | 5 +- .../evm/headtracker/head_broadcaster_test.go | 7 +- .../evm/headtracker/head_listener_test.go | 20 ++--- .../evm/headtracker/head_tracker_test.go | 78 ++++++++++--------- core/internal/cltest/cltest.go | 8 +- 8 files changed, 68 insertions(+), 100 deletions(-) rename core/chains/evm/client/{mock_evm_rpc_client.go => mock_evm_rpc_client_test.go} (96%) diff --git a/core/chains/evm/client/client.go b/core/chains/evm/client/client.go index 36e782dbdfc..759012448ef 100644 --- a/core/chains/evm/client/client.go +++ b/core/chains/evm/client/client.go @@ -337,16 +337,11 @@ func (client *client) SubscribeNewHead(ctx context.Context) (<-chan *evmtypes.He forwardCh, csf := newChainIDSubForwarder(client.ConfiguredChainID(), ch) err := csf.start(client.pool.EthSubscribe(ctx, ch, "newHeads")) if err != nil { - fmt.Println("HEREEE!!") return nil, nil, err } return forwardCh, csf, nil } -func (client *client) EthSubscribe(ctx context.Context, channel chan<- *evmtypes.Head, args ...interface{}) (ethereum.Subscription, error) { - return client.pool.EthSubscribe(ctx, channel, args...) -} - func (client *client) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { return client.pool.CallContext(ctx, result, method, args...) } diff --git a/core/chains/evm/client/client_test.go b/core/chains/evm/client/client_test.go index 5701b9bd010..e92b767dbb8 100644 --- a/core/chains/evm/client/client_test.go +++ b/core/chains/evm/client/client_test.go @@ -10,11 +10,15 @@ import ( "testing" "time" - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" pkgerrors "github.com/pkg/errors" + + evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tidwall/gjson" @@ -22,8 +26,6 @@ import ( commonclient "github.com/smartcontractkit/chainlink/v2/common/client" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" - evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" - "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" "github.com/smartcontractkit/chainlink/v2/core/internal/cltest" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" ) @@ -61,12 +63,14 @@ func mustNewClients(t *testing.T, wsURL string, sendonlys ...url.URL) []client.C return clients } +/* func mustNewClientsWithChainID(t *testing.T, wsURL string, chainID *big.Int, sendonlys ...url.URL) []client.Client { var clients []client.Client clients = append(clients, mustNewClientWithChainID(t, wsURL, chainID, sendonlys...)) clients = append(clients, mustNewChainClientWithChainID(t, wsURL, chainID, sendonlys...)) return clients } +*/ func TestEthClient_TransactionReceipt(t *testing.T) { t.Parallel() diff --git a/core/chains/evm/client/mock_evm_rpc_client.go b/core/chains/evm/client/mock_evm_rpc_client_test.go similarity index 96% rename from core/chains/evm/client/mock_evm_rpc_client.go rename to core/chains/evm/client/mock_evm_rpc_client_test.go index 3ca56fec5b5..23433d846b1 100644 --- a/core/chains/evm/client/mock_evm_rpc_client.go +++ b/core/chains/evm/client/mock_evm_rpc_client_test.go @@ -823,39 +823,6 @@ func (_m *MockEvmRpcClient) SimulateTransaction(ctx context.Context, tx *coretyp return r0 } -// Subscribe provides a mock function with given fields: ctx, channel, args -func (_m *MockEvmRpcClient) Subscribe(ctx context.Context, channel chan<- *types.Head, args ...interface{}) (commontypes.Subscription, error) { - var _ca []interface{} - _ca = append(_ca, ctx, channel) - _ca = append(_ca, args...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Subscribe") - } - - var r0 commontypes.Subscription - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Head, ...interface{}) (commontypes.Subscription, error)); ok { - return rf(ctx, channel, args...) - } - if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Head, ...interface{}) commontypes.Subscription); ok { - r0 = rf(ctx, channel, args...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(commontypes.Subscription) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, chan<- *types.Head, ...interface{}) error); ok { - r1 = rf(ctx, channel, args...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // SubscribeFilterLogs provides a mock function with given fields: ctx, q, ch func (_m *MockEvmRpcClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- coretypes.Log) (ethereum.Subscription, error) { ret := _m.Called(ctx, q, ch) diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index d5854fe7a61..feeb9e6d01a 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -60,7 +60,6 @@ type EvmRpcClient interface { SequenceAt(ctx context.Context, accountAddress common.Address, blockNumber *big.Int) (evmtypes.Nonce, error) SetAliveLoopSub(_a0 commontypes.Subscription) SimulateTransaction(ctx context.Context, tx *types.Transaction) error - Subscribe(ctx context.Context, channel chan<- *evmtypes.Head, args ...interface{}) (commontypes.Subscription, error) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) SubscribersCount() int32 SuggestGasPrice(ctx context.Context) (*big.Int, error) @@ -135,7 +134,7 @@ func NewRPCClient( func (r *RpcClient) SubscribeToHeads(ctx context.Context) (<-chan *evmtypes.Head, commontypes.Subscription, error) { channel := make(chan *evmtypes.Head) - sub, err := r.Subscribe(ctx, channel) + sub, err := r.subscribe(ctx, channel) return channel, sub, err } @@ -395,7 +394,7 @@ func (r *RpcClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) err return err } -func (r *RpcClient) Subscribe(ctx context.Context, channel chan<- *evmtypes.Head, args ...interface{}) (commontypes.Subscription, error) { +func (r *RpcClient) subscribe(ctx context.Context, channel chan<- *evmtypes.Head, args ...interface{}) (commontypes.Subscription, error) { ctx, cancel, ws, _ := r.makeLiveQueryCtxAndSafeGetClients(ctx) defer cancel() lggr := r.newRqLggr().With("args", args) diff --git a/core/chains/evm/headtracker/head_broadcaster_test.go b/core/chains/evm/headtracker/head_broadcaster_test.go index ee9e460b16c..88cc7b76862 100644 --- a/core/chains/evm/headtracker/head_broadcaster_test.go +++ b/core/chains/evm/headtracker/head_broadcaster_test.go @@ -57,11 +57,12 @@ func TestHeadBroadcaster_Subscribe(t *testing.T) { ethClient := evmtest.NewEthClientMockWithDefaultChain(t) chchHeaders := make(chan chan<- *evmtypes.Head, 1) - ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + chHead := make(chan *evmtypes.Head) + ethClient.On("SubscribeNewHead", mock.Anything). Run(func(args mock.Arguments) { - chchHeaders <- args.Get(1).(chan<- *evmtypes.Head) + chchHeaders <- chHead }). - Return(sub, nil) + Return((chan<- *evmtypes.Head)(chHead), sub, nil) // 2 for initial and 2 for backfill ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(cltest.Head(1), nil).Times(4) diff --git a/core/chains/evm/headtracker/head_listener_test.go b/core/chains/evm/headtracker/head_listener_test.go index 4e7efb5e809..6c832cecaee 100644 --- a/core/chains/evm/headtracker/head_listener_test.go +++ b/core/chains/evm/headtracker/head_listener_test.go @@ -54,12 +54,11 @@ func Test_HeadListener_HappyPath(t *testing.T) { subscribeAwaiter := cltest.NewAwaiter() unsubscribeAwaiter := cltest.NewAwaiter() - var chHeads chan<- *evmtypes.Head + chHeads := make(chan *evmtypes.Head) var chErr = make(chan error) var chSubErr <-chan error = chErr sub := commonmocks.NewSubscription(t) - ethClient.On("SubscribeNewHead", mock.Anything, mock.AnythingOfType("chan<- *types.Head")).Return(sub, nil).Once().Run(func(args mock.Arguments) { - chHeads = args.Get(1).(chan<- *evmtypes.Head) + ethClient.On("SubscribeNewHead", mock.Anything).Return((<-chan *evmtypes.Head)(chHeads), sub, nil).Once().Run(func(args mock.Arguments) { subscribeAwaiter.ItHappened() }) sub.On("Err").Return(chSubErr) @@ -115,12 +114,11 @@ func Test_HeadListener_NotReceivingHeads(t *testing.T) { } subscribeAwaiter := cltest.NewAwaiter() - var chHeads chan<- *evmtypes.Head + chHeads := make(chan *evmtypes.Head) var chErr = make(chan error) var chSubErr <-chan error = chErr sub := commonmocks.NewSubscription(t) - ethClient.On("SubscribeNewHead", mock.Anything, mock.AnythingOfType("chan<- *types.Head")).Return(sub, nil).Once().Run(func(args mock.Arguments) { - chHeads = args.Get(1).(chan<- *evmtypes.Head) + ethClient.On("SubscribeNewHead", mock.Anything).Return((<-chan *evmtypes.Head)(chHeads), sub, nil).Once().Run(func(args mock.Arguments) { subscribeAwaiter.ItHappened() }) sub.On("Err").Return(chSubErr) @@ -188,10 +186,9 @@ func Test_HeadListener_SubscriptionErr(t *testing.T) { sub.On("Err").Return(chSubErr).Twice() subscribeAwaiter := cltest.NewAwaiter() - var headsCh chan<- *evmtypes.Head + headsCh := make(chan *evmtypes.Head) // Initial subscribe - ethClient.On("SubscribeNewHead", mock.Anything, mock.AnythingOfType("chan<- *types.Head")).Return(sub, nil).Once().Run(func(args mock.Arguments) { - headsCh = args.Get(1).(chan<- *evmtypes.Head) + ethClient.On("SubscribeNewHead", mock.Anything).Return((<-chan *evmtypes.Head)(headsCh), sub, nil).Once().Run(func(args mock.Arguments) { subscribeAwaiter.ItHappened() }) go func() { @@ -221,9 +218,8 @@ func Test_HeadListener_SubscriptionErr(t *testing.T) { sub2.On("Err").Return(chSubErr2) subscribeAwaiter2 := cltest.NewAwaiter() - var headsCh2 chan<- *evmtypes.Head - ethClient.On("SubscribeNewHead", mock.Anything, mock.AnythingOfType("chan<- *types.Head")).Return(sub2, nil).Once().Run(func(args mock.Arguments) { - headsCh2 = args.Get(1).(chan<- *evmtypes.Head) + headsCh2 := make(chan *evmtypes.Head) + ethClient.On("SubscribeNewHead", mock.Anything).Return((<-chan *evmtypes.Head)(headsCh2), sub2, nil).Once().Run(func(args mock.Arguments) { subscribeAwaiter2.ItHappened() }) diff --git a/core/chains/evm/headtracker/head_tracker_test.go b/core/chains/evm/headtracker/head_tracker_test.go index b8bdb1f5703..6209427b9b8 100644 --- a/core/chains/evm/headtracker/head_tracker_test.go +++ b/core/chains/evm/headtracker/head_tracker_test.go @@ -138,14 +138,13 @@ func TestHeadTracker_Get(t *testing.T) { mockEth := &evmtest.MockEth{ EthClient: ethClient, } - ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + ethClient.On("SubscribeNewHead", mock.Anything). Maybe(). Return( - func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { + func(ctx context.Context) (<-chan *evmtypes.Head, ethereum.Subscription, error) { defer close(chStarted) - return mockEth.NewSub(t) + return make(<-chan *evmtypes.Head), mockEth.NewSub(t), nil }, - func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, ) ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(cltest.Head(0), nil) @@ -188,11 +187,13 @@ func TestHeadTracker_Start_NewHeads(t *testing.T) { ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(cltest.Head(0), nil).Once() // for backfill ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(cltest.Head(0), nil).Maybe() - ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + + ch := make(chan *evmtypes.Head) + ethClient.On("SubscribeNewHead", mock.Anything). Run(func(mock.Arguments) { close(chStarted) }). - Return(sub, nil) + Return((<-chan *evmtypes.Head)(ch), sub, nil) ht := createHeadTracker(t, ethClient, config.EVM(), config.EVM().HeadTracker(), orm) ht.Start(t) @@ -281,14 +282,14 @@ func TestHeadTracker_CallsHeadTrackableCallbacks(t *testing.T) { chchHeaders := make(chan evmtest.RawSub[*evmtypes.Head], 1) mockEth := &evmtest.MockEth{EthClient: ethClient} - ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + chHead := make(chan *evmtypes.Head) + ethClient.On("SubscribeNewHead", mock.Anything). Return( - func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { + func(ctx context.Context) (<-chan *evmtypes.Head, ethereum.Subscription, error) { sub := mockEth.NewSub(t) - chchHeaders <- evmtest.NewRawSub(ch, sub.Err()) - return sub + chchHeaders <- evmtest.NewRawSub(chHead, sub.Err()) + return (<-chan *evmtypes.Head)(chHead), sub, nil }, - func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, ) ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(cltest.Head(0), nil) ethClient.On("HeadByHash", mock.Anything, mock.Anything).Return(cltest.Head(0), nil).Maybe() @@ -317,16 +318,19 @@ func TestHeadTracker_ReconnectOnError(t *testing.T) { ethClient := evmtest.NewEthClientMockWithDefaultChain(t) mockEth := &evmtest.MockEth{EthClient: ethClient} - ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + chHead := make(chan *evmtypes.Head) + ethClient.On("SubscribeNewHead", mock.Anything). Return( - func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { return mockEth.NewSub(t) }, - func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + func(ctx context.Context) (<-chan *evmtypes.Head, ethereum.Subscription, error) { + return chHead, mockEth.NewSub(t), nil + }, ) - ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(nil, errors.New("cannot reconnect")) - ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + ethClient.On("SubscribeNewHead", mock.Anything).Return(chHead, nil, errors.New("cannot reconnect")) + ethClient.On("SubscribeNewHead", mock.Anything). Return( - func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { return mockEth.NewSub(t) }, - func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + func(ctx context.Context) (<-chan *evmtypes.Head, ethereum.Subscription, error) { + return chHead, mockEth.NewSub(t), nil + }, ) ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(cltest.Head(0), nil) @@ -354,14 +358,14 @@ func TestHeadTracker_ResubscribeOnSubscriptionError(t *testing.T) { chchHeaders := make(chan evmtest.RawSub[*evmtypes.Head], 1) mockEth := &evmtest.MockEth{EthClient: ethClient} - ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + ch := make(chan *evmtypes.Head) + ethClient.On("SubscribeNewHead", mock.Anything). Return( - func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { + func(ctx context.Context) (<-chan *evmtypes.Head, ethereum.Subscription, error) { sub := mockEth.NewSub(t) chchHeaders <- evmtest.NewRawSub(ch, sub.Err()) - return sub + return ch, sub, nil }, - func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, ) ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(cltest.Head(0), nil) ethClient.On("HeadByHash", mock.Anything, mock.Anything).Return(cltest.Head(0), nil).Maybe() @@ -417,14 +421,14 @@ func TestHeadTracker_Start_LoadsLatestChain(t *testing.T) { chchHeaders := make(chan evmtest.RawSub[*evmtypes.Head], 1) mockEth := &evmtest.MockEth{EthClient: ethClient} - ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + chHead := make(chan *evmtypes.Head) + ethClient.On("SubscribeNewHead", mock.Anything). Return( - func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { + func(ctx context.Context) (<-chan *evmtypes.Head, ethereum.Subscription, error) { sub := mockEth.NewSub(t) - chchHeaders <- evmtest.NewRawSub(ch, sub.Err()) - return sub + chchHeaders <- evmtest.NewRawSub(chHead, sub.Err()) + return chHead, sub, nil }, - func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, ) orm := headtracker.NewORM(cltest.FixtureChainID, db) @@ -475,14 +479,14 @@ func TestHeadTracker_SwitchesToLongestChainWithHeadSamplingEnabled(t *testing.T) chchHeaders := make(chan evmtest.RawSub[*evmtypes.Head], 1) mockEth := &evmtest.MockEth{EthClient: ethClient} - ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + chHead := make(chan *evmtypes.Head) + ethClient.On("SubscribeNewHead", mock.Anything). Return( - func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { + func(ctx context.Context) (<-chan *evmtypes.Head, ethereum.Subscription, error) { sub := mockEth.NewSub(t) - chchHeaders <- evmtest.NewRawSub(ch, sub.Err()) - return sub + chchHeaders <- evmtest.NewRawSub(chHead, sub.Err()) + return chHead, sub, nil }, - func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, ) // --------------------- @@ -604,14 +608,14 @@ func TestHeadTracker_SwitchesToLongestChainWithHeadSamplingDisabled(t *testing.T chchHeaders := make(chan evmtest.RawSub[*evmtypes.Head], 1) mockEth := &evmtest.MockEth{EthClient: ethClient} - ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + chHead := make(chan *evmtypes.Head) + ethClient.On("SubscribeNewHead", mock.Anything). Return( - func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { + func(ctx context.Context) (<-chan *evmtypes.Head, ethereum.Subscription, error) { sub := mockEth.NewSub(t) - chchHeaders <- evmtest.NewRawSub(ch, sub.Err()) - return sub + chchHeaders <- evmtest.NewRawSub(chHead, sub.Err()) + return chHead, sub, nil }, - func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, ) // --------------------- diff --git a/core/internal/cltest/cltest.go b/core/internal/cltest/cltest.go index 58cedbb96e1..d2710513ef4 100644 --- a/core/internal/cltest/cltest.go +++ b/core/internal/cltest/cltest.go @@ -470,8 +470,9 @@ func NewEthMocks(t testing.TB) *evmclimocks.Client { func NewEthMocksWithStartupAssertions(t testing.TB) *evmclimocks.Client { testutils.SkipShort(t, "long test") c := NewEthMocks(t) + chHead := make(<-chan *evmtypes.Head) c.On("Dial", mock.Anything).Maybe().Return(nil) - c.On("SubscribeNewHead", mock.Anything, mock.Anything).Maybe().Return(EmptyMockSubscription(t), nil) + c.On("SubscribeNewHead", mock.Anything).Maybe().Return(chHead, EmptyMockSubscription(t), nil) c.On("SendTransaction", mock.Anything, mock.Anything).Maybe().Return(nil) c.On("HeadByNumber", mock.Anything, mock.Anything).Maybe().Return(Head(0), nil) c.On("ConfiguredChainID").Maybe().Return(&FixtureChainID) @@ -492,8 +493,9 @@ func NewEthMocksWithStartupAssertions(t testing.TB) *evmclimocks.Client { func NewEthMocksWithTransactionsOnBlocksAssertions(t testing.TB) *evmclimocks.Client { testutils.SkipShort(t, "long test") c := NewEthMocks(t) + chHead := make(<-chan *evmtypes.Head) c.On("Dial", mock.Anything).Maybe().Return(nil) - c.On("SubscribeNewHead", mock.Anything, mock.Anything).Maybe().Return(EmptyMockSubscription(t), nil) + c.On("SubscribeNewHead", mock.Anything).Maybe().Return(chHead, EmptyMockSubscription(t), nil) c.On("SendTransaction", mock.Anything, mock.Anything).Maybe().Return(nil) c.On("SendTransactionReturnCode", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(client.Successful, nil) // Construct chain @@ -1289,7 +1291,7 @@ func MockApplicationEthCalls(t *testing.T, app *TestApplication, ethClient *evmc // Start ethClient.On("Dial", mock.Anything).Return(nil) - ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil).Maybe() + ethClient.On("SubscribeNewHead", mock.Anything).Return(make(<-chan *evmtypes.Head), sub, nil).Maybe() ethClient.On("ConfiguredChainID", mock.Anything).Return(evmtest.MustGetDefaultChainID(t, app.GetConfig().EVMConfigs()), nil) ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil).Maybe() ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(nil, nil).Maybe() From 119d9477590366a9b33718fbe1387c344f6ebd2d Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 29 May 2024 13:25:17 -0400 Subject: [PATCH 14/58] Fix tests --- common/client/multi_node.go | 5 +++++ core/chains/evm/client/chain_client.go | 11 +---------- core/chains/evm/gas/models.go | 1 + core/chains/evm/headtracker/head_broadcaster_test.go | 2 +- core/chains/evm/headtracker/head_tracker_test.go | 4 ++-- core/scripts/go.mod | 1 - 6 files changed, 10 insertions(+), 14 deletions(-) diff --git a/common/client/multi_node.go b/common/client/multi_node.go index c0826874853..3b0cee3ef6c 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -42,6 +42,7 @@ type MultiNode[ RPC_CLIENT any, ] interface { Dial(ctx context.Context) error + ChainID() CHAIN_ID // SelectRPC - returns the best healthy RPCClient SelectRPC() (RPC_CLIENT, error) // DoAll - calls `do` sequentially on all healthy RPCClients. @@ -122,6 +123,10 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) ChainType() config.C return c.chainType } +func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) ChainID() CHAIN_ID { + return c.chainID +} + func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) DoAll(ctx context.Context, do func(ctx context.Context, rpc RPC_CLIENT, isSendOnly bool) bool) error { callsCompleted := 0 for _, n := range c.primaryNodes { diff --git a/core/chains/evm/client/chain_client.go b/core/chains/evm/client/chain_client.go index c7b0763153a..9a7d2d5df46 100644 --- a/core/chains/evm/client/chain_client.go +++ b/core/chains/evm/client/chain_client.go @@ -182,16 +182,7 @@ func (c *chainClient) CodeAt(ctx context.Context, account common.Address, blockN } func (c *chainClient) ConfiguredChainID() *big.Int { - rpc, err := c.multiNode.SelectRPC() - if err != nil { - return nil - } - // TODO: propagate context - chainId, err := rpc.ChainID(context.Background()) - if err != nil { - return nil - } - return chainId + return c.multiNode.ChainID() } func (c *chainClient) Dial(ctx context.Context) error { diff --git a/core/chains/evm/gas/models.go b/core/chains/evm/gas/models.go index c50e19373f1..4777449e28b 100644 --- a/core/chains/evm/gas/models.go +++ b/core/chains/evm/gas/models.go @@ -89,6 +89,7 @@ func NewEstimator(lggr logger.Logger, ethClient feeEstimatorClient, cfg Config, } case "BlockHistory": newEstimator = func(l logger.Logger) EvmEstimator { + fmt.Println("BlockHistoryEstimator: ConfiguredChainID: ", ethClient.ConfiguredChainID()) return NewBlockHistoryEstimator(lggr, ethClient, cfg, geCfg, bh, *ethClient.ConfiguredChainID(), l1Oracle) } case "FixedPrice": diff --git a/core/chains/evm/headtracker/head_broadcaster_test.go b/core/chains/evm/headtracker/head_broadcaster_test.go index 88cc7b76862..b15f1dda5d4 100644 --- a/core/chains/evm/headtracker/head_broadcaster_test.go +++ b/core/chains/evm/headtracker/head_broadcaster_test.go @@ -62,7 +62,7 @@ func TestHeadBroadcaster_Subscribe(t *testing.T) { Run(func(args mock.Arguments) { chchHeaders <- chHead }). - Return((chan<- *evmtypes.Head)(chHead), sub, nil) + Return((<-chan *evmtypes.Head)(chHead), sub, nil) // 2 for initial and 2 for backfill ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(cltest.Head(1), nil).Times(4) diff --git a/core/chains/evm/headtracker/head_tracker_test.go b/core/chains/evm/headtracker/head_tracker_test.go index 6209427b9b8..0cdfa6d9826 100644 --- a/core/chains/evm/headtracker/head_tracker_test.go +++ b/core/chains/evm/headtracker/head_tracker_test.go @@ -288,7 +288,7 @@ func TestHeadTracker_CallsHeadTrackableCallbacks(t *testing.T) { func(ctx context.Context) (<-chan *evmtypes.Head, ethereum.Subscription, error) { sub := mockEth.NewSub(t) chchHeaders <- evmtest.NewRawSub(chHead, sub.Err()) - return (<-chan *evmtypes.Head)(chHead), sub, nil + return chHead, sub, nil }, ) ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(cltest.Head(0), nil) @@ -325,7 +325,7 @@ func TestHeadTracker_ReconnectOnError(t *testing.T) { return chHead, mockEth.NewSub(t), nil }, ) - ethClient.On("SubscribeNewHead", mock.Anything).Return(chHead, nil, errors.New("cannot reconnect")) + ethClient.On("SubscribeNewHead", mock.Anything).Return((<-chan *evmtypes.Head)(chHead), nil, errors.New("cannot reconnect")) ethClient.On("SubscribeNewHead", mock.Anything). Return( func(ctx context.Context) (<-chan *evmtypes.Head, ethereum.Subscription, error) { diff --git a/core/scripts/go.mod b/core/scripts/go.mod index 026bd0ccb98..3963538b2c3 100644 --- a/core/scripts/go.mod +++ b/core/scripts/go.mod @@ -270,7 +270,6 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/status-im/keycard-go v0.2.0 // indirect github.com/streamingfast/logging v0.0.0-20220405224725-2755dab2ce75 // indirect - github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.4.2 // indirect github.com/supranational/blst v0.3.11 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect From b3b60fca466ef34d9e3a3dd2b73a363ad16fdec4 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 29 May 2024 13:39:36 -0400 Subject: [PATCH 15/58] Clean up --- common/client/multi_node.go | 23 ++++++++++------------- common/client/node.go | 10 +++++----- core/chains/evm/client/chain_client.go | 2 -- core/chains/evm/client/client_test.go | 3 +-- core/chains/evm/client/evm_client.go | 4 ++-- core/chains/evm/client/helpers_test.go | 11 +++-------- 6 files changed, 21 insertions(+), 32 deletions(-) diff --git a/common/client/multi_node.go b/common/client/multi_node.go index 3b0cee3ef6c..bf3c58815d5 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -62,12 +62,11 @@ type multiNode[ RPC_CLIENT RPCClient[CHAIN_ID, HEAD], ] struct { services.StateMachine - primaryNodes []Node[CHAIN_ID, HEAD, RPC_CLIENT] - sendOnlyNodes []SendOnlyNode[CHAIN_ID, RPC_CLIENT] - chainID CHAIN_ID - lggr logger.SugaredLogger - selectionMode string - // noNewHeadsThreshold time.Duration TODO: Move this? + primaryNodes []Node[CHAIN_ID, HEAD, RPC_CLIENT] + sendOnlyNodes []SendOnlyNode[CHAIN_ID, RPC_CLIENT] + chainID CHAIN_ID + lggr logger.SugaredLogger + selectionMode string nodeSelector NodeSelector[CHAIN_ID, HEAD, RPC_CLIENT] leaseDuration time.Duration leaseTicker *time.Ticker @@ -96,7 +95,6 @@ func NewMultiNode[ chainID CHAIN_ID, // configured chain ID (used to verify that passed primaryNodes belong to the same chain) chainFamily string, // name of the chain family - used in the metrics ) MultiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT] { - // TODO: does node selector only need primary nodes, or all nodes? nodeSelector := newNodeSelector(selectionMode, primaryNodes) // Prometheus' default interval is 15s, set this to under 7.5s to avoid // aliasing (see: https://en.wikipedia.org/wiki/Nyquist_frequency) @@ -140,6 +138,10 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) DoAll(ctx context.Co callsCompleted++ } } + if callsCompleted == 0 { + return fmt.Errorf("no calls were completed") + } + for _, n := range c.sendOnlyNodes { if ctx.Err() != nil { return ctx.Err() @@ -147,12 +149,7 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) DoAll(ctx context.Co if n.State() != nodeStateAlive { continue } - if do(ctx, n.RPC(), false) { - callsCompleted++ - } - } - if callsCompleted == 0 { - return fmt.Errorf("no calls were completed") + do(ctx, n.RPC(), false) } return nil } diff --git a/common/client/node.go b/common/client/node.go index fa02a4f9098..0be5c669237 100644 --- a/common/client/node.go +++ b/common/client/node.go @@ -279,7 +279,7 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) verifyChainID(callerCtx context.Conte var err error if chainID, err = n.rpc.ChainID(callerCtx); err != nil { promFailed() - lggr.Errorw("Failed to verify chain ID for node", "err", err, "NodeState", n.State()) + lggr.Errorw("Failed to verify chain ID for node", "err", err, "nodeState", n.State()) return nodeStateUnreachable } else if chainID.String() != n.chainID.String() { promFailed() @@ -290,7 +290,7 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) verifyChainID(callerCtx context.Conte n.name, errInvalidChainID, ) - lggr.Errorw("Failed to verify RPC node; remote endpoint returned the wrong chain ID", "err", err, "NodeState", n.State()) + lggr.Errorw("Failed to verify RPC node; remote endpoint returned the wrong chain ID", "err", err, "nodeState", n.State()) return nodeStateInvalidChainID } @@ -303,7 +303,7 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) verifyChainID(callerCtx context.Conte // Returns desired state if one of the verifications fails. Otherwise, returns nodeStateAlive. func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) createVerifiedConn(ctx context.Context, lggr logger.Logger) NodeState { if err := n.rpc.Dial(ctx); err != nil { - n.lfcLog.Errorw("Dial failed: Node is unreachable", "err", err, "NodeState", n.State()) + n.lfcLog.Errorw("Dial failed: Node is unreachable", "err", err, "nodeState", n.State()) return nodeStateUnreachable } @@ -321,12 +321,12 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) verifyConn(ctx context.Context, lggr if n.nodePoolCfg.NodeIsSyncingEnabled() { isSyncing, err := n.rpc.IsSyncing(ctx) if err != nil { - lggr.Errorw("Unexpected error while verifying RPC node synchronization status", "err", err, "NodeState", n.State()) + lggr.Errorw("Unexpected error while verifying RPC node synchronization status", "err", err, "nodeState", n.State()) return nodeStateUnreachable } if isSyncing { - lggr.Errorw("Verification failed: Node is syncing", "NodeState", n.State()) + lggr.Errorw("Verification failed: Node is syncing", "nodeState", n.State()) return nodeStateSyncing } } diff --git a/core/chains/evm/client/chain_client.go b/core/chains/evm/client/chain_client.go index 9a7d2d5df46..649215f6cc8 100644 --- a/core/chains/evm/client/chain_client.go +++ b/core/chains/evm/client/chain_client.go @@ -40,11 +40,9 @@ func NewChainClient( lggr logger.Logger, selectionMode string, leaseDuration time.Duration, - noNewHeadsThreshold time.Duration, nodes []commonclient.Node[*big.Int, *evmtypes.Head, EvmRpcClient], sendonlys []commonclient.SendOnlyNode[*big.Int, EvmRpcClient], chainID *big.Int, - chainType config.ChainType, clientErrors evmconfig.ClientErrors, ) Client { multiNode := commonclient.NewMultiNode( diff --git a/core/chains/evm/client/client_test.go b/core/chains/evm/client/client_test.go index e92b767dbb8..7601b32bd5e 100644 --- a/core/chains/evm/client/client_test.go +++ b/core/chains/evm/client/client_test.go @@ -848,8 +848,7 @@ func TestEthClient_ErroringClient(t *testing.T) { require.Equal(t, err, commonclient.ErroringNodeError) id := erroringClient.ConfiguredChainID() - var expected *big.Int - require.Equal(t, id, expected) + require.Equal(t, id, big.NewInt(0)) err = erroringClient.Dial(ctx) require.ErrorContains(t, err, "no available nodes for chain") diff --git a/core/chains/evm/client/evm_client.go b/core/chains/evm/client/evm_client.go index 763eafef4a9..1c62eb61fd4 100644 --- a/core/chains/evm/client/evm_client.go +++ b/core/chains/evm/client/evm_client.go @@ -30,6 +30,6 @@ func NewEvmClient(cfg evmconfig.NodePool, chainCfg commonclient.ChainConfig, cli } } - return NewChainClient(lggr, cfg.SelectionMode(), cfg.LeaseDuration(), chainCfg.NodeNoNewHeadsThreshold(), - primaries, sendonlys, chainID, chainCfg.ChainType(), clientErrors) + return NewChainClient(lggr, cfg.SelectionMode(), cfg.LeaseDuration(), + primaries, sendonlys, chainID, clientErrors) } diff --git a/core/chains/evm/client/helpers_test.go b/core/chains/evm/client/helpers_test.go index 0c1dfef5a92..58ae361fb17 100644 --- a/core/chains/evm/client/helpers_test.go +++ b/core/chains/evm/client/helpers_test.go @@ -13,7 +13,6 @@ import ( commonclient "github.com/smartcontractkit/chainlink/v2/common/client" clientMocks "github.com/smartcontractkit/chainlink/v2/common/client/mocks" - commonconfig "github.com/smartcontractkit/chainlink/v2/common/config" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml" evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" @@ -185,9 +184,8 @@ func NewChainClientWithTestNode( sendonlys = append(sendonlys, s) } - var chainType commonconfig.ChainType clientErrors := NewTestClientErrors() - c := NewChainClient(lggr, nodeCfg.SelectionMode(), leaseDuration, noNewHeadsThreshold, primaries, sendonlys, chainID, chainType, &clientErrors) + c := NewChainClient(lggr, nodeCfg.SelectionMode(), leaseDuration, primaries, sendonlys, chainID, &clientErrors) t.Cleanup(c.Close) return c, nil } @@ -202,8 +200,7 @@ func NewChainClientWithEmptyNode( lggr := logger.Test(t) - var chainType commonconfig.ChainType - c := NewChainClient(lggr, selectionMode, leaseDuration, noNewHeadsThreshold, nil, nil, chainID, chainType, nil) + c := NewChainClient(lggr, selectionMode, leaseDuration, nil, nil, chainID, nil) t.Cleanup(c.Close) return c } @@ -219,8 +216,6 @@ func NewChainClientWithMockedRpc( lggr := logger.Test(t) - var chainType commonconfig.ChainType - cfg := TestNodePoolConfig{ NodeSelectionMode: NodeSelectionMode_RoundRobin, } @@ -230,7 +225,7 @@ func NewChainClientWithMockedRpc( cfg, clientMocks.ChainConfig{NoNewHeadsThresholdVal: noNewHeadsThreshold}, lggr, *parsed, nil, "eth-primary-node-0", 1, chainID, 1, rpc, "EVM") primaries := []commonclient.Node[*big.Int, *evmtypes.Head, EvmRpcClient]{n} clientErrors := NewTestClientErrors() - c := NewChainClient(lggr, selectionMode, leaseDuration, noNewHeadsThreshold, primaries, nil, chainID, chainType, &clientErrors) + c := NewChainClient(lggr, selectionMode, leaseDuration, primaries, nil, chainID, &clientErrors) t.Cleanup(c.Close) return c } From 9db0039a1540e43ef13273625d8342b7f0e53d9c Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 29 May 2024 14:04:48 -0400 Subject: [PATCH 16/58] Fix features test mocking --- core/internal/features/features_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/core/internal/features/features_test.go b/core/internal/features/features_test.go index 26e7d5eae56..c73863c0b85 100644 --- a/core/internal/features/features_test.go +++ b/core/internal/features/features_test.go @@ -1293,14 +1293,14 @@ func TestIntegration_BlockHistoryEstimator(t *testing.T) { h42 := evmtypes.Head{Hash: b42.Hash, ParentHash: h41.Hash, Number: 42, EVMChainID: evmChainID} mockEth := &evmtest.MockEth{EthClient: ethClient} - ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + ethClient.On("SubscribeNewHead", mock.Anything). Return( - func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { + func(ctx context.Context) (<-chan *evmtypes.Head, ethereum.Subscription, error) { + ch := make(chan *evmtypes.Head) sub := mockEth.NewSub(t) chchNewHeads <- evmtest.NewRawSub(ch, sub.Err()) - return sub + return ch, sub, nil }, - func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, ) // Nonce syncer ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Maybe().Return(uint64(0), nil) From 88bc047b7829c0498f19f52459761bae73c2aded Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 29 May 2024 14:18:42 -0400 Subject: [PATCH 17/58] Fix logging --- common/client/node_fsm.go | 12 +++---- common/client/node_lifecycle.go | 64 ++++++++++++++++----------------- 2 files changed, 38 insertions(+), 38 deletions(-) diff --git a/common/client/node_fsm.go b/common/client/node_fsm.go index 05c55fe8751..c78dd2bddc1 100644 --- a/common/client/node_fsm.go +++ b/common/client/node_fsm.go @@ -144,7 +144,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) setState(s NodeState) { func (n *node[CHAIN_ID, HEAD, RPC]) declareAlive() { n.transitionToAlive(func() { - n.lfcLog.Infow("RPC Node is online", "NodeState", n.state) + n.lfcLog.Infow("RPC Node is online", "nodeState", n.state) n.wg.Add(1) go n.aliveLoop() }) @@ -170,7 +170,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToAlive(fn func()) { // pool consumers again func (n *node[CHAIN_ID, HEAD, RPC]) declareInSync() { n.transitionToInSync(func() { - n.lfcLog.Infow("RPC Node is back in sync", "NodeState", n.state) + n.lfcLog.Infow("RPC Node is back in sync", "nodeState", n.state) n.wg.Add(1) go n.aliveLoop() }) @@ -197,7 +197,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToInSync(fn func()) { // clients and making it unavailable for use until back in-sync. func (n *node[CHAIN_ID, HEAD, RPC]) declareOutOfSync(isOutOfSync func(num int64, td *big.Int) bool) { n.transitionToOutOfSync(func() { - n.lfcLog.Errorw("RPC Node is out of sync", "NodeState", n.state) + n.lfcLog.Errorw("RPC Node is out of sync", "nodeState", n.state) n.wg.Add(1) go n.outOfSyncLoop(isOutOfSync) }) @@ -222,7 +222,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToOutOfSync(fn func()) { func (n *node[CHAIN_ID, HEAD, RPC]) declareUnreachable() { n.transitionToUnreachable(func() { - n.lfcLog.Errorw("RPC Node is unreachable", "NodeState", n.state) + n.lfcLog.Errorw("RPC Node is unreachable", "nodeState", n.state) n.wg.Add(1) go n.unreachableLoop() }) @@ -265,7 +265,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) declareState(state NodeState) { func (n *node[CHAIN_ID, HEAD, RPC]) declareInvalidChainID() { n.transitionToInvalidChainID(func() { - n.lfcLog.Errorw("RPC Node has the wrong chain ID", "NodeState", n.state) + n.lfcLog.Errorw("RPC Node has the wrong chain ID", "nodeState", n.state) n.wg.Add(1) go n.invalidChainIDLoop() }) @@ -290,7 +290,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToInvalidChainID(fn func()) { func (n *node[CHAIN_ID, HEAD, RPC]) declareSyncing() { n.transitionToSyncing(func() { - n.lfcLog.Errorw("RPC Node is syncing", "NodeState", n.state) + n.lfcLog.Errorw("RPC Node is syncing", "nodeState", n.state) n.wg.Add(1) go n.syncingLoop() }) diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index b6153506a26..76a6e9c4e9d 100644 --- a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -99,11 +99,11 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { pollInterval := n.nodePoolCfg.PollInterval() lggr := logger.Sugared(n.lfcLog).Named("Alive").With("noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold, "pollInterval", pollInterval, "pollFailureThreshold", pollFailureThreshold) - lggr.Tracew("Alive loop starting", "NodeState", n.State()) + lggr.Tracew("Alive loop starting", "nodeState", n.State()) headsC, sub, err := n.rpc.SubscribeToHeads(n.nodeCtx) if err != nil { - lggr.Errorw("Initial subscribe for heads failed", "NodeState", n.State()) + lggr.Errorw("Initial subscribe for heads failed", "nodeState", n.State()) n.declareUnreachable() return } @@ -115,7 +115,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { var outOfSyncT *time.Ticker var outOfSyncTC <-chan time.Time if noNewHeadsTimeoutThreshold > 0 { - lggr.Debugw("Head liveness checking enabled", "NodeState", n.State()) + lggr.Debugw("Head liveness checking enabled", "nodeState", n.State()) outOfSyncT = time.NewTicker(noNewHeadsTimeoutThreshold) defer outOfSyncT.Stop() outOfSyncTC = outOfSyncT.C @@ -162,7 +162,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { return case <-pollCh: promPoolRPCNodePolls.WithLabelValues(n.chainID.String(), n.name).Inc() - lggr.Tracew("Pinging RPC", "NodeState", n.State(), "pollFailures", pollFailures) + lggr.Tracew("Pinging RPC", "nodeState", n.State(), "pollFailures", pollFailures) ctx, cancel := context.WithTimeout(n.nodeCtx, pollInterval) err := n.RPC().Ping(ctx) cancel() @@ -172,14 +172,14 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { promPoolRPCNodePollsFailed.WithLabelValues(n.chainID.String(), n.name).Inc() pollFailures++ } - lggr.Warnw(fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", n.String()), "err", err, "pollFailures", pollFailures, "NodeState", n.State()) + lggr.Warnw(fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", n.String()), "err", err, "pollFailures", pollFailures, "nodeState", n.State()) } else { - lggr.Debugw("Ping successful", "NodeState", n.State()) + lggr.Debugw("Ping successful", "nodeState", n.State()) promPoolRPCNodePollsSuccess.WithLabelValues(n.chainID.String(), n.name).Inc() pollFailures = 0 } if pollFailureThreshold > 0 && pollFailures >= pollFailureThreshold { - lggr.Errorw(fmt.Sprintf("RPC endpoint failed to respond to %d consecutive polls", pollFailures), "pollFailures", pollFailures, "NodeState", n.State()) + lggr.Errorw(fmt.Sprintf("RPC endpoint failed to respond to %d consecutive polls", pollFailures), "pollFailures", pollFailures, "nodeState", n.State()) if n.nLiveNodes != nil { if l, _, _ := n.nLiveNodes(); l < 2 { lggr.Criticalf("RPC endpoint failed to respond to polls; %s %s", msgCannotDisable, msgDegradedState) @@ -192,7 +192,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { _, chainInfo := n.StateAndLatest() if outOfSync, liveNodes := n.syncStatus(chainInfo.BlockNumber, chainInfo.BlockDifficulty); outOfSync { // note: there must be another live node for us to be out of sync - lggr.Errorw("RPC endpoint has fallen behind", "blockNumber", chainInfo.BlockNumber, "totalDifficulty", chainInfo.BlockDifficulty, "NodeState", n.State()) + lggr.Errorw("RPC endpoint has fallen behind", "blockNumber", chainInfo.BlockNumber, "totalDifficulty", chainInfo.BlockDifficulty, "nodeState", n.State()) if liveNodes < 2 { lggr.Criticalf("RPC endpoint has fallen behind; %s %s", msgCannotDisable, msgDegradedState) continue @@ -202,7 +202,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } case bh, open := <-headsC: if !open { - lggr.Errorw("Subscription channel unexpectedly closed", "NodeState", n.State()) + lggr.Errorw("Subscription channel unexpectedly closed", "nodeState", n.State()) n.declareUnreachable() return } @@ -210,10 +210,10 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { lggr.Tracew("Got head", "head", bh) if bh.BlockNumber() > highestReceivedBlockNumber { promPoolRPCNodeHighestSeenBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(bh.BlockNumber())) - lggr.Tracew("Got higher block number, resetting timer", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.BlockNumber(), "NodeState", n.State()) + lggr.Tracew("Got higher block number, resetting timer", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.State()) highestReceivedBlockNumber = bh.BlockNumber() } else { - lggr.Tracew("Ignoring previously seen block number", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.BlockNumber(), "NodeState", n.State()) + lggr.Tracew("Ignoring previously seen block number", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.State()) } if outOfSyncT != nil { outOfSyncT.Reset(noNewHeadsTimeoutThreshold) @@ -227,13 +227,13 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } } case err := <-sub.Err(): - lggr.Errorw("Subscription was terminated", "err", err, "NodeState", n.State()) + lggr.Errorw("Subscription was terminated", "err", err, "nodeState", n.State()) n.declareUnreachable() return case <-outOfSyncTC: // We haven't received a head on the channel for at least the // threshold amount of time, mark it broken - lggr.Errorw(fmt.Sprintf("RPC endpoint detected out of sync; no new heads received for %s (last head received was %v)", noNewHeadsTimeoutThreshold, highestReceivedBlockNumber), "NodeState", n.State(), "latestReceivedBlockNumber", highestReceivedBlockNumber, "noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold) + lggr.Errorw(fmt.Sprintf("RPC endpoint detected out of sync; no new heads received for %s (last head received was %v)", noNewHeadsTimeoutThreshold, highestReceivedBlockNumber), "nodeState", n.State(), "latestReceivedBlockNumber", highestReceivedBlockNumber, "noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold) if n.nLiveNodes != nil { if l, _, _ := n.nLiveNodes(); l < 2 { lggr.Criticalf("RPC endpoint detected out of sync; %s %s", msgCannotDisable, msgDegradedState) @@ -247,7 +247,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { return case latestFinalized, open := <-finalizedHeadCh: if !open { - lggr.Errorw("Subscription channel unexpectedly closed", "NodeState", n.State()) + lggr.Errorw("Subscription channel unexpectedly closed", "nodeState", n.State()) n.declareUnreachable() return } @@ -320,7 +320,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td outOfSyncAt := time.Now() lggr := logger.Sugared(logger.Named(n.lfcLog, "OutOfSync")) - lggr.Debugw("Trying to revive out-of-sync RPC node", "NodeState", n.State()) + lggr.Debugw("Trying to revive out-of-sync RPC node", "nodeState", n.State()) // Need to redial since out-of-sync nodes are automatically disconnected state := n.createVerifiedConn(n.nodeCtx, lggr) @@ -329,11 +329,11 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td return } - lggr.Tracew("Successfully subscribed to heads feed on out-of-sync RPC node", "NodeState", n.State()) + lggr.Tracew("Successfully subscribed to heads feed on out-of-sync RPC node", "nodeState", n.State()) ch, sub, err := n.rpc.SubscribeToHeads(n.nodeCtx) if err != nil { - lggr.Errorw("Failed to subscribe heads on out-of-sync RPC node", "NodeState", n.State(), "err", err) + lggr.Errorw("Failed to subscribe heads on out-of-sync RPC node", "nodeState", n.State(), "err", err) n.declareUnreachable() return } @@ -345,18 +345,18 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td return case head, open := <-ch: if !open { - lggr.Error("Subscription channel unexpectedly closed", "NodeState", n.State()) + lggr.Error("Subscription channel unexpectedly closed", "nodeState", n.State()) n.declareUnreachable() return } n.setLatestReceived(head.BlockNumber(), head.BlockDifficulty()) if !isOutOfSync(head.BlockNumber(), head.BlockDifficulty()) { // back in-sync! flip back into alive loop - lggr.Infow(fmt.Sprintf("%s: %s. Node was out-of-sync for %s", msgInSync, n.String(), time.Since(outOfSyncAt)), "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "NodeState", n.State()) + lggr.Infow(fmt.Sprintf("%s: %s. Node was out-of-sync for %s", msgInSync, n.String(), time.Since(outOfSyncAt)), "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "nodeState", n.State()) n.declareInSync() return } - lggr.Debugw(msgReceivedBlock, "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "NodeState", n.State()) + lggr.Debugw(msgReceivedBlock, "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "nodeState", n.State()) case <-time.After(zombieNodeCheckInterval(n.chainCfg.NodeNoNewHeadsThreshold())): if n.nLiveNodes != nil { if l, _, _ := n.nLiveNodes(); l < 1 { @@ -366,7 +366,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td } } case err := <-sub.Err(): - lggr.Errorw("Subscription was terminated", "NodeState", n.State(), "err", err) + lggr.Errorw("Subscription was terminated", "nodeState", n.State(), "err", err) n.declareUnreachable() return } @@ -391,7 +391,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) unreachableLoop() { unreachableAt := time.Now() lggr := logger.Sugared(logger.Named(n.lfcLog, "Unreachable")) - lggr.Debugw("Trying to revive unreachable RPC node", "NodeState", n.State()) + lggr.Debugw("Trying to revive unreachable RPC node", "nodeState", n.State()) dialRetryBackoff := iutils.NewRedialBackoff() @@ -400,11 +400,11 @@ func (n *node[CHAIN_ID, HEAD, RPC]) unreachableLoop() { case <-n.nodeCtx.Done(): return case <-time.After(dialRetryBackoff.Duration()): - lggr.Tracew("Trying to re-dial RPC node", "NodeState", n.State()) + lggr.Tracew("Trying to re-dial RPC node", "nodeState", n.State()) err := n.rpc.Dial(n.nodeCtx) if err != nil { - lggr.Errorw(fmt.Sprintf("Failed to redial RPC node; still unreachable: %v", err), "err", err, "NodeState", n.State()) + lggr.Errorw(fmt.Sprintf("Failed to redial RPC node; still unreachable: %v", err), "err", err, "nodeState", n.State()) continue } @@ -416,7 +416,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) unreachableLoop() { n.setState(nodeStateUnreachable) continue case nodeStateAlive: - lggr.Infow(fmt.Sprintf("Successfully redialled and verified RPC node %s. Node was offline for %s", n.String(), time.Since(unreachableAt)), "NodeState", n.State()) + lggr.Infow(fmt.Sprintf("Successfully redialled and verified RPC node %s. Node was offline for %s", n.String(), time.Since(unreachableAt)), "nodeState", n.State()) fallthrough default: n.declareState(state) @@ -452,7 +452,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) invalidChainIDLoop() { return } - lggr.Debugw(fmt.Sprintf("Periodically re-checking RPC node %s with invalid chain ID", n.String()), "NodeState", n.State()) + lggr.Debugw(fmt.Sprintf("Periodically re-checking RPC node %s with invalid chain ID", n.String()), "nodeState", n.State()) chainIDRecheckBackoff := iutils.NewRedialBackoff() @@ -466,7 +466,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) invalidChainIDLoop() { case nodeStateInvalidChainID: continue case nodeStateAlive: - lggr.Infow(fmt.Sprintf("Successfully verified RPC node. Node was offline for %s", time.Since(invalidAt)), "NodeState", n.State()) + lggr.Infow(fmt.Sprintf("Successfully verified RPC node. Node was offline for %s", time.Since(invalidAt)), "nodeState", n.State()) fallthrough default: n.declareState(state) @@ -494,7 +494,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) syncingLoop() { syncingAt := time.Now() lggr := logger.Sugared(logger.Named(n.lfcLog, "Syncing")) - lggr.Debugw(fmt.Sprintf("Periodically re-checking RPC node %s with syncing status", n.String()), "NodeState", n.State()) + lggr.Debugw(fmt.Sprintf("Periodically re-checking RPC node %s with syncing status", n.String()), "nodeState", n.State()) // Need to redial since syncing nodes are automatically disconnected state := n.createVerifiedConn(n.nodeCtx, lggr) if state != nodeStateSyncing { @@ -509,20 +509,20 @@ func (n *node[CHAIN_ID, HEAD, RPC]) syncingLoop() { case <-n.nodeCtx.Done(): return case <-time.After(recheckBackoff.Duration()): - lggr.Tracew("Trying to recheck if the node is still syncing", "NodeState", n.State()) + lggr.Tracew("Trying to recheck if the node is still syncing", "nodeState", n.State()) isSyncing, err := n.rpc.IsSyncing(n.nodeCtx) if err != nil { - lggr.Errorw("Unexpected error while verifying RPC node synchronization status", "err", err, "NodeState", n.State()) + lggr.Errorw("Unexpected error while verifying RPC node synchronization status", "err", err, "nodeState", n.State()) n.declareUnreachable() return } if isSyncing { - lggr.Errorw("Verification failed: Node is syncing", "NodeState", n.State()) + lggr.Errorw("Verification failed: Node is syncing", "nodeState", n.State()) continue } - lggr.Infow(fmt.Sprintf("Successfully verified RPC node. Node was syncing for %s", time.Since(syncingAt)), "NodeState", n.State()) + lggr.Infow(fmt.Sprintf("Successfully verified RPC node. Node was syncing for %s", time.Since(syncingAt)), "nodeState", n.State()) n.declareAlive() return } From 59e67522f3cc03e8f1291398bc1691430a044b8d Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Fri, 31 May 2024 11:28:16 -0400 Subject: [PATCH 18/58] Remove logging --- common/client/node.go | 8 +++++--- common/client/node_lifecycle.go | 1 + core/chains/evm/client/rpc_client.go | 1 - core/chains/evm/gas/models.go | 1 - 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/common/client/node.go b/common/client/node.go index 0be5c669237..9cd91bf9329 100644 --- a/common/client/node.go +++ b/common/client/node.go @@ -132,8 +132,9 @@ type node[ // 1. see how many live nodes there are in total, so we can prevent the last alive node in a pool from being // moved to out-of-sync state. It is better to have one out-of-sync node than no nodes at all. // 2. compare against the highest head (by number or difficulty) to ensure we don't fall behind too far. - nLiveNodes func() (count int, blockNumber int64, totalDifficulty *big.Int) - aliveLoopSub types.Subscription + nLiveNodes func() (count int, blockNumber int64, totalDifficulty *big.Int) + aliveLoopSub types.Subscription + finalizedBlockSub types.Subscription } func NewNode[ @@ -178,6 +179,7 @@ func NewNode[ n.rpc = rpc n.chainFamily = chainFamily n.aliveLoopSub = nil + n.finalizedBlockSub = nil return n } @@ -202,7 +204,7 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) RPC() RPC_CLIENT { } func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) UnsubscribeAll() { - n.rpc.UnsubscribeAllExcept(n.aliveLoopSub) + n.rpc.UnsubscribeAllExcept(n.aliveLoopSub, n.finalizedBlockSub) } func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) Close() error { diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index 76a6e9c4e9d..079476a2370 100644 --- a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -151,6 +151,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } defer finalizedHeadSub.Unsubscribe() } + n.finalizedBlockSub = finalizedHeadSub _, chainInfo := n.StateAndLatest() highestReceivedBlockNumber := chainInfo.BlockNumber diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index feeb9e6d01a..a075e3bbac5 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -522,7 +522,6 @@ func (r *RpcClient) HeaderByHash(ctx context.Context, hash common.Hash) (header func (r *RpcClient) LatestFinalizedBlock(ctx context.Context) (*evmtypes.Head, error) { head, err := r.blockByNumber(ctx, rpc.FinalizedBlockNumber.String()) if err != nil { - r.rpcLog.Warnw("Failed to fetch latest finalized block", "err", err) return nil, err } return head, nil diff --git a/core/chains/evm/gas/models.go b/core/chains/evm/gas/models.go index 4777449e28b..c50e19373f1 100644 --- a/core/chains/evm/gas/models.go +++ b/core/chains/evm/gas/models.go @@ -89,7 +89,6 @@ func NewEstimator(lggr logger.Logger, ethClient feeEstimatorClient, cfg Config, } case "BlockHistory": newEstimator = func(l logger.Logger) EvmEstimator { - fmt.Println("BlockHistoryEstimator: ConfiguredChainID: ", ethClient.ConfiguredChainID()) return NewBlockHistoryEstimator(lggr, ethClient, cfg, geCfg, bh, *ethClient.ConfiguredChainID(), l1Oracle) } case "FixedPrice": From e940efaddbda0c644e43d89851b355f684dc7153 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Fri, 31 May 2024 11:57:07 -0400 Subject: [PATCH 19/58] Fix tests --- common/client/node_fsm_test.go | 10 +++---- common/client/node_lifecycle.go | 2 +- common/client/node_lifecycle_test.go | 40 ++++++++++++++-------------- core/chains/evm/client/rpc_client.go | 4 +++ 4 files changed, 30 insertions(+), 26 deletions(-) diff --git a/common/client/node_fsm_test.go b/common/client/node_fsm_test.go index b6b25f6cd53..89caa43d231 100644 --- a/common/client/node_fsm_test.go +++ b/common/client/node_fsm_test.go @@ -53,33 +53,33 @@ func TestUnit_Node_StateTransitions(t *testing.T) { const destinationState = nodeStateOutOfSync allowedStates := []NodeState{nodeStateAlive} rpc := NewMockRPCClient[types.ID, Head](t) - rpc.On("UnsubscribeAllExcept", nil).Once() + rpc.On("UnsubscribeAllExcept", nil, nil).Once() testTransition(t, rpc, testNode.transitionToOutOfSync, destinationState, allowedStates...) }) t.Run("transitionToUnreachable", func(t *testing.T) { const destinationState = nodeStateUnreachable allowedStates := []NodeState{nodeStateUndialed, nodeStateDialed, nodeStateAlive, nodeStateOutOfSync, nodeStateInvalidChainID, nodeStateSyncing} rpc := NewMockRPCClient[types.ID, Head](t) - rpc.On("UnsubscribeAllExcept", nil).Times(len(allowedStates)) + rpc.On("UnsubscribeAllExcept", nil, nil).Times(len(allowedStates)) testTransition(t, rpc, testNode.transitionToUnreachable, destinationState, allowedStates...) }) t.Run("transitionToInvalidChain", func(t *testing.T) { const destinationState = nodeStateInvalidChainID allowedStates := []NodeState{nodeStateDialed, nodeStateOutOfSync, nodeStateSyncing} rpc := NewMockRPCClient[types.ID, Head](t) - rpc.On("UnsubscribeAllExcept", nil).Times(len(allowedStates)) + rpc.On("UnsubscribeAllExcept", nil, nil).Times(len(allowedStates)) testTransition(t, rpc, testNode.transitionToInvalidChainID, destinationState, allowedStates...) }) t.Run("transitionToSyncing", func(t *testing.T) { const destinationState = nodeStateSyncing allowedStates := []NodeState{nodeStateDialed, nodeStateOutOfSync, nodeStateInvalidChainID} rpc := NewMockRPCClient[types.ID, Head](t) - rpc.On("UnsubscribeAllExcept", nil).Times(len(allowedStates)) + rpc.On("UnsubscribeAllExcept", nil, nil).Times(len(allowedStates)) testTransition(t, rpc, testNode.transitionToSyncing, destinationState, allowedStates...) }) t.Run("transitionToSyncing panics if nodeIsSyncing is disabled", func(t *testing.T) { rpc := NewMockRPCClient[types.ID, Head](t) - rpc.On("UnsubscribeAllExcept", nil).Once() + rpc.On("UnsubscribeAllExcept", nil, nil).Once() node := newTestNode(t, testNodeOpts{rpc: rpc}) node.setState(nodeStateDialed) fn := new(fnMock) diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index 079476a2370..b4500bc7245 100644 --- a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -141,7 +141,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { var finalizedHeadCh <-chan HEAD var finalizedHeadSub types.Subscription - if n.chainCfg.FinalityTagEnabled() && n.nodePoolCfg.FinalizedBlockPollInterval() > 0 { + if n.chainCfg.FinalityTagEnabled() { lggr.Debugw("Finalized block polling enabled") finalizedHeadCh, finalizedHeadSub, err = n.rpc.SubscribeToFinalizedHeads(n.nodeCtx) if err != nil { diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go index 2abc8da2f6d..fd9f0ff7345 100644 --- a/common/client/node_lifecycle_test.go +++ b/common/client/node_lifecycle_test.go @@ -50,7 +50,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { expectedError := errors.New("failed to subscribe to rpc") rpc.On("SubscribeToHeads", mock.Anything).Return(nil, nil, expectedError).Once() - rpc.On("UnsubscribeAllExcept", nil) + rpc.On("UnsubscribeAllExcept", nil, nil) // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -77,7 +77,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { sub.On("Unsubscribe").Once() rpc.On("SubscribeToHeads", mock.Anything).Return(nil, sub, nil).Once() // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept", mock.Anything).Once() + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Once() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -122,7 +122,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(node.chainID, nil) @@ -170,7 +170,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { pollError := errors.New("failed to get ClientVersion") rpc.On("Ping", mock.Anything).Return(pollError) // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept", mock.Anything).Once() + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Once() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -227,7 +227,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { assert.Equal(t, nodeStateOutOfSync, node.State()) }).Once() // disconnects all on transfer to unreachable or outOfSync - rpc.On("UnsubscribeAllExcept", mock.Anything).Maybe() + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Maybe() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Run(func(_ mock.Arguments) { require.Equal(t, nodeStateOutOfSync, node.State()) @@ -298,7 +298,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { assert.Equal(t, nodeStateOutOfSync, node.State()) }).Once() // disconnects all on transfer to unreachable or outOfSync - rpc.On("UnsubscribeAllExcept", mock.Anything).Maybe() + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Maybe() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -350,7 +350,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() // disconnects all on transfer to unreachable or outOfSync - rpc.On("UnsubscribeAllExcept", mock.Anything).Once() + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Once() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -421,7 +421,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() expectedError := errors.New("failed to subscribe to finalized heads") rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return(nil, sub, expectedError).Once() - rpc.On("UnsubscribeAllExcept", mock.Anything).Maybe() + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Maybe() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ config: testNodeConfig{ @@ -537,7 +537,7 @@ func setupRPCForAliveLoop(t *testing.T, rpc *MockRPCClient[types.ID, Head]) { aliveSubscription.On("Err").Return(nil).Maybe() aliveSubscription.On("Unsubscribe").Maybe() rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), aliveSubscription, nil).Maybe() - rpc.On("UnsubscribeAllExcept", nil).Maybe() + rpc.On("UnsubscribeAllExcept", nil, nil).Maybe() rpc.On("SetAliveLoopSub", mock.Anything).Maybe() } @@ -548,7 +548,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { node := newTestNode(t, opts) opts.rpc.On("Close").Return(nil).Once() // disconnects all on transfer to unreachable or outOfSync - opts.rpc.On("UnsubscribeAllExcept", nil) + opts.rpc.On("UnsubscribeAllExcept", nil, nil) node.setState(nodeStateAlive) return node } @@ -849,7 +849,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { node := newTestNode(t, opts) opts.rpc.On("Close").Return(nil).Once() // disconnects all on transfer to unreachable - opts.rpc.On("UnsubscribeAllExcept", nil) + opts.rpc.On("UnsubscribeAllExcept", nil, nil) node.setState(nodeStateAlive) return node @@ -1038,7 +1038,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")) - rpc.On("UnsubscribeAllExcept", nil) + rpc.On("UnsubscribeAllExcept", nil, nil) node.declareInvalidChainID() tests.AssertEventually(t, func() bool { @@ -1061,7 +1061,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { // once for chainID and maybe another one for unreachable rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() - rpc.On("UnsubscribeAllExcept", nil) + rpc.On("UnsubscribeAllExcept", nil, nil) node.declareInvalidChainID() tests.AssertLogEventually(t, observedLogs, "Failed to verify chain ID for node") tests.AssertEventually(t, func() bool { @@ -1083,7 +1083,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) - rpc.On("UnsubscribeAllExcept", nil) + rpc.On("UnsubscribeAllExcept", nil, nil) node.declareInvalidChainID() tests.AssertLogCountEventually(t, observedLogs, "Failed to verify RPC node; remote endpoint returned the wrong chain ID", 2) tests.AssertEventually(t, func() bool { @@ -1171,7 +1171,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")) // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept", nil) + rpc.On("UnsubscribeAllExcept", nil, nil) err := node.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertLogEventually(t, observedLogs, "Dial failed: Node is unreachable") @@ -1196,7 +1196,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { assert.Equal(t, nodeStateDialed, node.State()) }).Return(nodeChainID, errors.New("failed to get chain id")) // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept", nil) + rpc.On("UnsubscribeAllExcept", nil, nil) err := node.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertLogEventually(t, observedLogs, "Failed to verify chain ID for node") @@ -1218,7 +1218,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept", nil) + rpc.On("UnsubscribeAllExcept", nil, nil) err := node.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertEventually(t, func() bool { @@ -1244,7 +1244,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }).Return(nodeChainID, nil).Once() rpc.On("IsSyncing", mock.Anything).Return(false, errors.New("failed to check syncing status")) // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept", nil) + rpc.On("UnsubscribeAllExcept", nil, nil) // fail to redial to stay in unreachable state rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")) err := node.Start(tests.Context(t)) @@ -1269,7 +1269,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) rpc.On("IsSyncing", mock.Anything).Return(true, nil) // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept", nil) + rpc.On("UnsubscribeAllExcept", nil, nil) err := node.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertEventually(t, func() bool { @@ -1471,7 +1471,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { opts.config.nodeIsSyncingEnabled = true node := newTestNode(t, opts) opts.rpc.On("Close").Return(nil).Once() - opts.rpc.On("UnsubscribeAllExcept", nil) + opts.rpc.On("UnsubscribeAllExcept", nil, nil) node.setState(nodeStateDialed) return node diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index a075e3bbac5..cc9ebc82bbb 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -2,6 +2,7 @@ package client import ( "context" + "errors" "fmt" "math/big" "net/url" @@ -140,6 +141,9 @@ func (r *RpcClient) SubscribeToHeads(ctx context.Context) (<-chan *evmtypes.Head func (r *RpcClient) SubscribeToFinalizedHeads(_ context.Context) (<-chan *evmtypes.Head, commontypes.Subscription, error) { interval := r.cfg.FinalizedBlockPollInterval() + if interval == 0 { + return nil, nil, errors.New("FinalizedBlockPollInterval is 0") + } timeout := interval poller, channel := commonclient.NewPoller[*evmtypes.Head](interval, r.LatestFinalizedBlock, timeout, r.rpcLog) if err := poller.Start(); err != nil { From 7b52a43aebc2b0f8a6a5e8867d9d205ddf69cc6c Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Fri, 31 May 2024 13:54:09 -0400 Subject: [PATCH 20/58] Fix context --- common/client/node_lifecycle.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index 74fe4c91870..fb69db620c9 100644 --- a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -166,8 +166,8 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { case <-pollCh: promPoolRPCNodePolls.WithLabelValues(n.chainID.String(), n.name).Inc() lggr.Tracew("Pinging RPC", "nodeState", n.State(), "pollFailures", pollFailures) - ctx, cancel := context.WithTimeout(ctx, pollInterval) - err := n.RPC().Ping(ctx) + pollCtx, cancel := context.WithTimeout(ctx, pollInterval) + err := n.RPC().Ping(pollCtx) cancel() if err != nil { // prevent overflow From f6c83ac580efe8ae201a84844c1bb916e721dfeb Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Tue, 4 Jun 2024 10:13:36 -0400 Subject: [PATCH 21/58] lint --- core/chains/evm/client/client_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/core/chains/evm/client/client_test.go b/core/chains/evm/client/client_test.go index 624953056d6..d18e85e2fcb 100644 --- a/core/chains/evm/client/client_test.go +++ b/core/chains/evm/client/client_test.go @@ -4,8 +4,6 @@ import ( "context" "encoding/json" "fmt" - "github.com/smartcontractkit/chainlink/v2/core/internal/cltest" - "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" "math/big" "net/url" "os" @@ -13,6 +11,9 @@ import ( "testing" "time" + "github.com/smartcontractkit/chainlink/v2/core/internal/cltest" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" pkgerrors "github.com/pkg/errors" From 8ccad6ee04ebf8ea44a1458bf9fda9c1f02b5661 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Tue, 4 Jun 2024 10:37:55 -0400 Subject: [PATCH 22/58] Update node_lifecycle_test.go --- common/client/node_lifecycle_test.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go index ae06ac8f0a9..34036c1d47f 100644 --- a/common/client/node_lifecycle_test.go +++ b/common/client/node_lifecycle_test.go @@ -991,10 +991,6 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) - sub := mocks.NewSubscription(t) - sub.On("Err").Return(nil) - sub.On("Unsubscribe").Once() - rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() setupRPCForAliveLoop(t, rpc) From 3469af3031443d5967b40ce7d1176495306bfa7f Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Tue, 4 Jun 2024 11:59:30 -0400 Subject: [PATCH 23/58] Remove unused generics --- common/client/multi_node.go | 4 +--- common/client/node.go | 2 -- core/chains/evm/client/chain_client.go | 2 -- 3 files changed, 1 insertion(+), 7 deletions(-) diff --git a/common/client/multi_node.go b/common/client/multi_node.go index bf3c58815d5..ce926beaec8 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -37,8 +37,6 @@ var ( // It also handles multiple node RPC connections simultaneously. type MultiNode[ CHAIN_ID types.ID, - BLOCK_HASH types.Hashable, - HEAD types.Head[BLOCK_HASH], RPC_CLIENT any, ] interface { Dial(ctx context.Context) error @@ -94,7 +92,7 @@ func NewMultiNode[ sendOnlyNodes []SendOnlyNode[CHAIN_ID, RPC_CLIENT], chainID CHAIN_ID, // configured chain ID (used to verify that passed primaryNodes belong to the same chain) chainFamily string, // name of the chain family - used in the metrics -) MultiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT] { +) MultiNode[CHAIN_ID, RPC_CLIENT] { nodeSelector := newNodeSelector(selectionMode, primaryNodes) // Prometheus' default interval is 15s, set this to under 7.5s to avoid // aliasing (see: https://en.wikipedia.org/wiki/Nyquist_frequency) diff --git a/common/client/node.go b/common/client/node.go index 0832faea00e..b17466ba8c4 100644 --- a/common/client/node.go +++ b/common/client/node.go @@ -172,8 +172,6 @@ func NewNode[ n.stateLatestBlockNumber = -1 n.rpc = rpc n.chainFamily = chainFamily - n.aliveLoopSub = nil - n.finalizedBlockSub = nil return n } diff --git a/core/chains/evm/client/chain_client.go b/core/chains/evm/client/chain_client.go index ac10f6b217b..d4cefbafdc1 100644 --- a/core/chains/evm/client/chain_client.go +++ b/core/chains/evm/client/chain_client.go @@ -25,8 +25,6 @@ var _ Client = (*chainClient)(nil) type chainClient struct { multiNode commonclient.MultiNode[ *big.Int, - common.Hash, - *evmtypes.Head, EvmRpcClient, ] logger logger.SugaredLogger From 3d0209c240c1c0f8932207185adc1b0406384fd3 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Tue, 4 Jun 2024 13:43:17 -0400 Subject: [PATCH 24/58] Add state locking --- common/client/node.go | 8 ++++++++ core/chains/evm/client/rpc_client.go | 14 ++++++-------- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/common/client/node.go b/common/client/node.go index b17466ba8c4..c8297db8a0c 100644 --- a/common/client/node.go +++ b/common/client/node.go @@ -184,18 +184,26 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) String() string { } func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) ConfiguredChainID() (chainID CHAIN_ID) { + n.stateMu.RLock() + defer n.stateMu.RUnlock() return n.chainID } func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) Name() string { + n.stateMu.RLock() + defer n.stateMu.RUnlock() return n.name } func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) RPC() RPC_CLIENT { + n.stateMu.RLock() + defer n.stateMu.RUnlock() return n.rpc } func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) UnsubscribeAll() { + n.stateMu.RLock() + defer n.stateMu.RUnlock() n.rpc.UnsubscribeAllExcept(n.aliveLoopSub, n.finalizedBlockSub) } diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index cc9ebc82bbb..c30aebb6f22 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -162,6 +162,8 @@ func (r *RpcClient) Ping(ctx context.Context) error { } func (r *RpcClient) UnsubscribeAllExcept(subs ...commontypes.Subscription) { + r.stateMu.Lock() + defer r.stateMu.Unlock() for _, sub := range r.subs { var keep bool for _, s := range subs { @@ -237,16 +239,13 @@ func (r *RpcClient) Close() { r.ws.rpc.Close() } }() - - r.stateMu.Lock() - defer r.stateMu.Unlock() r.cancelInflightRequests() } // cancelInflightRequests closes and replaces the chStopInFlight -// WARNING: NOT THREAD-SAFE -// This must be called from within the r.stateMu lock func (r *RpcClient) cancelInflightRequests() { + r.stateMu.Lock() + defer r.stateMu.Unlock() close(r.chStopInFlight) r.chStopInFlight = make(chan struct{}) } @@ -317,9 +316,9 @@ func (r *RpcClient) DisconnectAll() { } // unsubscribeAll unsubscribes all subscriptions -// WARNING: NOT THREAD-SAFE -// This must be called from within the r.stateMu lock func (r *RpcClient) unsubscribeAll() { + r.stateMu.Lock() + defer r.stateMu.Unlock() for _, sub := range r.subs { sub.Unsubscribe() } @@ -328,7 +327,6 @@ func (r *RpcClient) unsubscribeAll() { func (r *RpcClient) SetAliveLoopSub(sub commontypes.Subscription) { r.stateMu.Lock() defer r.stateMu.Unlock() - r.aliveLoopSub = sub } From 8755d87ef2c6d18af6acc9f46160d4c6c16d4f4d Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 6 Jun 2024 12:59:52 -0400 Subject: [PATCH 25/58] Set block difficulty --- common/client/node.go | 5 +--- common/client/node_fsm.go | 6 ++++- common/client/node_lifecycle.go | 20 ++++++++++++++-- tools/bin/go_core_race_tests_updated | 36 ++++++++++++++++++++++++++++ 4 files changed, 60 insertions(+), 7 deletions(-) create mode 100755 tools/bin/go_core_race_tests_updated diff --git a/common/client/node.go b/common/client/node.go index c8297db8a0c..7b1429f9dea 100644 --- a/common/client/node.go +++ b/common/client/node.go @@ -170,6 +170,7 @@ func NewNode[ ) n.lfcLog = logger.Named(lggr, "Lifecycle") n.stateLatestBlockNumber = -1 + n.stateLatestTotalDifficulty = big.NewInt(0) n.rpc = rpc n.chainFamily = chainFamily return n @@ -196,14 +197,10 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) Name() string { } func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) RPC() RPC_CLIENT { - n.stateMu.RLock() - defer n.stateMu.RUnlock() return n.rpc } func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) UnsubscribeAll() { - n.stateMu.RLock() - defer n.stateMu.RUnlock() n.rpc.UnsubscribeAllExcept(n.aliveLoopSub, n.finalizedBlockSub) } diff --git a/common/client/node_fsm.go b/common/client/node_fsm.go index c78dd2bddc1..a13bf722272 100644 --- a/common/client/node_fsm.go +++ b/common/client/node_fsm.go @@ -123,9 +123,13 @@ func (n *node[CHAIN_ID, HEAD, RPC]) State() NodeState { func (n *node[CHAIN_ID, HEAD, RPC]) StateAndLatest() (NodeState, ChainInfo) { n.stateMu.RLock() defer n.stateMu.RUnlock() + var blockDifficulty *big.Int + if n.stateLatestTotalDifficulty != nil { + blockDifficulty = new(big.Int).Set(n.stateLatestTotalDifficulty) + } return n.state, ChainInfo{ BlockNumber: n.stateLatestBlockNumber, - BlockDifficulty: n.stateLatestTotalDifficulty, + BlockDifficulty: blockDifficulty, LatestFinalizedBlock: n.stateLatestFinalizedBlockNumber} } diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index fb69db620c9..c1285ac5357 100644 --- a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -62,7 +62,11 @@ func (n *node[CHAIN_ID, HEAD, RPC]) setLatestReceived(blockNumber int64, totalDi n.stateMu.Lock() defer n.stateMu.Unlock() n.stateLatestBlockNumber = blockNumber - n.stateLatestTotalDifficulty = totalDifficulty + if totalDifficulty != nil { + n.stateLatestTotalDifficulty = nil + return + } + n.stateLatestTotalDifficulty = new(big.Int).Set(totalDifficulty) } const ( @@ -111,7 +115,9 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } // TODO: nit fix. If multinode switches primary node before we set sub as AliveSub, sub will be closed and we'll // falsely transition this node to unreachable state + n.stateMu.Lock() n.aliveLoopSub = sub + n.stateMu.Unlock() defer sub.Unsubscribe() var outOfSyncT *time.Ticker @@ -157,6 +163,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { _, chainInfo := n.StateAndLatest() highestReceivedBlockNumber := chainInfo.BlockNumber + var pollFailures uint32 for { @@ -211,6 +218,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } promPoolRPCNodeNumSeenBlocks.WithLabelValues(n.chainID.String(), n.name).Inc() lggr.Tracew("Got head", "head", bh) + n.stateMu.Lock() if bh.BlockNumber() > highestReceivedBlockNumber { promPoolRPCNodeHighestSeenBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(bh.BlockNumber())) lggr.Tracew("Got higher block number, resetting timer", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.State()) @@ -218,15 +226,19 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } else { lggr.Tracew("Ignoring previously seen block number", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.State()) } + n.stateMu.Unlock() if outOfSyncT != nil { outOfSyncT.Reset(noNewHeadsTimeoutThreshold) } n.setLatestReceived(bh.BlockNumber(), bh.BlockDifficulty()) if !n.chainCfg.FinalityTagEnabled() { latestFinalizedBN := max(bh.BlockNumber()-int64(n.chainCfg.FinalityDepth()), 0) - if latestFinalizedBN > n.stateLatestFinalizedBlockNumber { + _, chainInfo := n.StateAndLatest() + if latestFinalizedBN > chainInfo.LatestFinalizedBlock { promPoolRPCNodeHighestFinalizedBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(latestFinalizedBN)) + n.stateMu.Lock() n.stateLatestFinalizedBlockNumber = latestFinalizedBN + n.stateMu.Unlock() } } case err := <-sub.Err(): @@ -260,10 +272,12 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } latestFinalizedBN := latestFinalized.BlockNumber() + n.stateMu.Lock() if latestFinalizedBN > n.stateLatestFinalizedBlockNumber { promPoolRPCNodeHighestFinalizedBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(latestFinalizedBN)) n.stateLatestFinalizedBlockNumber = latestFinalizedBN } + n.stateMu.Unlock() } } } @@ -449,6 +463,8 @@ func (n *node[CHAIN_ID, HEAD, RPC]) invalidChainIDLoop() { } } + fmt.Println("invalidChainIDLoop") + invalidAt := time.Now() lggr := logger.Named(n.lfcLog, "InvalidChainID") diff --git a/tools/bin/go_core_race_tests_updated b/tools/bin/go_core_race_tests_updated new file mode 100755 index 00000000000..55b9182a8e9 --- /dev/null +++ b/tools/bin/go_core_race_tests_updated @@ -0,0 +1,36 @@ +#!/usr/bin/env bash +set -ex + +OUTPUT_FILE=${OUTPUT_FILE:-"./output.txt"} +USE_TEE="${USE_TEE:-true}" +TIMEOUT="${TIMEOUT:-30s}" +COUNT="${COUNT:-10}" +GO_LDFLAGS=$(bash tools/bin/ldflags) + +use_tee() { + if [ "$USE_TEE" = "true" ]; then + tee "$@" + else + cat > "$@" + fi +} + +# Run the tests with the race detector enabled, silencing the test output +GORACE="log_path=$PWD/race" go test -json -race -ldflags "$GO_LDFLAGS" -shuffle on -timeout "$TIMEOUT" -count "$COUNT" $1 > /dev/null | use_tee "$OUTPUT_FILE" +EXITCODE=${PIPESTATUS[0]} + +# Fail if any race logs are present and display the race logs +if ls race.* &>/dev/null +then + echo "Race(s) detected:" + cat race.* + exit 1 +fi + +# Exit with the appropriate exit code +if test $EXITCODE -gt 1 +then + exit $EXITCODE +else + exit 0 +fi From 80e003043aab5e82e8c0bb268681a2d57e5c2048 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 6 Jun 2024 13:15:41 -0400 Subject: [PATCH 26/58] Update node_lifecycle.go --- common/client/node_lifecycle.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index c1285ac5357..4c6e592f12a 100644 --- a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -62,7 +62,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) setLatestReceived(blockNumber int64, totalDi n.stateMu.Lock() defer n.stateMu.Unlock() n.stateLatestBlockNumber = blockNumber - if totalDifficulty != nil { + if totalDifficulty == nil { n.stateLatestTotalDifficulty = nil return } @@ -218,7 +218,6 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } promPoolRPCNodeNumSeenBlocks.WithLabelValues(n.chainID.String(), n.name).Inc() lggr.Tracew("Got head", "head", bh) - n.stateMu.Lock() if bh.BlockNumber() > highestReceivedBlockNumber { promPoolRPCNodeHighestSeenBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(bh.BlockNumber())) lggr.Tracew("Got higher block number, resetting timer", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.State()) @@ -226,7 +225,6 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } else { lggr.Tracew("Ignoring previously seen block number", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.State()) } - n.stateMu.Unlock() if outOfSyncT != nil { outOfSyncT.Reset(noNewHeadsTimeoutThreshold) } From 9d8b10758852bc458f0076920e8387ac6f3c101e Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Tue, 11 Jun 2024 14:42:45 -0400 Subject: [PATCH 27/58] Fix tests --- core/chains/evm/client/chain_client_test.go | 840 +++++++++++++- core/chains/evm/client/mocks/rpc_client.go | 1086 +++++++++++++++++++ 2 files changed, 1922 insertions(+), 4 deletions(-) create mode 100644 core/chains/evm/client/mocks/rpc_client.go diff --git a/core/chains/evm/client/chain_client_test.go b/core/chains/evm/client/chain_client_test.go index b6ced8c19e7..f5189f2ad87 100644 --- a/core/chains/evm/client/chain_client_test.go +++ b/core/chains/evm/client/chain_client_test.go @@ -1,30 +1,755 @@ package client_test import ( + "context" + "encoding/json" "errors" + "fmt" "math/big" + "net/url" + "os" + "strings" "testing" "time" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rpc" + pkgerrors "github.com/pkg/errors" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" commonclient "github.com/smartcontractkit/chainlink/v2/common/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/testutils" evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" - "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" ) +func mustNewChainClient(t *testing.T, wsURL string, sendonlys ...url.URL) client.Client { + return mustNewChainClientWithChainID(t, wsURL, testutils.FixtureChainID, sendonlys...) +} + +func mustNewChainClientWithChainID(t *testing.T, wsURL string, chainID *big.Int, sendonlys ...url.URL) client.Client { + cfg := client.TestNodePoolConfig{ + NodeSelectionMode: commonclient.NodeSelectionModeRoundRobin, + } + c, err := client.NewChainClientWithTestNode(t, cfg, time.Second*0, cfg.NodeLeaseDuration, wsURL, nil, sendonlys, 42, chainID) + require.NoError(t, err) + return c +} + +func TestEthClient_TransactionReceipt(t *testing.T) { + t.Parallel() + + txHash := "0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238" + + mustReadResult := func(t *testing.T, file string) []byte { + response, err := os.ReadFile(file) + require.NoError(t, err) + var resp struct { + Result json.RawMessage `json:"result"` + } + err = json.Unmarshal(response, &resp) + require.NoError(t, err) + return resp.Result + } + + t.Run("happy path", func(t *testing.T) { + result := mustReadResult(t, "../../../testdata/jsonrpc/getTransactionReceipt.json") + + wsURL := testutils.NewWSServer(t, testutils.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + } + if assert.Equal(t, "eth_getTransactionReceipt", method) && assert.True(t, params.IsArray()) && + assert.Equal(t, txHash, params.Array()[0].String()) { + resp.Result = string(result) + } + return + }).WSURL().String() + + ethClient := mustNewChainClient(t, wsURL) + err := ethClient.Dial(tests.Context(t)) + require.NoError(t, err) + + hash := common.HexToHash(txHash) + receipt, err := ethClient.TransactionReceipt(tests.Context(t), hash) + require.NoError(t, err) + assert.Equal(t, hash, receipt.TxHash) + assert.Equal(t, big.NewInt(11), receipt.BlockNumber) + }) + + t.Run("no tx hash, returns ethereum.NotFound", func(t *testing.T) { + result := mustReadResult(t, "../../../testdata/jsonrpc/getTransactionReceipt_notFound.json") + wsURL := testutils.NewWSServer(t, testutils.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + } + if assert.Equal(t, "eth_getTransactionReceipt", method) && assert.True(t, params.IsArray()) && + assert.Equal(t, txHash, params.Array()[0].String()) { + resp.Result = string(result) + } + return + }).WSURL().String() + + ethClient := mustNewChainClient(t, wsURL) + err := ethClient.Dial(tests.Context(t)) + require.NoError(t, err) + + hash := common.HexToHash(txHash) + _, err = ethClient.TransactionReceipt(tests.Context(t), hash) + require.Equal(t, ethereum.NotFound, pkgerrors.Cause(err)) + }) +} + +func TestEthClient_PendingNonceAt(t *testing.T) { + t.Parallel() + + address := testutils.NewAddress() + + wsURL := testutils.NewWSServer(t, testutils.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + } + if !assert.Equal(t, "eth_getTransactionCount", method) || !assert.True(t, params.IsArray()) { + return + } + arr := params.Array() + if assert.Equal(t, strings.ToLower(address.Hex()), strings.ToLower(arr[0].String())) && + assert.Equal(t, "pending", arr[1].String()) { + resp.Result = `"0x100"` + } + return + }).WSURL().String() + + ethClient := mustNewChainClient(t, wsURL) + err := ethClient.Dial(tests.Context(t)) + require.NoError(t, err) + + result, err := ethClient.PendingNonceAt(tests.Context(t), address) + require.NoError(t, err) + + var expected uint64 = 256 + require.Equal(t, result, expected) +} + +func TestEthClient_BalanceAt(t *testing.T) { + t.Parallel() + + largeBalance, _ := big.NewInt(0).SetString("100000000000000000000", 10) + address := testutils.NewAddress() + + cases := []struct { + name string + balance *big.Int + }{ + {"basic", big.NewInt(256)}, + {"larger than signed 64 bit integer", largeBalance}, + } + + for _, test := range cases { + test := test + t.Run(test.name, func(t *testing.T) { + wsURL := testutils.NewWSServer(t, testutils.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + } + if assert.Equal(t, "eth_getBalance", method) && assert.True(t, params.IsArray()) && + assert.Equal(t, strings.ToLower(address.Hex()), strings.ToLower(params.Array()[0].String())) { + resp.Result = `"` + hexutil.EncodeBig(test.balance) + `"` + } + return + }).WSURL().String() + + ethClient := mustNewChainClient(t, wsURL) + err := ethClient.Dial(tests.Context(t)) + require.NoError(t, err) + + result, err := ethClient.BalanceAt(tests.Context(t), address, nil) + require.NoError(t, err) + assert.Equal(t, test.balance, result) + }) + } +} + +func TestEthClient_LatestBlockHeight(t *testing.T) { + t.Parallel() + + wsURL := testutils.NewWSServer(t, testutils.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + } + if !assert.Equal(t, "eth_blockNumber", method) { + return + } + resp.Result = `"0x100"` + return + }).WSURL().String() + + ethClient := mustNewChainClient(t, wsURL) + err := ethClient.Dial(tests.Context(t)) + require.NoError(t, err) + + result, err := ethClient.LatestBlockHeight(tests.Context(t)) + require.NoError(t, err) + require.Equal(t, big.NewInt(256), result) +} + +func TestEthClient_GetERC20Balance(t *testing.T) { + t.Parallel() + ctx := tests.Context(t) + + expectedBig, _ := big.NewInt(0).SetString("100000000000000000000000000000000000000", 10) + + cases := []struct { + name string + balance *big.Int + }{ + {"small", big.NewInt(256)}, + {"big", expectedBig}, + } + + for _, test := range cases { + test := test + t.Run(test.name, func(t *testing.T) { + contractAddress := testutils.NewAddress() + userAddress := testutils.NewAddress() + functionSelector := evmtypes.HexToFunctionSelector(client.BALANCE_OF_ADDRESS_FUNCTION_SELECTOR) // balanceOf(address) + txData := utils.ConcatBytes(functionSelector.Bytes(), common.LeftPadBytes(userAddress.Bytes(), utils.EVMWordByteLen)) + + wsURL := testutils.NewWSServer(t, testutils.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + } + if !assert.Equal(t, "eth_call", method) || !assert.True(t, params.IsArray()) { + return + } + arr := params.Array() + callArgs := arr[0] + if assert.True(t, callArgs.IsObject()) && + assert.Equal(t, strings.ToLower(contractAddress.Hex()), callArgs.Get("to").String()) && + assert.Equal(t, hexutil.Encode(txData), callArgs.Get("data").String()) && + assert.Equal(t, "latest", arr[1].String()) { + resp.Result = `"` + hexutil.EncodeBig(test.balance) + `"` + } + return + }).WSURL().String() + + ethClient := mustNewChainClient(t, wsURL) + err := ethClient.Dial(tests.Context(t)) + require.NoError(t, err) + + result, err := ethClient.TokenBalance(ctx, userAddress, contractAddress) + require.NoError(t, err) + assert.Equal(t, test.balance, result) + }) + } +} + +func TestReceipt_UnmarshalEmptyBlockHash(t *testing.T) { + t.Parallel() + + input := `{ + "transactionHash": "0x444172bef57ad978655171a8af2cfd89baa02a97fcb773067aef7794d6913374", + "gasUsed": "0x1", + "cumulativeGasUsed": "0x1", + "logs": [], + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x8bf99b", + "blockHash": null + }` + + var receipt types.Receipt + err := json.Unmarshal([]byte(input), &receipt) + require.NoError(t, err) +} + +func TestEthClient_HeaderByNumber(t *testing.T) { + t.Parallel() + + expectedBlockNum := big.NewInt(1) + expectedBlockHash := "0x41800b5c3f1717687d85fc9018faac0a6e90b39deaa0b99e7fe4fe796ddeb26a" + + cases := []struct { + name string + expectedRequestBlock *big.Int + expectedResponseBlock int64 + error error + rpcResp string + }{ + {"happy geth", expectedBlockNum, expectedBlockNum.Int64(), nil, + `{"difficulty":"0xf3a00","extraData":"0xd883010503846765746887676f312e372e318664617277696e","gasLimit":"0xffc001","gasUsed":"0x0","hash":"0x41800b5c3f1717687d85fc9018faac0a6e90b39deaa0b99e7fe4fe796ddeb26a","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0xd1aeb42885a43b72b518182ef893125814811048","mixHash":"0x0f98b15f1a4901a7e9204f3c500a7bd527b3fb2c3340e12176a44b83e414a69e","nonce":"0x0ece08ea8c49dfd9","number":"0x1","parentHash":"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x218","stateRoot":"0xc7b01007a10da045eacb90385887dd0c38fcb5db7393006bdde24b93873c334b","timestamp":"0x58318da2","totalDifficulty":"0x1f3a00","transactions":[],"transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","uncles":[]}`}, + {"happy parity", expectedBlockNum, expectedBlockNum.Int64(), nil, + `{"author":"0xd1aeb42885a43b72b518182ef893125814811048","difficulty":"0xf3a00","extraData":"0xd883010503846765746887676f312e372e318664617277696e","gasLimit":"0xffc001","gasUsed":"0x0","hash":"0x41800b5c3f1717687d85fc9018faac0a6e90b39deaa0b99e7fe4fe796ddeb26a","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0xd1aeb42885a43b72b518182ef893125814811048","mixHash":"0x0f98b15f1a4901a7e9204f3c500a7bd527b3fb2c3340e12176a44b83e414a69e","nonce":"0x0ece08ea8c49dfd9","number":"0x1","parentHash":"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sealFields":["0xa00f98b15f1a4901a7e9204f3c500a7bd527b3fb2c3340e12176a44b83e414a69e","0x880ece08ea8c49dfd9"],"sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x218","stateRoot":"0xc7b01007a10da045eacb90385887dd0c38fcb5db7393006bdde24b93873c334b","timestamp":"0x58318da2","totalDifficulty":"0x1f3a00","transactions":[],"transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","uncles":[]}`}, + {"missing header", expectedBlockNum, 0, fmt.Errorf("no live nodes available for chain %s", testutils.FixtureChainID.String()), + `null`}, + } + + for _, test := range cases { + test := test + t.Run(test.name, func(t *testing.T) { + wsURL := testutils.NewWSServer(t, testutils.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + } + if !assert.Equal(t, "eth_getBlockByNumber", method) || !assert.True(t, params.IsArray()) { + return + } + arr := params.Array() + blockNumStr := arr[0].String() + var blockNum hexutil.Big + err := blockNum.UnmarshalText([]byte(blockNumStr)) + if assert.NoError(t, err) && assert.Equal(t, test.expectedRequestBlock, blockNum.ToInt()) && + assert.Equal(t, false, arr[1].Bool()) { + resp.Result = test.rpcResp + } + return + }).WSURL().String() + + ethClient := mustNewChainClient(t, wsURL) + err := ethClient.Dial(tests.Context(t)) + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(tests.Context(t), 5*time.Second) + result, err := ethClient.HeadByNumber(ctx, expectedBlockNum) + if test.error != nil { + require.Error(t, err, test.error) + } else { + require.NoError(t, err) + require.Equal(t, expectedBlockHash, result.Hash.Hex()) + require.Equal(t, test.expectedResponseBlock, result.Number) + require.Zero(t, testutils.FixtureChainID.Cmp(result.EVMChainID.ToInt())) + } + cancel() + }) + } +} + +func TestEthClient_SendTransaction_NoSecondaryURL(t *testing.T) { + t.Parallel() + + tx := testutils.NewLegacyTransaction(uint64(42), testutils.NewAddress(), big.NewInt(142), 242, big.NewInt(342), []byte{1, 2, 3}) + + wsURL := testutils.NewWSServer(t, testutils.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + } + if !assert.Equal(t, "eth_sendRawTransaction", method) { + return + } + resp.Result = `"` + tx.Hash().Hex() + `"` + return + }).WSURL().String() + + ethClient := mustNewChainClient(t, wsURL) + err := ethClient.Dial(tests.Context(t)) + require.NoError(t, err) + + err = ethClient.SendTransaction(tests.Context(t), tx) + assert.NoError(t, err) +} + +/* TODO: Implement tx sender +func TestEthClient_SendTransaction_WithSecondaryURLs(t *testing.T) { + t.Parallel() + + tx := testutils.NewLegacyTransaction(uint64(42), testutils.NewAddress(), big.NewInt(142), 242, big.NewInt(342), []byte{1, 2, 3}) + + wsURL := testutils.NewWSServer(t, testutils.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "eth_sendRawTransaction": + resp.Result = `"` + tx.Hash().Hex() + `"` + } + return + }).WSURL().String() + + rpcSrv := rpc.NewServer() + t.Cleanup(rpcSrv.Stop) + service := sendTxService{chainID: testutils.FixtureChainID} + err := rpcSrv.RegisterName("eth", &service) + require.NoError(t, err) + ts := httptest.NewServer(rpcSrv) + t.Cleanup(ts.Close) + + sendonlyURL, err := url.Parse(ts.URL) + require.NoError(t, err) + + ethClient := mustNewChainClient(t, wsURL, *sendonlyURL, *sendonlyURL) + err = ethClient.Dial(tests.Context(t)) + require.NoError(t, err) + + err = ethClient.SendTransaction(tests.Context(t), tx) + require.NoError(t, err) + + // Unfortunately it's a bit tricky to test this, since there is no + // synchronization. We have to rely on timing instead. + require.Eventually(t, func() bool { return service.sentCount.Load() == int32(2) }, tests.WaitTimeout(t), 500*time.Millisecond) +} +*/ + +func TestEthClient_SendTransactionReturnCode(t *testing.T) { + t.Parallel() + + fromAddress := testutils.NewAddress() + tx := testutils.NewLegacyTransaction(uint64(42), testutils.NewAddress(), big.NewInt(142), 242, big.NewInt(342), []byte{1, 2, 3}) + + t.Run("returns Fatal error type when error message is fatal", func(t *testing.T) { + wsURL := testutils.NewWSServer(t, testutils.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "eth_sendRawTransaction": + resp.Result = `"` + tx.Hash().Hex() + `"` + resp.Error.Message = "invalid sender" + } + return + }).WSURL().String() + + ethClient := mustNewChainClient(t, wsURL) + err := ethClient.Dial(tests.Context(t)) + require.NoError(t, err) + + errType, err := ethClient.SendTransactionReturnCode(tests.Context(t), tx, fromAddress) + assert.Error(t, err) + assert.Equal(t, errType, commonclient.Fatal) + }) + + t.Run("returns TransactionAlreadyKnown error type when error message is nonce too low", func(t *testing.T) { + wsURL := testutils.NewWSServer(t, testutils.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "eth_sendRawTransaction": + resp.Result = `"` + tx.Hash().Hex() + `"` + resp.Error.Message = "nonce too low" + } + return + }).WSURL().String() + + ethClient := mustNewChainClient(t, wsURL) + err := ethClient.Dial(tests.Context(t)) + require.NoError(t, err) + + errType, err := ethClient.SendTransactionReturnCode(tests.Context(t), tx, fromAddress) + assert.Error(t, err) + assert.Equal(t, errType, commonclient.TransactionAlreadyKnown) + }) + + t.Run("returns Successful error type when there is no error message", func(t *testing.T) { + wsURL := testutils.NewWSServer(t, testutils.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "eth_sendRawTransaction": + resp.Result = `"` + tx.Hash().Hex() + `"` + } + return + }).WSURL().String() + + ethClient := mustNewChainClient(t, wsURL) + err := ethClient.Dial(tests.Context(t)) + require.NoError(t, err) + + errType, err := ethClient.SendTransactionReturnCode(tests.Context(t), tx, fromAddress) + assert.NoError(t, err) + assert.Equal(t, errType, commonclient.Successful) + }) + + t.Run("returns Underpriced error type when transaction is terminally underpriced", func(t *testing.T) { + wsURL := testutils.NewWSServer(t, testutils.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "eth_sendRawTransaction": + resp.Result = `"` + tx.Hash().Hex() + `"` + resp.Error.Message = "transaction underpriced" + } + return + }).WSURL().String() + + ethClient := mustNewChainClient(t, wsURL) + err := ethClient.Dial(tests.Context(t)) + require.NoError(t, err) + + errType, err := ethClient.SendTransactionReturnCode(tests.Context(t), tx, fromAddress) + assert.Error(t, err) + assert.Equal(t, errType, commonclient.Underpriced) + }) + + t.Run("returns Unsupported error type when error message is queue full", func(t *testing.T) { + wsURL := testutils.NewWSServer(t, testutils.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "eth_sendRawTransaction": + resp.Result = `"` + tx.Hash().Hex() + `"` + resp.Error.Message = "queue full" + } + return + }).WSURL().String() + + ethClient := mustNewChainClient(t, wsURL) + err := ethClient.Dial(tests.Context(t)) + require.NoError(t, err) + + errType, err := ethClient.SendTransactionReturnCode(tests.Context(t), tx, fromAddress) + assert.Error(t, err) + assert.Equal(t, errType, commonclient.Unsupported) + }) + + t.Run("returns Retryable error type when there is a transaction gap", func(t *testing.T) { + wsURL := testutils.NewWSServer(t, testutils.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "eth_sendRawTransaction": + resp.Result = `"` + tx.Hash().Hex() + `"` + resp.Error.Message = "NonceGap" + } + return + }).WSURL().String() + + ethClient := mustNewChainClient(t, wsURL) + err := ethClient.Dial(tests.Context(t)) + require.NoError(t, err) + + errType, err := ethClient.SendTransactionReturnCode(tests.Context(t), tx, fromAddress) + assert.Error(t, err) + assert.Equal(t, errType, commonclient.Retryable) + }) + + t.Run("returns InsufficientFunds error type when the sender address doesn't have enough funds", func(t *testing.T) { + wsURL := testutils.NewWSServer(t, testutils.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "eth_sendRawTransaction": + resp.Result = `"` + tx.Hash().Hex() + `"` + resp.Error.Message = "insufficient funds for transfer" + } + return + }).WSURL().String() + + ethClient := mustNewChainClient(t, wsURL) + err := ethClient.Dial(tests.Context(t)) + require.NoError(t, err) + + errType, err := ethClient.SendTransactionReturnCode(tests.Context(t), tx, fromAddress) + assert.Error(t, err) + assert.Equal(t, errType, commonclient.InsufficientFunds) + }) + + t.Run("returns ExceedsFeeCap error type when gas price is too high for the node", func(t *testing.T) { + wsURL := testutils.NewWSServer(t, testutils.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "eth_sendRawTransaction": + resp.Result = `"` + tx.Hash().Hex() + `"` + resp.Error.Message = "Transaction fee cap exceeded" + } + return + }).WSURL().String() + + ethClient := mustNewChainClient(t, wsURL) + err := ethClient.Dial(tests.Context(t)) + require.NoError(t, err) + + errType, err := ethClient.SendTransactionReturnCode(tests.Context(t), tx, fromAddress) + assert.Error(t, err) + assert.Equal(t, errType, commonclient.ExceedsMaxFee) + }) + + t.Run("returns Unknown error type when the error can't be categorized", func(t *testing.T) { + wsURL := testutils.NewWSServer(t, testutils.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "eth_sendRawTransaction": + resp.Result = `"` + tx.Hash().Hex() + `"` + resp.Error.Message = "some random error" + } + return + }).WSURL().String() + + ethClient := mustNewChainClient(t, wsURL) + err := ethClient.Dial(tests.Context(t)) + require.NoError(t, err) + + errType, err := ethClient.SendTransactionReturnCode(tests.Context(t), tx, fromAddress) + assert.Error(t, err) + assert.Equal(t, errType, commonclient.Unknown) + }) +} + +/* +type sendTxService struct { + chainID *big.Int + sentCount atomic.Int32 +} + +func (x *sendTxService) ChainId(ctx context.Context) (*hexutil.Big, error) { + return (*hexutil.Big)(x.chainID), nil +} + +func (x *sendTxService) SendRawTransaction(ctx context.Context, signRawTx hexutil.Bytes) error { + x.sentCount.Add(1) + return nil +} + +func TestEthClient_SubscribeNewHead(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithTimeout(tests.Context(t), tests.WaitTimeout(t)) + defer cancel() + + chainId := big.NewInt(123456) + wsURL := testutils.NewWSServer(t, chainId, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + if method == "eth_unsubscribe" { + resp.Result = "true" + return + } + assert.Equal(t, "eth_subscribe", method) + if assert.True(t, params.IsArray()) && assert.Equal(t, "newHeads", params.Array()[0].String()) { + resp.Result = `"0x00"` + resp.Notify = headResult + } + return + }).WSURL().String() + + ethClient := mustNewChainClientWithChainID(t, wsURL, chainId) + err := ethClient.Dial(tests.Context(t)) + require.NoError(t, err) + + headCh, sub, err := ethClient.SubscribeNewHead(ctx) + require.NoError(t, err) + + select { + case err := <-sub.Err(): + t.Fatal(err) + case <-ctx.Done(): + t.Fatal(ctx.Err()) + case h := <-headCh: + require.NotNil(t, h.EVMChainID) + require.Zero(t, chainId.Cmp(h.EVMChainID.ToInt())) + } + sub.Unsubscribe() +} +*/ + func newMockRpc(t *testing.T) *client.MockEvmRpcClient { mockRpc := client.NewMockEvmRpcClient(t) - mockRpc.On("Dial", mock.Anything).Return(nil).Maybe() - mockRpc.On("Close").Return(nil).Maybe() - mockRpc.On("ChainID", mock.Anything).Return(testutils.FixtureChainID, nil).Maybe() + mockRpc.On("Dial", mock.Anything).Return(nil).Once() + mockRpc.On("Close").Return(nil).Once() + mockRpc.On("ChainID", mock.Anything).Return(testutils.FixtureChainID, nil).Once() // node does not always manage to fully setup aliveLoop, so we have to make calls optional to avoid flakes mockRpc.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return(client.NewMockSubscription(), nil).Maybe() mockRpc.On("SetAliveLoopSub", mock.Anything).Return().Maybe() @@ -74,3 +799,110 @@ func TestChainClient_BatchCallContext(t *testing.T) { } }) } + +func TestEthClient_ErroringClient(t *testing.T) { + t.Parallel() + ctx := tests.Context(t) + + // Empty node means there are no active nodes to select from, causing client to always return error. + erroringClient := client.NewChainClientWithEmptyNode(t, commonclient.NodeSelectionModeRoundRobin, time.Second*0, time.Second*0, testutils.FixtureChainID) + + _, err := erroringClient.BalanceAt(ctx, common.Address{}, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + err = erroringClient.BatchCallContext(ctx, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + err = erroringClient.BatchCallContextAll(ctx, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.BlockByHash(ctx, common.Hash{}) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.BlockByNumber(ctx, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + err = erroringClient.CallContext(ctx, nil, "") + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.CallContract(ctx, ethereum.CallMsg{}, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + // TODO-1663: test actual ChainID() call once client.go is deprecated. + id, err := erroringClient.ChainID() + var expected *big.Int + require.Equal(t, id, expected) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.CodeAt(ctx, common.Address{}, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + id = erroringClient.ConfiguredChainID() + require.Equal(t, id, testutils.FixtureChainID) + + err = erroringClient.Dial(ctx) + require.ErrorContains(t, err, "no available nodes for chain") + + _, err = erroringClient.EstimateGas(ctx, ethereum.CallMsg{}) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.FilterLogs(ctx, ethereum.FilterQuery{}) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.HeaderByHash(ctx, common.Hash{}) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.HeaderByNumber(ctx, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.HeadByHash(ctx, common.Hash{}) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.HeadByNumber(ctx, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.LINKBalance(ctx, common.Address{}, common.Address{}) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.LatestBlockHeight(ctx) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.PendingCodeAt(ctx, common.Address{}) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.PendingNonceAt(ctx, common.Address{}) + require.Equal(t, err, commonclient.ErroringNodeError) + + err = erroringClient.SendTransaction(ctx, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + code, err := erroringClient.SendTransactionReturnCode(ctx, nil, common.Address{}) + require.Equal(t, code, commonclient.Unknown) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.SequenceAt(ctx, common.Address{}, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.SubscribeFilterLogs(ctx, ethereum.FilterQuery{}, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, _, err = erroringClient.SubscribeNewHead(ctx) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.SuggestGasPrice(ctx) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.SuggestGasTipCap(ctx) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.TokenBalance(ctx, common.Address{}, common.Address{}) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.TransactionByHash(ctx, common.Hash{}) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.TransactionReceipt(ctx, common.Hash{}) + require.Equal(t, err, commonclient.ErroringNodeError) +} + +const headResult = client.HeadResult diff --git a/core/chains/evm/client/mocks/rpc_client.go b/core/chains/evm/client/mocks/rpc_client.go new file mode 100644 index 00000000000..980a215ccfe --- /dev/null +++ b/core/chains/evm/client/mocks/rpc_client.go @@ -0,0 +1,1086 @@ +// Code generated by mockery v2.42.2. DO NOT EDIT. + +package mocks + +import ( + big "math/big" + + assets "github.com/smartcontractkit/chainlink-common/pkg/assets" + + common "github.com/ethereum/go-ethereum/common" + + commontypes "github.com/smartcontractkit/chainlink/v2/common/types" + + context "context" + + coretypes "github.com/ethereum/go-ethereum/core/types" + + ethereum "github.com/ethereum/go-ethereum" + + evmassets "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" + + mock "github.com/stretchr/testify/mock" + + rpc "github.com/ethereum/go-ethereum/rpc" + + types "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" +) + +// RPCClient is an autogenerated mock type for the RPCClient type +type RPCClient struct { + mock.Mock +} + +// BalanceAt provides a mock function with given fields: ctx, accountAddress, blockNumber +func (_m *RPCClient) BalanceAt(ctx context.Context, accountAddress common.Address, blockNumber *big.Int) (*big.Int, error) { + ret := _m.Called(ctx, accountAddress, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for BalanceAt") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) (*big.Int, error)); ok { + return rf(ctx, accountAddress, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) *big.Int); ok { + r0 = rf(ctx, accountAddress, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { + r1 = rf(ctx, accountAddress, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BatchCallContext provides a mock function with given fields: ctx, b +func (_m *RPCClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { + ret := _m.Called(ctx, b) + + if len(ret) == 0 { + panic("no return value specified for BatchCallContext") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []rpc.BatchElem) error); ok { + r0 = rf(ctx, b) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BlockByHash provides a mock function with given fields: ctx, hash +func (_m *RPCClient) BlockByHash(ctx context.Context, hash common.Hash) (*types.Head, error) { + ret := _m.Called(ctx, hash) + + if len(ret) == 0 { + panic("no return value specified for BlockByHash") + } + + var r0 *types.Head + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Head, error)); ok { + return rf(ctx, hash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Head); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Head) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BlockByHashGeth provides a mock function with given fields: ctx, hash +func (_m *RPCClient) BlockByHashGeth(ctx context.Context, hash common.Hash) (*coretypes.Block, error) { + ret := _m.Called(ctx, hash) + + if len(ret) == 0 { + panic("no return value specified for BlockByHashGeth") + } + + var r0 *coretypes.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*coretypes.Block, error)); ok { + return rf(ctx, hash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *coretypes.Block); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BlockByNumber provides a mock function with given fields: ctx, number +func (_m *RPCClient) BlockByNumber(ctx context.Context, number *big.Int) (*types.Head, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for BlockByNumber") + } + + var r0 *types.Head + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Head, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Head); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Head) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BlockByNumberGeth provides a mock function with given fields: ctx, number +func (_m *RPCClient) BlockByNumberGeth(ctx context.Context, number *big.Int) (*coretypes.Block, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for BlockByNumberGeth") + } + + var r0 *coretypes.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*coretypes.Block, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *coretypes.Block); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CallContext provides a mock function with given fields: ctx, result, method, args +func (_m *RPCClient) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { + var _ca []interface{} + _ca = append(_ca, ctx, result, method) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for CallContext") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, interface{}, string, ...interface{}) error); ok { + r0 = rf(ctx, result, method, args...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CallContract provides a mock function with given fields: ctx, msg, blockNumber +func (_m *RPCClient) CallContract(ctx context.Context, msg interface{}, blockNumber *big.Int) ([]byte, error) { + ret := _m.Called(ctx, msg, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for CallContract") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, interface{}, *big.Int) ([]byte, error)); ok { + return rf(ctx, msg, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, interface{}, *big.Int) []byte); ok { + r0 = rf(ctx, msg, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, interface{}, *big.Int) error); ok { + r1 = rf(ctx, msg, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ChainID provides a mock function with given fields: ctx +func (_m *RPCClient) ChainID(ctx context.Context) (*big.Int, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClientVersion provides a mock function with given fields: _a0 +func (_m *RPCClient) ClientVersion(_a0 context.Context) (string, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for ClientVersion") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (string, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) string); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Close provides a mock function with given fields: +func (_m *RPCClient) Close() { + _m.Called() +} + +// CodeAt provides a mock function with given fields: ctx, account, blockNumber +func (_m *RPCClient) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) { + ret := _m.Called(ctx, account, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for CodeAt") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) ([]byte, error)); ok { + return rf(ctx, account, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) []byte); ok { + r0 = rf(ctx, account, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { + r1 = rf(ctx, account, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Dial provides a mock function with given fields: ctx +func (_m *RPCClient) Dial(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Dial") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DialHTTP provides a mock function with given fields: +func (_m *RPCClient) DialHTTP() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for DialHTTP") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DisconnectAll provides a mock function with given fields: +func (_m *RPCClient) DisconnectAll() { + _m.Called() +} + +// EstimateGas provides a mock function with given fields: ctx, call +func (_m *RPCClient) EstimateGas(ctx context.Context, call interface{}) (uint64, error) { + ret := _m.Called(ctx, call) + + if len(ret) == 0 { + panic("no return value specified for EstimateGas") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, interface{}) (uint64, error)); ok { + return rf(ctx, call) + } + if rf, ok := ret.Get(0).(func(context.Context, interface{}) uint64); ok { + r0 = rf(ctx, call) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, interface{}) error); ok { + r1 = rf(ctx, call) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterEvents provides a mock function with given fields: ctx, query +func (_m *RPCClient) FilterEvents(ctx context.Context, query ethereum.FilterQuery) ([]coretypes.Log, error) { + ret := _m.Called(ctx, query) + + if len(ret) == 0 { + panic("no return value specified for FilterEvents") + } + + var r0 []coretypes.Log + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) ([]coretypes.Log, error)); ok { + return rf(ctx, query) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) []coretypes.Log); ok { + r0 = rf(ctx, query) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]coretypes.Log) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery) error); ok { + r1 = rf(ctx, query) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HeaderByHash provides a mock function with given fields: ctx, h +func (_m *RPCClient) HeaderByHash(ctx context.Context, h common.Hash) (*coretypes.Header, error) { + ret := _m.Called(ctx, h) + + if len(ret) == 0 { + panic("no return value specified for HeaderByHash") + } + + var r0 *coretypes.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*coretypes.Header, error)); ok { + return rf(ctx, h) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *coretypes.Header); ok { + r0 = rf(ctx, h) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, h) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HeaderByNumber provides a mock function with given fields: ctx, n +func (_m *RPCClient) HeaderByNumber(ctx context.Context, n *big.Int) (*coretypes.Header, error) { + ret := _m.Called(ctx, n) + + if len(ret) == 0 { + panic("no return value specified for HeaderByNumber") + } + + var r0 *coretypes.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*coretypes.Header, error)); ok { + return rf(ctx, n) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *coretypes.Header); ok { + r0 = rf(ctx, n) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, n) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IsSyncing provides a mock function with given fields: ctx +func (_m *RPCClient) IsSyncing(ctx context.Context) (bool, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for IsSyncing") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (bool, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) bool); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LINKBalance provides a mock function with given fields: ctx, accountAddress, linkAddress +func (_m *RPCClient) LINKBalance(ctx context.Context, accountAddress common.Address, linkAddress common.Address) (*assets.Link, error) { + ret := _m.Called(ctx, accountAddress, linkAddress) + + if len(ret) == 0 { + panic("no return value specified for LINKBalance") + } + + var r0 *assets.Link + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, common.Address) (*assets.Link, error)); ok { + return rf(ctx, accountAddress, linkAddress) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, common.Address) *assets.Link); ok { + r0 = rf(ctx, accountAddress, linkAddress) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*assets.Link) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, common.Address) error); ok { + r1 = rf(ctx, accountAddress, linkAddress) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LatestBlockHeight provides a mock function with given fields: _a0 +func (_m *RPCClient) LatestBlockHeight(_a0 context.Context) (*big.Int, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for LatestBlockHeight") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LatestFinalizedBlock provides a mock function with given fields: ctx +func (_m *RPCClient) LatestFinalizedBlock(ctx context.Context) (*types.Head, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for LatestFinalizedBlock") + } + + var r0 *types.Head + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*types.Head, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *types.Head); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Head) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PendingCallContract provides a mock function with given fields: ctx, msg +func (_m *RPCClient) PendingCallContract(ctx context.Context, msg interface{}) ([]byte, error) { + ret := _m.Called(ctx, msg) + + if len(ret) == 0 { + panic("no return value specified for PendingCallContract") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, interface{}) ([]byte, error)); ok { + return rf(ctx, msg) + } + if rf, ok := ret.Get(0).(func(context.Context, interface{}) []byte); ok { + r0 = rf(ctx, msg) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, interface{}) error); ok { + r1 = rf(ctx, msg) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PendingCodeAt provides a mock function with given fields: ctx, account +func (_m *RPCClient) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { + ret := _m.Called(ctx, account) + + if len(ret) == 0 { + panic("no return value specified for PendingCodeAt") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address) ([]byte, error)); ok { + return rf(ctx, account) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address) []byte); ok { + r0 = rf(ctx, account) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { + r1 = rf(ctx, account) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PendingSequenceAt provides a mock function with given fields: ctx, addr +func (_m *RPCClient) PendingSequenceAt(ctx context.Context, addr common.Address) (types.Nonce, error) { + ret := _m.Called(ctx, addr) + + if len(ret) == 0 { + panic("no return value specified for PendingSequenceAt") + } + + var r0 types.Nonce + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address) (types.Nonce, error)); ok { + return rf(ctx, addr) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address) types.Nonce); ok { + r0 = rf(ctx, addr) + } else { + r0 = ret.Get(0).(types.Nonce) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { + r1 = rf(ctx, addr) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SendEmptyTransaction provides a mock function with given fields: ctx, newTxAttempt, seq, gasLimit, fee, fromAddress +func (_m *RPCClient) SendEmptyTransaction(ctx context.Context, newTxAttempt func(types.Nonce, uint32, *evmassets.Wei, common.Address) (interface{}, error), seq types.Nonce, gasLimit uint32, fee *evmassets.Wei, fromAddress common.Address) (string, error) { + ret := _m.Called(ctx, newTxAttempt, seq, gasLimit, fee, fromAddress) + + if len(ret) == 0 { + panic("no return value specified for SendEmptyTransaction") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, func(types.Nonce, uint32, *evmassets.Wei, common.Address) (interface{}, error), types.Nonce, uint32, *evmassets.Wei, common.Address) (string, error)); ok { + return rf(ctx, newTxAttempt, seq, gasLimit, fee, fromAddress) + } + if rf, ok := ret.Get(0).(func(context.Context, func(types.Nonce, uint32, *evmassets.Wei, common.Address) (interface{}, error), types.Nonce, uint32, *evmassets.Wei, common.Address) string); ok { + r0 = rf(ctx, newTxAttempt, seq, gasLimit, fee, fromAddress) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, func(types.Nonce, uint32, *evmassets.Wei, common.Address) (interface{}, error), types.Nonce, uint32, *evmassets.Wei, common.Address) error); ok { + r1 = rf(ctx, newTxAttempt, seq, gasLimit, fee, fromAddress) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SendTransaction provides a mock function with given fields: ctx, tx +func (_m *RPCClient) SendTransaction(ctx context.Context, tx *coretypes.Transaction) error { + ret := _m.Called(ctx, tx) + + if len(ret) == 0 { + panic("no return value specified for SendTransaction") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *coretypes.Transaction) error); ok { + r0 = rf(ctx, tx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SequenceAt provides a mock function with given fields: ctx, accountAddress, blockNumber +func (_m *RPCClient) SequenceAt(ctx context.Context, accountAddress common.Address, blockNumber *big.Int) (types.Nonce, error) { + ret := _m.Called(ctx, accountAddress, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for SequenceAt") + } + + var r0 types.Nonce + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) (types.Nonce, error)); ok { + return rf(ctx, accountAddress, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) types.Nonce); ok { + r0 = rf(ctx, accountAddress, blockNumber) + } else { + r0 = ret.Get(0).(types.Nonce) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { + r1 = rf(ctx, accountAddress, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SetAliveLoopSub provides a mock function with given fields: _a0 +func (_m *RPCClient) SetAliveLoopSub(_a0 commontypes.Subscription) { + _m.Called(_a0) +} + +// SimulateTransaction provides a mock function with given fields: ctx, tx +func (_m *RPCClient) SimulateTransaction(ctx context.Context, tx *coretypes.Transaction) error { + ret := _m.Called(ctx, tx) + + if len(ret) == 0 { + panic("no return value specified for SimulateTransaction") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *coretypes.Transaction) error); ok { + r0 = rf(ctx, tx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Subscribe provides a mock function with given fields: ctx, channel, args +func (_m *RPCClient) Subscribe(ctx context.Context, channel chan<- *types.Head, args ...interface{}) (commontypes.Subscription, error) { + var _ca []interface{} + _ca = append(_ca, ctx, channel) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Subscribe") + } + + var r0 commontypes.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Head, ...interface{}) (commontypes.Subscription, error)); ok { + return rf(ctx, channel, args...) + } + if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Head, ...interface{}) commontypes.Subscription); ok { + r0 = rf(ctx, channel, args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(commontypes.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, chan<- *types.Head, ...interface{}) error); ok { + r1 = rf(ctx, channel, args...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SubscribeFilterLogs provides a mock function with given fields: ctx, q, ch +func (_m *RPCClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- coretypes.Log) (ethereum.Subscription, error) { + ret := _m.Called(ctx, q, ch) + + if len(ret) == 0 { + panic("no return value specified for SubscribeFilterLogs") + } + + var r0 ethereum.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- coretypes.Log) (ethereum.Subscription, error)); ok { + return rf(ctx, q, ch) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- coretypes.Log) ethereum.Subscription); ok { + r0 = rf(ctx, q, ch) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(ethereum.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery, chan<- coretypes.Log) error); ok { + r1 = rf(ctx, q, ch) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SubscribersCount provides a mock function with given fields: +func (_m *RPCClient) SubscribersCount() int32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for SubscribersCount") + } + + var r0 int32 + if rf, ok := ret.Get(0).(func() int32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int32) + } + + return r0 +} + +// SuggestGasPrice provides a mock function with given fields: ctx +func (_m *RPCClient) SuggestGasPrice(ctx context.Context) (*big.Int, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SuggestGasPrice") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SuggestGasTipCap provides a mock function with given fields: ctx +func (_m *RPCClient) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SuggestGasTipCap") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TokenBalance provides a mock function with given fields: ctx, accountAddress, tokenAddress +func (_m *RPCClient) TokenBalance(ctx context.Context, accountAddress common.Address, tokenAddress common.Address) (*big.Int, error) { + ret := _m.Called(ctx, accountAddress, tokenAddress) + + if len(ret) == 0 { + panic("no return value specified for TokenBalance") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, common.Address) (*big.Int, error)); ok { + return rf(ctx, accountAddress, tokenAddress) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, common.Address) *big.Int); ok { + r0 = rf(ctx, accountAddress, tokenAddress) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, common.Address) error); ok { + r1 = rf(ctx, accountAddress, tokenAddress) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransactionByHash provides a mock function with given fields: ctx, txHash +func (_m *RPCClient) TransactionByHash(ctx context.Context, txHash common.Hash) (*coretypes.Transaction, error) { + ret := _m.Called(ctx, txHash) + + if len(ret) == 0 { + panic("no return value specified for TransactionByHash") + } + + var r0 *coretypes.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*coretypes.Transaction, error)); ok { + return rf(ctx, txHash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *coretypes.Transaction); ok { + r0 = rf(ctx, txHash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, txHash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransactionReceipt provides a mock function with given fields: ctx, txHash +func (_m *RPCClient) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) { + ret := _m.Called(ctx, txHash) + + if len(ret) == 0 { + panic("no return value specified for TransactionReceipt") + } + + var r0 *types.Receipt + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Receipt, error)); ok { + return rf(ctx, txHash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Receipt); ok { + r0 = rf(ctx, txHash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Receipt) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, txHash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransactionReceiptGeth provides a mock function with given fields: ctx, txHash +func (_m *RPCClient) TransactionReceiptGeth(ctx context.Context, txHash common.Hash) (*coretypes.Receipt, error) { + ret := _m.Called(ctx, txHash) + + if len(ret) == 0 { + panic("no return value specified for TransactionReceiptGeth") + } + + var r0 *coretypes.Receipt + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*coretypes.Receipt, error)); ok { + return rf(ctx, txHash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *coretypes.Receipt); ok { + r0 = rf(ctx, txHash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.Receipt) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, txHash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UnsubscribeAllExceptAliveLoop provides a mock function with given fields: +func (_m *RPCClient) UnsubscribeAllExceptAliveLoop() { + _m.Called() +} + +// NewRPCClient creates a new instance of RPCClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRPCClient(t interface { + mock.TestingT + Cleanup(func()) +}) *RPCClient { + mock := &RPCClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} From 407944088f4654f9442f41acad5641cd74da0406 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 12 Jun 2024 10:37:46 -0400 Subject: [PATCH 28/58] Make NodeStates public --- common/client/multi_node.go | 30 ++-- common/client/multi_node_test.go | 140 +++--------------- common/client/node.go | 32 ++-- common/client/node_fsm.go | 138 ++++++++--------- common/client/node_fsm_test.go | 44 +++--- common/client/node_lifecycle.go | 40 ++--- common/client/node_lifecycle_test.go | 124 ++++++++-------- common/client/node_selector_highest_head.go | 2 +- .../client/node_selector_highest_head_test.go | 40 ++--- common/client/node_selector_priority_level.go | 4 +- .../node_selector_priority_level_test.go | 20 +-- common/client/node_selector_round_robin.go | 2 +- .../client/node_selector_round_robin_test.go | 8 +- .../client/node_selector_total_difficulty.go | 2 +- .../node_selector_total_difficulty_test.go | 40 ++--- common/client/send_only_node.go | 14 +- common/client/send_only_node_lifecycle.go | 6 +- common/client/send_only_node_test.go | 14 +- core/chains/evm/client/chain_client.go | 4 +- core/chains/evm/client/null_client.go | 2 +- core/chains/legacyevm/chain.go | 10 +- 21 files changed, 309 insertions(+), 407 deletions(-) diff --git a/common/client/multi_node.go b/common/client/multi_node.go index ca69bad0c0d..ec0718c942a 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -46,7 +46,7 @@ type MultiNode[ // Returns error if `do` was not called or context returns an error. DoAll(ctx context.Context, do func(ctx context.Context, rpc RPC_CLIENT, isSendOnly bool) bool) error // NodeStates - returns RPCs' states - NodeStates() map[string]string + NodeStates() map[string]NodeState Close() error } @@ -121,7 +121,7 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) DoAll(ctx context.Co if ctx.Err() != nil { return ctx.Err() } - if n.State() != nodeStateAlive { + if n.State() != NodeStateAlive { continue } if do(ctx, n.RPC(), false) { @@ -136,7 +136,7 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) DoAll(ctx context.Co if ctx.Err() != nil { return ctx.Err() } - if n.State() != nodeStateAlive { + if n.State() != NodeStateAlive { continue } do(ctx, n.RPC(), false) @@ -144,13 +144,13 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) DoAll(ctx context.Co return nil } -func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) NodeStates() map[string]string { - states := map[string]string{} +func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) NodeStates() map[string]NodeState { + states := map[string]NodeState{} for _, n := range c.primaryNodes { - states[n.String()] = n.State().String() + states[n.String()] = n.State() } for _, n := range c.sendOnlyNodes { - states[n.String()] = n.State().String() + states[n.String()] = n.State() } return states } @@ -225,12 +225,12 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) SelectRPC() (rpc RPC return n.RPC(), nil } -// selectNode returns the active Node, if it is still nodeStateAlive, otherwise it selects a new one from the NodeSelector. +// selectNode returns the active Node, if it is still NodeStateAlive, otherwise it selects a new one from the NodeSelector. func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) selectNode() (node Node[CHAIN_ID, HEAD, RPC_CLIENT], err error) { c.activeMu.RLock() node = c.activeNode c.activeMu.RUnlock() - if node != nil && node.State() == nodeStateAlive { + if node != nil && node.State() == NodeStateAlive { return // still alive } @@ -238,7 +238,7 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) selectNode() (node N c.activeMu.Lock() defer c.activeMu.Unlock() node = c.activeNode - if node != nil && node.State() == nodeStateAlive { + if node != nil && node.State() == NodeStateAlive { return // another goroutine beat us here } @@ -246,8 +246,8 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) selectNode() (node N if c.activeNode == nil { c.lggr.Criticalw("No live RPC nodes available", "NodeSelectionMode", c.nodeSelector.Name()) - //errmsg := fmt.Errorf("no live nodes available for chain %s", c.chainID.String()) - c.SvcErrBuffer.Append(ErroringNodeError) + errmsg := fmt.Errorf("no live nodes available for chain %s", c.chainID.String()) + c.SvcErrBuffer.Append(errmsg) err = ErroringNodeError } @@ -259,7 +259,7 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) selectNode() (node N func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) nLiveNodes() (nLiveNodes int, blockNumber int64, totalDifficulty *big.Int) { totalDifficulty = big.NewInt(0) for _, n := range c.primaryNodes { - if s, chainInfo := n.StateAndLatest(); s == nodeStateAlive { + if s, chainInfo := n.StateAndLatest(); s == NodeStateAlive { nLiveNodes++ if chainInfo.BlockNumber > blockNumber { blockNumber = chainInfo.BlockNumber @@ -277,7 +277,7 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) checkLease() { for _, n := range c.primaryNodes { // Terminate client subscriptions. Services are responsible for reconnecting, which will be routed to the new // best node. Only terminate connections with more than 1 subscription to account for the aliveLoop subscription - if n.State() == nodeStateAlive && n != bestNode { + if n.State() == NodeStateAlive && n != bestNode { c.lggr.Infof("Switching to best node from %q to %q", n.String(), bestNode.String()) n.UnsubscribeAll() } @@ -336,7 +336,7 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) report() { state := n.State() nodeStates[i] = nodeWithState{n.String(), state.String()} total++ - if state != nodeStateAlive { + if state != NodeStateAlive { dead++ } counts[state]++ diff --git a/common/client/multi_node_test.go b/common/client/multi_node_test.go index 3fc75303485..7bd7430ad39 100644 --- a/common/client/multi_node_test.go +++ b/common/client/multi_node_test.go @@ -48,7 +48,7 @@ func newTestMultiNode(t *testing.T, opts multiNodeOpts) testMultiNode { } func newHealthyNode(t *testing.T, chainID types.ID) *mockNode[types.ID, types.Head[Hashable], multiNodeRPCClient] { - return newNodeWithState(t, chainID, nodeStateAlive) + return newNodeWithState(t, chainID, NodeStateAlive) } func newNodeWithState(t *testing.T, chainID types.ID, state NodeState) *mockNode[types.ID, types.Head[Hashable], multiNodeRPCClient] { @@ -195,7 +195,7 @@ func TestMultiNode_Report(t *testing.T) { t.Parallel() chainID := types.RandomID() node1 := newHealthyNode(t, chainID) - node2 := newNodeWithState(t, chainID, nodeStateOutOfSync) + node2 := newNodeWithState(t, chainID, NodeStateOutOfSync) lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, @@ -212,7 +212,7 @@ func TestMultiNode_Report(t *testing.T) { t.Run("Report critical error on all node failure", func(t *testing.T) { t.Parallel() chainID := types.RandomID() - node := newNodeWithState(t, chainID, nodeStateOutOfSync) + node := newNodeWithState(t, chainID, NodeStateOutOfSync) lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, @@ -299,9 +299,9 @@ func TestMultiNode_CheckLease(t *testing.T) { t.Parallel() chainID := types.NewIDFromInt(10) nodes := map[string]NodeState{ - "node_1": nodeStateAlive, - "node_2": nodeStateUnreachable, - "node_3": nodeStateDialed, + "node_1": NodeStateAlive, + "node_2": NodeStateUnreachable, + "node_3": NodeStateDialed, } opts := multiNodeOpts{ @@ -309,7 +309,7 @@ func TestMultiNode_CheckLease(t *testing.T) { chainID: chainID, } - expectedResult := map[string]string{} + expectedResult := map[string]NodeState{} for name, state := range nodes { node := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) node.On("State").Return(state) @@ -322,8 +322,8 @@ func TestMultiNode_CheckLease(t *testing.T) { sendOnly.On("String").Return(sendOnlyName) opts.sendonlys = append(opts.sendonlys, sendOnly) - expectedResult[name] = state.String() - expectedResult[sendOnlyName] = state.String() + expectedResult[name] = state + expectedResult[sendOnlyName] = state } mn := newTestMultiNode(t, opts) @@ -338,7 +338,7 @@ func TestMultiNode_selectNode(t *testing.T) { t.Parallel() chainID := types.RandomID() node1 := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) - node1.On("State").Return(nodeStateAlive).Once() + node1.On("State").Return(NodeStateAlive).Once() node1.On("String").Return("node1").Maybe() node2 := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) node2.On("String").Return("node2").Maybe() @@ -376,7 +376,7 @@ func TestMultiNode_selectNode(t *testing.T) { require.NoError(t, err) require.Equal(t, oldBest.String(), activeNode.String()) // old best died, so we should replace it - oldBest.On("State").Return(nodeStateOutOfSync).Twice() + oldBest.On("State").Return(NodeStateOutOfSync).Twice() nodeSelector.On("Select").Return(newBest).Once() newActiveNode, err := mn.selectNode() require.NoError(t, err) @@ -426,28 +426,28 @@ func TestMultiNode_nLiveNodes(t *testing.T) { ExpectedNLiveNodes: 3, NodeParams: []nodeParams{ { - State: nodeStateOutOfSync, + State: NodeStateOutOfSync, chainInfo: ChainInfo{ BlockNumber: 1000, BlockDifficulty: big.NewInt(2000), }, }, { - State: nodeStateAlive, + State: NodeStateAlive, chainInfo: ChainInfo{ BlockNumber: 20, BlockDifficulty: big.NewInt(9), }, }, { - State: nodeStateAlive, + State: NodeStateAlive, chainInfo: ChainInfo{ BlockNumber: 19, BlockDifficulty: big.NewInt(10), }, }, { - State: nodeStateAlive, + State: NodeStateAlive, chainInfo: ChainInfo{ BlockNumber: 11, BlockDifficulty: nil, @@ -467,7 +467,6 @@ func TestMultiNode_nLiveNodes(t *testing.T) { t.Run(tc.Name, func(t *testing.T) { for _, params := range tc.NodeParams { node := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) - // TODO: Returns chainInfo not block number, difficulty! node.On("StateAndLatest").Return(params.State, params.chainInfo) mn.primaryNodes = append(mn.primaryNodes, node) } @@ -480,104 +479,7 @@ func TestMultiNode_nLiveNodes(t *testing.T) { } } -/* TODO: Multinode no longer contains this method; maybe test DoAll instead? -func TestMultiNode_BatchCallContextAll(t *testing.T) { - t.Parallel() - t.Run("Fails if failed to select active node", func(t *testing.T) { - chainID := types.RandomID() - mn := newTestMultiNode(t, multiNodeOpts{ - selectionMode: NodeSelectionModeRoundRobin, - chainID: chainID, - }) - nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t) - nodeSelector.On("Select").Return(nil).Once() - nodeSelector.On("Name").Return("MockedNodeSelector").Once() - mn.nodeSelector = nodeSelector - err := mn.BatchCallContextAll(tests.Context(t), nil) - require.EqualError(t, err, ErroringNodeError.Error()) - }) - t.Run("Returns error if RPC call fails for active node", func(t *testing.T) { - chainID := types.RandomID() - rpc := newMultiNodeRPCClient(t) - expectedError := errors.New("rpc failed to do the batch call") - rpc.On("BatchCallContext", mock.Anything, mock.Anything).Return(expectedError).Once() - node := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) - node.On("RPC").Return(rpc) - nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t) - nodeSelector.On("Select").Return(node).Once() - mn := newTestMultiNode(t, multiNodeOpts{ - selectionMode: NodeSelectionModeRoundRobin, - chainID: chainID, - }) - mn.nodeSelector = nodeSelector - err := mn.BatchCallContextAll(tests.Context(t), nil) - require.EqualError(t, err, expectedError.Error()) - }) - t.Run("Waits for all nodes to complete the call and logs results", func(t *testing.T) { - // setup RPCs - failedRPC := newMultiNodeRPCClient(t) - failedRPC.On("BatchCallContext", mock.Anything, mock.Anything). - Return(errors.New("rpc failed to do the batch call")).Once() - okRPC := newMultiNodeRPCClient(t) - okRPC.On("BatchCallContext", mock.Anything, mock.Anything).Return(nil).Twice() - - // setup ok and failed auxiliary nodes - okNode := newMockSendOnlyNode[types.ID, multiNodeRPCClient](t) - okNode.On("RPC").Return(okRPC).Once() - okNode.On("State").Return(nodeStateAlive) - failedNode := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) - failedNode.On("RPC").Return(failedRPC).Once() - failedNode.On("State").Return(nodeStateAlive) - - // setup main node - mainNode := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) - mainNode.On("RPC").Return(okRPC) - nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t) - nodeSelector.On("Select").Return(mainNode).Once() - lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) - mn := newTestMultiNode(t, multiNodeOpts{ - selectionMode: NodeSelectionModeRoundRobin, - chainID: types.RandomID(), - nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{failedNode, mainNode}, - sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{okNode}, - logger: lggr, - }) - mn.nodeSelector = nodeSelector - - err := mn.BatchCallContextAll(tests.Context(t), nil) - require.NoError(t, err) - tests.RequireLogMessage(t, observedLogs, "Secondary node BatchCallContext failed") - }) - t.Run("Does not call BatchCallContext for unhealthy nodes", func(t *testing.T) { - // setup RPCs - okRPC := newMultiNodeRPCClient(t) - okRPC.On("BatchCallContext", mock.Anything, mock.Anything).Return(nil).Twice() - - // setup ok and failed auxiliary nodes - healthyNode := newMockSendOnlyNode[types.ID, multiNodeRPCClient](t) - healthyNode.On("RPC").Return(okRPC).Once() - healthyNode.On("State").Return(nodeStateAlive) - deadNode := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) - deadNode.On("State").Return(nodeStateUnreachable) - - // setup main node - mainNode := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) - mainNode.On("RPC").Return(okRPC) - nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t) - nodeSelector.On("Select").Return(mainNode).Once() - mn := newTestMultiNode(t, multiNodeOpts{ - selectionMode: NodeSelectionModeRoundRobin, - chainID: types.RandomID(), - nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{deadNode, mainNode}, - sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{healthyNode, deadNode}, - }) - mn.nodeSelector = nodeSelector - - err := mn.BatchCallContextAll(tests.Context(t), nil) - require.NoError(t, err) - }) -} -*/ +/* TODO: Add test covereage for DoAll() /* TODO: Implement TransactionSender func TestMultiNode_SendTransaction(t *testing.T) { @@ -601,7 +503,7 @@ func TestMultiNode_SendTransaction(t *testing.T) { } newNode := func(t *testing.T, txErr error, sendTxRun func(args mock.Arguments)) *mockNode[types.ID, types.Head[Hashable], multiNodeRPCClient] { - return newNodeWithState(t, nodeStateAlive, txErr, sendTxRun) + return newNodeWithState(t, NodeStateAlive, txErr, sendTxRun) } newStartedMultiNode := func(t *testing.T, opts multiNodeOpts) testMultiNode { mn := newTestMultiNode(t, opts) @@ -744,8 +646,8 @@ func TestMultiNode_SendTransaction(t *testing.T) { mn := newStartedMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: types.RandomID(), - nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{newNodeWithState(t, nodeStateUnreachable, nil, nil)}, - sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{newNodeWithState(t, nodeStateUnreachable, nil, nil)}, + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{newNodeWithState(t, NodeStateUnreachable, nil, nil)}, + sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{newNodeWithState(t, NodeStateUnreachable, nil, nil)}, classifySendTxError: classifySendTxError, }) err := mn.SendTransaction(tests.Context(t), nil) @@ -757,8 +659,8 @@ func TestMultiNode_SendTransaction(t *testing.T) { unexpectedCall := func(args mock.Arguments) { panic("SendTx must not be called for unhealthy node") } - unhealthyNode := newNodeWithState(t, nodeStateUnreachable, nil, unexpectedCall) - unhealthySendOnlyNode := newNodeWithState(t, nodeStateUnreachable, nil, unexpectedCall) + unhealthyNode := newNodeWithState(t, NodeStateUnreachable, nil, unexpectedCall) + unhealthySendOnlyNode := newNodeWithState(t, NodeStateUnreachable, nil, unexpectedCall) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) mn := newStartedMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, diff --git a/common/client/node.go b/common/client/node.go index 7f3393f0d0b..92730705d25 100644 --- a/common/client/node.go +++ b/common/client/node.go @@ -216,7 +216,7 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) close() error { defer n.stateMu.Unlock() close(n.stopCh) - n.state = nodeStateClosed + n.state = NodeStateClosed return nil } @@ -237,7 +237,7 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) Start(startCtx context.Context) error // Node lifecycle is synchronous: only one goroutine should be running at a // time. func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) start(startCtx context.Context) { - if n.state != nodeStateUndialed { + if n.state != NodeStateUndialed { panic(fmt.Sprintf("cannot dial node with state %v", n.state)) } @@ -246,7 +246,7 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) start(startCtx context.Context) { n.declareUnreachable() return } - n.setState(nodeStateDialed) + n.setState(NodeStateDialed) state := n.verifyConn(startCtx, n.lfcLog) n.declareState(state) @@ -263,11 +263,11 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) verifyChainID(callerCtx context.Conte st := n.State() switch st { - case nodeStateClosed: + case NodeStateClosed: // The node is already closed, and any subsequent transition is invalid. // To make spotting such transitions a bit easier, return the invalid node state. - return nodeStateLen - case nodeStateDialed, nodeStateOutOfSync, nodeStateInvalidChainID, nodeStateSyncing: + return NodeStateLen + case NodeStateDialed, NodeStateOutOfSync, NodeStateInvalidChainID, NodeStateSyncing: default: panic(fmt.Sprintf("cannot verify node in state %v", st)) } @@ -277,7 +277,7 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) verifyChainID(callerCtx context.Conte if chainID, err = n.rpc.ChainID(callerCtx); err != nil { promFailed() lggr.Errorw("Failed to verify chain ID for node", "err", err, "nodeState", n.State()) - return nodeStateUnreachable + return NodeStateUnreachable } else if chainID.String() != n.chainID.String() { promFailed() err = fmt.Errorf( @@ -288,30 +288,30 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) verifyChainID(callerCtx context.Conte errInvalidChainID, ) lggr.Errorw("Failed to verify RPC node; remote endpoint returned the wrong chain ID", "err", err, "nodeState", n.State()) - return nodeStateInvalidChainID + return NodeStateInvalidChainID } promPoolRPCNodeVerifiesSuccess.WithLabelValues(n.chainFamily, n.chainID.String(), n.name).Inc() - return nodeStateAlive + return NodeStateAlive } // createVerifiedConn - establishes new connection with the RPC and verifies that it's valid: chainID matches, and it's not syncing. -// Returns desired state if one of the verifications fails. Otherwise, returns nodeStateAlive. +// Returns desired state if one of the verifications fails. Otherwise, returns NodeStateAlive. func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) createVerifiedConn(ctx context.Context, lggr logger.Logger) NodeState { if err := n.rpc.Dial(ctx); err != nil { n.lfcLog.Errorw("Dial failed: Node is unreachable", "err", err, "nodeState", n.State()) - return nodeStateUnreachable + return NodeStateUnreachable } return n.verifyConn(ctx, lggr) } // verifyConn - verifies that current connection is valid: chainID matches, and it's not syncing. -// Returns desired state if one of the verifications fails. Otherwise, returns nodeStateAlive. +// Returns desired state if one of the verifications fails. Otherwise, returns NodeStateAlive. func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) verifyConn(ctx context.Context, lggr logger.Logger) NodeState { state := n.verifyChainID(ctx, lggr) - if state != nodeStateAlive { + if state != NodeStateAlive { return state } @@ -319,16 +319,16 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) verifyConn(ctx context.Context, lggr isSyncing, err := n.rpc.IsSyncing(ctx) if err != nil { lggr.Errorw("Unexpected error while verifying RPC node synchronization status", "err", err, "nodeState", n.State()) - return nodeStateUnreachable + return NodeStateUnreachable } if isSyncing { lggr.Errorw("Verification failed: Node is syncing", "nodeState", n.State()) - return nodeStateSyncing + return NodeStateSyncing } } - return nodeStateAlive + return NodeStateAlive } func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) Order() int32 { diff --git a/common/client/node_fsm.go b/common/client/node_fsm.go index a13bf722272..dcd2cb6e7c4 100644 --- a/common/client/node_fsm.go +++ b/common/client/node_fsm.go @@ -11,31 +11,31 @@ import ( var ( promPoolRPCNodeTransitionsToAlive = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "pool_rpc_node_num_transitions_to_alive", - Help: transitionString(nodeStateAlive), + Help: transitionString(NodeStateAlive), }, []string{"chainID", "nodeName"}) promPoolRPCNodeTransitionsToInSync = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "pool_rpc_node_num_transitions_to_in_sync", - Help: fmt.Sprintf("%s to %s", transitionString(nodeStateOutOfSync), nodeStateAlive), + Help: fmt.Sprintf("%s to %s", transitionString(NodeStateOutOfSync), NodeStateAlive), }, []string{"chainID", "nodeName"}) promPoolRPCNodeTransitionsToOutOfSync = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "pool_rpc_node_num_transitions_to_out_of_sync", - Help: transitionString(nodeStateOutOfSync), + Help: transitionString(NodeStateOutOfSync), }, []string{"chainID", "nodeName"}) promPoolRPCNodeTransitionsToUnreachable = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "pool_rpc_node_num_transitions_to_unreachable", - Help: transitionString(nodeStateUnreachable), + Help: transitionString(NodeStateUnreachable), }, []string{"chainID", "nodeName"}) promPoolRPCNodeTransitionsToInvalidChainID = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "pool_rpc_node_num_transitions_to_invalid_chain_id", - Help: transitionString(nodeStateInvalidChainID), + Help: transitionString(NodeStateInvalidChainID), }, []string{"chainID", "nodeName"}) promPoolRPCNodeTransitionsToUnusable = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "pool_rpc_node_num_transitions_to_unusable", - Help: transitionString(nodeStateUnusable), + Help: transitionString(NodeStateUnusable), }, []string{"chainID", "nodeName"}) promPoolRPCNodeTransitionsToSyncing = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "pool_rpc_node_num_transitions_to_syncing", - Help: transitionString(nodeStateSyncing), + Help: transitionString(NodeStateSyncing), }, []string{"chainID", "nodeName"}) ) @@ -45,23 +45,23 @@ type NodeState int func (n NodeState) String() string { switch n { - case nodeStateUndialed: + case NodeStateUndialed: return "Undialed" - case nodeStateDialed: + case NodeStateDialed: return "Dialed" - case nodeStateInvalidChainID: + case NodeStateInvalidChainID: return "InvalidChainID" - case nodeStateAlive: + case NodeStateAlive: return "Alive" - case nodeStateUnreachable: + case NodeStateUnreachable: return "Unreachable" - case nodeStateUnusable: + case NodeStateUnusable: return "Unusable" - case nodeStateOutOfSync: + case NodeStateOutOfSync: return "OutOfSync" - case nodeStateClosed: + case NodeStateClosed: return "Closed" - case nodeStateSyncing: + case NodeStateSyncing: return "Syncing" default: return fmt.Sprintf("NodeState(%d)", n) @@ -74,39 +74,39 @@ func (n NodeState) GoString() string { } const ( - // nodeStateUndialed is the first state of a virgin node - nodeStateUndialed = NodeState(iota) - // nodeStateDialed is after a node has successfully dialed but before it has verified the correct chain ID - nodeStateDialed - // nodeStateInvalidChainID is after chain ID verification failed - nodeStateInvalidChainID - // nodeStateAlive is a healthy node after chain ID verification succeeded - nodeStateAlive - // nodeStateUnreachable is a node that cannot be dialed or has disconnected - nodeStateUnreachable - // nodeStateOutOfSync is a node that is accepting connections but exceeded + // NodeStateUndialed is the first state of a virgin node + NodeStateUndialed = NodeState(iota) + // NodeStateDialed is after a node has successfully dialed but before it has verified the correct chain ID + NodeStateDialed + // NodeStateInvalidChainID is after chain ID verification failed + NodeStateInvalidChainID + // NodeStateAlive is a healthy node after chain ID verification succeeded + NodeStateAlive + // NodeStateUnreachable is a node that cannot be dialed or has disconnected + NodeStateUnreachable + // NodeStateOutOfSync is a node that is accepting connections but exceeded // the failure threshold without sending any new heads. It will be // disconnected, then put into a revive loop and re-awakened after redial // if a new head arrives - nodeStateOutOfSync - // nodeStateUnusable is a sendonly node that has an invalid URL that can never be reached - nodeStateUnusable - // nodeStateClosed is after the connection has been closed and the node is at the end of its lifecycle - nodeStateClosed - // nodeStateSyncing is a node that is actively back-filling blockchain. Usually, it's a newly set up node that is - // still syncing the chain. The main difference from `nodeStateOutOfSync` is that it represents state relative - // to other primary nodes configured in the MultiNode. In contrast, `nodeStateSyncing` represents the internal state of + NodeStateOutOfSync + // NodeStateUnusable is a sendonly node that has an invalid URL that can never be reached + NodeStateUnusable + // NodeStateClosed is after the connection has been closed and the node is at the end of its lifecycle + NodeStateClosed + // NodeStateSyncing is a node that is actively back-filling blockchain. Usually, it's a newly set up node that is + // still syncing the chain. The main difference from `NodeStateOutOfSync` is that it represents state relative + // to other primary nodes configured in the MultiNode. In contrast, `NodeStateSyncing` represents the internal state of // the node (RPC). - nodeStateSyncing - // nodeStateLen tracks the number of states - nodeStateLen + NodeStateSyncing + // NodeStateLen tracks the number of states + NodeStateLen ) // allNodeStates represents all possible states a node can be in var allNodeStates []NodeState func init() { - for s := NodeState(0); s < nodeStateLen; s++ { + for s := NodeState(0); s < NodeStateLen; s++ { allNodeStates = append(allNodeStates, s) } } @@ -158,14 +158,14 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToAlive(fn func()) { promPoolRPCNodeTransitionsToAlive.WithLabelValues(n.chainID.String(), n.name).Inc() n.stateMu.Lock() defer n.stateMu.Unlock() - if n.state == nodeStateClosed { + if n.state == NodeStateClosed { return } switch n.state { - case nodeStateDialed, nodeStateInvalidChainID, nodeStateSyncing: - n.state = nodeStateAlive + case NodeStateDialed, NodeStateInvalidChainID, NodeStateSyncing: + n.state = NodeStateAlive default: - panic(transitionFail(n.state, nodeStateAlive)) + panic(transitionFail(n.state, NodeStateAlive)) } fn() } @@ -185,14 +185,14 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToInSync(fn func()) { promPoolRPCNodeTransitionsToInSync.WithLabelValues(n.chainID.String(), n.name).Inc() n.stateMu.Lock() defer n.stateMu.Unlock() - if n.state == nodeStateClosed { + if n.state == NodeStateClosed { return } switch n.state { - case nodeStateOutOfSync, nodeStateSyncing: - n.state = nodeStateAlive + case NodeStateOutOfSync, NodeStateSyncing: + n.state = NodeStateAlive default: - panic(transitionFail(n.state, nodeStateAlive)) + panic(transitionFail(n.state, NodeStateAlive)) } fn() } @@ -211,15 +211,15 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToOutOfSync(fn func()) { promPoolRPCNodeTransitionsToOutOfSync.WithLabelValues(n.chainID.String(), n.name).Inc() n.stateMu.Lock() defer n.stateMu.Unlock() - if n.state == nodeStateClosed { + if n.state == NodeStateClosed { return } switch n.state { - case nodeStateAlive: + case NodeStateAlive: n.UnsubscribeAll() - n.state = nodeStateOutOfSync + n.state = NodeStateOutOfSync default: - panic(transitionFail(n.state, nodeStateOutOfSync)) + panic(transitionFail(n.state, NodeStateOutOfSync)) } fn() } @@ -236,31 +236,31 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToUnreachable(fn func()) { promPoolRPCNodeTransitionsToUnreachable.WithLabelValues(n.chainID.String(), n.name).Inc() n.stateMu.Lock() defer n.stateMu.Unlock() - if n.state == nodeStateClosed { + if n.state == NodeStateClosed { return } switch n.state { - case nodeStateUndialed, nodeStateDialed, nodeStateAlive, nodeStateOutOfSync, nodeStateInvalidChainID, nodeStateSyncing: + case NodeStateUndialed, NodeStateDialed, NodeStateAlive, NodeStateOutOfSync, NodeStateInvalidChainID, NodeStateSyncing: n.UnsubscribeAll() - n.state = nodeStateUnreachable + n.state = NodeStateUnreachable default: - panic(transitionFail(n.state, nodeStateUnreachable)) + panic(transitionFail(n.state, NodeStateUnreachable)) } fn() } func (n *node[CHAIN_ID, HEAD, RPC]) declareState(state NodeState) { - if n.State() == nodeStateClosed { + if n.State() == NodeStateClosed { return } switch state { - case nodeStateInvalidChainID: + case NodeStateInvalidChainID: n.declareInvalidChainID() - case nodeStateUnreachable: + case NodeStateUnreachable: n.declareUnreachable() - case nodeStateSyncing: + case NodeStateSyncing: n.declareSyncing() - case nodeStateAlive: + case NodeStateAlive: n.declareAlive() default: panic(fmt.Sprintf("%#v state declaration is not implemented", state)) @@ -279,15 +279,15 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToInvalidChainID(fn func()) { promPoolRPCNodeTransitionsToInvalidChainID.WithLabelValues(n.chainID.String(), n.name).Inc() n.stateMu.Lock() defer n.stateMu.Unlock() - if n.state == nodeStateClosed { + if n.state == NodeStateClosed { return } switch n.state { - case nodeStateDialed, nodeStateOutOfSync, nodeStateSyncing: + case NodeStateDialed, NodeStateOutOfSync, NodeStateSyncing: n.UnsubscribeAll() - n.state = nodeStateInvalidChainID + n.state = NodeStateInvalidChainID default: - panic(transitionFail(n.state, nodeStateInvalidChainID)) + panic(transitionFail(n.state, NodeStateInvalidChainID)) } fn() } @@ -304,19 +304,19 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToSyncing(fn func()) { promPoolRPCNodeTransitionsToSyncing.WithLabelValues(n.chainID.String(), n.name).Inc() n.stateMu.Lock() defer n.stateMu.Unlock() - if n.state == nodeStateClosed { + if n.state == NodeStateClosed { return } switch n.state { - case nodeStateDialed, nodeStateOutOfSync, nodeStateInvalidChainID: + case NodeStateDialed, NodeStateOutOfSync, NodeStateInvalidChainID: n.UnsubscribeAll() - n.state = nodeStateSyncing + n.state = NodeStateSyncing default: - panic(transitionFail(n.state, nodeStateSyncing)) + panic(transitionFail(n.state, NodeStateSyncing)) } if !n.nodePoolCfg.NodeIsSyncingEnabled() { - panic("unexpected transition to nodeStateSyncing, while it's disabled") + panic("unexpected transition to NodeStateSyncing, while it's disabled") } fn() } diff --git a/common/client/node_fsm_test.go b/common/client/node_fsm_test.go index 71cc16d385a..a32a551183d 100644 --- a/common/client/node_fsm_test.go +++ b/common/client/node_fsm_test.go @@ -29,50 +29,50 @@ func TestUnit_Node_StateTransitions(t *testing.T) { t.Run("setState", func(t *testing.T) { n := newTestNode(t, testNodeOpts{rpc: nil, config: testNodeConfig{nodeIsSyncingEnabled: true}}) - assert.Equal(t, nodeStateUndialed, n.State()) - n.setState(nodeStateAlive) - assert.Equal(t, nodeStateAlive, n.State()) - n.setState(nodeStateUndialed) - assert.Equal(t, nodeStateUndialed, n.State()) + assert.Equal(t, NodeStateUndialed, n.State()) + n.setState(NodeStateAlive) + assert.Equal(t, NodeStateAlive, n.State()) + n.setState(NodeStateUndialed) + assert.Equal(t, NodeStateUndialed, n.State()) }) t.Run("transitionToAlive", func(t *testing.T) { - const destinationState = nodeStateAlive - allowedStates := []NodeState{nodeStateDialed, nodeStateInvalidChainID, nodeStateSyncing} + const destinationState = NodeStateAlive + allowedStates := []NodeState{NodeStateDialed, NodeStateInvalidChainID, NodeStateSyncing} rpc := NewMockRPCClient[types.ID, Head](t) testTransition(t, rpc, testNode.transitionToAlive, destinationState, allowedStates...) }) t.Run("transitionToInSync", func(t *testing.T) { - const destinationState = nodeStateAlive - allowedStates := []NodeState{nodeStateOutOfSync, nodeStateSyncing} + const destinationState = NodeStateAlive + allowedStates := []NodeState{NodeStateOutOfSync, NodeStateSyncing} rpc := NewMockRPCClient[types.ID, Head](t) testTransition(t, rpc, testNode.transitionToInSync, destinationState, allowedStates...) }) t.Run("transitionToOutOfSync", func(t *testing.T) { - const destinationState = nodeStateOutOfSync - allowedStates := []NodeState{nodeStateAlive} + const destinationState = NodeStateOutOfSync + allowedStates := []NodeState{NodeStateAlive} rpc := NewMockRPCClient[types.ID, Head](t) rpc.On("UnsubscribeAllExcept", nil, nil).Once() testTransition(t, rpc, testNode.transitionToOutOfSync, destinationState, allowedStates...) }) t.Run("transitionToUnreachable", func(t *testing.T) { - const destinationState = nodeStateUnreachable - allowedStates := []NodeState{nodeStateUndialed, nodeStateDialed, nodeStateAlive, nodeStateOutOfSync, nodeStateInvalidChainID, nodeStateSyncing} + const destinationState = NodeStateUnreachable + allowedStates := []NodeState{NodeStateUndialed, NodeStateDialed, NodeStateAlive, NodeStateOutOfSync, NodeStateInvalidChainID, NodeStateSyncing} rpc := NewMockRPCClient[types.ID, Head](t) rpc.On("UnsubscribeAllExcept", nil, nil).Times(len(allowedStates)) testTransition(t, rpc, testNode.transitionToUnreachable, destinationState, allowedStates...) }) t.Run("transitionToInvalidChain", func(t *testing.T) { - const destinationState = nodeStateInvalidChainID - allowedStates := []NodeState{nodeStateDialed, nodeStateOutOfSync, nodeStateSyncing} + const destinationState = NodeStateInvalidChainID + allowedStates := []NodeState{NodeStateDialed, NodeStateOutOfSync, NodeStateSyncing} rpc := NewMockRPCClient[types.ID, Head](t) rpc.On("UnsubscribeAllExcept", nil, nil).Times(len(allowedStates)) testTransition(t, rpc, testNode.transitionToInvalidChainID, destinationState, allowedStates...) }) t.Run("transitionToSyncing", func(t *testing.T) { - const destinationState = nodeStateSyncing - allowedStates := []NodeState{nodeStateDialed, nodeStateOutOfSync, nodeStateInvalidChainID} + const destinationState = NodeStateSyncing + allowedStates := []NodeState{NodeStateDialed, NodeStateOutOfSync, NodeStateInvalidChainID} rpc := NewMockRPCClient[types.ID, Head](t) rpc.On("UnsubscribeAllExcept", nil, nil).Times(len(allowedStates)) testTransition(t, rpc, testNode.transitionToSyncing, destinationState, allowedStates...) @@ -81,10 +81,10 @@ func TestUnit_Node_StateTransitions(t *testing.T) { rpc := NewMockRPCClient[types.ID, Head](t) rpc.On("UnsubscribeAllExcept", nil, nil).Once() node := newTestNode(t, testNodeOpts{rpc: rpc}) - node.setState(nodeStateDialed) + node.setState(NodeStateDialed) fn := new(fnMock) defer fn.AssertNotCalled(t) - assert.PanicsWithValue(t, "unexpected transition to nodeStateSyncing, while it's disabled", func() { + assert.PanicsWithValue(t, "unexpected transition to NodeStateSyncing, while it's disabled", func() { node.transitionToSyncing(fn.Fn) }) }) @@ -101,13 +101,13 @@ func testTransition(t *testing.T, rpc *MockRPCClient[types.ID, Head], transition } // noop on attempt to transition from Closed state m := new(fnMock) - node.setState(nodeStateClosed) + node.setState(NodeStateClosed) transition(node, m.Fn) m.AssertNotCalled(t) - assert.Equal(t, nodeStateClosed, node.State(), "Expected node to remain in closed state on transition attempt") + assert.Equal(t, NodeStateClosed, node.State(), "Expected node to remain in closed state on transition attempt") for _, nodeState := range allNodeStates { - if slices.Contains(allowedStates, nodeState) || nodeState == nodeStateClosed { + if slices.Contains(allowedStates, nodeState) || nodeState == NodeStateClosed { continue } diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index 4c6e592f12a..f17081b0c8b 100644 --- a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -92,8 +92,8 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { // sanity check state := n.State() switch state { - case nodeStateAlive: - case nodeStateClosed: + case NodeStateAlive: + case NodeStateClosed: return default: panic(fmt.Sprintf("aliveLoop can only run for node in Alive state, got: %s", state)) @@ -325,8 +325,8 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td // sanity check state := n.State() switch state { - case nodeStateOutOfSync: - case nodeStateClosed: + case NodeStateOutOfSync: + case NodeStateClosed: return default: panic(fmt.Sprintf("outOfSyncLoop can only run for node in OutOfSync state, got: %s", state)) @@ -340,7 +340,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td // Need to redial since out-of-sync nodes are automatically disconnected state := n.createVerifiedConn(ctx, lggr) - if state != nodeStateAlive { + if state != NodeStateAlive { n.declareState(state) return } @@ -398,8 +398,8 @@ func (n *node[CHAIN_ID, HEAD, RPC]) unreachableLoop() { // sanity check state := n.State() switch state { - case nodeStateUnreachable: - case nodeStateClosed: + case NodeStateUnreachable: + case NodeStateClosed: return default: panic(fmt.Sprintf("unreachableLoop can only run for node in Unreachable state, got: %s", state)) @@ -426,14 +426,14 @@ func (n *node[CHAIN_ID, HEAD, RPC]) unreachableLoop() { continue } - n.setState(nodeStateDialed) + n.setState(NodeStateDialed) state := n.verifyConn(ctx, lggr) switch state { - case nodeStateUnreachable: - n.setState(nodeStateUnreachable) + case NodeStateUnreachable: + n.setState(NodeStateUnreachable) continue - case nodeStateAlive: + case NodeStateAlive: lggr.Infow(fmt.Sprintf("Successfully redialled and verified RPC node %s. Node was offline for %s", n.String(), time.Since(unreachableAt)), "nodeState", n.State()) fallthrough default: @@ -453,8 +453,8 @@ func (n *node[CHAIN_ID, HEAD, RPC]) invalidChainIDLoop() { // sanity check state := n.State() switch state { - case nodeStateInvalidChainID: - case nodeStateClosed: + case NodeStateInvalidChainID: + case NodeStateClosed: return default: panic(fmt.Sprintf("invalidChainIDLoop can only run for node in InvalidChainID state, got: %s", state)) @@ -469,7 +469,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) invalidChainIDLoop() { // Need to redial since invalid chain ID nodes are automatically disconnected state := n.createVerifiedConn(ctx, lggr) - if state != nodeStateInvalidChainID { + if state != NodeStateInvalidChainID { n.declareState(state) return } @@ -485,9 +485,9 @@ func (n *node[CHAIN_ID, HEAD, RPC]) invalidChainIDLoop() { case <-time.After(chainIDRecheckBackoff.Duration()): state := n.verifyConn(ctx, lggr) switch state { - case nodeStateInvalidChainID: + case NodeStateInvalidChainID: continue - case nodeStateAlive: + case NodeStateAlive: lggr.Infow(fmt.Sprintf("Successfully verified RPC node. Node was offline for %s", time.Since(invalidAt)), "nodeState", n.State()) fallthrough default: @@ -507,11 +507,11 @@ func (n *node[CHAIN_ID, HEAD, RPC]) syncingLoop() { // sanity check state := n.State() switch state { - case nodeStateSyncing: - case nodeStateClosed: + case NodeStateSyncing: + case NodeStateClosed: return default: - panic(fmt.Sprintf("syncingLoop can only run for node in nodeStateSyncing state, got: %s", state)) + panic(fmt.Sprintf("syncingLoop can only run for node in NodeStateSyncing state, got: %s", state)) } } @@ -521,7 +521,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) syncingLoop() { lggr.Debugw(fmt.Sprintf("Periodically re-checking RPC node %s with syncing status", n.String()), "nodeState", n.State()) // Need to redial since syncing nodes are automatically disconnected state := n.createVerifiedConn(ctx, lggr) - if state != nodeStateSyncing { + if state != NodeStateSyncing { n.declareState(state) return } diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go index 34036c1d47f..e8030c4c1c7 100644 --- a/common/client/node_lifecycle_test.go +++ b/common/client/node_lifecycle_test.go @@ -29,13 +29,13 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { node := newTestNode(t, opts) opts.rpc.On("Close").Return(nil).Once() - node.setState(nodeStateDialed) + node.setState(NodeStateDialed) return node } t.Run("returns on closed", func(t *testing.T) { node := newTestNode(t, testNodeOpts{}) - node.setState(nodeStateClosed) + node.setState(NodeStateClosed) node.wg.Add(1) node.aliveLoop() }) @@ -54,7 +54,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() tests.AssertEventually(t, func() bool { - return node.State() == nodeStateUnreachable + return node.State() == NodeStateUnreachable }) }) t.Run("if remote RPC connection is closed transitions to unreachable", func(t *testing.T) { @@ -80,7 +80,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() tests.AssertLogEventually(t, observedLogs, "Subscription was terminated") - assert.Equal(t, nodeStateUnreachable, node.State()) + assert.Equal(t, NodeStateUnreachable, node.State()) }) newSubscribedNode := func(t *testing.T, opts testNodeOpts) testNode { @@ -103,7 +103,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { node.declareAlive() tests.AssertLogEventually(t, observedLogs, "Head liveness checking disabled") tests.AssertLogEventually(t, observedLogs, "Polling disabled") - assert.Equal(t, nodeStateAlive, node.State()) + assert.Equal(t, NodeStateAlive, node.State()) }) t.Run("stays alive while below pollFailureThreshold and resets counter on success", func(t *testing.T) { t.Parallel() @@ -128,7 +128,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { // 1. Return error several times, but below threshold rpc.On("Ping", mock.Anything).Return(pollError).Run(func(_ mock.Arguments) { // stays healthy while below threshold - assert.Equal(t, nodeStateAlive, node.State()) + assert.Equal(t, NodeStateAlive, node.State()) }).Times(pollFailureThreshold) // 2. Successful call that is expected to reset counter rpc.On("Ping", mock.Anything).Return(nil).Once() @@ -141,7 +141,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { return } ensuredAlive.Store(true) - assert.Equal(t, nodeStateAlive, node.State()) + assert.Equal(t, NodeStateAlive, node.State()) }).Once() // redundant call to stay in alive state rpc.On("Ping", mock.Anything).Return(nil) @@ -173,7 +173,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { node.declareAlive() tests.AssertLogCountEventually(t, observedLogs, fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", node.String()), pollFailureThreshold) tests.AssertEventually(t, func() bool { - return nodeStateUnreachable == node.State() + return NodeStateUnreachable == node.State() }) }) t.Run("with threshold poll failures, but we are the last node alive, forcibly keeps it alive", func(t *testing.T) { @@ -197,7 +197,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { rpc.On("Ping", mock.Anything).Return(pollError) node.declareAlive() tests.AssertLogEventually(t, observedLogs, fmt.Sprintf("RPC endpoint failed to respond to %d consecutive polls", pollFailureThreshold)) - assert.Equal(t, nodeStateAlive, node.State()) + assert.Equal(t, NodeStateAlive, node.State()) }) t.Run("when behind more than SyncThreshold, transitions to out of sync", func(t *testing.T) { t.Parallel() @@ -221,13 +221,13 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { rpc.On("Ping", mock.Anything).Return(nil) // tries to redial in outOfSync rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Run(func(_ mock.Arguments) { - assert.Equal(t, nodeStateOutOfSync, node.State()) + assert.Equal(t, NodeStateOutOfSync, node.State()) }).Once() // disconnects all on transfer to unreachable or outOfSync rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Maybe() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Run(func(_ mock.Arguments) { - require.Equal(t, nodeStateOutOfSync, node.State()) + require.Equal(t, NodeStateOutOfSync, node.State()) }).Return(errors.New("failed to dial")).Maybe() node.declareAlive() tests.AssertLogEventually(t, observedLogs, "Dial failed: Node is unreachable") @@ -276,7 +276,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { rpc.On("Ping", mock.Anything).Return(nil) node.declareAlive() tests.AssertLogCountEventually(t, observedLogs, "Ping successful", 2) - assert.Equal(t, nodeStateAlive, node.State()) + assert.Equal(t, NodeStateAlive, node.State()) }) t.Run("when no new heads received for threshold, transitions to out of sync", func(t *testing.T) { @@ -292,7 +292,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() // tries to redial in outOfSync rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Run(func(_ mock.Arguments) { - assert.Equal(t, nodeStateOutOfSync, node.State()) + assert.Equal(t, NodeStateOutOfSync, node.State()) }).Once() // disconnects all on transfer to unreachable or outOfSync rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Maybe() @@ -302,7 +302,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { tests.AssertEventually(t, func() bool { // right after outOfSync we'll transfer to unreachable due to returned error on Dial // we check that we were in out of sync state on first Dial call - return node.State() == nodeStateUnreachable + return node.State() == NodeStateUnreachable }) }) t.Run("when no new heads received for threshold but we are the last live node, forcibly stays alive", func(t *testing.T) { @@ -323,7 +323,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { } node.declareAlive() tests.AssertLogEventually(t, observedLogs, fmt.Sprintf("RPC endpoint detected out of sync; %s %s", msgCannotDisable, msgDegradedState)) - assert.Equal(t, nodeStateAlive, node.State()) + assert.Equal(t, NodeStateAlive, node.State()) }) t.Run("rpc closed head channel", func(t *testing.T) { @@ -352,7 +352,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() tests.AssertLogEventually(t, observedLogs, "Subscription channel unexpectedly closed") - assert.Equal(t, nodeStateUnreachable, node.State()) + assert.Equal(t, NodeStateUnreachable, node.State()) }) t.Run("updates block number and difficulty on new head", func(t *testing.T) { t.Parallel() @@ -374,7 +374,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { node.declareAlive() tests.AssertEventually(t, func() bool { state, chainInfo := node.StateAndLatest() - return state == nodeStateAlive && chainInfo.BlockNumber == expectedBlockNumber + return state == NodeStateAlive && chainInfo.BlockNumber == expectedBlockNumber }) }) t.Run("If finality tag is not enabled updates finalized block metric using finality depth and latest head", func(t *testing.T) { @@ -545,7 +545,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { opts.rpc.On("Close").Return(nil).Once() // disconnects all on transfer to unreachable or outOfSync opts.rpc.On("UnsubscribeAllExcept", nil, nil) - node.setState(nodeStateAlive) + node.setState(NodeStateAlive) return node } @@ -556,7 +556,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { t.Run("returns on closed", func(t *testing.T) { t.Parallel() node := newTestNode(t, testNodeOpts{}) - node.setState(nodeStateClosed) + node.setState(NodeStateClosed) node.wg.Add(1) node.outOfSyncLoop(stubIsOutOfSync) }) @@ -590,7 +590,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { return true }) tests.AssertLogCountEventually(t, observedLogs, msgReceivedBlock, len(heads)) - assert.Equal(t, nodeStateOutOfSync, node.State()) + assert.Equal(t, NodeStateOutOfSync, node.State()) }) t.Run("if initial dial fails, transitions to unreachable", func(t *testing.T) { t.Parallel() @@ -605,7 +605,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(expectedError) node.declareOutOfSync(stubIsOutOfSync) tests.AssertEventually(t, func() bool { - return node.State() == nodeStateUnreachable + return node.State() == NodeStateUnreachable }) }) t.Run("if fail to get chainID, transitions to unreachable", func(t *testing.T) { @@ -625,7 +625,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("ChainID", mock.Anything).Return(types.NewIDFromInt(0), expectedError) node.declareOutOfSync(stubIsOutOfSync) tests.AssertEventually(t, func() bool { - return node.State() == nodeStateUnreachable + return node.State() == NodeStateUnreachable }) }) t.Run("if chainID does not match, transitions to invalidChainID", func(t *testing.T) { @@ -645,7 +645,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) node.declareOutOfSync(stubIsOutOfSync) tests.AssertEventually(t, func() bool { - return node.State() == nodeStateInvalidChainID + return node.State() == NodeStateInvalidChainID }) }) t.Run("if syncing, transitions to syncing", func(t *testing.T) { @@ -665,7 +665,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("IsSyncing", mock.Anything).Return(true, nil) node.declareOutOfSync(stubIsOutOfSync) tests.AssertEventually(t, func() bool { - return node.State() == nodeStateSyncing + return node.State() == NodeStateSyncing }) }) t.Run("if fails to fetch syncing status, transitions to unreachable", func(t *testing.T) { @@ -688,7 +688,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("IsSyncing", mock.Anything).Return(false, errors.New("failed to check syncing")) node.declareOutOfSync(stubIsOutOfSync) tests.AssertEventually(t, func() bool { - return node.State() == nodeStateUnreachable + return node.State() == NodeStateUnreachable }) }) t.Run("if fails to subscribe, becomes unreachable", func(t *testing.T) { @@ -708,7 +708,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() node.declareOutOfSync(stubIsOutOfSync) tests.AssertEventually(t, func() bool { - return node.State() == nodeStateUnreachable + return node.State() == NodeStateUnreachable }) }) t.Run("on subscription termination becomes unreachable", func(t *testing.T) { @@ -736,7 +736,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { node.declareOutOfSync(stubIsOutOfSync) tests.AssertLogEventually(t, observedLogs, "Subscription was terminated") tests.AssertEventually(t, func() bool { - return node.State() == nodeStateUnreachable + return node.State() == NodeStateUnreachable }) }) t.Run("becomes unreachable if head channel is closed", func(t *testing.T) { @@ -765,7 +765,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { node.declareOutOfSync(stubIsOutOfSync) tests.AssertLogEventually(t, observedLogs, "Subscription channel unexpectedly closed") tests.AssertEventually(t, func() bool { - return node.State() == nodeStateUnreachable + return node.State() == NodeStateUnreachable }) }) @@ -800,7 +800,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { tests.AssertLogEventually(t, observedLogs, msgReceivedBlock) tests.AssertLogEventually(t, observedLogs, msgInSync) tests.AssertEventually(t, func() bool { - return node.State() == nodeStateAlive + return node.State() == NodeStateAlive }) }) t.Run("becomes alive if there is no other nodes", func(t *testing.T) { @@ -833,7 +833,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { node.declareOutOfSync(stubIsOutOfSync) tests.AssertLogEventually(t, observedLogs, "RPC endpoint is still out of sync, but there are no other available nodes. This RPC node will be forcibly moved back into the live pool in a degraded state") tests.AssertEventually(t, func() bool { - return node.State() == nodeStateAlive + return node.State() == NodeStateAlive }) }) } @@ -847,13 +847,13 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { // disconnects all on transfer to unreachable opts.rpc.On("UnsubscribeAllExcept", nil, nil) - node.setState(nodeStateAlive) + node.setState(NodeStateAlive) return node } t.Run("returns on closed", func(t *testing.T) { t.Parallel() node := newTestNode(t, testNodeOpts{}) - node.setState(nodeStateClosed) + node.setState(NodeStateClosed) node.wg.Add(1) node.unreachableLoop() }) @@ -887,7 +887,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Run(func(_ mock.Arguments) { - assert.Equal(t, nodeStateDialed, node.State()) + assert.Equal(t, NodeStateDialed, node.State()) }).Return(nodeChainID, errors.New("failed to get chain id")) node.declareUnreachable() tests.AssertLogCountEventually(t, observedLogs, "Failed to verify chain ID for node", 2) @@ -907,7 +907,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) node.declareUnreachable() tests.AssertEventually(t, func() bool { - return node.State() == nodeStateInvalidChainID + return node.State() == NodeStateInvalidChainID }) }) t.Run("on syncing status check failure, keeps trying", func(t *testing.T) { @@ -925,7 +925,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Run(func(_ mock.Arguments) { - assert.Equal(t, nodeStateDialed, node.State()) + assert.Equal(t, NodeStateDialed, node.State()) }).Return(nodeChainID, nil) rpc.On("IsSyncing", mock.Anything).Return(false, errors.New("failed to check syncing status")) node.declareUnreachable() @@ -950,7 +950,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { node.declareUnreachable() tests.AssertEventually(t, func() bool { - return node.State() == nodeStateSyncing + return node.State() == NodeStateSyncing }) }) t.Run("on successful verification becomes alive", func(t *testing.T) { @@ -976,7 +976,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { node.declareUnreachable() tests.AssertEventually(t, func() bool { - return node.State() == nodeStateAlive + return node.State() == NodeStateAlive }) }) t.Run("on successful verification without isSyncing becomes alive", func(t *testing.T) { @@ -996,7 +996,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { node.declareUnreachable() tests.AssertEventually(t, func() bool { - return node.State() == nodeStateAlive + return node.State() == NodeStateAlive }) }) } @@ -1007,13 +1007,13 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { node := newTestNode(t, opts) opts.rpc.On("Close").Return(nil).Once() - node.setState(nodeStateDialed) + node.setState(NodeStateDialed) return node } t.Run("returns on closed", func(t *testing.T) { t.Parallel() node := newTestNode(t, testNodeOpts{}) - node.setState(nodeStateClosed) + node.setState(NodeStateClosed) node.wg.Add(1) node.invalidChainIDLoop() }) @@ -1032,7 +1032,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { node.declareInvalidChainID() tests.AssertEventually(t, func() bool { - return node.State() == nodeStateUnreachable + return node.State() == NodeStateUnreachable }) }) t.Run("on failed chainID call becomes unreachable", func(t *testing.T) { @@ -1055,7 +1055,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { node.declareInvalidChainID() tests.AssertLogEventually(t, observedLogs, "Failed to verify chain ID for node") tests.AssertEventually(t, func() bool { - return node.State() == nodeStateUnreachable + return node.State() == NodeStateUnreachable }) }) t.Run("on chainID mismatch keeps trying", func(t *testing.T) { @@ -1077,7 +1077,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { node.declareInvalidChainID() tests.AssertLogCountEventually(t, observedLogs, "Failed to verify RPC node; remote endpoint returned the wrong chain ID", 2) tests.AssertEventually(t, func() bool { - return node.State() == nodeStateInvalidChainID + return node.State() == NodeStateInvalidChainID }) }) t.Run("on successful verification without isSyncing becomes alive", func(t *testing.T) { @@ -1105,7 +1105,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { node.declareInvalidChainID() tests.AssertEventually(t, func() bool { - return node.State() == nodeStateAlive + return node.State() == NodeStateAlive }) }) t.Run("on successful verification becomes alive", func(t *testing.T) { @@ -1133,7 +1133,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { node.declareInvalidChainID() tests.AssertEventually(t, func() bool { - return node.State() == nodeStateAlive + return node.State() == NodeStateAlive }) }) } @@ -1166,7 +1166,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { assert.NoError(t, err) tests.AssertLogEventually(t, observedLogs, "Dial failed: Node is unreachable") tests.AssertEventually(t, func() bool { - return node.State() == nodeStateUnreachable + return node.State() == NodeStateUnreachable }) }) t.Run("if chainID verification fails, becomes unreachable", func(t *testing.T) { @@ -1183,7 +1183,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Run(func(_ mock.Arguments) { - assert.Equal(t, nodeStateDialed, node.State()) + assert.Equal(t, NodeStateDialed, node.State()) }).Return(nodeChainID, errors.New("failed to get chain id")) // disconnects all on transfer to unreachable rpc.On("UnsubscribeAllExcept", nil, nil) @@ -1191,7 +1191,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { assert.NoError(t, err) tests.AssertLogEventually(t, observedLogs, "Failed to verify chain ID for node") tests.AssertEventually(t, func() bool { - return node.State() == nodeStateUnreachable + return node.State() == NodeStateUnreachable }) }) t.Run("on chain ID mismatch transitions to invalidChainID", func(t *testing.T) { @@ -1212,7 +1212,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { err := node.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertEventually(t, func() bool { - return node.State() == nodeStateInvalidChainID + return node.State() == NodeStateInvalidChainID }) }) t.Run("if syncing verification fails, becomes unreachable", func(t *testing.T) { @@ -1230,7 +1230,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Run(func(_ mock.Arguments) { - assert.Equal(t, nodeStateDialed, node.State()) + assert.Equal(t, NodeStateDialed, node.State()) }).Return(nodeChainID, nil).Once() rpc.On("IsSyncing", mock.Anything).Return(false, errors.New("failed to check syncing status")) // disconnects all on transfer to unreachable @@ -1241,7 +1241,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { assert.NoError(t, err) tests.AssertLogEventually(t, observedLogs, "Unexpected error while verifying RPC node synchronization status") tests.AssertEventually(t, func() bool { - return node.State() == nodeStateUnreachable + return node.State() == NodeStateUnreachable }) }) t.Run("on isSyncing transitions to syncing", func(t *testing.T) { @@ -1263,7 +1263,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { err := node.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertEventually(t, func() bool { - return node.State() == nodeStateSyncing + return node.State() == NodeStateSyncing }) }) t.Run("on successful verification becomes alive", func(t *testing.T) { @@ -1289,7 +1289,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { err := node.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertEventually(t, func() bool { - return node.State() == nodeStateAlive + return node.State() == NodeStateAlive }) }) t.Run("on successful verification without isSyncing becomes alive", func(t *testing.T) { @@ -1314,7 +1314,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { err := node.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertEventually(t, func() bool { - return node.State() == nodeStateAlive + return node.State() == NodeStateAlive }) }) } @@ -1461,13 +1461,13 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { opts.rpc.On("Close").Return(nil).Once() opts.rpc.On("UnsubscribeAllExcept", nil, nil) - node.setState(nodeStateDialed) + node.setState(NodeStateDialed) return node } t.Run("returns on closed", func(t *testing.T) { t.Parallel() node := newTestNode(t, testNodeOpts{}) - node.setState(nodeStateClosed) + node.setState(NodeStateClosed) node.wg.Add(1) node.syncingLoop() }) @@ -1484,7 +1484,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")) node.declareSyncing() tests.AssertEventually(t, func() bool { - return node.State() == nodeStateUnreachable + return node.State() == NodeStateUnreachable }) }) t.Run("on failed chainID call becomes unreachable", func(t *testing.T) { @@ -1506,7 +1506,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { node.declareSyncing() tests.AssertLogEventually(t, observedLogs, "Failed to verify chain ID for node") tests.AssertEventually(t, func() bool { - return node.State() == nodeStateUnreachable + return node.State() == NodeStateUnreachable }) }) t.Run("on chainID mismatch transitions to invalidChainID", func(t *testing.T) { @@ -1527,7 +1527,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { node.declareSyncing() tests.AssertLogCountEventually(t, observedLogs, "Failed to verify RPC node; remote endpoint returned the wrong chain ID", 2) tests.AssertEventually(t, func() bool { - return node.State() == nodeStateInvalidChainID + return node.State() == NodeStateInvalidChainID }) }) t.Run("on failed Syncing check - becomes unreachable", func(t *testing.T) { @@ -1551,7 +1551,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { node.declareSyncing() tests.AssertLogEventually(t, observedLogs, "Unexpected error while verifying RPC node synchronization status") tests.AssertEventually(t, func() bool { - return node.State() == nodeStateUnreachable + return node.State() == NodeStateUnreachable }) }) t.Run("on IsSyncing - keeps trying", func(t *testing.T) { @@ -1572,7 +1572,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { node.declareSyncing() tests.AssertLogCountEventually(t, observedLogs, "Verification failed: Node is syncing", 2) tests.AssertEventually(t, func() bool { - return node.State() == nodeStateSyncing + return node.State() == NodeStateSyncing }) }) t.Run("on successful verification becomes alive", func(t *testing.T) { @@ -1598,7 +1598,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { node.declareSyncing() tests.AssertEventually(t, func() bool { - return node.State() == nodeStateAlive + return node.State() == NodeStateAlive }) }) } diff --git a/common/client/node_selector_highest_head.go b/common/client/node_selector_highest_head.go index b341d91b5ef..11a74801637 100644 --- a/common/client/node_selector_highest_head.go +++ b/common/client/node_selector_highest_head.go @@ -26,7 +26,7 @@ func (s highestHeadNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, HE for _, n := range s { state, chainInfo := n.StateAndLatest() currentHeadNumber := chainInfo.BlockNumber - if state == nodeStateAlive && currentHeadNumber >= highestHeadNumber { + if state == NodeStateAlive && currentHeadNumber >= highestHeadNumber { if highestHeadNumber < currentHeadNumber { highestHeadNumber = currentHeadNumber highestHeadNodes = nil diff --git a/common/client/node_selector_highest_head_test.go b/common/client/node_selector_highest_head_test.go index db66e9777de..9d9612e82ee 100644 --- a/common/client/node_selector_highest_head_test.go +++ b/common/client/node_selector_highest_head_test.go @@ -24,13 +24,13 @@ func TestHighestHeadNodeSelector(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) if i == 0 { // first node is out of sync - node.On("StateAndLatest").Return(nodeStateOutOfSync, ChainInfo{BlockNumber: -1}) + node.On("StateAndLatest").Return(NodeStateOutOfSync, ChainInfo{BlockNumber: -1}) } else if i == 1 { // second node is alive, LatestReceivedBlockNumber = 1 - node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1}) + node.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 1}) } else { // third node is alive, LatestReceivedBlockNumber = 2 (best node) - node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 2}) + node.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 2}) } node.On("Order").Maybe().Return(int32(1)) nodes = append(nodes, node) @@ -42,7 +42,7 @@ func TestHighestHeadNodeSelector(t *testing.T) { t.Run("stick to the same node", func(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) // fourth node is alive, LatestReceivedBlockNumber = 2 (same as 3rd) - node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 2}) + node.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 2}) node.On("Order").Return(int32(1)) nodes = append(nodes, node) @@ -53,7 +53,7 @@ func TestHighestHeadNodeSelector(t *testing.T) { t.Run("another best node", func(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) // fifth node is alive, LatestReceivedBlockNumber = 3 (better than 3rd and 4th) - node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 3}) + node.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 3}) node.On("Order").Return(int32(1)) nodes = append(nodes, node) @@ -63,10 +63,10 @@ func TestHighestHeadNodeSelector(t *testing.T) { t.Run("nodes never update latest block number", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: -1}) + node1.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: -1}) node1.On("Order").Return(int32(1)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: -1}) + node2.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: -1}) node2.On("Order").Return(int32(1)) selector := newNodeSelector(NodeSelectionModeHighestHead, []Node[types.ID, Head, nodeClient]{node1, node2}) assert.Same(t, node1, selector.Select()) @@ -83,10 +83,10 @@ func TestHighestHeadNodeSelector_None(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) if i == 0 { // first node is out of sync - node.On("StateAndLatest").Return(nodeStateOutOfSync, ChainInfo{BlockNumber: -1}) + node.On("StateAndLatest").Return(NodeStateOutOfSync, ChainInfo{BlockNumber: -1}) } else { // others are unreachable - node.On("StateAndLatest").Return(nodeStateUnreachable, ChainInfo{BlockNumber: 1}) + node.On("StateAndLatest").Return(NodeStateUnreachable, ChainInfo{BlockNumber: 1}) } nodes = append(nodes, node) } @@ -104,7 +104,7 @@ func TestHighestHeadNodeSelectorWithOrder(t *testing.T) { t.Run("same head and order", func(t *testing.T) { for i := 0; i < 3; i++ { node := newMockNode[types.ID, Head, nodeClient](t) - node.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1}) + node.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 1}) node.On("Order").Return(int32(2)) nodes = append(nodes, node) } @@ -115,15 +115,15 @@ func TestHighestHeadNodeSelectorWithOrder(t *testing.T) { t.Run("same head but different order", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 3}) + node1.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 3}) node1.On("Order").Return(int32(3)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 3}) + node2.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 3}) node2.On("Order").Return(int32(1)) node3 := newMockNode[types.ID, Head, nodeClient](t) - node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 3}) + node3.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 3}) node3.On("Order").Return(int32(2)) nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3} @@ -134,15 +134,15 @@ func TestHighestHeadNodeSelectorWithOrder(t *testing.T) { t.Run("different head but same order", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 1}) + node1.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 1}) node1.On("Order").Maybe().Return(int32(3)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 2}) + node2.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 2}) node2.On("Order").Maybe().Return(int32(3)) node3 := newMockNode[types.ID, Head, nodeClient](t) - node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 3}) + node3.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 3}) node3.On("Order").Return(int32(3)) nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3} @@ -153,19 +153,19 @@ func TestHighestHeadNodeSelectorWithOrder(t *testing.T) { t.Run("different head and different order", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 10}) + node1.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 10}) node1.On("Order").Maybe().Return(int32(3)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 11}) + node2.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 11}) node2.On("Order").Maybe().Return(int32(4)) node3 := newMockNode[types.ID, Head, nodeClient](t) - node3.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 12}) + node3.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 12}) node3.On("Order").Maybe().Return(int32(3)) node4 := newMockNode[types.ID, Head, nodeClient](t) - node4.On("StateAndLatest").Return(nodeStateAlive, ChainInfo{BlockNumber: 10}) + node4.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 10}) node4.On("Order").Maybe().Return(int32(1)) nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3, node4} diff --git a/common/client/node_selector_priority_level.go b/common/client/node_selector_priority_level.go index e137932479a..0565c4c4f2a 100644 --- a/common/client/node_selector_priority_level.go +++ b/common/client/node_selector_priority_level.go @@ -56,12 +56,12 @@ func (s priorityLevelNodeSelector[CHAIN_ID, HEAD, RPC]) Name() string { return NodeSelectionModePriorityLevel } -// getHighestPriorityAliveTier filters nodes that are not in state nodeStateAlive and +// getHighestPriorityAliveTier filters nodes that are not in state NodeStateAlive and // returns only the highest tier of alive nodes func (s priorityLevelNodeSelector[CHAIN_ID, HEAD, RPC]) getHighestPriorityAliveTier() []nodeWithPriority[CHAIN_ID, HEAD, RPC] { var nodes []nodeWithPriority[CHAIN_ID, HEAD, RPC] for _, n := range s.nodes { - if n.State() == nodeStateAlive { + if n.State() == NodeStateAlive { nodes = append(nodes, nodeWithPriority[CHAIN_ID, HEAD, RPC]{n, n.Order()}) } } diff --git a/common/client/node_selector_priority_level_test.go b/common/client/node_selector_priority_level_test.go index d9139a4ccaf..362625f4cf2 100644 --- a/common/client/node_selector_priority_level_test.go +++ b/common/client/node_selector_priority_level_test.go @@ -31,34 +31,34 @@ func TestPriorityLevelNodeSelector(t *testing.T) { { name: "TwoNodesSameOrder: Highest Allowed Order", nodes: []testNode{ - {order: 1, state: nodeStateAlive}, - {order: 1, state: nodeStateAlive}, + {order: 1, state: NodeStateAlive}, + {order: 1, state: NodeStateAlive}, }, expect: []int{0, 1, 0, 1, 0, 1}, }, { name: "TwoNodesSameOrder: Lowest Allowed Order", nodes: []testNode{ - {order: 100, state: nodeStateAlive}, - {order: 100, state: nodeStateAlive}, + {order: 100, state: NodeStateAlive}, + {order: 100, state: NodeStateAlive}, }, expect: []int{0, 1, 0, 1, 0, 1}, }, { name: "NoneAvailable", nodes: []testNode{ - {order: 1, state: nodeStateOutOfSync}, - {order: 1, state: nodeStateUnreachable}, - {order: 1, state: nodeStateUnreachable}, + {order: 1, state: NodeStateOutOfSync}, + {order: 1, state: NodeStateUnreachable}, + {order: 1, state: NodeStateUnreachable}, }, expect: []int{}, // no nodes should be selected }, { name: "DifferentOrder", nodes: []testNode{ - {order: 1, state: nodeStateAlive}, - {order: 2, state: nodeStateAlive}, - {order: 3, state: nodeStateAlive}, + {order: 1, state: NodeStateAlive}, + {order: 2, state: NodeStateAlive}, + {order: 3, state: NodeStateAlive}, }, expect: []int{0, 0}, // only the highest order node should be selected }, diff --git a/common/client/node_selector_round_robin.go b/common/client/node_selector_round_robin.go index 8b5c1bc8b0f..a914c06c21e 100644 --- a/common/client/node_selector_round_robin.go +++ b/common/client/node_selector_round_robin.go @@ -28,7 +28,7 @@ func NewRoundRobinSelector[ func (s *roundRobinSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, HEAD, RPC] { var liveNodes []Node[CHAIN_ID, HEAD, RPC] for _, n := range s.nodes { - if n.State() == nodeStateAlive { + if n.State() == NodeStateAlive { liveNodes = append(liveNodes, n) } } diff --git a/common/client/node_selector_round_robin_test.go b/common/client/node_selector_round_robin_test.go index acd0e268849..866a02222ec 100644 --- a/common/client/node_selector_round_robin_test.go +++ b/common/client/node_selector_round_robin_test.go @@ -23,10 +23,10 @@ func TestRoundRobinNodeSelector(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) if i == 0 { // first node is out of sync - node.On("State").Return(nodeStateOutOfSync) + node.On("State").Return(NodeStateOutOfSync) } else { // second & third nodes are alive - node.On("State").Return(nodeStateAlive) + node.On("State").Return(NodeStateAlive) } nodes = append(nodes, node) } @@ -48,10 +48,10 @@ func TestRoundRobinNodeSelector_None(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) if i == 0 { // first node is out of sync - node.On("State").Return(nodeStateOutOfSync) + node.On("State").Return(NodeStateOutOfSync) } else { // others are unreachable - node.On("State").Return(nodeStateUnreachable) + node.On("State").Return(NodeStateUnreachable) } nodes = append(nodes, node) } diff --git a/common/client/node_selector_total_difficulty.go b/common/client/node_selector_total_difficulty.go index a0e1dce5335..56ab0fbfae9 100644 --- a/common/client/node_selector_total_difficulty.go +++ b/common/client/node_selector_total_difficulty.go @@ -28,7 +28,7 @@ func (s totalDifficultyNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID for _, n := range s { state, chainInfo := n.StateAndLatest() - if state != nodeStateAlive { + if state != NodeStateAlive { continue } diff --git a/common/client/node_selector_total_difficulty_test.go b/common/client/node_selector_total_difficulty_test.go index c03b923d76d..2e82998903a 100644 --- a/common/client/node_selector_total_difficulty_test.go +++ b/common/client/node_selector_total_difficulty_test.go @@ -24,15 +24,15 @@ func TestTotalDifficultyNodeSelector(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) if i == 0 { // first node is out of sync - node.On("StateAndLatest").Return(nodeStateOutOfSync, + node.On("StateAndLatest").Return(NodeStateOutOfSync, ChainInfo{BlockNumber: -1, BlockDifficulty: nil}) } else if i == 1 { // second node is alive - node.On("StateAndLatest").Return(nodeStateAlive, + node.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(7)}) } else { // third node is alive and best - node.On("StateAndLatest").Return(nodeStateAlive, + node.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 2, BlockDifficulty: big.NewInt(8)}) } node.On("Order").Maybe().Return(int32(1)) @@ -45,7 +45,7 @@ func TestTotalDifficultyNodeSelector(t *testing.T) { t.Run("stick to the same node", func(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) // fourth node is alive (same as 3rd) - node.On("StateAndLatest").Return(nodeStateAlive, + node.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 2, BlockDifficulty: big.NewInt(8)}) node.On("Order").Maybe().Return(int32(1)) nodes = append(nodes, node) @@ -57,7 +57,7 @@ func TestTotalDifficultyNodeSelector(t *testing.T) { t.Run("another best node", func(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) // fifth node is alive (better than 3rd and 4th) - node.On("StateAndLatest").Return(nodeStateAlive, + node.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 3, BlockDifficulty: big.NewInt(11)}) node.On("Order").Maybe().Return(int32(1)) nodes = append(nodes, node) @@ -68,11 +68,11 @@ func TestTotalDifficultyNodeSelector(t *testing.T) { t.Run("nodes never update latest block number", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, + node1.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: -1, BlockDifficulty: nil}) node1.On("Order").Maybe().Return(int32(1)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, + node2.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: -1, BlockDifficulty: nil}) node2.On("Order").Maybe().Return(int32(1)) nodes := []Node[types.ID, Head, nodeClient]{node1, node2} @@ -92,10 +92,10 @@ func TestTotalDifficultyNodeSelector_None(t *testing.T) { node := newMockNode[types.ID, Head, nodeClient](t) if i == 0 { // first node is out of sync - node.On("StateAndLatest").Return(nodeStateOutOfSync, ChainInfo{BlockNumber: -1}) + node.On("StateAndLatest").Return(NodeStateOutOfSync, ChainInfo{BlockNumber: -1}) } else { // others are unreachable - node.On("StateAndLatest").Return(nodeStateUnreachable, + node.On("StateAndLatest").Return(NodeStateUnreachable, ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(7)}) } nodes = append(nodes, node) @@ -114,7 +114,7 @@ func TestTotalDifficultyNodeSelectorWithOrder(t *testing.T) { t.Run("same td and order", func(t *testing.T) { for i := 0; i < 3; i++ { node := newMockNode[types.ID, Head, nodeClient](t) - node.On("StateAndLatest").Return(nodeStateAlive, + node.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(10)}) node.On("Order").Return(int32(2)) nodes = append(nodes, node) @@ -126,17 +126,17 @@ func TestTotalDifficultyNodeSelectorWithOrder(t *testing.T) { t.Run("same td but different order", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, + node1.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 3, BlockDifficulty: big.NewInt(10)}) node1.On("Order").Return(int32(3)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, + node2.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 3, BlockDifficulty: big.NewInt(10)}) node2.On("Order").Return(int32(1)) node3 := newMockNode[types.ID, Head, nodeClient](t) - node3.On("StateAndLatest").Return(nodeStateAlive, + node3.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 3, BlockDifficulty: big.NewInt(10)}) node3.On("Order").Return(int32(2)) @@ -148,17 +148,17 @@ func TestTotalDifficultyNodeSelectorWithOrder(t *testing.T) { t.Run("different td but same order", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, + node1.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(10)}) node1.On("Order").Maybe().Return(int32(3)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, + node2.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(11)}) node2.On("Order").Maybe().Return(int32(3)) node3 := newMockNode[types.ID, Head, nodeClient](t) - node3.On("StateAndLatest").Return(nodeStateAlive, + node3.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(12)}) node3.On("Order").Return(int32(3)) @@ -170,22 +170,22 @@ func TestTotalDifficultyNodeSelectorWithOrder(t *testing.T) { t.Run("different head and different order", func(t *testing.T) { node1 := newMockNode[types.ID, Head, nodeClient](t) - node1.On("StateAndLatest").Return(nodeStateAlive, + node1.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(100)}) node1.On("Order").Maybe().Return(int32(4)) node2 := newMockNode[types.ID, Head, nodeClient](t) - node2.On("StateAndLatest").Return(nodeStateAlive, + node2.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(110)}) node2.On("Order").Maybe().Return(int32(5)) node3 := newMockNode[types.ID, Head, nodeClient](t) - node3.On("StateAndLatest").Return(nodeStateAlive, + node3.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(110)}) node3.On("Order").Maybe().Return(int32(1)) node4 := newMockNode[types.ID, Head, nodeClient](t) - node4.On("StateAndLatest").Return(nodeStateAlive, + node4.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(105)}) node4.On("Order").Maybe().Return(int32(2)) diff --git a/common/client/send_only_node.go b/common/client/send_only_node.go index 5d48bc172b9..ba70ec32461 100644 --- a/common/client/send_only_node.go +++ b/common/client/send_only_node.go @@ -96,7 +96,7 @@ func (s *sendOnlyNode[CHAIN_ID, RPC]) Start(ctx context.Context) error { // Start setups up and verifies the sendonly node // Should only be called once in a node's lifecycle func (s *sendOnlyNode[CHAIN_ID, RPC]) start(startCtx context.Context) { - if s.State() != nodeStateUndialed { + if s.State() != NodeStateUndialed { panic(fmt.Sprintf("cannot dial node with state %v", s.state)) } @@ -104,10 +104,10 @@ func (s *sendOnlyNode[CHAIN_ID, RPC]) start(startCtx context.Context) { if err != nil { promPoolRPCNodeTransitionsToUnusable.WithLabelValues(s.chainID.String(), s.name).Inc() s.log.Errorw("Dial failed: SendOnly Node is unusable", "err", err) - s.setState(nodeStateUnusable) + s.setState(NodeStateUnusable) return } - s.setState(nodeStateDialed) + s.setState(NodeStateDialed) if s.chainID.String() == "0" { // Skip verification if chainID is zero @@ -119,7 +119,7 @@ func (s *sendOnlyNode[CHAIN_ID, RPC]) start(startCtx context.Context) { if err != nil { promPoolRPCNodeTransitionsToUnreachable.WithLabelValues(s.chainID.String(), s.name).Inc() s.log.Errorw(fmt.Sprintf("Verify failed: %v", err), "err", err) - s.setState(nodeStateUnreachable) + s.setState(NodeStateUnreachable) } else { promPoolRPCNodeTransitionsToInvalidChainID.WithLabelValues(s.chainID.String(), s.name).Inc() s.log.Errorf( @@ -128,7 +128,7 @@ func (s *sendOnlyNode[CHAIN_ID, RPC]) start(startCtx context.Context) { s.chainID.String(), s.name, ) - s.setState(nodeStateInvalidChainID) + s.setState(NodeStateInvalidChainID) } // Since it has failed, spin up the verifyLoop that will keep // retrying until success @@ -139,7 +139,7 @@ func (s *sendOnlyNode[CHAIN_ID, RPC]) start(startCtx context.Context) { } promPoolRPCNodeTransitionsToAlive.WithLabelValues(s.chainID.String(), s.name).Inc() - s.setState(nodeStateAlive) + s.setState(NodeStateAlive) s.log.Infow("Sendonly RPC Node is online", "NodeState", s.state) } @@ -148,7 +148,7 @@ func (s *sendOnlyNode[CHAIN_ID, RPC]) Close() error { s.rpc.Close() close(s.chStop) s.wg.Wait() - s.setState(nodeStateClosed) + s.setState(NodeStateClosed) return nil }) } diff --git a/common/client/send_only_node_lifecycle.go b/common/client/send_only_node_lifecycle.go index 20d54ba68cf..a6ac112488b 100644 --- a/common/client/send_only_node_lifecycle.go +++ b/common/client/send_only_node_lifecycle.go @@ -26,7 +26,7 @@ func (s *sendOnlyNode[CHAIN_ID, RPC]) verifyLoop() { chainID, err := s.rpc.ChainID(ctx) if err != nil { ok := s.IfStarted(func() { - if changed := s.setState(nodeStateUnreachable); changed { + if changed := s.setState(NodeStateUnreachable); changed { promPoolRPCNodeTransitionsToUnreachable.WithLabelValues(s.chainID.String(), s.name).Inc() } }) @@ -37,7 +37,7 @@ func (s *sendOnlyNode[CHAIN_ID, RPC]) verifyLoop() { continue } else if chainID.String() != s.chainID.String() { ok := s.IfStarted(func() { - if changed := s.setState(nodeStateInvalidChainID); changed { + if changed := s.setState(NodeStateInvalidChainID); changed { promPoolRPCNodeTransitionsToInvalidChainID.WithLabelValues(s.chainID.String(), s.name).Inc() } }) @@ -54,7 +54,7 @@ func (s *sendOnlyNode[CHAIN_ID, RPC]) verifyLoop() { continue } ok := s.IfStarted(func() { - if changed := s.setState(nodeStateAlive); changed { + if changed := s.setState(NodeStateAlive); changed { promPoolRPCNodeTransitionsToAlive.WithLabelValues(s.chainID.String(), s.name).Inc() } }) diff --git a/common/client/send_only_node_test.go b/common/client/send_only_node_test.go index 532946da48f..352fb5b92ea 100644 --- a/common/client/send_only_node_test.go +++ b/common/client/send_only_node_test.go @@ -53,7 +53,7 @@ func TestStartSendOnlyNode(t *testing.T) { err := s.Start(tests.Context(t)) require.NoError(t, err) - assert.Equal(t, nodeStateUnusable, s.State()) + assert.Equal(t, NodeStateUnusable, s.State()) tests.RequireLogMessage(t, observedLogs, "Dial failed: SendOnly Node is unusable") }) t.Run("Default ChainID(0) produces warn and skips checks", func(t *testing.T) { @@ -68,7 +68,7 @@ func TestStartSendOnlyNode(t *testing.T) { err := s.Start(tests.Context(t)) require.NoError(t, err) - assert.Equal(t, nodeStateAlive, s.State()) + assert.Equal(t, NodeStateAlive, s.State()) tests.RequireLogMessage(t, observedLogs, "sendonly rpc ChainID verification skipped") }) t.Run("Can recover from chainID verification failure", func(t *testing.T) { @@ -89,10 +89,10 @@ func TestStartSendOnlyNode(t *testing.T) { err := s.Start(tests.Context(t)) require.NoError(t, err) - assert.Equal(t, nodeStateUnreachable, s.State()) + assert.Equal(t, NodeStateUnreachable, s.State()) tests.AssertLogCountEventually(t, observedLogs, fmt.Sprintf("Verify failed: %v", expectedError), failuresCount) tests.AssertEventually(t, func() bool { - return s.State() == nodeStateAlive + return s.State() == NodeStateAlive }) }) t.Run("Can recover from chainID mismatch", func(t *testing.T) { @@ -112,10 +112,10 @@ func TestStartSendOnlyNode(t *testing.T) { err := s.Start(tests.Context(t)) require.NoError(t, err) - assert.Equal(t, nodeStateInvalidChainID, s.State()) + assert.Equal(t, NodeStateInvalidChainID, s.State()) tests.AssertLogCountEventually(t, observedLogs, "sendonly rpc ChainID doesn't match local chain ID", failuresCount) tests.AssertEventually(t, func() bool { - return s.State() == nodeStateAlive + return s.State() == NodeStateAlive }) }) t.Run("Start with Random ChainID", func(t *testing.T) { @@ -132,7 +132,7 @@ func TestStartSendOnlyNode(t *testing.T) { err := s.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertEventually(t, func() bool { - return s.State() == nodeStateAlive + return s.State() == NodeStateAlive }) assert.Equal(t, 0, observedLogs.Len()) // No warnings expected }) diff --git a/core/chains/evm/client/chain_client.go b/core/chains/evm/client/chain_client.go index 7aceb8a30ab..5dc992039e2 100644 --- a/core/chains/evm/client/chain_client.go +++ b/core/chains/evm/client/chain_client.go @@ -37,7 +37,7 @@ type Client interface { // NodeStates returns a map of node Name->node state // It might be nil or empty, e.g. for mock clients etc - NodeStates() map[string]string + NodeStates() map[string]commonclient.NodeState TokenBalance(ctx context.Context, address common.Address, contractAddress common.Address) (*big.Int, error) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) @@ -328,7 +328,7 @@ func (c *chainClient) LatestBlockHeight(ctx context.Context) (*big.Int, error) { return rpc.LatestBlockHeight(ctx) } -func (c *chainClient) NodeStates() map[string]string { +func (c *chainClient) NodeStates() map[string]commonclient.NodeState { return c.multiNode.NodeStates() } diff --git a/core/chains/evm/client/null_client.go b/core/chains/evm/client/null_client.go index 7615a0a68af..52d418c1405 100644 --- a/core/chains/evm/client/null_client.go +++ b/core/chains/evm/client/null_client.go @@ -221,7 +221,7 @@ func (nc *NullClient) SuggestGasTipCap(ctx context.Context) (tipCap *big.Int, er } // NodeStates implements evmclient.Client -func (nc *NullClient) NodeStates() map[string]string { return nil } +func (nc *NullClient) NodeStates() map[string]commonclient.NodeState { return nil } func (nc *NullClient) IsL2() bool { nc.lggr.Debug("IsL2") diff --git a/core/chains/legacyevm/chain.go b/core/chains/legacyevm/chain.go index 1c94e3d7dfa..af1dc4c20d5 100644 --- a/core/chains/legacyevm/chain.go +++ b/core/chains/legacyevm/chain.go @@ -421,7 +421,6 @@ func (c *chain) listNodeStatuses(start, end int) ([]types.NodeStatus, int, error for _, n := range nodes[start:end] { var ( nodeState string - exists bool ) toml, err := gotoml.Marshal(n) if err != nil { @@ -430,10 +429,11 @@ func (c *chain) listNodeStatuses(start, end int) ([]types.NodeStatus, int, error if states == nil { nodeState = "Unknown" } else { - nodeState, exists = states[*n.Name] - if !exists { - // The node is in the DB and the chain is enabled but it's not running - nodeState = "NotLoaded" + // The node is in the DB and the chain is enabled but it's not running + nodeState = "NotLoaded" + s, exists := states[*n.Name] + if exists { + nodeState = s.String() } } stats = append(stats, types.NodeStatus{ From 7498ddec9e777e720f68b1fd12e1c3a7f8e98c77 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 12 Jun 2024 10:51:17 -0400 Subject: [PATCH 29/58] Update multi_node_test.go --- common/client/multi_node_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/common/client/multi_node_test.go b/common/client/multi_node_test.go index 7bd7430ad39..10840dc24b3 100644 --- a/common/client/multi_node_test.go +++ b/common/client/multi_node_test.go @@ -312,14 +312,14 @@ func TestMultiNode_CheckLease(t *testing.T) { expectedResult := map[string]NodeState{} for name, state := range nodes { node := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) - node.On("State").Return(state) - node.On("String").Return(name) + node.On("State").Return(state).Once() + node.On("String").Return(name).Once() opts.nodes = append(opts.nodes, node) sendOnly := newMockSendOnlyNode[types.ID, multiNodeRPCClient](t) sendOnlyName := "send_only_" + name - sendOnly.On("State").Return(state) - sendOnly.On("String").Return(sendOnlyName) + sendOnly.On("State").Return(state).Once() + sendOnly.On("String").Return(sendOnlyName).Once() opts.sendonlys = append(opts.sendonlys, sendOnly) expectedResult[name] = state From 4796377498c632b84392ae653b6a6c0de05f55b2 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 12 Jun 2024 10:57:25 -0400 Subject: [PATCH 30/58] Update Unsubscribe --- common/client/node_lifecycle.go | 3 +-- common/types/subscription.go | 3 ++- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index f17081b0c8b..bdecf6181f7 100644 --- a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -113,8 +113,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { n.declareUnreachable() return } - // TODO: nit fix. If multinode switches primary node before we set sub as AliveSub, sub will be closed and we'll - // falsely transition this node to unreachable state + n.stateMu.Lock() n.aliveLoopSub = sub n.stateMu.Unlock() diff --git a/common/types/subscription.go b/common/types/subscription.go index e0cd0a1660d..b341fb42c44 100644 --- a/common/types/subscription.go +++ b/common/types/subscription.go @@ -7,7 +7,8 @@ package types // This is a generic interface for Subscription to represent used by clients. type Subscription interface { // Unsubscribe cancels the sending of events to the data channel - // and closes the error channel. + // and closes the error channel. Unsubscribe should be callable multiple + // times without causing an error. Unsubscribe() // Err returns the subscription error channel. The error channel receives // a value if there is an issue with the subscription (e.g. the network connection From ce1214bb1ee58f307012ff32b8397011e644e64d Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 12 Jun 2024 11:16:56 -0400 Subject: [PATCH 31/58] Remove HEAD generic from Node interface --- common/client/mock_node_selector_test.go | 8 ++-- common/client/mock_node_test.go | 28 +++++------ common/client/multi_node.go | 8 ++-- common/client/multi_node_test.go | 48 +++++++++---------- common/client/node.go | 3 +- common/client/node_selector.go | 4 +- common/client/node_selector_highest_head.go | 8 ++-- .../client/node_selector_highest_head_test.go | 48 +++++++++---------- common/client/node_selector_priority_level.go | 14 +++--- .../node_selector_priority_level_test.go | 4 +- common/client/node_selector_round_robin.go | 8 ++-- .../client/node_selector_round_robin_test.go | 8 ++-- .../client/node_selector_total_difficulty.go | 10 ++-- .../node_selector_total_difficulty_test.go | 48 +++++++++---------- core/chains/evm/client/chain_client.go | 2 +- core/chains/evm/client/evm_client.go | 2 +- core/chains/evm/client/mocks/client.go | 8 ++-- 17 files changed, 129 insertions(+), 130 deletions(-) diff --git a/common/client/mock_node_selector_test.go b/common/client/mock_node_selector_test.go index f068af84a1d..e303b813422 100644 --- a/common/client/mock_node_selector_test.go +++ b/common/client/mock_node_selector_test.go @@ -31,19 +31,19 @@ func (_m *mockNodeSelector[CHAIN_ID, HEAD, RPC]) Name() string { } // Select provides a mock function with given fields: -func (_m *mockNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, HEAD, RPC] { +func (_m *mockNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, RPC] { ret := _m.Called() if len(ret) == 0 { panic("no return value specified for Select") } - var r0 Node[CHAIN_ID, HEAD, RPC] - if rf, ok := ret.Get(0).(func() Node[CHAIN_ID, HEAD, RPC]); ok { + var r0 Node[CHAIN_ID, RPC] + if rf, ok := ret.Get(0).(func() Node[CHAIN_ID, RPC]); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(Node[CHAIN_ID, HEAD, RPC]) + r0 = ret.Get(0).(Node[CHAIN_ID, RPC]) } } diff --git a/common/client/mock_node_test.go b/common/client/mock_node_test.go index af99efac1c3..8e669391b30 100644 --- a/common/client/mock_node_test.go +++ b/common/client/mock_node_test.go @@ -10,12 +10,12 @@ import ( ) // mockNode is an autogenerated mock type for the Node type -type mockNode[CHAIN_ID types.ID, HEAD Head, RPC_CLIENT interface{}] struct { +type mockNode[CHAIN_ID types.ID, RPC_CLIENT interface{}] struct { mock.Mock } // Close provides a mock function with given fields: -func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) Close() error { +func (_m *mockNode[CHAIN_ID, RPC_CLIENT]) Close() error { ret := _m.Called() if len(ret) == 0 { @@ -33,7 +33,7 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) Close() error { } // ConfiguredChainID provides a mock function with given fields: -func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) ConfiguredChainID() CHAIN_ID { +func (_m *mockNode[CHAIN_ID, RPC_CLIENT]) ConfiguredChainID() CHAIN_ID { ret := _m.Called() if len(ret) == 0 { @@ -51,7 +51,7 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) ConfiguredChainID() CHAIN_ID { } // Name provides a mock function with given fields: -func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) Name() string { +func (_m *mockNode[CHAIN_ID, RPC_CLIENT]) Name() string { ret := _m.Called() if len(ret) == 0 { @@ -69,7 +69,7 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) Name() string { } // Order provides a mock function with given fields: -func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) Order() int32 { +func (_m *mockNode[CHAIN_ID, RPC_CLIENT]) Order() int32 { ret := _m.Called() if len(ret) == 0 { @@ -87,7 +87,7 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) Order() int32 { } // RPC provides a mock function with given fields: -func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) RPC() RPC_CLIENT { +func (_m *mockNode[CHAIN_ID, RPC_CLIENT]) RPC() RPC_CLIENT { ret := _m.Called() if len(ret) == 0 { @@ -105,7 +105,7 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) RPC() RPC_CLIENT { } // Start provides a mock function with given fields: _a0 -func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) Start(_a0 context.Context) error { +func (_m *mockNode[CHAIN_ID, RPC_CLIENT]) Start(_a0 context.Context) error { ret := _m.Called(_a0) if len(ret) == 0 { @@ -123,7 +123,7 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) Start(_a0 context.Context) error } // State provides a mock function with given fields: -func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) State() NodeState { +func (_m *mockNode[CHAIN_ID, RPC_CLIENT]) State() NodeState { ret := _m.Called() if len(ret) == 0 { @@ -141,7 +141,7 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) State() NodeState { } // StateAndLatest provides a mock function with given fields: -func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) StateAndLatest() (NodeState, ChainInfo) { +func (_m *mockNode[CHAIN_ID, RPC_CLIENT]) StateAndLatest() (NodeState, ChainInfo) { ret := _m.Called() if len(ret) == 0 { @@ -169,7 +169,7 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) StateAndLatest() (NodeState, Cha } // String provides a mock function with given fields: -func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) String() string { +func (_m *mockNode[CHAIN_ID, RPC_CLIENT]) String() string { ret := _m.Called() if len(ret) == 0 { @@ -187,17 +187,17 @@ func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) String() string { } // UnsubscribeAll provides a mock function with given fields: -func (_m *mockNode[CHAIN_ID, HEAD, RPC_CLIENT]) UnsubscribeAll() { +func (_m *mockNode[CHAIN_ID, RPC_CLIENT]) UnsubscribeAll() { _m.Called() } // newMockNode creates a new instance of mockNode. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. -func newMockNode[CHAIN_ID types.ID, HEAD Head, RPC_CLIENT interface{}](t interface { +func newMockNode[CHAIN_ID types.ID, RPC_CLIENT interface{}](t interface { mock.TestingT Cleanup(func()) -}) *mockNode[CHAIN_ID, HEAD, RPC_CLIENT] { - mock := &mockNode[CHAIN_ID, HEAD, RPC_CLIENT]{} +}) *mockNode[CHAIN_ID, RPC_CLIENT] { + mock := &mockNode[CHAIN_ID, RPC_CLIENT]{} mock.Mock.Test(t) t.Cleanup(func() { mock.AssertExpectations(t) }) diff --git a/common/client/multi_node.go b/common/client/multi_node.go index ec0718c942a..81639939d2f 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -57,7 +57,7 @@ type multiNode[ RPC_CLIENT RPCClient[CHAIN_ID, HEAD], ] struct { services.StateMachine - primaryNodes []Node[CHAIN_ID, HEAD, RPC_CLIENT] + primaryNodes []Node[CHAIN_ID, RPC_CLIENT] sendOnlyNodes []SendOnlyNode[CHAIN_ID, RPC_CLIENT] chainID CHAIN_ID lggr logger.SugaredLogger @@ -69,7 +69,7 @@ type multiNode[ reportInterval time.Duration activeMu sync.RWMutex - activeNode Node[CHAIN_ID, HEAD, RPC_CLIENT] + activeNode Node[CHAIN_ID, RPC_CLIENT] chStop services.StopChan wg sync.WaitGroup @@ -84,7 +84,7 @@ func NewMultiNode[ lggr logger.Logger, selectionMode string, // type of the "best" RPC selector (e.g HighestHead, RoundRobin, etc.) leaseDuration time.Duration, // defines interval on which new "best" RPC should be selected - primaryNodes []Node[CHAIN_ID, HEAD, RPC_CLIENT], + primaryNodes []Node[CHAIN_ID, RPC_CLIENT], sendOnlyNodes []SendOnlyNode[CHAIN_ID, RPC_CLIENT], chainID CHAIN_ID, // configured chain ID (used to verify that passed primaryNodes belong to the same chain) chainFamily string, // name of the chain family - used in the metrics @@ -226,7 +226,7 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) SelectRPC() (rpc RPC } // selectNode returns the active Node, if it is still NodeStateAlive, otherwise it selects a new one from the NodeSelector. -func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) selectNode() (node Node[CHAIN_ID, HEAD, RPC_CLIENT], err error) { +func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) selectNode() (node Node[CHAIN_ID, RPC_CLIENT], err error) { c.activeMu.RLock() node = c.activeNode c.activeMu.RUnlock() diff --git a/common/client/multi_node_test.go b/common/client/multi_node_test.go index 10840dc24b3..9f3de8ef61c 100644 --- a/common/client/multi_node_test.go +++ b/common/client/multi_node_test.go @@ -29,7 +29,7 @@ type multiNodeOpts struct { logger logger.Logger selectionMode string leaseDuration time.Duration - nodes []Node[types.ID, types.Head[Hashable], multiNodeRPCClient] + nodes []Node[types.ID, multiNodeRPCClient] sendonlys []SendOnlyNode[types.ID, multiNodeRPCClient] chainID types.ID chainFamily string @@ -47,12 +47,12 @@ func newTestMultiNode(t *testing.T, opts multiNodeOpts) testMultiNode { } } -func newHealthyNode(t *testing.T, chainID types.ID) *mockNode[types.ID, types.Head[Hashable], multiNodeRPCClient] { +func newHealthyNode(t *testing.T, chainID types.ID) *mockNode[types.ID, multiNodeRPCClient] { return newNodeWithState(t, chainID, NodeStateAlive) } -func newNodeWithState(t *testing.T, chainID types.ID, state NodeState) *mockNode[types.ID, types.Head[Hashable], multiNodeRPCClient] { - node := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) +func newNodeWithState(t *testing.T, chainID types.ID, state NodeState) *mockNode[types.ID, multiNodeRPCClient] { + node := newMockNode[types.ID, multiNodeRPCClient](t) node.On("ConfiguredChainID").Return(chainID).Once() node.On("Start", mock.Anything).Return(nil).Once() node.On("Close").Return(nil).Once() @@ -63,7 +63,7 @@ func newNodeWithState(t *testing.T, chainID types.ID, state NodeState) *mockNode func TestMultiNode_Dial(t *testing.T) { t.Parallel() - newMockNode := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient] + newMockNode := newMockNode[types.ID, multiNodeRPCClient] newMockSendOnlyNode := newMockSendOnlyNode[types.ID, multiNodeRPCClient] t.Run("Fails without nodes", func(t *testing.T) { @@ -86,7 +86,7 @@ func TestMultiNode_Dial(t *testing.T) { mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: multiNodeChainID, - nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node}, + nodes: []Node[types.ID, multiNodeRPCClient]{node}, }) err := mn.Dial(tests.Context(t)) assert.EqualError(t, err, fmt.Sprintf("node %s has configured chain ID %s which does not match multinode configured chain ID of %s", nodeName, nodeChainID, mn.chainID)) @@ -101,7 +101,7 @@ func TestMultiNode_Dial(t *testing.T) { mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: chainID, - nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node}, + nodes: []Node[types.ID, multiNodeRPCClient]{node}, }) err := mn.Dial(tests.Context(t)) assert.EqualError(t, err, expectedError.Error()) @@ -119,7 +119,7 @@ func TestMultiNode_Dial(t *testing.T) { mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: chainID, - nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node1, node2}, + nodes: []Node[types.ID, multiNodeRPCClient]{node1, node2}, }) err := mn.Dial(tests.Context(t)) assert.EqualError(t, err, expectedError.Error()) @@ -137,7 +137,7 @@ func TestMultiNode_Dial(t *testing.T) { mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: multiNodeChainID, - nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node}, + nodes: []Node[types.ID, multiNodeRPCClient]{node}, sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{sendOnly}, }) err := mn.Dial(tests.Context(t)) @@ -164,7 +164,7 @@ func TestMultiNode_Dial(t *testing.T) { mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: chainID, - nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node}, + nodes: []Node[types.ID, multiNodeRPCClient]{node}, sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{sendOnly1, sendOnly2}, }) err := mn.Dial(tests.Context(t)) @@ -177,7 +177,7 @@ func TestMultiNode_Dial(t *testing.T) { mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: chainID, - nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node}, + nodes: []Node[types.ID, multiNodeRPCClient]{node}, sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{newHealthySendOnly(t, chainID)}, }) defer func() { assert.NoError(t, mn.Close()) }() @@ -200,7 +200,7 @@ func TestMultiNode_Report(t *testing.T) { mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: chainID, - nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node1, node2}, + nodes: []Node[types.ID, multiNodeRPCClient]{node1, node2}, logger: lggr, }) mn.reportInterval = tests.TestInterval @@ -217,7 +217,7 @@ func TestMultiNode_Report(t *testing.T) { mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: chainID, - nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node}, + nodes: []Node[types.ID, multiNodeRPCClient]{node}, logger: lggr, }) mn.reportInterval = tests.TestInterval @@ -242,7 +242,7 @@ func TestMultiNode_CheckLease(t *testing.T) { selectionMode: NodeSelectionModeRoundRobin, chainID: chainID, logger: lggr, - nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node}, + nodes: []Node[types.ID, multiNodeRPCClient]{node}, }) defer func() { assert.NoError(t, mn.Close()) }() err := mn.Dial(tests.Context(t)) @@ -258,7 +258,7 @@ func TestMultiNode_CheckLease(t *testing.T) { selectionMode: NodeSelectionModeHighestHead, chainID: chainID, logger: lggr, - nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node}, + nodes: []Node[types.ID, multiNodeRPCClient]{node}, leaseDuration: 0, }) defer func() { assert.NoError(t, mn.Close()) }() @@ -280,7 +280,7 @@ func TestMultiNode_CheckLease(t *testing.T) { selectionMode: NodeSelectionModeHighestHead, chainID: chainID, logger: lggr, - nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node, bestNode}, + nodes: []Node[types.ID, multiNodeRPCClient]{node, bestNode}, leaseDuration: tests.TestInterval, }) defer func() { assert.NoError(t, mn.Close()) }() @@ -311,7 +311,7 @@ func TestMultiNode_CheckLease(t *testing.T) { expectedResult := map[string]NodeState{} for name, state := range nodes { - node := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + node := newMockNode[types.ID, multiNodeRPCClient](t) node.On("State").Return(state).Once() node.On("String").Return(name).Once() opts.nodes = append(opts.nodes, node) @@ -337,15 +337,15 @@ func TestMultiNode_selectNode(t *testing.T) { t.Run("Returns same node, if it's still healthy", func(t *testing.T) { t.Parallel() chainID := types.RandomID() - node1 := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + node1 := newMockNode[types.ID, multiNodeRPCClient](t) node1.On("State").Return(NodeStateAlive).Once() node1.On("String").Return("node1").Maybe() - node2 := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + node2 := newMockNode[types.ID, multiNodeRPCClient](t) node2.On("String").Return("node2").Maybe() mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: chainID, - nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node1, node2}, + nodes: []Node[types.ID, multiNodeRPCClient]{node1, node2}, }) nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t) nodeSelector.On("Select").Return(node1).Once() @@ -360,14 +360,14 @@ func TestMultiNode_selectNode(t *testing.T) { t.Run("Updates node if active is not healthy", func(t *testing.T) { t.Parallel() chainID := types.RandomID() - oldBest := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + oldBest := newMockNode[types.ID, multiNodeRPCClient](t) oldBest.On("String").Return("oldBest").Maybe() - newBest := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + newBest := newMockNode[types.ID, multiNodeRPCClient](t) newBest.On("String").Return("newBest").Maybe() mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: chainID, - nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{oldBest, newBest}, + nodes: []Node[types.ID, multiNodeRPCClient]{oldBest, newBest}, }) nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t) nodeSelector.On("Select").Return(oldBest).Once() @@ -466,7 +466,7 @@ func TestMultiNode_nLiveNodes(t *testing.T) { tc := testCases[i] t.Run(tc.Name, func(t *testing.T) { for _, params := range tc.NodeParams { - node := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + node := newMockNode[types.ID, multiNodeRPCClient](t) node.On("StateAndLatest").Return(params.State, params.chainInfo) mn.primaryNodes = append(mn.primaryNodes, node) } diff --git a/common/client/node.go b/common/client/node.go index 92730705d25..593665bf970 100644 --- a/common/client/node.go +++ b/common/client/node.go @@ -65,7 +65,6 @@ type ChainInfo struct { //go:generate mockery --quiet --name Node --structname mockNode --filename "mock_node_test.go" --inpackage --case=underscore type Node[ CHAIN_ID types.ID, - HEAD Head, RPC_CLIENT any, ] interface { // State returns health state of the underlying RPC @@ -145,7 +144,7 @@ func NewNode[ nodeOrder int32, rpc RPC_CLIENT, chainFamily string, -) Node[CHAIN_ID, HEAD, RPC_CLIENT] { +) Node[CHAIN_ID, RPC_CLIENT] { n := new(node[CHAIN_ID, HEAD, RPC_CLIENT]) n.name = name n.id = id diff --git a/common/client/node_selector.go b/common/client/node_selector.go index 9ec0d956f19..345217132b6 100644 --- a/common/client/node_selector.go +++ b/common/client/node_selector.go @@ -21,7 +21,7 @@ type NodeSelector[ ] interface { // Select returns a Node, or nil if none can be selected. // Implementation must be thread-safe. - Select() Node[CHAIN_ID, HEAD, RPC] + Select() Node[CHAIN_ID, RPC] // Name returns the strategy name, e.g. "HighestHead" or "RoundRobin" Name() string } @@ -30,7 +30,7 @@ func newNodeSelector[ CHAIN_ID types.ID, HEAD Head, RPC RPCClient[CHAIN_ID, HEAD], -](selectionMode string, nodes []Node[CHAIN_ID, HEAD, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { +](selectionMode string, nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { switch selectionMode { case NodeSelectionModeHighestHead: return NewHighestHeadNodeSelector[CHAIN_ID, HEAD, RPC](nodes) diff --git a/common/client/node_selector_highest_head.go b/common/client/node_selector_highest_head.go index 11a74801637..b9dd6345b31 100644 --- a/common/client/node_selector_highest_head.go +++ b/common/client/node_selector_highest_head.go @@ -10,19 +10,19 @@ type highestHeadNodeSelector[ CHAIN_ID types.ID, HEAD Head, RPC RPCClient[CHAIN_ID, HEAD], -] []Node[CHAIN_ID, HEAD, RPC] +] []Node[CHAIN_ID, RPC] func NewHighestHeadNodeSelector[ CHAIN_ID types.ID, HEAD Head, RPC RPCClient[CHAIN_ID, HEAD], -](nodes []Node[CHAIN_ID, HEAD, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { +](nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { return highestHeadNodeSelector[CHAIN_ID, HEAD, RPC](nodes) } -func (s highestHeadNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, HEAD, RPC] { +func (s highestHeadNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, RPC] { var highestHeadNumber int64 = math.MinInt64 - var highestHeadNodes []Node[CHAIN_ID, HEAD, RPC] + var highestHeadNodes []Node[CHAIN_ID, RPC] for _, n := range s { state, chainInfo := n.StateAndLatest() currentHeadNumber := chainInfo.BlockNumber diff --git a/common/client/node_selector_highest_head_test.go b/common/client/node_selector_highest_head_test.go index 9d9612e82ee..15d6489e95a 100644 --- a/common/client/node_selector_highest_head_test.go +++ b/common/client/node_selector_highest_head_test.go @@ -18,10 +18,10 @@ func TestHighestHeadNodeSelector(t *testing.T) { type nodeClient RPCClient[types.ID, Head] - var nodes []Node[types.ID, Head, nodeClient] + var nodes []Node[types.ID, nodeClient] for i := 0; i < 3; i++ { - node := newMockNode[types.ID, Head, nodeClient](t) + node := newMockNode[types.ID, nodeClient](t) if i == 0 { // first node is out of sync node.On("StateAndLatest").Return(NodeStateOutOfSync, ChainInfo{BlockNumber: -1}) @@ -40,7 +40,7 @@ func TestHighestHeadNodeSelector(t *testing.T) { assert.Same(t, nodes[2], selector.Select()) t.Run("stick to the same node", func(t *testing.T) { - node := newMockNode[types.ID, Head, nodeClient](t) + node := newMockNode[types.ID, nodeClient](t) // fourth node is alive, LatestReceivedBlockNumber = 2 (same as 3rd) node.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 2}) node.On("Order").Return(int32(1)) @@ -51,7 +51,7 @@ func TestHighestHeadNodeSelector(t *testing.T) { }) t.Run("another best node", func(t *testing.T) { - node := newMockNode[types.ID, Head, nodeClient](t) + node := newMockNode[types.ID, nodeClient](t) // fifth node is alive, LatestReceivedBlockNumber = 3 (better than 3rd and 4th) node.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 3}) node.On("Order").Return(int32(1)) @@ -62,13 +62,13 @@ func TestHighestHeadNodeSelector(t *testing.T) { }) t.Run("nodes never update latest block number", func(t *testing.T) { - node1 := newMockNode[types.ID, Head, nodeClient](t) + node1 := newMockNode[types.ID, nodeClient](t) node1.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: -1}) node1.On("Order").Return(int32(1)) - node2 := newMockNode[types.ID, Head, nodeClient](t) + node2 := newMockNode[types.ID, nodeClient](t) node2.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: -1}) node2.On("Order").Return(int32(1)) - selector := newNodeSelector(NodeSelectionModeHighestHead, []Node[types.ID, Head, nodeClient]{node1, node2}) + selector := newNodeSelector(NodeSelectionModeHighestHead, []Node[types.ID, nodeClient]{node1, node2}) assert.Same(t, node1, selector.Select()) }) } @@ -77,10 +77,10 @@ func TestHighestHeadNodeSelector_None(t *testing.T) { t.Parallel() type nodeClient RPCClient[types.ID, Head] - var nodes []Node[types.ID, Head, nodeClient] + var nodes []Node[types.ID, nodeClient] for i := 0; i < 3; i++ { - node := newMockNode[types.ID, Head, nodeClient](t) + node := newMockNode[types.ID, nodeClient](t) if i == 0 { // first node is out of sync node.On("StateAndLatest").Return(NodeStateOutOfSync, ChainInfo{BlockNumber: -1}) @@ -99,11 +99,11 @@ func TestHighestHeadNodeSelectorWithOrder(t *testing.T) { t.Parallel() type nodeClient RPCClient[types.ID, Head] - var nodes []Node[types.ID, Head, nodeClient] + var nodes []Node[types.ID, nodeClient] t.Run("same head and order", func(t *testing.T) { for i := 0; i < 3; i++ { - node := newMockNode[types.ID, Head, nodeClient](t) + node := newMockNode[types.ID, nodeClient](t) node.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 1}) node.On("Order").Return(int32(2)) nodes = append(nodes, node) @@ -114,61 +114,61 @@ func TestHighestHeadNodeSelectorWithOrder(t *testing.T) { }) t.Run("same head but different order", func(t *testing.T) { - node1 := newMockNode[types.ID, Head, nodeClient](t) + node1 := newMockNode[types.ID, nodeClient](t) node1.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 3}) node1.On("Order").Return(int32(3)) - node2 := newMockNode[types.ID, Head, nodeClient](t) + node2 := newMockNode[types.ID, nodeClient](t) node2.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 3}) node2.On("Order").Return(int32(1)) - node3 := newMockNode[types.ID, Head, nodeClient](t) + node3 := newMockNode[types.ID, nodeClient](t) node3.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 3}) node3.On("Order").Return(int32(2)) - nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3} + nodes := []Node[types.ID, nodeClient]{node1, node2, node3} selector := newNodeSelector(NodeSelectionModeHighestHead, nodes) //Should select the second node as it has the highest priority assert.Same(t, nodes[1], selector.Select()) }) t.Run("different head but same order", func(t *testing.T) { - node1 := newMockNode[types.ID, Head, nodeClient](t) + node1 := newMockNode[types.ID, nodeClient](t) node1.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 1}) node1.On("Order").Maybe().Return(int32(3)) - node2 := newMockNode[types.ID, Head, nodeClient](t) + node2 := newMockNode[types.ID, nodeClient](t) node2.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 2}) node2.On("Order").Maybe().Return(int32(3)) - node3 := newMockNode[types.ID, Head, nodeClient](t) + node3 := newMockNode[types.ID, nodeClient](t) node3.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 3}) node3.On("Order").Return(int32(3)) - nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3} + nodes := []Node[types.ID, nodeClient]{node1, node2, node3} selector := newNodeSelector(NodeSelectionModeHighestHead, nodes) //Should select the third node as it has the highest head assert.Same(t, nodes[2], selector.Select()) }) t.Run("different head and different order", func(t *testing.T) { - node1 := newMockNode[types.ID, Head, nodeClient](t) + node1 := newMockNode[types.ID, nodeClient](t) node1.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 10}) node1.On("Order").Maybe().Return(int32(3)) - node2 := newMockNode[types.ID, Head, nodeClient](t) + node2 := newMockNode[types.ID, nodeClient](t) node2.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 11}) node2.On("Order").Maybe().Return(int32(4)) - node3 := newMockNode[types.ID, Head, nodeClient](t) + node3 := newMockNode[types.ID, nodeClient](t) node3.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 12}) node3.On("Order").Maybe().Return(int32(3)) - node4 := newMockNode[types.ID, Head, nodeClient](t) + node4 := newMockNode[types.ID, nodeClient](t) node4.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 10}) node4.On("Order").Maybe().Return(int32(1)) - nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3, node4} + nodes := []Node[types.ID, nodeClient]{node1, node2, node3, node4} selector := newNodeSelector(NodeSelectionModeHighestHead, nodes) //Should select the third node as it has the highest head and will win the priority tie-breaker assert.Same(t, nodes[2], selector.Select()) diff --git a/common/client/node_selector_priority_level.go b/common/client/node_selector_priority_level.go index 0565c4c4f2a..5587345cc20 100644 --- a/common/client/node_selector_priority_level.go +++ b/common/client/node_selector_priority_level.go @@ -13,7 +13,7 @@ type priorityLevelNodeSelector[ HEAD Head, RPC RPCClient[CHAIN_ID, HEAD], ] struct { - nodes []Node[CHAIN_ID, HEAD, RPC] + nodes []Node[CHAIN_ID, RPC] roundRobinCount []atomic.Uint32 } @@ -22,7 +22,7 @@ type nodeWithPriority[ HEAD Head, RPC RPCClient[CHAIN_ID, HEAD], ] struct { - node Node[CHAIN_ID, HEAD, RPC] + node Node[CHAIN_ID, RPC] priority int32 } @@ -30,14 +30,14 @@ func NewPriorityLevelNodeSelector[ CHAIN_ID types.ID, HEAD Head, RPC RPCClient[CHAIN_ID, HEAD], -](nodes []Node[CHAIN_ID, HEAD, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { +](nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { return &priorityLevelNodeSelector[CHAIN_ID, HEAD, RPC]{ nodes: nodes, roundRobinCount: make([]atomic.Uint32, nrOfPriorityTiers(nodes)), } } -func (s priorityLevelNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, HEAD, RPC] { +func (s priorityLevelNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, RPC] { nodes := s.getHighestPriorityAliveTier() if len(nodes) == 0 { @@ -100,7 +100,7 @@ func nrOfPriorityTiers[ CHAIN_ID types.ID, HEAD Head, RPC RPCClient[CHAIN_ID, HEAD], -](nodes []Node[CHAIN_ID, HEAD, RPC]) int32 { +](nodes []Node[CHAIN_ID, RPC]) int32 { highestPriority := int32(0) for _, n := range nodes { priority := n.Order() @@ -116,9 +116,9 @@ func firstOrHighestPriority[ CHAIN_ID types.ID, HEAD Head, RPC RPCClient[CHAIN_ID, HEAD], -](nodes []Node[CHAIN_ID, HEAD, RPC]) Node[CHAIN_ID, HEAD, RPC] { +](nodes []Node[CHAIN_ID, RPC]) Node[CHAIN_ID, RPC] { hp := int32(math.MaxInt32) - var node Node[CHAIN_ID, HEAD, RPC] + var node Node[CHAIN_ID, RPC] for _, n := range nodes { if n.Order() < hp { hp = n.Order() diff --git a/common/client/node_selector_priority_level_test.go b/common/client/node_selector_priority_level_test.go index 362625f4cf2..b3f69d18d3c 100644 --- a/common/client/node_selector_priority_level_test.go +++ b/common/client/node_selector_priority_level_test.go @@ -66,9 +66,9 @@ func TestPriorityLevelNodeSelector(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - var nodes []Node[types.ID, Head, nodeClient] + var nodes []Node[types.ID, nodeClient] for _, tn := range tc.nodes { - node := newMockNode[types.ID, Head, nodeClient](t) + node := newMockNode[types.ID, nodeClient](t) node.On("State").Return(tn.state) node.On("Order").Return(tn.order) nodes = append(nodes, node) diff --git a/common/client/node_selector_round_robin.go b/common/client/node_selector_round_robin.go index a914c06c21e..23bd0474bf2 100644 --- a/common/client/node_selector_round_robin.go +++ b/common/client/node_selector_round_robin.go @@ -11,7 +11,7 @@ type roundRobinSelector[ HEAD Head, RPC RPCClient[CHAIN_ID, HEAD], ] struct { - nodes []Node[CHAIN_ID, HEAD, RPC] + nodes []Node[CHAIN_ID, RPC] roundRobinCount atomic.Uint32 } @@ -19,14 +19,14 @@ func NewRoundRobinSelector[ CHAIN_ID types.ID, HEAD Head, RPC RPCClient[CHAIN_ID, HEAD], -](nodes []Node[CHAIN_ID, HEAD, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { +](nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { return &roundRobinSelector[CHAIN_ID, HEAD, RPC]{ nodes: nodes, } } -func (s *roundRobinSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, HEAD, RPC] { - var liveNodes []Node[CHAIN_ID, HEAD, RPC] +func (s *roundRobinSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, RPC] { + var liveNodes []Node[CHAIN_ID, RPC] for _, n := range s.nodes { if n.State() == NodeStateAlive { liveNodes = append(liveNodes, n) diff --git a/common/client/node_selector_round_robin_test.go b/common/client/node_selector_round_robin_test.go index 866a02222ec..148c1a320bd 100644 --- a/common/client/node_selector_round_robin_test.go +++ b/common/client/node_selector_round_robin_test.go @@ -17,10 +17,10 @@ func TestRoundRobinNodeSelector(t *testing.T) { t.Parallel() type nodeClient RPCClient[types.ID, Head] - var nodes []Node[types.ID, Head, nodeClient] + var nodes []Node[types.ID, nodeClient] for i := 0; i < 3; i++ { - node := newMockNode[types.ID, Head, nodeClient](t) + node := newMockNode[types.ID, nodeClient](t) if i == 0 { // first node is out of sync node.On("State").Return(NodeStateOutOfSync) @@ -42,10 +42,10 @@ func TestRoundRobinNodeSelector_None(t *testing.T) { t.Parallel() type nodeClient RPCClient[types.ID, Head] - var nodes []Node[types.ID, Head, nodeClient] + var nodes []Node[types.ID, nodeClient] for i := 0; i < 3; i++ { - node := newMockNode[types.ID, Head, nodeClient](t) + node := newMockNode[types.ID, nodeClient](t) if i == 0 { // first node is out of sync node.On("State").Return(NodeStateOutOfSync) diff --git a/common/client/node_selector_total_difficulty.go b/common/client/node_selector_total_difficulty.go index 56ab0fbfae9..36039661bf8 100644 --- a/common/client/node_selector_total_difficulty.go +++ b/common/client/node_selector_total_difficulty.go @@ -10,21 +10,21 @@ type totalDifficultyNodeSelector[ CHAIN_ID types.ID, HEAD Head, RPC RPCClient[CHAIN_ID, HEAD], -] []Node[CHAIN_ID, HEAD, RPC] +] []Node[CHAIN_ID, RPC] func NewTotalDifficultyNodeSelector[ CHAIN_ID types.ID, HEAD Head, RPC RPCClient[CHAIN_ID, HEAD], -](nodes []Node[CHAIN_ID, HEAD, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { +](nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { return totalDifficultyNodeSelector[CHAIN_ID, HEAD, RPC](nodes) } -func (s totalDifficultyNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, HEAD, RPC] { +func (s totalDifficultyNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, RPC] { // NodeNoNewHeadsThreshold may not be enabled, in this case all nodes have td == nil var highestTD *big.Int - var nodes []Node[CHAIN_ID, HEAD, RPC] - var aliveNodes []Node[CHAIN_ID, HEAD, RPC] + var nodes []Node[CHAIN_ID, RPC] + var aliveNodes []Node[CHAIN_ID, RPC] for _, n := range s { state, chainInfo := n.StateAndLatest() diff --git a/common/client/node_selector_total_difficulty_test.go b/common/client/node_selector_total_difficulty_test.go index 2e82998903a..42573f59f0e 100644 --- a/common/client/node_selector_total_difficulty_test.go +++ b/common/client/node_selector_total_difficulty_test.go @@ -18,10 +18,10 @@ func TestTotalDifficultyNodeSelector(t *testing.T) { t.Parallel() type nodeClient RPCClient[types.ID, Head] - var nodes []Node[types.ID, Head, nodeClient] + var nodes []Node[types.ID, nodeClient] for i := 0; i < 3; i++ { - node := newMockNode[types.ID, Head, nodeClient](t) + node := newMockNode[types.ID, nodeClient](t) if i == 0 { // first node is out of sync node.On("StateAndLatest").Return(NodeStateOutOfSync, @@ -43,7 +43,7 @@ func TestTotalDifficultyNodeSelector(t *testing.T) { assert.Same(t, nodes[2], selector.Select()) t.Run("stick to the same node", func(t *testing.T) { - node := newMockNode[types.ID, Head, nodeClient](t) + node := newMockNode[types.ID, nodeClient](t) // fourth node is alive (same as 3rd) node.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 2, BlockDifficulty: big.NewInt(8)}) @@ -55,7 +55,7 @@ func TestTotalDifficultyNodeSelector(t *testing.T) { }) t.Run("another best node", func(t *testing.T) { - node := newMockNode[types.ID, Head, nodeClient](t) + node := newMockNode[types.ID, nodeClient](t) // fifth node is alive (better than 3rd and 4th) node.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 3, BlockDifficulty: big.NewInt(11)}) @@ -67,15 +67,15 @@ func TestTotalDifficultyNodeSelector(t *testing.T) { }) t.Run("nodes never update latest block number", func(t *testing.T) { - node1 := newMockNode[types.ID, Head, nodeClient](t) + node1 := newMockNode[types.ID, nodeClient](t) node1.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: -1, BlockDifficulty: nil}) node1.On("Order").Maybe().Return(int32(1)) - node2 := newMockNode[types.ID, Head, nodeClient](t) + node2 := newMockNode[types.ID, nodeClient](t) node2.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: -1, BlockDifficulty: nil}) node2.On("Order").Maybe().Return(int32(1)) - nodes := []Node[types.ID, Head, nodeClient]{node1, node2} + nodes := []Node[types.ID, nodeClient]{node1, node2} selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) assert.Same(t, node1, selector.Select()) @@ -86,10 +86,10 @@ func TestTotalDifficultyNodeSelector_None(t *testing.T) { t.Parallel() type nodeClient RPCClient[types.ID, Head] - var nodes []Node[types.ID, Head, nodeClient] + var nodes []Node[types.ID, nodeClient] for i := 0; i < 3; i++ { - node := newMockNode[types.ID, Head, nodeClient](t) + node := newMockNode[types.ID, nodeClient](t) if i == 0 { // first node is out of sync node.On("StateAndLatest").Return(NodeStateOutOfSync, ChainInfo{BlockNumber: -1}) @@ -109,11 +109,11 @@ func TestTotalDifficultyNodeSelectorWithOrder(t *testing.T) { t.Parallel() type nodeClient RPCClient[types.ID, Head] - var nodes []Node[types.ID, Head, nodeClient] + var nodes []Node[types.ID, nodeClient] t.Run("same td and order", func(t *testing.T) { for i := 0; i < 3; i++ { - node := newMockNode[types.ID, Head, nodeClient](t) + node := newMockNode[types.ID, nodeClient](t) node.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(10)}) node.On("Order").Return(int32(2)) @@ -125,71 +125,71 @@ func TestTotalDifficultyNodeSelectorWithOrder(t *testing.T) { }) t.Run("same td but different order", func(t *testing.T) { - node1 := newMockNode[types.ID, Head, nodeClient](t) + node1 := newMockNode[types.ID, nodeClient](t) node1.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 3, BlockDifficulty: big.NewInt(10)}) node1.On("Order").Return(int32(3)) - node2 := newMockNode[types.ID, Head, nodeClient](t) + node2 := newMockNode[types.ID, nodeClient](t) node2.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 3, BlockDifficulty: big.NewInt(10)}) node2.On("Order").Return(int32(1)) - node3 := newMockNode[types.ID, Head, nodeClient](t) + node3 := newMockNode[types.ID, nodeClient](t) node3.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 3, BlockDifficulty: big.NewInt(10)}) node3.On("Order").Return(int32(2)) - nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3} + nodes := []Node[types.ID, nodeClient]{node1, node2, node3} selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) //Should select the second node as it has the highest priority assert.Same(t, nodes[1], selector.Select()) }) t.Run("different td but same order", func(t *testing.T) { - node1 := newMockNode[types.ID, Head, nodeClient](t) + node1 := newMockNode[types.ID, nodeClient](t) node1.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(10)}) node1.On("Order").Maybe().Return(int32(3)) - node2 := newMockNode[types.ID, Head, nodeClient](t) + node2 := newMockNode[types.ID, nodeClient](t) node2.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(11)}) node2.On("Order").Maybe().Return(int32(3)) - node3 := newMockNode[types.ID, Head, nodeClient](t) + node3 := newMockNode[types.ID, nodeClient](t) node3.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(12)}) node3.On("Order").Return(int32(3)) - nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3} + nodes := []Node[types.ID, nodeClient]{node1, node2, node3} selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) //Should select the third node as it has the highest td assert.Same(t, nodes[2], selector.Select()) }) t.Run("different head and different order", func(t *testing.T) { - node1 := newMockNode[types.ID, Head, nodeClient](t) + node1 := newMockNode[types.ID, nodeClient](t) node1.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(100)}) node1.On("Order").Maybe().Return(int32(4)) - node2 := newMockNode[types.ID, Head, nodeClient](t) + node2 := newMockNode[types.ID, nodeClient](t) node2.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(110)}) node2.On("Order").Maybe().Return(int32(5)) - node3 := newMockNode[types.ID, Head, nodeClient](t) + node3 := newMockNode[types.ID, nodeClient](t) node3.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(110)}) node3.On("Order").Maybe().Return(int32(1)) - node4 := newMockNode[types.ID, Head, nodeClient](t) + node4 := newMockNode[types.ID, nodeClient](t) node4.On("StateAndLatest").Return(NodeStateAlive, ChainInfo{BlockNumber: 1, BlockDifficulty: big.NewInt(105)}) node4.On("Order").Maybe().Return(int32(2)) - nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3, node4} + nodes := []Node[types.ID, nodeClient]{node1, node2, node3, node4} selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) //Should select the third node as it has the highest td and will win the priority tie-breaker assert.Same(t, nodes[2], selector.Select()) diff --git a/core/chains/evm/client/chain_client.go b/core/chains/evm/client/chain_client.go index 5dc992039e2..d3d4ff84181 100644 --- a/core/chains/evm/client/chain_client.go +++ b/core/chains/evm/client/chain_client.go @@ -114,7 +114,7 @@ func NewChainClient( lggr logger.Logger, selectionMode string, leaseDuration time.Duration, - nodes []commonclient.Node[*big.Int, *evmtypes.Head, EvmRpcClient], + nodes []commonclient.Node[*big.Int, EvmRpcClient], sendonlys []commonclient.SendOnlyNode[*big.Int, EvmRpcClient], chainID *big.Int, clientErrors evmconfig.ClientErrors, diff --git a/core/chains/evm/client/evm_client.go b/core/chains/evm/client/evm_client.go index f427b350ef5..33723c8723e 100644 --- a/core/chains/evm/client/evm_client.go +++ b/core/chains/evm/client/evm_client.go @@ -15,7 +15,7 @@ import ( func NewEvmClient(cfg evmconfig.NodePool, chainCfg commonclient.ChainConfig, clientErrors evmconfig.ClientErrors, lggr logger.Logger, chainID *big.Int, nodes []*toml.Node, chainType chaintype.ChainType) Client { var empty url.URL - var primaries []commonclient.Node[*big.Int, *evmtypes.Head, EvmRpcClient] + var primaries []commonclient.Node[*big.Int, EvmRpcClient] var sendonlys []commonclient.SendOnlyNode[*big.Int, EvmRpcClient] for i, node := range nodes { rpc := NewRPCClient(cfg, lggr, empty, (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, diff --git a/core/chains/evm/client/mocks/client.go b/core/chains/evm/client/mocks/client.go index 34299f1b393..555bc331227 100644 --- a/core/chains/evm/client/mocks/client.go +++ b/core/chains/evm/client/mocks/client.go @@ -618,19 +618,19 @@ func (_m *Client) LatestFinalizedBlock(ctx context.Context) (*evmtypes.Head, err } // NodeStates provides a mock function with given fields: -func (_m *Client) NodeStates() map[string]string { +func (_m *Client) NodeStates() map[string]commonclient.NodeState { ret := _m.Called() if len(ret) == 0 { panic("no return value specified for NodeStates") } - var r0 map[string]string - if rf, ok := ret.Get(0).(func() map[string]string); ok { + var r0 map[string]commonclient.NodeState + if rf, ok := ret.Get(0).(func() map[string]commonclient.NodeState); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]string) + r0 = ret.Get(0).(map[string]commonclient.NodeState) } } From ae2afe013d5f47429dbf95097f5bb7f62d2899e9 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 12 Jun 2024 12:18:42 -0400 Subject: [PATCH 32/58] Remove unneeded generics --- common/client/multi_node.go | 40 +++++++++---------- common/client/multi_node_test.go | 6 +-- common/client/node_selector.go | 14 +++---- common/client/node_selector_highest_head.go | 14 +++---- .../client/node_selector_highest_head_test.go | 4 +- common/client/node_selector_priority_level.go | 36 +++++++---------- .../node_selector_priority_level_test.go | 2 +- common/client/node_selector_round_robin.go | 14 +++---- .../client/node_selector_round_robin_test.go | 2 +- common/client/node_selector_test.go | 2 +- .../client/node_selector_total_difficulty.go | 14 +++---- .../node_selector_total_difficulty_test.go | 4 +- .../evm/client/simulated_backend_client.go | 2 +- 13 files changed, 69 insertions(+), 85 deletions(-) diff --git a/common/client/multi_node.go b/common/client/multi_node.go index 81639939d2f..82995e203cf 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -52,9 +52,7 @@ type MultiNode[ type multiNode[ CHAIN_ID types.ID, - BLOCK_HASH types.Hashable, - HEAD types.Head[BLOCK_HASH], - RPC_CLIENT RPCClient[CHAIN_ID, HEAD], + RPC_CLIENT any, ] struct { services.StateMachine primaryNodes []Node[CHAIN_ID, RPC_CLIENT] @@ -62,7 +60,7 @@ type multiNode[ chainID CHAIN_ID lggr logger.SugaredLogger selectionMode string - nodeSelector NodeSelector[CHAIN_ID, HEAD, RPC_CLIENT] + nodeSelector NodeSelector[CHAIN_ID, RPC_CLIENT] leaseDuration time.Duration leaseTicker *time.Ticker chainFamily string @@ -77,9 +75,7 @@ type multiNode[ func NewMultiNode[ CHAIN_ID types.ID, - BLOCK_HASH types.Hashable, - HEAD types.Head[BLOCK_HASH], - RPC_CLIENT RPCClient[CHAIN_ID, HEAD], + RPC_CLIENT any, //RPCClient[CHAIN_ID, HEAD], ]( lggr logger.Logger, selectionMode string, // type of the "best" RPC selector (e.g HighestHead, RoundRobin, etc.) @@ -93,7 +89,7 @@ func NewMultiNode[ // Prometheus' default interval is 15s, set this to under 7.5s to avoid // aliasing (see: https://en.wikipedia.org/wiki/Nyquist_frequency) const reportInterval = 6500 * time.Millisecond - c := &multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]{ + c := &multiNode[CHAIN_ID, RPC_CLIENT]{ primaryNodes: primaryNodes, sendOnlyNodes: sendOnlyNodes, chainID: chainID, @@ -111,11 +107,11 @@ func NewMultiNode[ return c } -func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) ChainID() CHAIN_ID { +func (c *multiNode[CHAIN_ID, RPC_CLIENT]) ChainID() CHAIN_ID { return c.chainID } -func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) DoAll(ctx context.Context, do func(ctx context.Context, rpc RPC_CLIENT, isSendOnly bool) bool) error { +func (c *multiNode[CHAIN_ID, RPC_CLIENT]) DoAll(ctx context.Context, do func(ctx context.Context, rpc RPC_CLIENT, isSendOnly bool) bool) error { callsCompleted := 0 for _, n := range c.primaryNodes { if ctx.Err() != nil { @@ -144,7 +140,7 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) DoAll(ctx context.Co return nil } -func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) NodeStates() map[string]NodeState { +func (c *multiNode[CHAIN_ID, RPC_CLIENT]) NodeStates() map[string]NodeState { states := map[string]NodeState{} for _, n := range c.primaryNodes { states[n.String()] = n.State() @@ -159,7 +155,7 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) NodeStates() map[str // // Nodes handle their own redialing and runloops, so this function does not // return any error if the nodes aren't available -func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) Dial(ctx context.Context) error { +func (c *multiNode[CHAIN_ID, RPC_CLIENT]) Dial(ctx context.Context) error { return c.StartOnce("MultiNode", func() (merr error) { if len(c.primaryNodes) == 0 { return fmt.Errorf("no available nodes for chain %s", c.chainID.String()) @@ -169,7 +165,8 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) Dial(ctx context.Con if n.ConfiguredChainID().String() != c.chainID.String() { return ms.CloseBecause(fmt.Errorf("node %s has configured chain ID %s which does not match multinode configured chain ID of %s", n.String(), n.ConfiguredChainID().String(), c.chainID.String())) } - rawNode, ok := n.(*node[CHAIN_ID, HEAD, RPC_CLIENT]) + /* TODO: Dmytro's PR on local finality handles this better. + rawNode, ok := n.(*node[CHAIN_ID, *evmtypes.Head, RPC_CLIENT]) if ok { // This is a bit hacky but it allows the node to be aware of // pool state and prevent certain state transitions that might @@ -177,6 +174,7 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) Dial(ctx context.Con // node in a degraded state than no primaryNodes at all. rawNode.nLiveNodes = c.nLiveNodes } + */ // node will handle its own redialing and automatic recovery if err := ms.Start(ctx, n); err != nil { return err @@ -206,7 +204,7 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) Dial(ctx context.Con } // Close tears down the MultiNode and closes all nodes -func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) Close() error { +func (c *multiNode[CHAIN_ID, RPC_CLIENT]) Close() error { return c.StopOnce("MultiNode", func() error { close(c.chStop) c.wg.Wait() @@ -217,7 +215,7 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) Close() error { // SelectRPC returns an RPC of an active node. If there are no active nodes it returns an error. // Call this method from your chain-specific client implementation to access any chain-specific rpc calls. -func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) SelectRPC() (rpc RPC_CLIENT, err error) { +func (c *multiNode[CHAIN_ID, RPC_CLIENT]) SelectRPC() (rpc RPC_CLIENT, err error) { n, err := c.selectNode() if err != nil { return rpc, err @@ -226,7 +224,7 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) SelectRPC() (rpc RPC } // selectNode returns the active Node, if it is still NodeStateAlive, otherwise it selects a new one from the NodeSelector. -func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) selectNode() (node Node[CHAIN_ID, RPC_CLIENT], err error) { +func (c *multiNode[CHAIN_ID, RPC_CLIENT]) selectNode() (node Node[CHAIN_ID, RPC_CLIENT], err error) { c.activeMu.RLock() node = c.activeNode c.activeMu.RUnlock() @@ -256,7 +254,7 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) selectNode() (node N // nLiveNodes returns the number of currently alive nodes, as well as the highest block number and greatest total difficulty. // totalDifficulty will be 0 if all nodes return nil. -func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) nLiveNodes() (nLiveNodes int, blockNumber int64, totalDifficulty *big.Int) { +func (c *multiNode[CHAIN_ID, RPC_CLIENT]) nLiveNodes() (nLiveNodes int, blockNumber int64, totalDifficulty *big.Int) { totalDifficulty = big.NewInt(0) for _, n := range c.primaryNodes { if s, chainInfo := n.StateAndLatest(); s == NodeStateAlive { @@ -272,7 +270,7 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) nLiveNodes() (nLiveN return } -func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) checkLease() { +func (c *multiNode[CHAIN_ID, RPC_CLIENT]) checkLease() { bestNode := c.nodeSelector.Select() for _, n := range c.primaryNodes { // Terminate client subscriptions. Services are responsible for reconnecting, which will be routed to the new @@ -290,7 +288,7 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) checkLease() { c.activeMu.Unlock() } -func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) checkLeaseLoop() { +func (c *multiNode[CHAIN_ID, RPC_CLIENT]) checkLeaseLoop() { defer c.wg.Done() c.leaseTicker = time.NewTicker(c.leaseDuration) defer c.leaseTicker.Stop() @@ -305,7 +303,7 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) checkLeaseLoop() { } } -func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) runLoop() { +func (c *multiNode[CHAIN_ID, RPC_CLIENT]) runLoop() { defer c.wg.Done() c.report() @@ -323,7 +321,7 @@ func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) runLoop() { } } -func (c *multiNode[CHAIN_ID, BLOCK_HASH, HEAD, RPC_CLIENT]) report() { +func (c *multiNode[CHAIN_ID, RPC_CLIENT]) report() { type nodeWithState struct { Node string State string diff --git a/common/client/multi_node_test.go b/common/client/multi_node_test.go index 9f3de8ef61c..0641992142c 100644 --- a/common/client/multi_node_test.go +++ b/common/client/multi_node_test.go @@ -22,7 +22,7 @@ import ( type multiNodeRPCClient RPCClient[types.ID, types.Head[Hashable]] type testMultiNode struct { - *multiNode[types.ID, Hashable, types.Head[Hashable], multiNodeRPCClient] + *multiNode[types.ID, multiNodeRPCClient] } type multiNodeOpts struct { @@ -40,10 +40,10 @@ func newTestMultiNode(t *testing.T, opts multiNodeOpts) testMultiNode { opts.logger = logger.Test(t) } - result := NewMultiNode[types.ID, Hashable, types.Head[Hashable], multiNodeRPCClient]( + result := NewMultiNode[types.ID, multiNodeRPCClient]( opts.logger, opts.selectionMode, opts.leaseDuration, opts.nodes, opts.sendonlys, opts.chainID, opts.chainFamily) return testMultiNode{ - result.(*multiNode[types.ID, Hashable, types.Head[Hashable], multiNodeRPCClient]), + result.(*multiNode[types.ID, multiNodeRPCClient]), } } diff --git a/common/client/node_selector.go b/common/client/node_selector.go index 345217132b6..d62fac9a1e5 100644 --- a/common/client/node_selector.go +++ b/common/client/node_selector.go @@ -16,7 +16,6 @@ const ( //go:generate mockery --quiet --name NodeSelector --structname mockNodeSelector --filename "mock_node_selector_test.go" --inpackage --case=underscore type NodeSelector[ CHAIN_ID types.ID, - HEAD Head, RPC any, ] interface { // Select returns a Node, or nil if none can be selected. @@ -28,18 +27,17 @@ type NodeSelector[ func newNodeSelector[ CHAIN_ID types.ID, - HEAD Head, - RPC RPCClient[CHAIN_ID, HEAD], -](selectionMode string, nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { + RPC any, //RPCClient[CHAIN_ID, HEAD], +](selectionMode string, nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, RPC] { switch selectionMode { case NodeSelectionModeHighestHead: - return NewHighestHeadNodeSelector[CHAIN_ID, HEAD, RPC](nodes) + return NewHighestHeadNodeSelector[CHAIN_ID, RPC](nodes) case NodeSelectionModeRoundRobin: - return NewRoundRobinSelector[CHAIN_ID, HEAD, RPC](nodes) + return NewRoundRobinSelector[CHAIN_ID, RPC](nodes) case NodeSelectionModeTotalDifficulty: - return NewTotalDifficultyNodeSelector[CHAIN_ID, HEAD, RPC](nodes) + return NewTotalDifficultyNodeSelector[CHAIN_ID, RPC](nodes) case NodeSelectionModePriorityLevel: - return NewPriorityLevelNodeSelector[CHAIN_ID, HEAD, RPC](nodes) + return NewPriorityLevelNodeSelector[CHAIN_ID, RPC](nodes) default: panic(fmt.Sprintf("unsupported NodeSelectionMode: %s", selectionMode)) } diff --git a/common/client/node_selector_highest_head.go b/common/client/node_selector_highest_head.go index b9dd6345b31..3ec5da1c205 100644 --- a/common/client/node_selector_highest_head.go +++ b/common/client/node_selector_highest_head.go @@ -8,19 +8,17 @@ import ( type highestHeadNodeSelector[ CHAIN_ID types.ID, - HEAD Head, - RPC RPCClient[CHAIN_ID, HEAD], + RPC any, //RPCClient[CHAIN_ID, HEAD], ] []Node[CHAIN_ID, RPC] func NewHighestHeadNodeSelector[ CHAIN_ID types.ID, - HEAD Head, - RPC RPCClient[CHAIN_ID, HEAD], -](nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { - return highestHeadNodeSelector[CHAIN_ID, HEAD, RPC](nodes) + RPC any, //RPCClient[CHAIN_ID, HEAD], +](nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, RPC] { + return highestHeadNodeSelector[CHAIN_ID, RPC](nodes) } -func (s highestHeadNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, RPC] { +func (s highestHeadNodeSelector[CHAIN_ID, RPC]) Select() Node[CHAIN_ID, RPC] { var highestHeadNumber int64 = math.MinInt64 var highestHeadNodes []Node[CHAIN_ID, RPC] for _, n := range s { @@ -37,6 +35,6 @@ func (s highestHeadNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, RP return firstOrHighestPriority(highestHeadNodes) } -func (s highestHeadNodeSelector[CHAIN_ID, HEAD, RPC]) Name() string { +func (s highestHeadNodeSelector[CHAIN_ID, RPC]) Name() string { return NodeSelectionModeHighestHead } diff --git a/common/client/node_selector_highest_head_test.go b/common/client/node_selector_highest_head_test.go index 15d6489e95a..9b79dbc794d 100644 --- a/common/client/node_selector_highest_head_test.go +++ b/common/client/node_selector_highest_head_test.go @@ -9,7 +9,7 @@ import ( ) func TestHighestHeadNodeSelectorName(t *testing.T) { - selector := newNodeSelector[types.ID, Head, RPCClient[types.ID, Head]](NodeSelectionModeHighestHead, nil) + selector := newNodeSelector[types.ID, RPCClient[types.ID, Head]](NodeSelectionModeHighestHead, nil) assert.Equal(t, selector.Name(), NodeSelectionModeHighestHead) } @@ -36,7 +36,7 @@ func TestHighestHeadNodeSelector(t *testing.T) { nodes = append(nodes, node) } - selector := newNodeSelector[types.ID, Head, nodeClient](NodeSelectionModeHighestHead, nodes) + selector := newNodeSelector[types.ID, nodeClient](NodeSelectionModeHighestHead, nodes) assert.Same(t, nodes[2], selector.Select()) t.Run("stick to the same node", func(t *testing.T) { diff --git a/common/client/node_selector_priority_level.go b/common/client/node_selector_priority_level.go index 5587345cc20..d9a45c2d5de 100644 --- a/common/client/node_selector_priority_level.go +++ b/common/client/node_selector_priority_level.go @@ -10,8 +10,7 @@ import ( type priorityLevelNodeSelector[ CHAIN_ID types.ID, - HEAD Head, - RPC RPCClient[CHAIN_ID, HEAD], + RPC any, ] struct { nodes []Node[CHAIN_ID, RPC] roundRobinCount []atomic.Uint32 @@ -19,8 +18,7 @@ type priorityLevelNodeSelector[ type nodeWithPriority[ CHAIN_ID types.ID, - HEAD Head, - RPC RPCClient[CHAIN_ID, HEAD], + RPC any, ] struct { node Node[CHAIN_ID, RPC] priority int32 @@ -28,16 +26,15 @@ type nodeWithPriority[ func NewPriorityLevelNodeSelector[ CHAIN_ID types.ID, - HEAD Head, - RPC RPCClient[CHAIN_ID, HEAD], -](nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { - return &priorityLevelNodeSelector[CHAIN_ID, HEAD, RPC]{ + RPC any, +](nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, RPC] { + return &priorityLevelNodeSelector[CHAIN_ID, RPC]{ nodes: nodes, roundRobinCount: make([]atomic.Uint32, nrOfPriorityTiers(nodes)), } } -func (s priorityLevelNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, RPC] { +func (s priorityLevelNodeSelector[CHAIN_ID, RPC]) Select() Node[CHAIN_ID, RPC] { nodes := s.getHighestPriorityAliveTier() if len(nodes) == 0 { @@ -52,17 +49,17 @@ func (s priorityLevelNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, return nodes[idx].node } -func (s priorityLevelNodeSelector[CHAIN_ID, HEAD, RPC]) Name() string { +func (s priorityLevelNodeSelector[CHAIN_ID, RPC]) Name() string { return NodeSelectionModePriorityLevel } // getHighestPriorityAliveTier filters nodes that are not in state NodeStateAlive and // returns only the highest tier of alive nodes -func (s priorityLevelNodeSelector[CHAIN_ID, HEAD, RPC]) getHighestPriorityAliveTier() []nodeWithPriority[CHAIN_ID, HEAD, RPC] { - var nodes []nodeWithPriority[CHAIN_ID, HEAD, RPC] +func (s priorityLevelNodeSelector[CHAIN_ID, RPC]) getHighestPriorityAliveTier() []nodeWithPriority[CHAIN_ID, RPC] { + var nodes []nodeWithPriority[CHAIN_ID, RPC] for _, n := range s.nodes { if n.State() == NodeStateAlive { - nodes = append(nodes, nodeWithPriority[CHAIN_ID, HEAD, RPC]{n, n.Order()}) + nodes = append(nodes, nodeWithPriority[CHAIN_ID, RPC]{n, n.Order()}) } } @@ -76,14 +73,13 @@ func (s priorityLevelNodeSelector[CHAIN_ID, HEAD, RPC]) getHighestPriorityAliveT // removeLowerTiers take a slice of nodeWithPriority[CHAIN_ID, BLOCK_HASH, HEAD, RPC] and keeps only the highest tier func removeLowerTiers[ CHAIN_ID types.ID, - HEAD Head, - RPC RPCClient[CHAIN_ID, HEAD], -](nodes []nodeWithPriority[CHAIN_ID, HEAD, RPC]) []nodeWithPriority[CHAIN_ID, HEAD, RPC] { + RPC any, +](nodes []nodeWithPriority[CHAIN_ID, RPC]) []nodeWithPriority[CHAIN_ID, RPC] { sort.SliceStable(nodes, func(i, j int) bool { return nodes[i].priority > nodes[j].priority }) - var nodes2 []nodeWithPriority[CHAIN_ID, HEAD, RPC] + var nodes2 []nodeWithPriority[CHAIN_ID, RPC] currentPriority := nodes[len(nodes)-1].priority for _, n := range nodes { @@ -98,8 +94,7 @@ func removeLowerTiers[ // nrOfPriorityTiers calculates the total number of priority tiers func nrOfPriorityTiers[ CHAIN_ID types.ID, - HEAD Head, - RPC RPCClient[CHAIN_ID, HEAD], + RPC any, ](nodes []Node[CHAIN_ID, RPC]) int32 { highestPriority := int32(0) for _, n := range nodes { @@ -114,8 +109,7 @@ func nrOfPriorityTiers[ // firstOrHighestPriority takes a list of nodes and returns the first one with the highest priority func firstOrHighestPriority[ CHAIN_ID types.ID, - HEAD Head, - RPC RPCClient[CHAIN_ID, HEAD], + RPC any, ](nodes []Node[CHAIN_ID, RPC]) Node[CHAIN_ID, RPC] { hp := int32(math.MaxInt32) var node Node[CHAIN_ID, RPC] diff --git a/common/client/node_selector_priority_level_test.go b/common/client/node_selector_priority_level_test.go index b3f69d18d3c..b85a6209a3b 100644 --- a/common/client/node_selector_priority_level_test.go +++ b/common/client/node_selector_priority_level_test.go @@ -9,7 +9,7 @@ import ( ) func TestPriorityLevelNodeSelectorName(t *testing.T) { - selector := newNodeSelector[types.ID, Head, RPCClient[types.ID, Head]](NodeSelectionModePriorityLevel, nil) + selector := newNodeSelector[types.ID, RPCClient[types.ID, Head]](NodeSelectionModePriorityLevel, nil) assert.Equal(t, selector.Name(), NodeSelectionModePriorityLevel) } diff --git a/common/client/node_selector_round_robin.go b/common/client/node_selector_round_robin.go index 23bd0474bf2..50b648594e6 100644 --- a/common/client/node_selector_round_robin.go +++ b/common/client/node_selector_round_robin.go @@ -8,8 +8,7 @@ import ( type roundRobinSelector[ CHAIN_ID types.ID, - HEAD Head, - RPC RPCClient[CHAIN_ID, HEAD], + RPC any, ] struct { nodes []Node[CHAIN_ID, RPC] roundRobinCount atomic.Uint32 @@ -17,15 +16,14 @@ type roundRobinSelector[ func NewRoundRobinSelector[ CHAIN_ID types.ID, - HEAD Head, - RPC RPCClient[CHAIN_ID, HEAD], -](nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { - return &roundRobinSelector[CHAIN_ID, HEAD, RPC]{ + RPC any, +](nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, RPC] { + return &roundRobinSelector[CHAIN_ID, RPC]{ nodes: nodes, } } -func (s *roundRobinSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, RPC] { +func (s *roundRobinSelector[CHAIN_ID, RPC]) Select() Node[CHAIN_ID, RPC] { var liveNodes []Node[CHAIN_ID, RPC] for _, n := range s.nodes { if n.State() == NodeStateAlive { @@ -45,6 +43,6 @@ func (s *roundRobinSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, RPC] { return liveNodes[idx] } -func (s *roundRobinSelector[CHAIN_ID, HEAD, RPC]) Name() string { +func (s *roundRobinSelector[CHAIN_ID, RPC]) Name() string { return NodeSelectionModeRoundRobin } diff --git a/common/client/node_selector_round_robin_test.go b/common/client/node_selector_round_robin_test.go index 148c1a320bd..6b59e299248 100644 --- a/common/client/node_selector_round_robin_test.go +++ b/common/client/node_selector_round_robin_test.go @@ -9,7 +9,7 @@ import ( ) func TestRoundRobinNodeSelectorName(t *testing.T) { - selector := newNodeSelector[types.ID, Head, RPCClient[types.ID, Head]](NodeSelectionModeRoundRobin, nil) + selector := newNodeSelector[types.ID, RPCClient[types.ID, Head]](NodeSelectionModeRoundRobin, nil) assert.Equal(t, selector.Name(), NodeSelectionModeRoundRobin) } diff --git a/common/client/node_selector_test.go b/common/client/node_selector_test.go index ac280f7142e..f652bfc50ad 100644 --- a/common/client/node_selector_test.go +++ b/common/client/node_selector_test.go @@ -12,7 +12,7 @@ func TestNodeSelector(t *testing.T) { // rest of the tests are located in specific node selectors tests t.Run("panics on unknown type", func(t *testing.T) { assert.Panics(t, func() { - _ = newNodeSelector[types.ID, Head, RPCClient[types.ID, Head]]("unknown", nil) + _ = newNodeSelector[types.ID, RPCClient[types.ID, Head]]("unknown", nil) }) }) } diff --git a/common/client/node_selector_total_difficulty.go b/common/client/node_selector_total_difficulty.go index 36039661bf8..b413c717194 100644 --- a/common/client/node_selector_total_difficulty.go +++ b/common/client/node_selector_total_difficulty.go @@ -8,19 +8,17 @@ import ( type totalDifficultyNodeSelector[ CHAIN_ID types.ID, - HEAD Head, - RPC RPCClient[CHAIN_ID, HEAD], + RPC any, ] []Node[CHAIN_ID, RPC] func NewTotalDifficultyNodeSelector[ CHAIN_ID types.ID, - HEAD Head, - RPC RPCClient[CHAIN_ID, HEAD], -](nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { - return totalDifficultyNodeSelector[CHAIN_ID, HEAD, RPC](nodes) + RPC any, +](nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, RPC] { + return totalDifficultyNodeSelector[CHAIN_ID, RPC](nodes) } -func (s totalDifficultyNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, RPC] { +func (s totalDifficultyNodeSelector[CHAIN_ID, RPC]) Select() Node[CHAIN_ID, RPC] { // NodeNoNewHeadsThreshold may not be enabled, in this case all nodes have td == nil var highestTD *big.Int var nodes []Node[CHAIN_ID, RPC] @@ -51,6 +49,6 @@ func (s totalDifficultyNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID return firstOrHighestPriority(nodes) } -func (s totalDifficultyNodeSelector[CHAIN_ID, HEAD, RPC]) Name() string { +func (s totalDifficultyNodeSelector[CHAIN_ID, RPC]) Name() string { return NodeSelectionModeTotalDifficulty } diff --git a/common/client/node_selector_total_difficulty_test.go b/common/client/node_selector_total_difficulty_test.go index 42573f59f0e..cc8b37a4cae 100644 --- a/common/client/node_selector_total_difficulty_test.go +++ b/common/client/node_selector_total_difficulty_test.go @@ -1,7 +1,7 @@ package client import ( - big "math/big" + "math/big" "testing" "github.com/smartcontractkit/chainlink/v2/common/types" @@ -10,7 +10,7 @@ import ( ) func TestTotalDifficultyNodeSelectorName(t *testing.T) { - selector := newNodeSelector[types.ID, Head, RPCClient[types.ID, Head]](NodeSelectionModeTotalDifficulty, nil) + selector := newNodeSelector[types.ID, RPCClient[types.ID, Head]](NodeSelectionModeTotalDifficulty, nil) assert.Equal(t, selector.Name(), NodeSelectionModeTotalDifficulty) } diff --git a/core/chains/evm/client/simulated_backend_client.go b/core/chains/evm/client/simulated_backend_client.go index 90dbcaafecc..ae5fe023ebf 100644 --- a/core/chains/evm/client/simulated_backend_client.go +++ b/core/chains/evm/client/simulated_backend_client.go @@ -505,7 +505,7 @@ func (c *SimulatedBackendClient) Backend() *backends.SimulatedBackend { } // NodeStates implements evmclient.Client -func (c *SimulatedBackendClient) NodeStates() map[string]string { return nil } +func (c *SimulatedBackendClient) NodeStates() map[string]commonclient.NodeState { return nil } // Commit imports all the pending transactions as a single block and starts a // fresh new state. From 0454491e6d61702756f888357e179abf339a062a Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 12 Jun 2024 12:35:48 -0400 Subject: [PATCH 33/58] Remove unneeded generics from Multinode --- common/client/mock_node_selector_test.go | 12 +-- common/client/multi_node.go | 2 +- common/client/multi_node_test.go | 8 +- core/chains/evm/client/chain_client.go | 10 +- core/chains/evm/client/chain_client_test.go | 4 +- core/chains/evm/client/evm_client.go | 6 +- core/chains/evm/client/helpers_test.go | 14 +-- ..._test.go => mock_chain_client_rpc_test.go} | 100 +++++++++--------- core/chains/evm/client/rpc_client.go | 6 +- 9 files changed, 81 insertions(+), 81 deletions(-) rename core/chains/evm/client/{mock_evm_rpc_client_test.go => mock_chain_client_rpc_test.go} (83%) diff --git a/common/client/mock_node_selector_test.go b/common/client/mock_node_selector_test.go index e303b813422..798eda09f06 100644 --- a/common/client/mock_node_selector_test.go +++ b/common/client/mock_node_selector_test.go @@ -8,12 +8,12 @@ import ( ) // mockNodeSelector is an autogenerated mock type for the NodeSelector type -type mockNodeSelector[CHAIN_ID types.ID, HEAD Head, RPC interface{}] struct { +type mockNodeSelector[CHAIN_ID types.ID, RPC interface{}] struct { mock.Mock } // Name provides a mock function with given fields: -func (_m *mockNodeSelector[CHAIN_ID, HEAD, RPC]) Name() string { +func (_m *mockNodeSelector[CHAIN_ID, RPC]) Name() string { ret := _m.Called() if len(ret) == 0 { @@ -31,7 +31,7 @@ func (_m *mockNodeSelector[CHAIN_ID, HEAD, RPC]) Name() string { } // Select provides a mock function with given fields: -func (_m *mockNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, RPC] { +func (_m *mockNodeSelector[CHAIN_ID, RPC]) Select() Node[CHAIN_ID, RPC] { ret := _m.Called() if len(ret) == 0 { @@ -52,11 +52,11 @@ func (_m *mockNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, RPC] { // newMockNodeSelector creates a new instance of mockNodeSelector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. -func newMockNodeSelector[CHAIN_ID types.ID, HEAD Head, RPC interface{}](t interface { +func newMockNodeSelector[CHAIN_ID types.ID, RPC interface{}](t interface { mock.TestingT Cleanup(func()) -}) *mockNodeSelector[CHAIN_ID, HEAD, RPC] { - mock := &mockNodeSelector[CHAIN_ID, HEAD, RPC]{} +}) *mockNodeSelector[CHAIN_ID, RPC] { + mock := &mockNodeSelector[CHAIN_ID, RPC]{} mock.Mock.Test(t) t.Cleanup(func() { mock.AssertExpectations(t) }) diff --git a/common/client/multi_node.go b/common/client/multi_node.go index 82995e203cf..a7f1cba0393 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -75,7 +75,7 @@ type multiNode[ func NewMultiNode[ CHAIN_ID types.ID, - RPC_CLIENT any, //RPCClient[CHAIN_ID, HEAD], + RPC_CLIENT any, ]( lggr logger.Logger, selectionMode string, // type of the "best" RPC selector (e.g HighestHead, RoundRobin, etc.) diff --git a/common/client/multi_node_test.go b/common/client/multi_node_test.go index 0641992142c..7218db1a06e 100644 --- a/common/client/multi_node_test.go +++ b/common/client/multi_node_test.go @@ -273,7 +273,7 @@ func TestMultiNode_CheckLease(t *testing.T) { //node.On("SubscribersCount").Return(int32(2)) node.On("UnsubscribeAll") bestNode := newHealthyNode(t, chainID) - nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + nodeSelector := newMockNodeSelector[types.ID, multiNodeRPCClient](t) nodeSelector.On("Select").Return(bestNode) lggr, observedLogs := logger.TestObserved(t, zap.InfoLevel) mn := newTestMultiNode(t, multiNodeOpts{ @@ -347,7 +347,7 @@ func TestMultiNode_selectNode(t *testing.T) { chainID: chainID, nodes: []Node[types.ID, multiNodeRPCClient]{node1, node2}, }) - nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + nodeSelector := newMockNodeSelector[types.ID, multiNodeRPCClient](t) nodeSelector.On("Select").Return(node1).Once() mn.nodeSelector = nodeSelector prevActiveNode, err := mn.selectNode() @@ -369,7 +369,7 @@ func TestMultiNode_selectNode(t *testing.T) { chainID: chainID, nodes: []Node[types.ID, multiNodeRPCClient]{oldBest, newBest}, }) - nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + nodeSelector := newMockNodeSelector[types.ID, multiNodeRPCClient](t) nodeSelector.On("Select").Return(oldBest).Once() mn.nodeSelector = nodeSelector activeNode, err := mn.selectNode() @@ -391,7 +391,7 @@ func TestMultiNode_selectNode(t *testing.T) { chainID: chainID, logger: lggr, }) - nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + nodeSelector := newMockNodeSelector[types.ID, multiNodeRPCClient](t) nodeSelector.On("Select").Return(nil).Once() nodeSelector.On("Name").Return("MockedNodeSelector").Once() mn.nodeSelector = nodeSelector diff --git a/core/chains/evm/client/chain_client.go b/core/chains/evm/client/chain_client.go index d3d4ff84181..fe2bd36a4b1 100644 --- a/core/chains/evm/client/chain_client.go +++ b/core/chains/evm/client/chain_client.go @@ -103,7 +103,7 @@ func ContextWithDefaultTimeout() (ctx context.Context, cancel context.CancelFunc type chainClient struct { multiNode commonclient.MultiNode[ *big.Int, - EvmRpcClient, + ChainClientRPC, ] logger logger.SugaredLogger chainType chaintype.ChainType @@ -114,8 +114,8 @@ func NewChainClient( lggr logger.Logger, selectionMode string, leaseDuration time.Duration, - nodes []commonclient.Node[*big.Int, EvmRpcClient], - sendonlys []commonclient.SendOnlyNode[*big.Int, EvmRpcClient], + nodes []commonclient.Node[*big.Int, ChainClientRPC], + sendonlys []commonclient.SendOnlyNode[*big.Int, ChainClientRPC], chainID *big.Int, clientErrors evmconfig.ClientErrors, ) Client { @@ -165,13 +165,13 @@ func (c *chainClient) BatchCallContextAll(ctx context.Context, b []ethrpc.BatchE return selectionErr } - doFunc := func(ctx context.Context, rpc EvmRpcClient, isSendOnly bool) bool { + doFunc := func(ctx context.Context, rpc ChainClientRPC, isSendOnly bool) bool { if rpc == main { return true } // Parallel call made to all other nodes with ignored return value wg.Add(1) - go func(rpc EvmRpcClient) { + go func(rpc ChainClientRPC) { defer wg.Done() err := rpc.BatchCallContext(ctx, b) if err != nil { diff --git a/core/chains/evm/client/chain_client_test.go b/core/chains/evm/client/chain_client_test.go index f5189f2ad87..5addd0a5bfc 100644 --- a/core/chains/evm/client/chain_client_test.go +++ b/core/chains/evm/client/chain_client_test.go @@ -745,8 +745,8 @@ func TestEthClient_SubscribeNewHead(t *testing.T) { } */ -func newMockRpc(t *testing.T) *client.MockEvmRpcClient { - mockRpc := client.NewMockEvmRpcClient(t) +func newMockRpc(t *testing.T) *client.MockChainClientRPC { + mockRpc := client.NewMockChainClientRPC(t) mockRpc.On("Dial", mock.Anything).Return(nil).Once() mockRpc.On("Close").Return(nil).Once() mockRpc.On("ChainID", mock.Anything).Return(testutils.FixtureChainID, nil).Once() diff --git a/core/chains/evm/client/evm_client.go b/core/chains/evm/client/evm_client.go index 33723c8723e..e8207e25088 100644 --- a/core/chains/evm/client/evm_client.go +++ b/core/chains/evm/client/evm_client.go @@ -15,12 +15,12 @@ import ( func NewEvmClient(cfg evmconfig.NodePool, chainCfg commonclient.ChainConfig, clientErrors evmconfig.ClientErrors, lggr logger.Logger, chainID *big.Int, nodes []*toml.Node, chainType chaintype.ChainType) Client { var empty url.URL - var primaries []commonclient.Node[*big.Int, EvmRpcClient] - var sendonlys []commonclient.SendOnlyNode[*big.Int, EvmRpcClient] + var primaries []commonclient.Node[*big.Int, ChainClientRPC] + var sendonlys []commonclient.SendOnlyNode[*big.Int, ChainClientRPC] for i, node := range nodes { rpc := NewRPCClient(cfg, lggr, empty, (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, commonclient.Secondary) - newNode := commonclient.NewNode[*big.Int, *evmtypes.Head, EvmRpcClient](cfg, chainCfg, + newNode := commonclient.NewNode[*big.Int, *evmtypes.Head, ChainClientRPC](cfg, chainCfg, lggr, (url.URL)(*node.WSURL), (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, *node.Order, rpc, "EVM") diff --git a/core/chains/evm/client/helpers_test.go b/core/chains/evm/client/helpers_test.go index 0c6974550a4..0d77f33e62f 100644 --- a/core/chains/evm/client/helpers_test.go +++ b/core/chains/evm/client/helpers_test.go @@ -134,18 +134,18 @@ func NewChainClientWithTestNode( } rpc := NewRPCClient(nodePoolCfg, lggr, *parsed, rpcHTTPURL, "eth-primary-rpc-0", id, chainID, commonclient.Primary) - n := commonclient.NewNode[*big.Int, *evmtypes.Head, EvmRpcClient]( + n := commonclient.NewNode[*big.Int, *evmtypes.Head, ChainClientRPC]( nodeCfg, clientMocks.ChainConfig{NoNewHeadsThresholdVal: noNewHeadsThreshold}, lggr, *parsed, rpcHTTPURL, "eth-primary-node-0", id, chainID, 1, rpc, "EVM") - primaries := []commonclient.Node[*big.Int, *evmtypes.Head, EvmRpcClient]{n} + primaries := []commonclient.Node[*big.Int, ChainClientRPC]{n} - var sendonlys []commonclient.SendOnlyNode[*big.Int, EvmRpcClient] + var sendonlys []commonclient.SendOnlyNode[*big.Int, ChainClientRPC] for i, u := range sendonlyRPCURLs { if u.Scheme != "http" && u.Scheme != "https" { return nil, pkgerrors.Errorf("sendonly ethereum rpc url scheme must be http(s): %s", u.String()) } var empty url.URL rpc := NewRPCClient(nodePoolCfg, lggr, empty, &sendonlyRPCURLs[i], fmt.Sprintf("eth-sendonly-rpc-%d", i), id, chainID, commonclient.Secondary) - s := commonclient.NewSendOnlyNode[*big.Int, EvmRpcClient]( + s := commonclient.NewSendOnlyNode[*big.Int, ChainClientRPC]( lggr, u, fmt.Sprintf("eth-sendonly-%d", i), chainID, rpc) sendonlys = append(sendonlys, s) } @@ -176,7 +176,7 @@ func NewChainClientWithMockedRpc( leaseDuration time.Duration, noNewHeadsThreshold time.Duration, chainID *big.Int, - rpc EvmRpcClient, + rpc ChainClientRPC, ) Client { lggr := logger.Test(t) @@ -185,9 +185,9 @@ func NewChainClientWithMockedRpc( } parsed, _ := url.ParseRequestURI("ws://test") - n := commonclient.NewNode[*big.Int, *evmtypes.Head, EvmRpcClient]( + n := commonclient.NewNode[*big.Int, *evmtypes.Head, ChainClientRPC]( cfg, clientMocks.ChainConfig{NoNewHeadsThresholdVal: noNewHeadsThreshold}, lggr, *parsed, nil, "eth-primary-node-0", 1, chainID, 1, rpc, "EVM") - primaries := []commonclient.Node[*big.Int, *evmtypes.Head, EvmRpcClient]{n} + primaries := []commonclient.Node[*big.Int, ChainClientRPC]{n} clientErrors := NewTestClientErrors() c := NewChainClient(lggr, selectionMode, leaseDuration, primaries, nil, chainID, &clientErrors) t.Cleanup(c.Close) diff --git a/core/chains/evm/client/mock_evm_rpc_client_test.go b/core/chains/evm/client/mock_chain_client_rpc_test.go similarity index 83% rename from core/chains/evm/client/mock_evm_rpc_client_test.go rename to core/chains/evm/client/mock_chain_client_rpc_test.go index 23433d846b1..fcf81c3dfb7 100644 --- a/core/chains/evm/client/mock_evm_rpc_client_test.go +++ b/core/chains/evm/client/mock_chain_client_rpc_test.go @@ -26,13 +26,13 @@ import ( types "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" ) -// MockEvmRpcClient is an autogenerated mock type for the EvmRpcClient type -type MockEvmRpcClient struct { +// MockChainClientRPC is an autogenerated mock type for the ChainClientRPC type +type MockChainClientRPC struct { mock.Mock } // BalanceAt provides a mock function with given fields: ctx, accountAddress, blockNumber -func (_m *MockEvmRpcClient) BalanceAt(ctx context.Context, accountAddress common.Address, blockNumber *big.Int) (*big.Int, error) { +func (_m *MockChainClientRPC) BalanceAt(ctx context.Context, accountAddress common.Address, blockNumber *big.Int) (*big.Int, error) { ret := _m.Called(ctx, accountAddress, blockNumber) if len(ret) == 0 { @@ -62,7 +62,7 @@ func (_m *MockEvmRpcClient) BalanceAt(ctx context.Context, accountAddress common } // BatchCallContext provides a mock function with given fields: ctx, b -func (_m *MockEvmRpcClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { +func (_m *MockChainClientRPC) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { ret := _m.Called(ctx, b) if len(ret) == 0 { @@ -80,7 +80,7 @@ func (_m *MockEvmRpcClient) BatchCallContext(ctx context.Context, b []rpc.BatchE } // BlockByHash provides a mock function with given fields: ctx, hash -func (_m *MockEvmRpcClient) BlockByHash(ctx context.Context, hash common.Hash) (*types.Head, error) { +func (_m *MockChainClientRPC) BlockByHash(ctx context.Context, hash common.Hash) (*types.Head, error) { ret := _m.Called(ctx, hash) if len(ret) == 0 { @@ -110,7 +110,7 @@ func (_m *MockEvmRpcClient) BlockByHash(ctx context.Context, hash common.Hash) ( } // BlockByHashGeth provides a mock function with given fields: ctx, hash -func (_m *MockEvmRpcClient) BlockByHashGeth(ctx context.Context, hash common.Hash) (*coretypes.Block, error) { +func (_m *MockChainClientRPC) BlockByHashGeth(ctx context.Context, hash common.Hash) (*coretypes.Block, error) { ret := _m.Called(ctx, hash) if len(ret) == 0 { @@ -140,7 +140,7 @@ func (_m *MockEvmRpcClient) BlockByHashGeth(ctx context.Context, hash common.Has } // BlockByNumber provides a mock function with given fields: ctx, number -func (_m *MockEvmRpcClient) BlockByNumber(ctx context.Context, number *big.Int) (*types.Head, error) { +func (_m *MockChainClientRPC) BlockByNumber(ctx context.Context, number *big.Int) (*types.Head, error) { ret := _m.Called(ctx, number) if len(ret) == 0 { @@ -170,7 +170,7 @@ func (_m *MockEvmRpcClient) BlockByNumber(ctx context.Context, number *big.Int) } // BlockByNumberGeth provides a mock function with given fields: ctx, number -func (_m *MockEvmRpcClient) BlockByNumberGeth(ctx context.Context, number *big.Int) (*coretypes.Block, error) { +func (_m *MockChainClientRPC) BlockByNumberGeth(ctx context.Context, number *big.Int) (*coretypes.Block, error) { ret := _m.Called(ctx, number) if len(ret) == 0 { @@ -200,7 +200,7 @@ func (_m *MockEvmRpcClient) BlockByNumberGeth(ctx context.Context, number *big.I } // CallContext provides a mock function with given fields: ctx, result, method, args -func (_m *MockEvmRpcClient) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { +func (_m *MockChainClientRPC) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { var _ca []interface{} _ca = append(_ca, ctx, result, method) _ca = append(_ca, args...) @@ -221,7 +221,7 @@ func (_m *MockEvmRpcClient) CallContext(ctx context.Context, result interface{}, } // CallContract provides a mock function with given fields: ctx, msg, blockNumber -func (_m *MockEvmRpcClient) CallContract(ctx context.Context, msg interface{}, blockNumber *big.Int) ([]byte, error) { +func (_m *MockChainClientRPC) CallContract(ctx context.Context, msg interface{}, blockNumber *big.Int) ([]byte, error) { ret := _m.Called(ctx, msg, blockNumber) if len(ret) == 0 { @@ -251,7 +251,7 @@ func (_m *MockEvmRpcClient) CallContract(ctx context.Context, msg interface{}, b } // ChainID provides a mock function with given fields: ctx -func (_m *MockEvmRpcClient) ChainID(ctx context.Context) (*big.Int, error) { +func (_m *MockChainClientRPC) ChainID(ctx context.Context) (*big.Int, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -281,7 +281,7 @@ func (_m *MockEvmRpcClient) ChainID(ctx context.Context) (*big.Int, error) { } // ClientVersion provides a mock function with given fields: _a0 -func (_m *MockEvmRpcClient) ClientVersion(_a0 context.Context) (string, error) { +func (_m *MockChainClientRPC) ClientVersion(_a0 context.Context) (string, error) { ret := _m.Called(_a0) if len(ret) == 0 { @@ -309,12 +309,12 @@ func (_m *MockEvmRpcClient) ClientVersion(_a0 context.Context) (string, error) { } // Close provides a mock function with given fields: -func (_m *MockEvmRpcClient) Close() { +func (_m *MockChainClientRPC) Close() { _m.Called() } // CodeAt provides a mock function with given fields: ctx, account, blockNumber -func (_m *MockEvmRpcClient) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) { +func (_m *MockChainClientRPC) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) { ret := _m.Called(ctx, account, blockNumber) if len(ret) == 0 { @@ -344,7 +344,7 @@ func (_m *MockEvmRpcClient) CodeAt(ctx context.Context, account common.Address, } // Dial provides a mock function with given fields: ctx -func (_m *MockEvmRpcClient) Dial(ctx context.Context) error { +func (_m *MockChainClientRPC) Dial(ctx context.Context) error { ret := _m.Called(ctx) if len(ret) == 0 { @@ -362,7 +362,7 @@ func (_m *MockEvmRpcClient) Dial(ctx context.Context) error { } // DialHTTP provides a mock function with given fields: -func (_m *MockEvmRpcClient) DialHTTP() error { +func (_m *MockChainClientRPC) DialHTTP() error { ret := _m.Called() if len(ret) == 0 { @@ -380,12 +380,12 @@ func (_m *MockEvmRpcClient) DialHTTP() error { } // DisconnectAll provides a mock function with given fields: -func (_m *MockEvmRpcClient) DisconnectAll() { +func (_m *MockChainClientRPC) DisconnectAll() { _m.Called() } // EstimateGas provides a mock function with given fields: ctx, call -func (_m *MockEvmRpcClient) EstimateGas(ctx context.Context, call interface{}) (uint64, error) { +func (_m *MockChainClientRPC) EstimateGas(ctx context.Context, call interface{}) (uint64, error) { ret := _m.Called(ctx, call) if len(ret) == 0 { @@ -413,7 +413,7 @@ func (_m *MockEvmRpcClient) EstimateGas(ctx context.Context, call interface{}) ( } // FilterEvents provides a mock function with given fields: ctx, query -func (_m *MockEvmRpcClient) FilterEvents(ctx context.Context, query ethereum.FilterQuery) ([]coretypes.Log, error) { +func (_m *MockChainClientRPC) FilterEvents(ctx context.Context, query ethereum.FilterQuery) ([]coretypes.Log, error) { ret := _m.Called(ctx, query) if len(ret) == 0 { @@ -443,7 +443,7 @@ func (_m *MockEvmRpcClient) FilterEvents(ctx context.Context, query ethereum.Fil } // HeaderByHash provides a mock function with given fields: ctx, h -func (_m *MockEvmRpcClient) HeaderByHash(ctx context.Context, h common.Hash) (*coretypes.Header, error) { +func (_m *MockChainClientRPC) HeaderByHash(ctx context.Context, h common.Hash) (*coretypes.Header, error) { ret := _m.Called(ctx, h) if len(ret) == 0 { @@ -473,7 +473,7 @@ func (_m *MockEvmRpcClient) HeaderByHash(ctx context.Context, h common.Hash) (*c } // HeaderByNumber provides a mock function with given fields: ctx, n -func (_m *MockEvmRpcClient) HeaderByNumber(ctx context.Context, n *big.Int) (*coretypes.Header, error) { +func (_m *MockChainClientRPC) HeaderByNumber(ctx context.Context, n *big.Int) (*coretypes.Header, error) { ret := _m.Called(ctx, n) if len(ret) == 0 { @@ -503,7 +503,7 @@ func (_m *MockEvmRpcClient) HeaderByNumber(ctx context.Context, n *big.Int) (*co } // IsSyncing provides a mock function with given fields: ctx -func (_m *MockEvmRpcClient) IsSyncing(ctx context.Context) (bool, error) { +func (_m *MockChainClientRPC) IsSyncing(ctx context.Context) (bool, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -531,7 +531,7 @@ func (_m *MockEvmRpcClient) IsSyncing(ctx context.Context) (bool, error) { } // LINKBalance provides a mock function with given fields: ctx, accountAddress, linkAddress -func (_m *MockEvmRpcClient) LINKBalance(ctx context.Context, accountAddress common.Address, linkAddress common.Address) (*assets.Link, error) { +func (_m *MockChainClientRPC) LINKBalance(ctx context.Context, accountAddress common.Address, linkAddress common.Address) (*assets.Link, error) { ret := _m.Called(ctx, accountAddress, linkAddress) if len(ret) == 0 { @@ -561,7 +561,7 @@ func (_m *MockEvmRpcClient) LINKBalance(ctx context.Context, accountAddress comm } // LatestBlockHeight provides a mock function with given fields: _a0 -func (_m *MockEvmRpcClient) LatestBlockHeight(_a0 context.Context) (*big.Int, error) { +func (_m *MockChainClientRPC) LatestBlockHeight(_a0 context.Context) (*big.Int, error) { ret := _m.Called(_a0) if len(ret) == 0 { @@ -591,7 +591,7 @@ func (_m *MockEvmRpcClient) LatestBlockHeight(_a0 context.Context) (*big.Int, er } // LatestFinalizedBlock provides a mock function with given fields: ctx -func (_m *MockEvmRpcClient) LatestFinalizedBlock(ctx context.Context) (*types.Head, error) { +func (_m *MockChainClientRPC) LatestFinalizedBlock(ctx context.Context) (*types.Head, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -621,7 +621,7 @@ func (_m *MockEvmRpcClient) LatestFinalizedBlock(ctx context.Context) (*types.He } // PendingCallContract provides a mock function with given fields: ctx, msg -func (_m *MockEvmRpcClient) PendingCallContract(ctx context.Context, msg interface{}) ([]byte, error) { +func (_m *MockChainClientRPC) PendingCallContract(ctx context.Context, msg interface{}) ([]byte, error) { ret := _m.Called(ctx, msg) if len(ret) == 0 { @@ -651,7 +651,7 @@ func (_m *MockEvmRpcClient) PendingCallContract(ctx context.Context, msg interfa } // PendingCodeAt provides a mock function with given fields: ctx, account -func (_m *MockEvmRpcClient) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { +func (_m *MockChainClientRPC) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { ret := _m.Called(ctx, account) if len(ret) == 0 { @@ -681,7 +681,7 @@ func (_m *MockEvmRpcClient) PendingCodeAt(ctx context.Context, account common.Ad } // PendingSequenceAt provides a mock function with given fields: ctx, addr -func (_m *MockEvmRpcClient) PendingSequenceAt(ctx context.Context, addr common.Address) (types.Nonce, error) { +func (_m *MockChainClientRPC) PendingSequenceAt(ctx context.Context, addr common.Address) (types.Nonce, error) { ret := _m.Called(ctx, addr) if len(ret) == 0 { @@ -709,7 +709,7 @@ func (_m *MockEvmRpcClient) PendingSequenceAt(ctx context.Context, addr common.A } // Ping provides a mock function with given fields: _a0 -func (_m *MockEvmRpcClient) Ping(_a0 context.Context) error { +func (_m *MockChainClientRPC) Ping(_a0 context.Context) error { ret := _m.Called(_a0) if len(ret) == 0 { @@ -727,7 +727,7 @@ func (_m *MockEvmRpcClient) Ping(_a0 context.Context) error { } // SendEmptyTransaction provides a mock function with given fields: ctx, newTxAttempt, seq, gasLimit, fee, fromAddress -func (_m *MockEvmRpcClient) SendEmptyTransaction(ctx context.Context, newTxAttempt func(types.Nonce, uint32, *evmassets.Wei, common.Address) (interface{}, error), seq types.Nonce, gasLimit uint32, fee *evmassets.Wei, fromAddress common.Address) (string, error) { +func (_m *MockChainClientRPC) SendEmptyTransaction(ctx context.Context, newTxAttempt func(types.Nonce, uint32, *evmassets.Wei, common.Address) (interface{}, error), seq types.Nonce, gasLimit uint32, fee *evmassets.Wei, fromAddress common.Address) (string, error) { ret := _m.Called(ctx, newTxAttempt, seq, gasLimit, fee, fromAddress) if len(ret) == 0 { @@ -755,7 +755,7 @@ func (_m *MockEvmRpcClient) SendEmptyTransaction(ctx context.Context, newTxAttem } // SendTransaction provides a mock function with given fields: ctx, tx -func (_m *MockEvmRpcClient) SendTransaction(ctx context.Context, tx *coretypes.Transaction) error { +func (_m *MockChainClientRPC) SendTransaction(ctx context.Context, tx *coretypes.Transaction) error { ret := _m.Called(ctx, tx) if len(ret) == 0 { @@ -773,7 +773,7 @@ func (_m *MockEvmRpcClient) SendTransaction(ctx context.Context, tx *coretypes.T } // SequenceAt provides a mock function with given fields: ctx, accountAddress, blockNumber -func (_m *MockEvmRpcClient) SequenceAt(ctx context.Context, accountAddress common.Address, blockNumber *big.Int) (types.Nonce, error) { +func (_m *MockChainClientRPC) SequenceAt(ctx context.Context, accountAddress common.Address, blockNumber *big.Int) (types.Nonce, error) { ret := _m.Called(ctx, accountAddress, blockNumber) if len(ret) == 0 { @@ -801,12 +801,12 @@ func (_m *MockEvmRpcClient) SequenceAt(ctx context.Context, accountAddress commo } // SetAliveLoopSub provides a mock function with given fields: _a0 -func (_m *MockEvmRpcClient) SetAliveLoopSub(_a0 commontypes.Subscription) { +func (_m *MockChainClientRPC) SetAliveLoopSub(_a0 commontypes.Subscription) { _m.Called(_a0) } // SimulateTransaction provides a mock function with given fields: ctx, tx -func (_m *MockEvmRpcClient) SimulateTransaction(ctx context.Context, tx *coretypes.Transaction) error { +func (_m *MockChainClientRPC) SimulateTransaction(ctx context.Context, tx *coretypes.Transaction) error { ret := _m.Called(ctx, tx) if len(ret) == 0 { @@ -824,7 +824,7 @@ func (_m *MockEvmRpcClient) SimulateTransaction(ctx context.Context, tx *coretyp } // SubscribeFilterLogs provides a mock function with given fields: ctx, q, ch -func (_m *MockEvmRpcClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- coretypes.Log) (ethereum.Subscription, error) { +func (_m *MockChainClientRPC) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- coretypes.Log) (ethereum.Subscription, error) { ret := _m.Called(ctx, q, ch) if len(ret) == 0 { @@ -854,7 +854,7 @@ func (_m *MockEvmRpcClient) SubscribeFilterLogs(ctx context.Context, q ethereum. } // SubscribeToFinalizedHeads provides a mock function with given fields: ctx -func (_m *MockEvmRpcClient) SubscribeToFinalizedHeads(ctx context.Context) (<-chan *types.Head, commontypes.Subscription, error) { +func (_m *MockChainClientRPC) SubscribeToFinalizedHeads(ctx context.Context) (<-chan *types.Head, commontypes.Subscription, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -893,7 +893,7 @@ func (_m *MockEvmRpcClient) SubscribeToFinalizedHeads(ctx context.Context) (<-ch } // SubscribeToHeads provides a mock function with given fields: ctx -func (_m *MockEvmRpcClient) SubscribeToHeads(ctx context.Context) (<-chan *types.Head, commontypes.Subscription, error) { +func (_m *MockChainClientRPC) SubscribeToHeads(ctx context.Context) (<-chan *types.Head, commontypes.Subscription, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -932,7 +932,7 @@ func (_m *MockEvmRpcClient) SubscribeToHeads(ctx context.Context) (<-chan *types } // SubscribersCount provides a mock function with given fields: -func (_m *MockEvmRpcClient) SubscribersCount() int32 { +func (_m *MockChainClientRPC) SubscribersCount() int32 { ret := _m.Called() if len(ret) == 0 { @@ -950,7 +950,7 @@ func (_m *MockEvmRpcClient) SubscribersCount() int32 { } // SuggestGasPrice provides a mock function with given fields: ctx -func (_m *MockEvmRpcClient) SuggestGasPrice(ctx context.Context) (*big.Int, error) { +func (_m *MockChainClientRPC) SuggestGasPrice(ctx context.Context) (*big.Int, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -980,7 +980,7 @@ func (_m *MockEvmRpcClient) SuggestGasPrice(ctx context.Context) (*big.Int, erro } // SuggestGasTipCap provides a mock function with given fields: ctx -func (_m *MockEvmRpcClient) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { +func (_m *MockChainClientRPC) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -1010,7 +1010,7 @@ func (_m *MockEvmRpcClient) SuggestGasTipCap(ctx context.Context) (*big.Int, err } // TokenBalance provides a mock function with given fields: ctx, accountAddress, tokenAddress -func (_m *MockEvmRpcClient) TokenBalance(ctx context.Context, accountAddress common.Address, tokenAddress common.Address) (*big.Int, error) { +func (_m *MockChainClientRPC) TokenBalance(ctx context.Context, accountAddress common.Address, tokenAddress common.Address) (*big.Int, error) { ret := _m.Called(ctx, accountAddress, tokenAddress) if len(ret) == 0 { @@ -1040,7 +1040,7 @@ func (_m *MockEvmRpcClient) TokenBalance(ctx context.Context, accountAddress com } // TransactionByHash provides a mock function with given fields: ctx, txHash -func (_m *MockEvmRpcClient) TransactionByHash(ctx context.Context, txHash common.Hash) (*coretypes.Transaction, error) { +func (_m *MockChainClientRPC) TransactionByHash(ctx context.Context, txHash common.Hash) (*coretypes.Transaction, error) { ret := _m.Called(ctx, txHash) if len(ret) == 0 { @@ -1070,7 +1070,7 @@ func (_m *MockEvmRpcClient) TransactionByHash(ctx context.Context, txHash common } // TransactionReceipt provides a mock function with given fields: ctx, txHash -func (_m *MockEvmRpcClient) TransactionReceipt(ctx context.Context, txHash common.Hash) (*coretypes.Receipt, error) { +func (_m *MockChainClientRPC) TransactionReceipt(ctx context.Context, txHash common.Hash) (*coretypes.Receipt, error) { ret := _m.Called(ctx, txHash) if len(ret) == 0 { @@ -1100,7 +1100,7 @@ func (_m *MockEvmRpcClient) TransactionReceipt(ctx context.Context, txHash commo } // TransactionReceiptGeth provides a mock function with given fields: ctx, txHash -func (_m *MockEvmRpcClient) TransactionReceiptGeth(ctx context.Context, txHash common.Hash) (*coretypes.Receipt, error) { +func (_m *MockChainClientRPC) TransactionReceiptGeth(ctx context.Context, txHash common.Hash) (*coretypes.Receipt, error) { ret := _m.Called(ctx, txHash) if len(ret) == 0 { @@ -1130,7 +1130,7 @@ func (_m *MockEvmRpcClient) TransactionReceiptGeth(ctx context.Context, txHash c } // UnsubscribeAllExcept provides a mock function with given fields: subs -func (_m *MockEvmRpcClient) UnsubscribeAllExcept(subs ...commontypes.Subscription) { +func (_m *MockChainClientRPC) UnsubscribeAllExcept(subs ...commontypes.Subscription) { _va := make([]interface{}, len(subs)) for _i := range subs { _va[_i] = subs[_i] @@ -1141,17 +1141,17 @@ func (_m *MockEvmRpcClient) UnsubscribeAllExcept(subs ...commontypes.Subscriptio } // UnsubscribeAllExceptAliveLoop provides a mock function with given fields: -func (_m *MockEvmRpcClient) UnsubscribeAllExceptAliveLoop() { +func (_m *MockChainClientRPC) UnsubscribeAllExceptAliveLoop() { _m.Called() } -// NewMockEvmRpcClient creates a new instance of MockEvmRpcClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// NewMockChainClientRPC creates a new instance of MockChainClientRPC. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. -func NewMockEvmRpcClient(t interface { +func NewMockChainClientRPC(t interface { mock.TestingT Cleanup(func()) -}) *MockEvmRpcClient { - mock := &MockEvmRpcClient{} +}) *MockChainClientRPC { + mock := &MockChainClientRPC{} mock.Mock.Test(t) t.Cleanup(func() { mock.AssertExpectations(t) }) diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index 2674bd9dcbe..ca7c3118464 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -34,8 +34,8 @@ import ( ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" ) -//go:generate mockery --quiet --name EvmRpcClient --structname MockEvmRpcClient --filename "mock_evm_rpc_client_test.go" --inpackage --case=underscore -type EvmRpcClient interface { +//go:generate mockery --quiet --name ChainClientRPC --structname MockChainClientRPC --filename "mock_chain_client_rpc_test.go" --inpackage --case=underscore +type ChainClientRPC interface { commonclient.RPCClient[*big.Int, *evmtypes.Head] BalanceAt(ctx context.Context, accountAddress common.Address, blockNumber *big.Int) (*big.Int, error) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error @@ -189,7 +189,7 @@ func NewRPCClient( id int32, chainID *big.Int, tier commonclient.NodeTier, -) EvmRpcClient { +) ChainClientRPC { r := new(RpcClient) r.cfg = cfg r.name = name From 107a767fb22e1ff00c08e7995e0294fc59ed03de Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 12 Jun 2024 12:58:37 -0400 Subject: [PATCH 34/58] Remove Multinode as interface --- common/client/multi_node.go | 45 ++++++++------------------ common/client/multi_node_test.go | 6 ++-- core/chains/evm/client/chain_client.go | 2 +- 3 files changed, 18 insertions(+), 35 deletions(-) diff --git a/common/client/multi_node.go b/common/client/multi_node.go index a7f1cba0393..bce7c87d8f3 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -36,23 +36,6 @@ var ( type MultiNode[ CHAIN_ID types.ID, RPC_CLIENT any, -] interface { - Dial(ctx context.Context) error - ChainID() CHAIN_ID - // SelectRPC - returns the best healthy RPCClient - SelectRPC() (RPC_CLIENT, error) - // DoAll - calls `do` sequentially on all healthy RPCClients. - // `do` can abort subsequent calls by returning `false`. - // Returns error if `do` was not called or context returns an error. - DoAll(ctx context.Context, do func(ctx context.Context, rpc RPC_CLIENT, isSendOnly bool) bool) error - // NodeStates - returns RPCs' states - NodeStates() map[string]NodeState - Close() error -} - -type multiNode[ - CHAIN_ID types.ID, - RPC_CLIENT any, ] struct { services.StateMachine primaryNodes []Node[CHAIN_ID, RPC_CLIENT] @@ -84,12 +67,12 @@ func NewMultiNode[ sendOnlyNodes []SendOnlyNode[CHAIN_ID, RPC_CLIENT], chainID CHAIN_ID, // configured chain ID (used to verify that passed primaryNodes belong to the same chain) chainFamily string, // name of the chain family - used in the metrics -) MultiNode[CHAIN_ID, RPC_CLIENT] { +) *MultiNode[CHAIN_ID, RPC_CLIENT] { nodeSelector := newNodeSelector(selectionMode, primaryNodes) // Prometheus' default interval is 15s, set this to under 7.5s to avoid // aliasing (see: https://en.wikipedia.org/wiki/Nyquist_frequency) const reportInterval = 6500 * time.Millisecond - c := &multiNode[CHAIN_ID, RPC_CLIENT]{ + c := &MultiNode[CHAIN_ID, RPC_CLIENT]{ primaryNodes: primaryNodes, sendOnlyNodes: sendOnlyNodes, chainID: chainID, @@ -107,11 +90,11 @@ func NewMultiNode[ return c } -func (c *multiNode[CHAIN_ID, RPC_CLIENT]) ChainID() CHAIN_ID { +func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) ChainID() CHAIN_ID { return c.chainID } -func (c *multiNode[CHAIN_ID, RPC_CLIENT]) DoAll(ctx context.Context, do func(ctx context.Context, rpc RPC_CLIENT, isSendOnly bool) bool) error { +func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) DoAll(ctx context.Context, do func(ctx context.Context, rpc RPC_CLIENT, isSendOnly bool) bool) error { callsCompleted := 0 for _, n := range c.primaryNodes { if ctx.Err() != nil { @@ -140,7 +123,7 @@ func (c *multiNode[CHAIN_ID, RPC_CLIENT]) DoAll(ctx context.Context, do func(ctx return nil } -func (c *multiNode[CHAIN_ID, RPC_CLIENT]) NodeStates() map[string]NodeState { +func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) NodeStates() map[string]NodeState { states := map[string]NodeState{} for _, n := range c.primaryNodes { states[n.String()] = n.State() @@ -155,7 +138,7 @@ func (c *multiNode[CHAIN_ID, RPC_CLIENT]) NodeStates() map[string]NodeState { // // Nodes handle their own redialing and runloops, so this function does not // return any error if the nodes aren't available -func (c *multiNode[CHAIN_ID, RPC_CLIENT]) Dial(ctx context.Context) error { +func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) Dial(ctx context.Context) error { return c.StartOnce("MultiNode", func() (merr error) { if len(c.primaryNodes) == 0 { return fmt.Errorf("no available nodes for chain %s", c.chainID.String()) @@ -204,7 +187,7 @@ func (c *multiNode[CHAIN_ID, RPC_CLIENT]) Dial(ctx context.Context) error { } // Close tears down the MultiNode and closes all nodes -func (c *multiNode[CHAIN_ID, RPC_CLIENT]) Close() error { +func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) Close() error { return c.StopOnce("MultiNode", func() error { close(c.chStop) c.wg.Wait() @@ -215,7 +198,7 @@ func (c *multiNode[CHAIN_ID, RPC_CLIENT]) Close() error { // SelectRPC returns an RPC of an active node. If there are no active nodes it returns an error. // Call this method from your chain-specific client implementation to access any chain-specific rpc calls. -func (c *multiNode[CHAIN_ID, RPC_CLIENT]) SelectRPC() (rpc RPC_CLIENT, err error) { +func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) SelectRPC() (rpc RPC_CLIENT, err error) { n, err := c.selectNode() if err != nil { return rpc, err @@ -224,7 +207,7 @@ func (c *multiNode[CHAIN_ID, RPC_CLIENT]) SelectRPC() (rpc RPC_CLIENT, err error } // selectNode returns the active Node, if it is still NodeStateAlive, otherwise it selects a new one from the NodeSelector. -func (c *multiNode[CHAIN_ID, RPC_CLIENT]) selectNode() (node Node[CHAIN_ID, RPC_CLIENT], err error) { +func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) selectNode() (node Node[CHAIN_ID, RPC_CLIENT], err error) { c.activeMu.RLock() node = c.activeNode c.activeMu.RUnlock() @@ -254,7 +237,7 @@ func (c *multiNode[CHAIN_ID, RPC_CLIENT]) selectNode() (node Node[CHAIN_ID, RPC_ // nLiveNodes returns the number of currently alive nodes, as well as the highest block number and greatest total difficulty. // totalDifficulty will be 0 if all nodes return nil. -func (c *multiNode[CHAIN_ID, RPC_CLIENT]) nLiveNodes() (nLiveNodes int, blockNumber int64, totalDifficulty *big.Int) { +func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) nLiveNodes() (nLiveNodes int, blockNumber int64, totalDifficulty *big.Int) { totalDifficulty = big.NewInt(0) for _, n := range c.primaryNodes { if s, chainInfo := n.StateAndLatest(); s == NodeStateAlive { @@ -270,7 +253,7 @@ func (c *multiNode[CHAIN_ID, RPC_CLIENT]) nLiveNodes() (nLiveNodes int, blockNum return } -func (c *multiNode[CHAIN_ID, RPC_CLIENT]) checkLease() { +func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) checkLease() { bestNode := c.nodeSelector.Select() for _, n := range c.primaryNodes { // Terminate client subscriptions. Services are responsible for reconnecting, which will be routed to the new @@ -288,7 +271,7 @@ func (c *multiNode[CHAIN_ID, RPC_CLIENT]) checkLease() { c.activeMu.Unlock() } -func (c *multiNode[CHAIN_ID, RPC_CLIENT]) checkLeaseLoop() { +func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) checkLeaseLoop() { defer c.wg.Done() c.leaseTicker = time.NewTicker(c.leaseDuration) defer c.leaseTicker.Stop() @@ -303,7 +286,7 @@ func (c *multiNode[CHAIN_ID, RPC_CLIENT]) checkLeaseLoop() { } } -func (c *multiNode[CHAIN_ID, RPC_CLIENT]) runLoop() { +func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) runLoop() { defer c.wg.Done() c.report() @@ -321,7 +304,7 @@ func (c *multiNode[CHAIN_ID, RPC_CLIENT]) runLoop() { } } -func (c *multiNode[CHAIN_ID, RPC_CLIENT]) report() { +func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) report() { type nodeWithState struct { Node string State string diff --git a/common/client/multi_node_test.go b/common/client/multi_node_test.go index 7218db1a06e..2f8aa6ff008 100644 --- a/common/client/multi_node_test.go +++ b/common/client/multi_node_test.go @@ -22,7 +22,7 @@ import ( type multiNodeRPCClient RPCClient[types.ID, types.Head[Hashable]] type testMultiNode struct { - *multiNode[types.ID, multiNodeRPCClient] + *MultiNode[types.ID, multiNodeRPCClient] } type multiNodeOpts struct { @@ -43,7 +43,7 @@ func newTestMultiNode(t *testing.T, opts multiNodeOpts) testMultiNode { result := NewMultiNode[types.ID, multiNodeRPCClient]( opts.logger, opts.selectionMode, opts.leaseDuration, opts.nodes, opts.sendonlys, opts.chainID, opts.chainFamily) return testMultiNode{ - result.(*multiNode[types.ID, multiNodeRPCClient]), + result, } } @@ -640,7 +640,7 @@ func TestMultiNode_SendTransaction(t *testing.T) { require.NoError(t, err) require.NoError(t, mn.Close()) err = mn.SendTransaction(tests.Context(t), nil) - require.EqualError(t, err, "aborted while broadcasting tx - multiNode is stopped: context canceled") + require.EqualError(t, err, "aborted while broadcasting tx - MultiNode is stopped: context canceled") }) t.Run("Returns error if there is no healthy primary nodes", func(t *testing.T) { mn := newStartedMultiNode(t, multiNodeOpts{ diff --git a/core/chains/evm/client/chain_client.go b/core/chains/evm/client/chain_client.go index fe2bd36a4b1..e3f7a5559b0 100644 --- a/core/chains/evm/client/chain_client.go +++ b/core/chains/evm/client/chain_client.go @@ -101,7 +101,7 @@ func ContextWithDefaultTimeout() (ctx context.Context, cancel context.CancelFunc } type chainClient struct { - multiNode commonclient.MultiNode[ + multiNode *commonclient.MultiNode[ *big.Int, ChainClientRPC, ] From b291867eedecac65d2431308265c017f87846592 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Wed, 12 Jun 2024 13:26:57 -0400 Subject: [PATCH 35/58] Add PoolChainInfoProvider --- common/client/mock_node_test.go | 5 ++ .../mock_pool_chain_info_provider_test.go | 70 +++++++++++++++++++ common/client/multi_node.go | 44 +++++++++--- common/client/multi_node_test.go | 4 +- common/client/node.go | 8 +++ common/client/types.go | 12 ++++ 6 files changed, 132 insertions(+), 11 deletions(-) create mode 100644 common/client/mock_pool_chain_info_provider_test.go diff --git a/common/client/mock_node_test.go b/common/client/mock_node_test.go index 8e669391b30..4cf399ddffb 100644 --- a/common/client/mock_node_test.go +++ b/common/client/mock_node_test.go @@ -104,6 +104,11 @@ func (_m *mockNode[CHAIN_ID, RPC_CLIENT]) RPC() RPC_CLIENT { return r0 } +// SetPoolChainInfoProvider provides a mock function with given fields: _a0 +func (_m *mockNode[CHAIN_ID, RPC_CLIENT]) SetPoolChainInfoProvider(_a0 PoolChainInfoProvider) { + _m.Called(_a0) +} + // Start provides a mock function with given fields: _a0 func (_m *mockNode[CHAIN_ID, RPC_CLIENT]) Start(_a0 context.Context) error { ret := _m.Called(_a0) diff --git a/common/client/mock_pool_chain_info_provider_test.go b/common/client/mock_pool_chain_info_provider_test.go new file mode 100644 index 00000000000..563641f701d --- /dev/null +++ b/common/client/mock_pool_chain_info_provider_test.go @@ -0,0 +1,70 @@ +// Code generated by mockery v2.42.2. DO NOT EDIT. + +package client + +import mock "github.com/stretchr/testify/mock" + +// mockPoolChainInfoProvider is an autogenerated mock type for the PoolChainInfoProvider type +type mockPoolChainInfoProvider struct { + mock.Mock +} + +// HighestChainInfo provides a mock function with given fields: +func (_m *mockPoolChainInfoProvider) HighestChainInfo() ChainInfo { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HighestChainInfo") + } + + var r0 ChainInfo + if rf, ok := ret.Get(0).(func() ChainInfo); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(ChainInfo) + } + + return r0 +} + +// LatestChainInfo provides a mock function with given fields: +func (_m *mockPoolChainInfoProvider) LatestChainInfo() (int, ChainInfo) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LatestChainInfo") + } + + var r0 int + var r1 ChainInfo + if rf, ok := ret.Get(0).(func() (int, ChainInfo)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func() ChainInfo); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(ChainInfo) + } + + return r0, r1 +} + +// newMockPoolChainInfoProvider creates a new instance of mockPoolChainInfoProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newMockPoolChainInfoProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *mockPoolChainInfoProvider { + mock := &mockPoolChainInfoProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/client/multi_node.go b/common/client/multi_node.go index bce7c87d8f3..28b56910352 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -134,6 +134,39 @@ func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) NodeStates() map[string]NodeState { return states } +// LatestChainInfo - returns number of live nodes available in the pool, so we can prevent the last alive node in a pool from being marked as out-of-sync. +// Return highest ChainInfo most recently received by the alive nodes. +// E.g. If Node A's the most recent block is 10 and highest 15 and for Node B it's - 12 and 14. This method will return 12. +func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) LatestChainInfo() (int, ChainInfo) { + var nLiveNodes int + ch := ChainInfo{ + BlockDifficulty: big.NewInt(0), + } + for _, n := range c.primaryNodes { + if s, nodeChainInfo := n.StateAndLatest(); s == NodeStateAlive { + nLiveNodes++ + ch.BlockNumber = max(ch.BlockNumber, nodeChainInfo.BlockNumber) + ch.LatestFinalizedBlock = max(ch.LatestFinalizedBlock, nodeChainInfo.LatestFinalizedBlock) + ch.BlockDifficulty = nodeChainInfo.BlockDifficulty + } + } + return nLiveNodes, ch +} + +// HighestChainInfo - returns highest ChainInfo ever observed by any node in the pool. +func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) HighestChainInfo() ChainInfo { + ch := ChainInfo{ + BlockDifficulty: big.NewInt(0), + } + for _, n := range c.primaryNodes { + _, nodeChainInfo := n.StateAndLatest() + ch.BlockNumber = max(ch.BlockNumber, nodeChainInfo.BlockNumber) + ch.LatestFinalizedBlock = max(ch.LatestFinalizedBlock, nodeChainInfo.LatestFinalizedBlock) + ch.BlockDifficulty = nodeChainInfo.BlockDifficulty + } + return ch +} + // Dial starts every node in the pool // // Nodes handle their own redialing and runloops, so this function does not @@ -148,16 +181,7 @@ func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) Dial(ctx context.Context) error { if n.ConfiguredChainID().String() != c.chainID.String() { return ms.CloseBecause(fmt.Errorf("node %s has configured chain ID %s which does not match multinode configured chain ID of %s", n.String(), n.ConfiguredChainID().String(), c.chainID.String())) } - /* TODO: Dmytro's PR on local finality handles this better. - rawNode, ok := n.(*node[CHAIN_ID, *evmtypes.Head, RPC_CLIENT]) - if ok { - // This is a bit hacky but it allows the node to be aware of - // pool state and prevent certain state transitions that might - // otherwise leave no primaryNodes available. It is better to have one - // node in a degraded state than no primaryNodes at all. - rawNode.nLiveNodes = c.nLiveNodes - } - */ + n.SetPoolChainInfoProvider(c) // node will handle its own redialing and automatic recovery if err := ms.Start(ctx, n); err != nil { return err diff --git a/common/client/multi_node_test.go b/common/client/multi_node_test.go index 2f8aa6ff008..3981e05a3cc 100644 --- a/common/client/multi_node_test.go +++ b/common/client/multi_node_test.go @@ -58,6 +58,7 @@ func newNodeWithState(t *testing.T, chainID types.ID, state NodeState) *mockNode node.On("Close").Return(nil).Once() node.On("State").Return(state).Maybe() node.On("String").Return(fmt.Sprintf("healthy_node_%d", rand.Int())).Maybe() + node.On("SetPoolChainInfoProvider", mock.Anything).Once() return node } func TestMultiNode_Dial(t *testing.T) { @@ -98,6 +99,7 @@ func TestMultiNode_Dial(t *testing.T) { node.On("ConfiguredChainID").Return(chainID).Once() expectedError := errors.New("failed to start node") node.On("Start", mock.Anything).Return(expectedError).Once() + node.On("SetPoolChainInfoProvider", mock.Anything).Once() mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, chainID: chainID, @@ -115,6 +117,7 @@ func TestMultiNode_Dial(t *testing.T) { node2.On("ConfiguredChainID").Return(chainID).Once() expectedError := errors.New("failed to start node") node2.On("Start", mock.Anything).Return(expectedError).Once() + node2.On("SetPoolChainInfoProvider", mock.Anything).Once() mn := newTestMultiNode(t, multiNodeOpts{ selectionMode: NodeSelectionModeRoundRobin, @@ -270,7 +273,6 @@ func TestMultiNode_CheckLease(t *testing.T) { t.Parallel() chainID := types.RandomID() node := newHealthyNode(t, chainID) - //node.On("SubscribersCount").Return(int32(2)) node.On("UnsubscribeAll") bestNode := newHealthyNode(t, chainID) nodeSelector := newMockNodeSelector[types.ID, multiNodeRPCClient](t) diff --git a/common/client/node.go b/common/client/node.go index 593665bf970..edb05cd9a12 100644 --- a/common/client/node.go +++ b/common/client/node.go @@ -71,6 +71,7 @@ type Node[ State() NodeState // StateAndLatest returns health state with the latest received block number & total difficulty. StateAndLatest() (NodeState, ChainInfo) + SetPoolChainInfoProvider(PoolChainInfoProvider) // Name is a unique identifier for this node. Name() string // String - returns string representation of the node, useful for debugging (name + URLS used to connect to the RPC) @@ -110,6 +111,9 @@ type node[ stateMu sync.RWMutex // protects state* fields state NodeState + + poolInfoProvider PoolChainInfoProvider + // Each node is tracking the last received head number and total difficulty stateLatestBlockNumber int64 stateLatestTotalDifficulty *big.Int @@ -173,6 +177,10 @@ func NewNode[ return n } +func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) SetPoolChainInfoProvider(poolInfoProvider PoolChainInfoProvider) { + n.poolInfoProvider = poolInfoProvider +} + func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) String() string { s := fmt.Sprintf("(%s)%s:%s", Primary.String(), n.name, n.ws.String()) if n.http != nil { diff --git a/common/client/types.go b/common/client/types.go index 74b9408e475..2dbc1d568a5 100644 --- a/common/client/types.go +++ b/common/client/types.go @@ -10,6 +10,18 @@ import ( "github.com/smartcontractkit/chainlink/v2/common/types" ) +// PoolChainInfoProvider - provides aggregation of nodes pool ChainInfo +// +//go:generate mockery --quiet --name PoolChainInfoProvider --structname mockPoolChainInfoProvider --filename "mock_pool_chain_info_provider_test.go" --inpackage --case=underscore +type PoolChainInfoProvider interface { + // LatestChainInfo - returns number of live nodes available in the pool, so we can prevent the last alive node in a pool from being. + // Return highest latest ChainInfo within the alive nodes. E.g. most recent block number and highest block number + // observed by Node A are 10 and 15; Node B - 12 and 14. This method will return 12. + LatestChainInfo() (int, ChainInfo) + // HighestChainInfo - returns highest ChainInfo ever observed by any node in the pool. + HighestChainInfo() ChainInfo +} + // RPCClient includes all the necessary generalized RPC methods along with any additional chain-specific methods. // //go:generate mockery --quiet --name RPCClient --structname MockRPCClient --filename "mock_rpc_client_test.go" --inpackage --case=underscore From f7425d92d7a1098aa53a838ddc2261691d30b2d2 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Tue, 18 Jun 2024 09:57:03 -0400 Subject: [PATCH 36/58] Setup SendOnly nodes --- core/chains/evm/client/evm_client.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/core/chains/evm/client/evm_client.go b/core/chains/evm/client/evm_client.go index e8207e25088..6a36b8567a2 100644 --- a/core/chains/evm/client/evm_client.go +++ b/core/chains/evm/client/evm_client.go @@ -10,7 +10,6 @@ import ( evmconfig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/chaintype" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml" - evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" ) func NewEvmClient(cfg evmconfig.NodePool, chainCfg commonclient.ChainConfig, clientErrors evmconfig.ClientErrors, lggr logger.Logger, chainID *big.Int, nodes []*toml.Node, chainType chaintype.ChainType) Client { @@ -18,16 +17,19 @@ func NewEvmClient(cfg evmconfig.NodePool, chainCfg commonclient.ChainConfig, cli var primaries []commonclient.Node[*big.Int, ChainClientRPC] var sendonlys []commonclient.SendOnlyNode[*big.Int, ChainClientRPC] for i, node := range nodes { - rpc := NewRPCClient(cfg, lggr, empty, (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, - commonclient.Secondary) - newNode := commonclient.NewNode[*big.Int, *evmtypes.Head, ChainClientRPC](cfg, chainCfg, - lggr, (url.URL)(*node.WSURL), (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, *node.Order, - rpc, "EVM") - if node.SendOnly != nil && *node.SendOnly { - sendonlys = append(sendonlys, newNode) + rpc := NewRPCClient(cfg, lggr, empty, (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, + commonclient.Secondary) + sendonly := commonclient.NewSendOnlyNode(lggr, (url.URL)(*node.HTTPURL), + *node.Name, chainID, rpc) + sendonlys = append(sendonlys, sendonly) } else { - primaries = append(primaries, newNode) + rpc := NewRPCClient(cfg, lggr, (url.URL)(*node.WSURL), (*url.URL)(node.HTTPURL), *node.Name, int32(i), + chainID, commonclient.Primary) + primaryNode := commonclient.NewNode(cfg, chainCfg, + lggr, (url.URL)(*node.WSURL), (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, *node.Order, + rpc, "EVM") + primaries = append(primaries, primaryNode) } } From cd3fdc97331dc395435108d5b5dc15aa9228fa4e Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Tue, 18 Jun 2024 12:57:27 -0400 Subject: [PATCH 37/58] Test empty context --- common/client/multi_node.go | 2 ++ common/client/node.go | 2 ++ core/chains/evm/client/chain_client.go | 2 ++ 3 files changed, 6 insertions(+) diff --git a/common/client/multi_node.go b/common/client/multi_node.go index 28b56910352..3f3bcca1ceb 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -172,6 +172,8 @@ func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) HighestChainInfo() ChainInfo { // Nodes handle their own redialing and runloops, so this function does not // return any error if the nodes aren't available func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) Dial(ctx context.Context) error { + fmt.Println("MULTINODE DIAL") + ctx = context.Background() // TODO: remove this line return c.StartOnce("MultiNode", func() (merr error) { if len(c.primaryNodes) == 0 { return fmt.Errorf("no available nodes for chain %s", c.chainID.String()) diff --git a/common/client/node.go b/common/client/node.go index edb05cd9a12..0ba84c4984c 100644 --- a/common/client/node.go +++ b/common/client/node.go @@ -244,6 +244,8 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) Start(startCtx context.Context) error // Node lifecycle is synchronous: only one goroutine should be running at a // time. func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) start(startCtx context.Context) { + fmt.Println("NODE START") + startCtx = context.Background() // TODO: remove this line if n.state != NodeStateUndialed { panic(fmt.Sprintf("cannot dial node with state %v", n.state)) } diff --git a/core/chains/evm/client/chain_client.go b/core/chains/evm/client/chain_client.go index e3f7a5559b0..712dd1ae0d0 100644 --- a/core/chains/evm/client/chain_client.go +++ b/core/chains/evm/client/chain_client.go @@ -2,6 +2,7 @@ package client import ( "context" + "fmt" "math/big" "sync" "time" @@ -258,6 +259,7 @@ func (c *chainClient) ConfiguredChainID() *big.Int { } func (c *chainClient) Dial(ctx context.Context) error { + fmt.Println("CHAINCLIENT DIAL") // TODO: REMOVE return c.multiNode.Dial(ctx) } From 252c4882749808a248198dfe84c11338f10ed21d Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Tue, 18 Jun 2024 13:30:38 -0400 Subject: [PATCH 38/58] Add err to log --- common/client/node_lifecycle.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index bdecf6181f7..98862438c41 100644 --- a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -109,7 +109,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { headsC, sub, err := n.rpc.SubscribeToHeads(ctx) if err != nil { - lggr.Errorw("Initial subscribe for heads failed", "nodeState", n.State()) + lggr.Errorw("Initial subscribe for heads failed", "err", err, "nodeState", n.State()) n.declareUnreachable() return } From e50e1f3371e19b4478464d181354ce0e412e6894 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Tue, 18 Jun 2024 14:17:59 -0400 Subject: [PATCH 39/58] Add rpc newHeads method --- common/client/multi_node.go | 1 - common/client/node.go | 1 - common/client/node_lifecycle.go | 2 -- core/chains/evm/client/chain_client.go | 2 -- core/chains/evm/client/chain_client_test.go | 4 ++- core/chains/evm/client/rpc_client.go | 4 ++- tools/bin/go_core_race_tests_updated | 36 --------------------- 7 files changed, 6 insertions(+), 44 deletions(-) delete mode 100755 tools/bin/go_core_race_tests_updated diff --git a/common/client/multi_node.go b/common/client/multi_node.go index 3f3bcca1ceb..019807eb890 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -172,7 +172,6 @@ func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) HighestChainInfo() ChainInfo { // Nodes handle their own redialing and runloops, so this function does not // return any error if the nodes aren't available func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) Dial(ctx context.Context) error { - fmt.Println("MULTINODE DIAL") ctx = context.Background() // TODO: remove this line return c.StartOnce("MultiNode", func() (merr error) { if len(c.primaryNodes) == 0 { diff --git a/common/client/node.go b/common/client/node.go index 0ba84c4984c..d3299ba7143 100644 --- a/common/client/node.go +++ b/common/client/node.go @@ -244,7 +244,6 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) Start(startCtx context.Context) error // Node lifecycle is synchronous: only one goroutine should be running at a // time. func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) start(startCtx context.Context) { - fmt.Println("NODE START") startCtx = context.Background() // TODO: remove this line if n.state != NodeStateUndialed { panic(fmt.Sprintf("cannot dial node with state %v", n.state)) diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index 98862438c41..0a16c83107d 100644 --- a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -74,8 +74,6 @@ const ( msgDegradedState = "Chainlink is now operating in a degraded state and urgent action is required to resolve the issue" ) -// const rpcSubscriptionMethodNewHeads = "newHeads" - // Node is a FSM // Each state has a loop that goes with it, which monitors the node and moves it into another state as necessary. // Only one loop must run at a time. diff --git a/core/chains/evm/client/chain_client.go b/core/chains/evm/client/chain_client.go index 712dd1ae0d0..e3f7a5559b0 100644 --- a/core/chains/evm/client/chain_client.go +++ b/core/chains/evm/client/chain_client.go @@ -2,7 +2,6 @@ package client import ( "context" - "fmt" "math/big" "sync" "time" @@ -259,7 +258,6 @@ func (c *chainClient) ConfiguredChainID() *big.Int { } func (c *chainClient) Dial(ctx context.Context) error { - fmt.Println("CHAINCLIENT DIAL") // TODO: REMOVE return c.multiNode.Dial(ctx) } diff --git a/core/chains/evm/client/chain_client_test.go b/core/chains/evm/client/chain_client_test.go index 5addd0a5bfc..2a050aa1472 100644 --- a/core/chains/evm/client/chain_client_test.go +++ b/core/chains/evm/client/chain_client_test.go @@ -704,6 +704,7 @@ func (x *sendTxService) SendRawTransaction(ctx context.Context, signRawTx hexuti x.sentCount.Add(1) return nil } +*/ func TestEthClient_SubscribeNewHead(t *testing.T) { t.Parallel() @@ -727,10 +728,12 @@ func TestEthClient_SubscribeNewHead(t *testing.T) { ethClient := mustNewChainClientWithChainID(t, wsURL, chainId) err := ethClient.Dial(tests.Context(t)) + fmt.Println("DIALLED!!") require.NoError(t, err) headCh, sub, err := ethClient.SubscribeNewHead(ctx) require.NoError(t, err) + fmt.Println("SUBSCRIBED!!") select { case err := <-sub.Err(): @@ -743,7 +746,6 @@ func TestEthClient_SubscribeNewHead(t *testing.T) { } sub.Unsubscribe() } -*/ func newMockRpc(t *testing.T) *client.MockChainClientRPC { mockRpc := client.NewMockChainClientRPC(t) diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index ca7c3118464..fc427b2993a 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -117,6 +117,8 @@ var ( }, []string{"evmChainID", "nodeName", "rpcHost", "isSendOnly", "success", "rpcCallName"}) ) +const rpcSubscriptionMethodNewHeads = "newHeads" + // RPCClient includes all the necessary generalized RPC methods along with any additional chain-specific methods. // //go:generate mockery --quiet --name RPCClient --output ./mocks --case=underscore @@ -215,7 +217,7 @@ func NewRPCClient( func (r *RpcClient) SubscribeToHeads(ctx context.Context) (<-chan *evmtypes.Head, commontypes.Subscription, error) { channel := make(chan *evmtypes.Head) - sub, err := r.subscribe(ctx, channel) + sub, err := r.subscribe(ctx, channel, rpcSubscriptionMethodNewHeads) return channel, sub, err } diff --git a/tools/bin/go_core_race_tests_updated b/tools/bin/go_core_race_tests_updated deleted file mode 100755 index 55b9182a8e9..00000000000 --- a/tools/bin/go_core_race_tests_updated +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash -set -ex - -OUTPUT_FILE=${OUTPUT_FILE:-"./output.txt"} -USE_TEE="${USE_TEE:-true}" -TIMEOUT="${TIMEOUT:-30s}" -COUNT="${COUNT:-10}" -GO_LDFLAGS=$(bash tools/bin/ldflags) - -use_tee() { - if [ "$USE_TEE" = "true" ]; then - tee "$@" - else - cat > "$@" - fi -} - -# Run the tests with the race detector enabled, silencing the test output -GORACE="log_path=$PWD/race" go test -json -race -ldflags "$GO_LDFLAGS" -shuffle on -timeout "$TIMEOUT" -count "$COUNT" $1 > /dev/null | use_tee "$OUTPUT_FILE" -EXITCODE=${PIPESTATUS[0]} - -# Fail if any race logs are present and display the race logs -if ls race.* &>/dev/null -then - echo "Race(s) detected:" - cat race.* - exit 1 -fi - -# Exit with the appropriate exit code -if test $EXITCODE -gt 1 -then - exit $EXITCODE -else - exit 0 -fi From caa83e6ebb76ab935c6e6b45e894b2b278966c1c Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Tue, 18 Jun 2024 14:39:15 -0400 Subject: [PATCH 40/58] Fix context --- common/client/multi_node.go | 1 - common/client/node.go | 1 - core/chains/evm/client/chain_client_test.go | 2 -- 3 files changed, 4 deletions(-) diff --git a/common/client/multi_node.go b/common/client/multi_node.go index 019807eb890..28b56910352 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -172,7 +172,6 @@ func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) HighestChainInfo() ChainInfo { // Nodes handle their own redialing and runloops, so this function does not // return any error if the nodes aren't available func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) Dial(ctx context.Context) error { - ctx = context.Background() // TODO: remove this line return c.StartOnce("MultiNode", func() (merr error) { if len(c.primaryNodes) == 0 { return fmt.Errorf("no available nodes for chain %s", c.chainID.String()) diff --git a/common/client/node.go b/common/client/node.go index d3299ba7143..edb05cd9a12 100644 --- a/common/client/node.go +++ b/common/client/node.go @@ -244,7 +244,6 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) Start(startCtx context.Context) error // Node lifecycle is synchronous: only one goroutine should be running at a // time. func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) start(startCtx context.Context) { - startCtx = context.Background() // TODO: remove this line if n.state != NodeStateUndialed { panic(fmt.Sprintf("cannot dial node with state %v", n.state)) } diff --git a/core/chains/evm/client/chain_client_test.go b/core/chains/evm/client/chain_client_test.go index 2a050aa1472..ef21c303a37 100644 --- a/core/chains/evm/client/chain_client_test.go +++ b/core/chains/evm/client/chain_client_test.go @@ -728,12 +728,10 @@ func TestEthClient_SubscribeNewHead(t *testing.T) { ethClient := mustNewChainClientWithChainID(t, wsURL, chainId) err := ethClient.Dial(tests.Context(t)) - fmt.Println("DIALLED!!") require.NoError(t, err) headCh, sub, err := ethClient.SubscribeNewHead(ctx) require.NoError(t, err) - fmt.Println("SUBSCRIBED!!") select { case err := <-sub.Err(): From 7cd64ef1217db0af0144dfcf1328f472db941d12 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Tue, 18 Jun 2024 15:41:57 -0400 Subject: [PATCH 41/58] Changeset --- .changeset/orange-feet-share.md | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 .changeset/orange-feet-share.md diff --git a/.changeset/orange-feet-share.md b/.changeset/orange-feet-share.md new file mode 100644 index 00000000000..a2c050e2c51 --- /dev/null +++ b/.changeset/orange-feet-share.md @@ -0,0 +1,8 @@ +--- +"chainlink": minor +--- + +Implemented new EVM Multinode design. The Multinode is now called by chain clients to retrieve the best healthy RPC rather than performing RPC calls directly. +Multinode performs verious health checks on RPCs, and in turn increases reliability. +This new EVM Multinode design will also be implemented for non-EVMs chains in the future. +#updated #changed From 511a7a21a9c616a5579c871387e72f31dda56404 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Tue, 18 Jun 2024 16:00:11 -0400 Subject: [PATCH 42/58] Remove unused mocks --- common/client/mock_node_client_test.go | 248 ------------------------- common/client/types.go | 2 - 2 files changed, 250 deletions(-) delete mode 100644 common/client/mock_node_client_test.go diff --git a/common/client/mock_node_client_test.go b/common/client/mock_node_client_test.go deleted file mode 100644 index ec83158a5ff..00000000000 --- a/common/client/mock_node_client_test.go +++ /dev/null @@ -1,248 +0,0 @@ -// Code generated by mockery v2.42.2. DO NOT EDIT. - -package client - -import ( - context "context" - - types "github.com/smartcontractkit/chainlink/v2/common/types" - mock "github.com/stretchr/testify/mock" -) - -// mockNodeClient is an autogenerated mock type for the NodeClient type -type mockNodeClient[CHAIN_ID types.ID, HEAD Head] struct { - mock.Mock -} - -// ChainID provides a mock function with given fields: ctx -func (_m *mockNodeClient[CHAIN_ID, HEAD]) ChainID(ctx context.Context) (CHAIN_ID, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for ChainID") - } - - var r0 CHAIN_ID - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (CHAIN_ID, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) CHAIN_ID); ok { - r0 = rf(ctx) - } else { - r0 = ret.Get(0).(CHAIN_ID) - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ClientVersion provides a mock function with given fields: _a0 -func (_m *mockNodeClient[CHAIN_ID, HEAD]) ClientVersion(_a0 context.Context) (string, error) { - ret := _m.Called(_a0) - - if len(ret) == 0 { - panic("no return value specified for ClientVersion") - } - - var r0 string - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (string, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(context.Context) string); ok { - r0 = rf(_a0) - } else { - r0 = ret.Get(0).(string) - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Close provides a mock function with given fields: -func (_m *mockNodeClient[CHAIN_ID, HEAD]) Close() { - _m.Called() -} - -// Dial provides a mock function with given fields: ctx -func (_m *mockNodeClient[CHAIN_ID, HEAD]) Dial(ctx context.Context) error { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for Dial") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context) error); ok { - r0 = rf(ctx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DialHTTP provides a mock function with given fields: -func (_m *mockNodeClient[CHAIN_ID, HEAD]) DialHTTP() error { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for DialHTTP") - } - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DisconnectAll provides a mock function with given fields: -func (_m *mockNodeClient[CHAIN_ID, HEAD]) DisconnectAll() { - _m.Called() -} - -// IsSyncing provides a mock function with given fields: ctx -func (_m *mockNodeClient[CHAIN_ID, HEAD]) IsSyncing(ctx context.Context) (bool, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for IsSyncing") - } - - var r0 bool - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (bool, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) bool); ok { - r0 = rf(ctx) - } else { - r0 = ret.Get(0).(bool) - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// LatestFinalizedBlock provides a mock function with given fields: ctx -func (_m *mockNodeClient[CHAIN_ID, HEAD]) LatestFinalizedBlock(ctx context.Context) (HEAD, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for LatestFinalizedBlock") - } - - var r0 HEAD - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (HEAD, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) HEAD); ok { - r0 = rf(ctx) - } else { - r0 = ret.Get(0).(HEAD) - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// SetAliveLoopSub provides a mock function with given fields: _a0 -func (_m *mockNodeClient[CHAIN_ID, HEAD]) SetAliveLoopSub(_a0 types.Subscription) { - _m.Called(_a0) -} - -// Subscribe provides a mock function with given fields: ctx, channel, args -func (_m *mockNodeClient[CHAIN_ID, HEAD]) Subscribe(ctx context.Context, channel chan<- HEAD, args ...interface{}) (types.Subscription, error) { - var _ca []interface{} - _ca = append(_ca, ctx, channel) - _ca = append(_ca, args...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Subscribe") - } - - var r0 types.Subscription - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, chan<- HEAD, ...interface{}) (types.Subscription, error)); ok { - return rf(ctx, channel, args...) - } - if rf, ok := ret.Get(0).(func(context.Context, chan<- HEAD, ...interface{}) types.Subscription); ok { - r0 = rf(ctx, channel, args...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(types.Subscription) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, chan<- HEAD, ...interface{}) error); ok { - r1 = rf(ctx, channel, args...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// SubscribersCount provides a mock function with given fields: -func (_m *mockNodeClient[CHAIN_ID, HEAD]) SubscribersCount() int32 { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for SubscribersCount") - } - - var r0 int32 - if rf, ok := ret.Get(0).(func() int32); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(int32) - } - - return r0 -} - -// UnsubscribeAllExceptAliveLoop provides a mock function with given fields: -func (_m *mockNodeClient[CHAIN_ID, HEAD]) UnsubscribeAllExceptAliveLoop() { - _m.Called() -} - -// newMockNodeClient creates a new instance of mockNodeClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func newMockNodeClient[CHAIN_ID types.ID, HEAD Head](t interface { - mock.TestingT - Cleanup(func()) -}) *mockNodeClient[CHAIN_ID, HEAD] { - mock := &mockNodeClient[CHAIN_ID, HEAD]{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/common/client/types.go b/common/client/types.go index 2dbc1d568a5..0494e2c433e 100644 --- a/common/client/types.go +++ b/common/client/types.go @@ -94,8 +94,6 @@ type Head interface { } // NodeClient includes all the necessary RPC methods required by a node. -// -//go:generate mockery --quiet --name NodeClient --structname mockNodeClient --filename "mock_node_client_test.go" --inpackage --case=underscore type NodeClient[ CHAIN_ID types.ID, HEAD Head, From 8886d0c6f934e6aeeb7669bcd3f377074c7c8b23 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 27 Jun 2024 16:09:01 -0400 Subject: [PATCH 43/58] Address comments --- .changeset/orange-feet-share.md | 2 +- common/client/multi_node.go | 13 ++++++----- common/client/multi_node_test.go | 24 ++++++++++----------- common/client/node.go | 2 -- common/client/node_lifecycle_test.go | 10 +-------- core/chains/evm/client/chain_client.go | 19 ++-------------- core/chains/evm/client/chain_client_test.go | 4 +--- 7 files changed, 23 insertions(+), 51 deletions(-) diff --git a/.changeset/orange-feet-share.md b/.changeset/orange-feet-share.md index a2c050e2c51..1df7e85ca9e 100644 --- a/.changeset/orange-feet-share.md +++ b/.changeset/orange-feet-share.md @@ -5,4 +5,4 @@ Implemented new EVM Multinode design. The Multinode is now called by chain clients to retrieve the best healthy RPC rather than performing RPC calls directly. Multinode performs verious health checks on RPCs, and in turn increases reliability. This new EVM Multinode design will also be implemented for non-EVMs chains in the future. -#updated #changed +#updated #changed #internal diff --git a/common/client/multi_node.go b/common/client/multi_node.go index 28b56910352..9824f52e4fc 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -103,12 +103,11 @@ func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) DoAll(ctx context.Context, do func(ctx if n.State() != NodeStateAlive { continue } - if do(ctx, n.RPC(), false) { - callsCompleted++ - } + do(ctx, n.RPC(), false) + callsCompleted++ } if callsCompleted == 0 { - return fmt.Errorf("no calls were completed") + return ErroringNodeError } for _, n := range c.sendOnlyNodes { @@ -118,7 +117,7 @@ func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) DoAll(ctx context.Context, do func(ctx if n.State() != NodeStateAlive { continue } - do(ctx, n.RPC(), false) + do(ctx, n.RPC(), true) } return nil } @@ -167,11 +166,11 @@ func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) HighestChainInfo() ChainInfo { return ch } -// Dial starts every node in the pool +// Start starts every node in the pool // // Nodes handle their own redialing and runloops, so this function does not // return any error if the nodes aren't available -func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) Dial(ctx context.Context) error { +func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) Start(ctx context.Context) error { return c.StartOnce("MultiNode", func() (merr error) { if len(c.primaryNodes) == 0 { return fmt.Errorf("no available nodes for chain %s", c.chainID.String()) diff --git a/common/client/multi_node_test.go b/common/client/multi_node_test.go index 3981e05a3cc..1ff61c7fa83 100644 --- a/common/client/multi_node_test.go +++ b/common/client/multi_node_test.go @@ -73,7 +73,7 @@ func TestMultiNode_Dial(t *testing.T) { selectionMode: NodeSelectionModeRoundRobin, chainID: types.RandomID(), }) - err := mn.Dial(tests.Context(t)) + err := mn.Start(tests.Context(t)) assert.EqualError(t, err, fmt.Sprintf("no available nodes for chain %s", mn.chainID.String())) }) t.Run("Fails with wrong node's chainID", func(t *testing.T) { @@ -89,7 +89,7 @@ func TestMultiNode_Dial(t *testing.T) { chainID: multiNodeChainID, nodes: []Node[types.ID, multiNodeRPCClient]{node}, }) - err := mn.Dial(tests.Context(t)) + err := mn.Start(tests.Context(t)) assert.EqualError(t, err, fmt.Sprintf("node %s has configured chain ID %s which does not match multinode configured chain ID of %s", nodeName, nodeChainID, mn.chainID)) }) t.Run("Fails if node fails", func(t *testing.T) { @@ -105,7 +105,7 @@ func TestMultiNode_Dial(t *testing.T) { chainID: chainID, nodes: []Node[types.ID, multiNodeRPCClient]{node}, }) - err := mn.Dial(tests.Context(t)) + err := mn.Start(tests.Context(t)) assert.EqualError(t, err, expectedError.Error()) }) @@ -124,7 +124,7 @@ func TestMultiNode_Dial(t *testing.T) { chainID: chainID, nodes: []Node[types.ID, multiNodeRPCClient]{node1, node2}, }) - err := mn.Dial(tests.Context(t)) + err := mn.Start(tests.Context(t)) assert.EqualError(t, err, expectedError.Error()) }) t.Run("Fails with wrong send only node's chainID", func(t *testing.T) { @@ -143,7 +143,7 @@ func TestMultiNode_Dial(t *testing.T) { nodes: []Node[types.ID, multiNodeRPCClient]{node}, sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{sendOnly}, }) - err := mn.Dial(tests.Context(t)) + err := mn.Start(tests.Context(t)) assert.EqualError(t, err, fmt.Sprintf("sendonly node %s has configured chain ID %s which does not match multinode configured chain ID of %s", sendOnlyName, sendOnlyChainID, mn.chainID)) }) @@ -170,7 +170,7 @@ func TestMultiNode_Dial(t *testing.T) { nodes: []Node[types.ID, multiNodeRPCClient]{node}, sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{sendOnly1, sendOnly2}, }) - err := mn.Dial(tests.Context(t)) + err := mn.Start(tests.Context(t)) assert.EqualError(t, err, expectedError.Error()) }) t.Run("Starts successfully with healthy nodes", func(t *testing.T) { @@ -184,7 +184,7 @@ func TestMultiNode_Dial(t *testing.T) { sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{newHealthySendOnly(t, chainID)}, }) defer func() { assert.NoError(t, mn.Close()) }() - err := mn.Dial(tests.Context(t)) + err := mn.Start(tests.Context(t)) require.NoError(t, err) selectedNode, err := mn.selectNode() require.NoError(t, err) @@ -208,7 +208,7 @@ func TestMultiNode_Report(t *testing.T) { }) mn.reportInterval = tests.TestInterval defer func() { assert.NoError(t, mn.Close()) }() - err := mn.Dial(tests.Context(t)) + err := mn.Start(tests.Context(t)) require.NoError(t, err) tests.AssertLogCountEventually(t, observedLogs, "At least one primary node is dead: 1/2 nodes are alive", 2) }) @@ -225,7 +225,7 @@ func TestMultiNode_Report(t *testing.T) { }) mn.reportInterval = tests.TestInterval defer func() { assert.NoError(t, mn.Close()) }() - err := mn.Dial(tests.Context(t)) + err := mn.Start(tests.Context(t)) require.NoError(t, err) tests.AssertLogCountEventually(t, observedLogs, "no primary nodes available: 0/1 nodes are alive", 2) err = mn.Healthy() @@ -248,7 +248,7 @@ func TestMultiNode_CheckLease(t *testing.T) { nodes: []Node[types.ID, multiNodeRPCClient]{node}, }) defer func() { assert.NoError(t, mn.Close()) }() - err := mn.Dial(tests.Context(t)) + err := mn.Start(tests.Context(t)) require.NoError(t, err) tests.RequireLogMessage(t, observedLogs, "Best node switching is disabled") }) @@ -265,7 +265,7 @@ func TestMultiNode_CheckLease(t *testing.T) { leaseDuration: 0, }) defer func() { assert.NoError(t, mn.Close()) }() - err := mn.Dial(tests.Context(t)) + err := mn.Start(tests.Context(t)) require.NoError(t, err) tests.RequireLogMessage(t, observedLogs, "Best node switching is disabled") }) @@ -287,7 +287,7 @@ func TestMultiNode_CheckLease(t *testing.T) { }) defer func() { assert.NoError(t, mn.Close()) }() mn.nodeSelector = nodeSelector - err := mn.Dial(tests.Context(t)) + err := mn.Start(tests.Context(t)) require.NoError(t, err) tests.AssertLogEventually(t, observedLogs, fmt.Sprintf("Switching to best node from %q to %q", node.String(), bestNode.String())) tests.AssertEventually(t, func() bool { diff --git a/common/client/node.go b/common/client/node.go index edb05cd9a12..cb06052557c 100644 --- a/common/client/node.go +++ b/common/client/node.go @@ -190,8 +190,6 @@ func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) String() string { } func (n *node[CHAIN_ID, HEAD, RPC_CLIENT]) ConfiguredChainID() (chainID CHAIN_ID) { - n.stateMu.RLock() - defer n.stateMu.RUnlock() return n.chainID } diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go index e8030c4c1c7..e516519ea7d 100644 --- a/common/client/node_lifecycle_test.go +++ b/common/client/node_lifecycle_test.go @@ -49,7 +49,6 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { expectedError := errors.New("failed to subscribe to rpc") rpc.On("SubscribeToHeads", mock.Anything).Return(nil, nil, expectedError).Once() - rpc.On("UnsubscribeAllExcept", nil, nil) // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -74,8 +73,6 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { sub.On("Err").Return((<-chan error)(errChan)).Once() sub.On("Unsubscribe").Once() rpc.On("SubscribeToHeads", mock.Anything).Return(nil, sub, nil).Once() - // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Once() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -1143,6 +1140,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { newNode := func(t *testing.T, opts testNodeOpts) testNode { node := newTestNode(t, opts) + opts.rpc.On("UnsubscribeAllExcept", nil, nil) opts.rpc.On("Close").Return(nil).Once() return node @@ -1161,7 +1159,6 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")) // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept", nil, nil) err := node.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertLogEventually(t, observedLogs, "Dial failed: Node is unreachable") @@ -1186,7 +1183,6 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { assert.Equal(t, NodeStateDialed, node.State()) }).Return(nodeChainID, errors.New("failed to get chain id")) // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept", nil, nil) err := node.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertLogEventually(t, observedLogs, "Failed to verify chain ID for node") @@ -1208,7 +1204,6 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept", nil, nil) err := node.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertEventually(t, func() bool { @@ -1234,7 +1229,6 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }).Return(nodeChainID, nil).Once() rpc.On("IsSyncing", mock.Anything).Return(false, errors.New("failed to check syncing status")) // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept", nil, nil) // fail to redial to stay in unreachable state rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")) err := node.Start(tests.Context(t)) @@ -1259,7 +1253,6 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) rpc.On("IsSyncing", mock.Anything).Return(true, nil) // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept", nil, nil) err := node.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertEventually(t, func() bool { @@ -1459,7 +1452,6 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { opts.config.nodeIsSyncingEnabled = true node := newTestNode(t, opts) opts.rpc.On("Close").Return(nil).Once() - opts.rpc.On("UnsubscribeAllExcept", nil, nil) node.setState(NodeStateDialed) return node diff --git a/core/chains/evm/client/chain_client.go b/core/chains/evm/client/chain_client.go index e3f7a5559b0..5214fd54fc3 100644 --- a/core/chains/evm/client/chain_client.go +++ b/core/chains/evm/client/chain_client.go @@ -32,8 +32,6 @@ type Client interface { Close() // ChainID locally stored for quick access ConfiguredChainID() *big.Int - // ChainID RPC call - ChainID() (*big.Int, error) // NodeStates returns a map of node Name->node state // It might be nil or empty, e.g. for mock clients etc @@ -231,16 +229,6 @@ func (c *chainClient) PendingCallContract(ctx context.Context, msg ethereum.Call return rpc.PendingCallContract(ctx, msg) } -// TODO-1663: change this to actual ChainID() call once client.go is deprecated. -func (c *chainClient) ChainID() (*big.Int, error) { - rpc, err := c.multiNode.SelectRPC() - if err != nil { - return nil, err - } - // TODO: Progagate context - return rpc.ChainID(context.Background()) -} - func (c *chainClient) Close() { _ = c.multiNode.Close() } @@ -258,7 +246,7 @@ func (c *chainClient) ConfiguredChainID() *big.Int { } func (c *chainClient) Dial(ctx context.Context) error { - return c.multiNode.Dial(ctx) + return c.multiNode.Start(ctx) } func (c *chainClient) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { @@ -390,10 +378,7 @@ func (c *chainClient) SubscribeNewHead(ctx context.Context) (<-chan *evmtypes.He if err != nil { return nil, nil, err } - chainID, err := c.ChainID() - if err != nil { - return nil, nil, err - } + chainID := c.ConfiguredChainID() forwardCh, csf := newChainIDSubForwarder(chainID, ch) err = csf.start(sub, err) if err != nil { diff --git a/core/chains/evm/client/chain_client_test.go b/core/chains/evm/client/chain_client_test.go index ef21c303a37..822604e5ee5 100644 --- a/core/chains/evm/client/chain_client_test.go +++ b/core/chains/evm/client/chain_client_test.go @@ -828,11 +828,9 @@ func TestEthClient_ErroringClient(t *testing.T) { _, err = erroringClient.CallContract(ctx, ethereum.CallMsg{}, nil) require.Equal(t, err, commonclient.ErroringNodeError) - // TODO-1663: test actual ChainID() call once client.go is deprecated. - id, err := erroringClient.ChainID() + id := erroringClient.ConfiguredChainID() var expected *big.Int require.Equal(t, id, expected) - require.Equal(t, err, commonclient.ErroringNodeError) _, err = erroringClient.CodeAt(ctx, common.Address{}, nil) require.Equal(t, err, commonclient.ErroringNodeError) From a83d34225aee7d46966387e1c4a86639cee8fdde Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Fri, 28 Jun 2024 11:36:08 -0400 Subject: [PATCH 44/58] Remove ChainClientRPC interface --- common/client/multi_node.go | 2 +- common/client/node_lifecycle_test.go | 6 +- core/chains/evm/client/chain_client.go | 13 +- core/chains/evm/client/chain_client_test.go | 139 +- core/chains/evm/client/evm_client.go | 4 +- core/chains/evm/client/helpers_test.go | 14 +- .../evm/client/mock_chain_client_rpc_test.go | 1160 ----------------- core/chains/evm/client/rpc_client.go | 43 +- 8 files changed, 109 insertions(+), 1272 deletions(-) delete mode 100644 core/chains/evm/client/mock_chain_client_rpc_test.go diff --git a/common/client/multi_node.go b/common/client/multi_node.go index 9824f52e4fc..892b7788ed4 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -94,7 +94,7 @@ func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) ChainID() CHAIN_ID { return c.chainID } -func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) DoAll(ctx context.Context, do func(ctx context.Context, rpc RPC_CLIENT, isSendOnly bool) bool) error { +func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) DoAll(ctx context.Context, do func(ctx context.Context, rpc RPC_CLIENT, isSendOnly bool)) error { callsCompleted := 0 for _, n := range c.primaryNodes { if ctx.Err() != nil { diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go index e516519ea7d..f0afcd00f73 100644 --- a/common/client/node_lifecycle_test.go +++ b/common/client/node_lifecycle_test.go @@ -28,6 +28,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { newDialedNode := func(t *testing.T, opts testNodeOpts) testNode { node := newTestNode(t, opts) opts.rpc.On("Close").Return(nil).Once() + opts.rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Maybe() node.setState(NodeStateDialed) return node @@ -164,7 +165,6 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { pollError := errors.New("failed to get ClientVersion") rpc.On("Ping", mock.Anything).Return(pollError) // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Once() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -344,7 +344,6 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() // disconnects all on transfer to unreachable or outOfSync - rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Once() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -1140,7 +1139,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { newNode := func(t *testing.T, opts testNodeOpts) testNode { node := newTestNode(t, opts) - opts.rpc.On("UnsubscribeAllExcept", nil, nil) + opts.rpc.On("UnsubscribeAllExcept", nil, nil).Maybe() opts.rpc.On("Close").Return(nil).Once() return node @@ -1452,6 +1451,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { opts.config.nodeIsSyncingEnabled = true node := newTestNode(t, opts) opts.rpc.On("Close").Return(nil).Once() + opts.rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Maybe() node.setState(NodeStateDialed) return node diff --git a/core/chains/evm/client/chain_client.go b/core/chains/evm/client/chain_client.go index 5214fd54fc3..a0090efa0f6 100644 --- a/core/chains/evm/client/chain_client.go +++ b/core/chains/evm/client/chain_client.go @@ -101,7 +101,7 @@ func ContextWithDefaultTimeout() (ctx context.Context, cancel context.CancelFunc type chainClient struct { multiNode *commonclient.MultiNode[ *big.Int, - ChainClientRPC, + *RpcClient, ] logger logger.SugaredLogger chainType chaintype.ChainType @@ -112,8 +112,8 @@ func NewChainClient( lggr logger.Logger, selectionMode string, leaseDuration time.Duration, - nodes []commonclient.Node[*big.Int, ChainClientRPC], - sendonlys []commonclient.SendOnlyNode[*big.Int, ChainClientRPC], + nodes []commonclient.Node[*big.Int, *RpcClient], + sendonlys []commonclient.SendOnlyNode[*big.Int, *RpcClient], chainID *big.Int, clientErrors evmconfig.ClientErrors, ) Client { @@ -163,13 +163,13 @@ func (c *chainClient) BatchCallContextAll(ctx context.Context, b []ethrpc.BatchE return selectionErr } - doFunc := func(ctx context.Context, rpc ChainClientRPC, isSendOnly bool) bool { + doFunc := func(ctx context.Context, rpc *RpcClient, isSendOnly bool) { if rpc == main { - return true + return } // Parallel call made to all other nodes with ignored return value wg.Add(1) - go func(rpc ChainClientRPC) { + go func(rpc *RpcClient) { defer wg.Done() err := rpc.BatchCallContext(ctx, b) if err != nil { @@ -178,7 +178,6 @@ func (c *chainClient) BatchCallContextAll(ctx context.Context, b []ethrpc.BatchE c.logger.Debug("Secondary node BatchCallContext success") } }(rpc) - return true } if err := c.multiNode.DoAll(ctx, doFunc); err != nil { diff --git a/core/chains/evm/client/chain_client_test.go b/core/chains/evm/client/chain_client_test.go index 822604e5ee5..24c16b28752 100644 --- a/core/chains/evm/client/chain_client_test.go +++ b/core/chains/evm/client/chain_client_test.go @@ -3,7 +3,6 @@ package client_test import ( "context" "encoding/json" - "errors" "fmt" "math/big" "net/url" @@ -16,10 +15,8 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/rpc" pkgerrors "github.com/pkg/errors" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/tidwall/gjson" @@ -745,60 +742,103 @@ func TestEthClient_SubscribeNewHead(t *testing.T) { sub.Unsubscribe() } -func newMockRpc(t *testing.T) *client.MockChainClientRPC { - mockRpc := client.NewMockChainClientRPC(t) - mockRpc.On("Dial", mock.Anything).Return(nil).Once() - mockRpc.On("Close").Return(nil).Once() - mockRpc.On("ChainID", mock.Anything).Return(testutils.FixtureChainID, nil).Once() - // node does not always manage to fully setup aliveLoop, so we have to make calls optional to avoid flakes - mockRpc.On("Subscribe", mock.Anything, mock.Anything, mock.Anything).Return(client.NewMockSubscription(), nil).Maybe() - mockRpc.On("SetAliveLoopSub", mock.Anything).Return().Maybe() - sub := client.NewMockSubscription() - mockRpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan *evmtypes.Head), sub, nil).Maybe() - mockRpc.On("Unsubscribe", mock.Anything).Return(nil).Maybe() - return mockRpc -} - -func TestChainClient_BatchCallContext(t *testing.T) { +/* +func TestEthClient_BatchCallContext(t *testing.T) { t.Parallel() - t.Run("batch requests return errors", func(t *testing.T) { - ctx := tests.Context(t) - rpcError := errors.New("something went wrong") - blockNumResp := "" - blockNum := hexutil.EncodeBig(big.NewInt(42)) - b := []rpc.BatchElem{ - { - Method: "eth_getBlockByNumber", - Args: []interface{}{blockNum, true}, - Result: &types.Block{}, - }, - { - Method: "eth_blockNumber", - Result: &blockNumResp, - }, + // Set up the WebSocket server + wsServer := testutils.NewWSServer(t, testutils.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + return + case "eth_unsubscribe": + resp.Result = "true" + return } + return + }) + defer wsServer.Close() + wsURL := wsServer.WSURL().String() + + // Set up the HTTP mock server + handler := func(w http.ResponseWriter, r *http.Request) { + var requests []rpc.BatchElem + decoder := json.NewDecoder(r.Body) + err := decoder.Decode(&requests) + require.NoError(t, err) - mockRpc := newMockRpc(t) - mockRpc.On("BatchCallContext", mock.Anything, b).Run(func(args mock.Arguments) { - reqs := args.Get(1).([]rpc.BatchElem) - for i := 0; i < len(reqs); i++ { - elem := &reqs[i] - elem.Error = rpcError + responses := make([]map[string]interface{}, len(requests)) + for i, req := range requests { + resp := map[string]interface{}{ + "jsonrpc": "2.0", + "id": req.ID, } - }).Return(nil).Maybe() - client := client.NewChainClientWithMockedRpc(t, commonclient.NodeSelectionModeRoundRobin, time.Second*0, time.Second*0, testutils.FixtureChainID, mockRpc) - err := client.Dial(ctx) - require.NoError(t, err) + switch req.Method { + case "eth_getBlockByNumber": + block := map[string]interface{}{ + "number": "0x2a", // 42 in hex + "hash": "0x123", + "transactions": []interface{}{}, + "uncles": []interface{}{}, + } + resp["result"] = block + case "eth_blockNumber": + resp["result"] = "0x2a" // 42 in hex + default: + resp["error"] = map[string]interface{}{ + "code": -32601, + "message": "Method not found", + } + } + responses[i] = resp + } - err = client.BatchCallContext(ctx, b) + encoder := json.NewEncoder(w) + err = encoder.Encode(responses) require.NoError(t, err) - for _, elem := range b { - require.ErrorIs(t, rpcError, elem.Error) - } - }) + } + + httpServer := httptest.NewServer(http.HandlerFunc(handler)) + defer httpServer.Close() + + parsedHttpURL, err := url.Parse(httpServer.URL) + require.NoError(t, err) + + // Create a client and connect to the mock servers + cfg := client.TestNodePoolConfig{ + NodeSelectionMode: commonclient.NodeSelectionModeRoundRobin, + } + c, err := client.NewChainClientWithTestNode(t, cfg, time.Second*0, cfg.NodeLeaseDuration, wsURL, parsedHttpURL, nil, 42, testutils.FixtureChainID) + require.NoError(t, err) + require.NoError(t, c.Dial(context.Background())) + + // Prepare batch requests + blockNum := hexutil.EncodeBig(big.NewInt(42)) + batch := []rpc.BatchElem{ + { + Method: "eth_getBlockByNumber", + Args: []interface{}{blockNum, false}, + Result: new(types.Block), + }, + { + Method: "eth_blockNumber", + Result: new(hexutil.Big), + }, + } + + // Execute batch call + err = c.BatchCallContext(context.Background(), batch) + require.NoError(t, err) + + // Verify responses + block := batch[0].Result.(*types.Block) + assert.Equal(t, big.NewInt(42), block.Number()) + assert.Equal(t, common.HexToHash("0x123"), block.Hash()) + assert.Equal(t, big.NewInt(42), (*big.Int)(batch[1].Result.(*hexutil.Big))) } +*/ func TestEthClient_ErroringClient(t *testing.T) { t.Parallel() @@ -829,8 +869,7 @@ func TestEthClient_ErroringClient(t *testing.T) { require.Equal(t, err, commonclient.ErroringNodeError) id := erroringClient.ConfiguredChainID() - var expected *big.Int - require.Equal(t, id, expected) + require.Equal(t, id, big.NewInt(0)) _, err = erroringClient.CodeAt(ctx, common.Address{}, nil) require.Equal(t, err, commonclient.ErroringNodeError) diff --git a/core/chains/evm/client/evm_client.go b/core/chains/evm/client/evm_client.go index 6a36b8567a2..d0c61056d3f 100644 --- a/core/chains/evm/client/evm_client.go +++ b/core/chains/evm/client/evm_client.go @@ -14,8 +14,8 @@ import ( func NewEvmClient(cfg evmconfig.NodePool, chainCfg commonclient.ChainConfig, clientErrors evmconfig.ClientErrors, lggr logger.Logger, chainID *big.Int, nodes []*toml.Node, chainType chaintype.ChainType) Client { var empty url.URL - var primaries []commonclient.Node[*big.Int, ChainClientRPC] - var sendonlys []commonclient.SendOnlyNode[*big.Int, ChainClientRPC] + var primaries []commonclient.Node[*big.Int, *RpcClient] + var sendonlys []commonclient.SendOnlyNode[*big.Int, *RpcClient] for i, node := range nodes { if node.SendOnly != nil && *node.SendOnly { rpc := NewRPCClient(cfg, lggr, empty, (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, diff --git a/core/chains/evm/client/helpers_test.go b/core/chains/evm/client/helpers_test.go index 0d77f33e62f..4def4f9c881 100644 --- a/core/chains/evm/client/helpers_test.go +++ b/core/chains/evm/client/helpers_test.go @@ -134,18 +134,18 @@ func NewChainClientWithTestNode( } rpc := NewRPCClient(nodePoolCfg, lggr, *parsed, rpcHTTPURL, "eth-primary-rpc-0", id, chainID, commonclient.Primary) - n := commonclient.NewNode[*big.Int, *evmtypes.Head, ChainClientRPC]( + n := commonclient.NewNode[*big.Int, *evmtypes.Head, *RpcClient]( nodeCfg, clientMocks.ChainConfig{NoNewHeadsThresholdVal: noNewHeadsThreshold}, lggr, *parsed, rpcHTTPURL, "eth-primary-node-0", id, chainID, 1, rpc, "EVM") - primaries := []commonclient.Node[*big.Int, ChainClientRPC]{n} + primaries := []commonclient.Node[*big.Int, *RpcClient]{n} - var sendonlys []commonclient.SendOnlyNode[*big.Int, ChainClientRPC] + var sendonlys []commonclient.SendOnlyNode[*big.Int, *RpcClient] for i, u := range sendonlyRPCURLs { if u.Scheme != "http" && u.Scheme != "https" { return nil, pkgerrors.Errorf("sendonly ethereum rpc url scheme must be http(s): %s", u.String()) } var empty url.URL rpc := NewRPCClient(nodePoolCfg, lggr, empty, &sendonlyRPCURLs[i], fmt.Sprintf("eth-sendonly-rpc-%d", i), id, chainID, commonclient.Secondary) - s := commonclient.NewSendOnlyNode[*big.Int, ChainClientRPC]( + s := commonclient.NewSendOnlyNode[*big.Int, *RpcClient]( lggr, u, fmt.Sprintf("eth-sendonly-%d", i), chainID, rpc) sendonlys = append(sendonlys, s) } @@ -176,7 +176,7 @@ func NewChainClientWithMockedRpc( leaseDuration time.Duration, noNewHeadsThreshold time.Duration, chainID *big.Int, - rpc ChainClientRPC, + rpc *RpcClient, ) Client { lggr := logger.Test(t) @@ -185,9 +185,9 @@ func NewChainClientWithMockedRpc( } parsed, _ := url.ParseRequestURI("ws://test") - n := commonclient.NewNode[*big.Int, *evmtypes.Head, ChainClientRPC]( + n := commonclient.NewNode[*big.Int, *evmtypes.Head, *RpcClient]( cfg, clientMocks.ChainConfig{NoNewHeadsThresholdVal: noNewHeadsThreshold}, lggr, *parsed, nil, "eth-primary-node-0", 1, chainID, 1, rpc, "EVM") - primaries := []commonclient.Node[*big.Int, ChainClientRPC]{n} + primaries := []commonclient.Node[*big.Int, *RpcClient]{n} clientErrors := NewTestClientErrors() c := NewChainClient(lggr, selectionMode, leaseDuration, primaries, nil, chainID, &clientErrors) t.Cleanup(c.Close) diff --git a/core/chains/evm/client/mock_chain_client_rpc_test.go b/core/chains/evm/client/mock_chain_client_rpc_test.go deleted file mode 100644 index fcf81c3dfb7..00000000000 --- a/core/chains/evm/client/mock_chain_client_rpc_test.go +++ /dev/null @@ -1,1160 +0,0 @@ -// Code generated by mockery v2.42.2. DO NOT EDIT. - -package client - -import ( - big "math/big" - - assets "github.com/smartcontractkit/chainlink-common/pkg/assets" - - common "github.com/ethereum/go-ethereum/common" - - commontypes "github.com/smartcontractkit/chainlink/v2/common/types" - - context "context" - - coretypes "github.com/ethereum/go-ethereum/core/types" - - ethereum "github.com/ethereum/go-ethereum" - - evmassets "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" - - mock "github.com/stretchr/testify/mock" - - rpc "github.com/ethereum/go-ethereum/rpc" - - types "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" -) - -// MockChainClientRPC is an autogenerated mock type for the ChainClientRPC type -type MockChainClientRPC struct { - mock.Mock -} - -// BalanceAt provides a mock function with given fields: ctx, accountAddress, blockNumber -func (_m *MockChainClientRPC) BalanceAt(ctx context.Context, accountAddress common.Address, blockNumber *big.Int) (*big.Int, error) { - ret := _m.Called(ctx, accountAddress, blockNumber) - - if len(ret) == 0 { - panic("no return value specified for BalanceAt") - } - - var r0 *big.Int - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) (*big.Int, error)); ok { - return rf(ctx, accountAddress, blockNumber) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) *big.Int); ok { - r0 = rf(ctx, accountAddress, blockNumber) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { - r1 = rf(ctx, accountAddress, blockNumber) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BatchCallContext provides a mock function with given fields: ctx, b -func (_m *MockChainClientRPC) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { - ret := _m.Called(ctx, b) - - if len(ret) == 0 { - panic("no return value specified for BatchCallContext") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, []rpc.BatchElem) error); ok { - r0 = rf(ctx, b) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// BlockByHash provides a mock function with given fields: ctx, hash -func (_m *MockChainClientRPC) BlockByHash(ctx context.Context, hash common.Hash) (*types.Head, error) { - ret := _m.Called(ctx, hash) - - if len(ret) == 0 { - panic("no return value specified for BlockByHash") - } - - var r0 *types.Head - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Head, error)); ok { - return rf(ctx, hash) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Head); ok { - r0 = rf(ctx, hash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Head) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, hash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BlockByHashGeth provides a mock function with given fields: ctx, hash -func (_m *MockChainClientRPC) BlockByHashGeth(ctx context.Context, hash common.Hash) (*coretypes.Block, error) { - ret := _m.Called(ctx, hash) - - if len(ret) == 0 { - panic("no return value specified for BlockByHashGeth") - } - - var r0 *coretypes.Block - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*coretypes.Block, error)); ok { - return rf(ctx, hash) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *coretypes.Block); ok { - r0 = rf(ctx, hash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.Block) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, hash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BlockByNumber provides a mock function with given fields: ctx, number -func (_m *MockChainClientRPC) BlockByNumber(ctx context.Context, number *big.Int) (*types.Head, error) { - ret := _m.Called(ctx, number) - - if len(ret) == 0 { - panic("no return value specified for BlockByNumber") - } - - var r0 *types.Head - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Head, error)); ok { - return rf(ctx, number) - } - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Head); ok { - r0 = rf(ctx, number) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Head) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { - r1 = rf(ctx, number) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BlockByNumberGeth provides a mock function with given fields: ctx, number -func (_m *MockChainClientRPC) BlockByNumberGeth(ctx context.Context, number *big.Int) (*coretypes.Block, error) { - ret := _m.Called(ctx, number) - - if len(ret) == 0 { - panic("no return value specified for BlockByNumberGeth") - } - - var r0 *coretypes.Block - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*coretypes.Block, error)); ok { - return rf(ctx, number) - } - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *coretypes.Block); ok { - r0 = rf(ctx, number) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.Block) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { - r1 = rf(ctx, number) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CallContext provides a mock function with given fields: ctx, result, method, args -func (_m *MockChainClientRPC) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { - var _ca []interface{} - _ca = append(_ca, ctx, result, method) - _ca = append(_ca, args...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for CallContext") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, interface{}, string, ...interface{}) error); ok { - r0 = rf(ctx, result, method, args...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// CallContract provides a mock function with given fields: ctx, msg, blockNumber -func (_m *MockChainClientRPC) CallContract(ctx context.Context, msg interface{}, blockNumber *big.Int) ([]byte, error) { - ret := _m.Called(ctx, msg, blockNumber) - - if len(ret) == 0 { - panic("no return value specified for CallContract") - } - - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, interface{}, *big.Int) ([]byte, error)); ok { - return rf(ctx, msg, blockNumber) - } - if rf, ok := ret.Get(0).(func(context.Context, interface{}, *big.Int) []byte); ok { - r0 = rf(ctx, msg, blockNumber) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, interface{}, *big.Int) error); ok { - r1 = rf(ctx, msg, blockNumber) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ChainID provides a mock function with given fields: ctx -func (_m *MockChainClientRPC) ChainID(ctx context.Context) (*big.Int, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for ChainID") - } - - var r0 *big.Int - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ClientVersion provides a mock function with given fields: _a0 -func (_m *MockChainClientRPC) ClientVersion(_a0 context.Context) (string, error) { - ret := _m.Called(_a0) - - if len(ret) == 0 { - panic("no return value specified for ClientVersion") - } - - var r0 string - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (string, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(context.Context) string); ok { - r0 = rf(_a0) - } else { - r0 = ret.Get(0).(string) - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Close provides a mock function with given fields: -func (_m *MockChainClientRPC) Close() { - _m.Called() -} - -// CodeAt provides a mock function with given fields: ctx, account, blockNumber -func (_m *MockChainClientRPC) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) { - ret := _m.Called(ctx, account, blockNumber) - - if len(ret) == 0 { - panic("no return value specified for CodeAt") - } - - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) ([]byte, error)); ok { - return rf(ctx, account, blockNumber) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) []byte); ok { - r0 = rf(ctx, account, blockNumber) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { - r1 = rf(ctx, account, blockNumber) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Dial provides a mock function with given fields: ctx -func (_m *MockChainClientRPC) Dial(ctx context.Context) error { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for Dial") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context) error); ok { - r0 = rf(ctx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DialHTTP provides a mock function with given fields: -func (_m *MockChainClientRPC) DialHTTP() error { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for DialHTTP") - } - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DisconnectAll provides a mock function with given fields: -func (_m *MockChainClientRPC) DisconnectAll() { - _m.Called() -} - -// EstimateGas provides a mock function with given fields: ctx, call -func (_m *MockChainClientRPC) EstimateGas(ctx context.Context, call interface{}) (uint64, error) { - ret := _m.Called(ctx, call) - - if len(ret) == 0 { - panic("no return value specified for EstimateGas") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, interface{}) (uint64, error)); ok { - return rf(ctx, call) - } - if rf, ok := ret.Get(0).(func(context.Context, interface{}) uint64); ok { - r0 = rf(ctx, call) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context, interface{}) error); ok { - r1 = rf(ctx, call) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FilterEvents provides a mock function with given fields: ctx, query -func (_m *MockChainClientRPC) FilterEvents(ctx context.Context, query ethereum.FilterQuery) ([]coretypes.Log, error) { - ret := _m.Called(ctx, query) - - if len(ret) == 0 { - panic("no return value specified for FilterEvents") - } - - var r0 []coretypes.Log - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) ([]coretypes.Log, error)); ok { - return rf(ctx, query) - } - if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) []coretypes.Log); ok { - r0 = rf(ctx, query) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]coretypes.Log) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery) error); ok { - r1 = rf(ctx, query) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// HeaderByHash provides a mock function with given fields: ctx, h -func (_m *MockChainClientRPC) HeaderByHash(ctx context.Context, h common.Hash) (*coretypes.Header, error) { - ret := _m.Called(ctx, h) - - if len(ret) == 0 { - panic("no return value specified for HeaderByHash") - } - - var r0 *coretypes.Header - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*coretypes.Header, error)); ok { - return rf(ctx, h) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *coretypes.Header); ok { - r0 = rf(ctx, h) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.Header) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, h) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// HeaderByNumber provides a mock function with given fields: ctx, n -func (_m *MockChainClientRPC) HeaderByNumber(ctx context.Context, n *big.Int) (*coretypes.Header, error) { - ret := _m.Called(ctx, n) - - if len(ret) == 0 { - panic("no return value specified for HeaderByNumber") - } - - var r0 *coretypes.Header - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*coretypes.Header, error)); ok { - return rf(ctx, n) - } - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *coretypes.Header); ok { - r0 = rf(ctx, n) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.Header) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { - r1 = rf(ctx, n) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// IsSyncing provides a mock function with given fields: ctx -func (_m *MockChainClientRPC) IsSyncing(ctx context.Context) (bool, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for IsSyncing") - } - - var r0 bool - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (bool, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) bool); ok { - r0 = rf(ctx) - } else { - r0 = ret.Get(0).(bool) - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// LINKBalance provides a mock function with given fields: ctx, accountAddress, linkAddress -func (_m *MockChainClientRPC) LINKBalance(ctx context.Context, accountAddress common.Address, linkAddress common.Address) (*assets.Link, error) { - ret := _m.Called(ctx, accountAddress, linkAddress) - - if len(ret) == 0 { - panic("no return value specified for LINKBalance") - } - - var r0 *assets.Link - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Address, common.Address) (*assets.Link, error)); ok { - return rf(ctx, accountAddress, linkAddress) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Address, common.Address) *assets.Link); ok { - r0 = rf(ctx, accountAddress, linkAddress) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*assets.Link) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Address, common.Address) error); ok { - r1 = rf(ctx, accountAddress, linkAddress) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// LatestBlockHeight provides a mock function with given fields: _a0 -func (_m *MockChainClientRPC) LatestBlockHeight(_a0 context.Context) (*big.Int, error) { - ret := _m.Called(_a0) - - if len(ret) == 0 { - panic("no return value specified for LatestBlockHeight") - } - - var r0 *big.Int - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// LatestFinalizedBlock provides a mock function with given fields: ctx -func (_m *MockChainClientRPC) LatestFinalizedBlock(ctx context.Context) (*types.Head, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for LatestFinalizedBlock") - } - - var r0 *types.Head - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*types.Head, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) *types.Head); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Head) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// PendingCallContract provides a mock function with given fields: ctx, msg -func (_m *MockChainClientRPC) PendingCallContract(ctx context.Context, msg interface{}) ([]byte, error) { - ret := _m.Called(ctx, msg) - - if len(ret) == 0 { - panic("no return value specified for PendingCallContract") - } - - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, interface{}) ([]byte, error)); ok { - return rf(ctx, msg) - } - if rf, ok := ret.Get(0).(func(context.Context, interface{}) []byte); ok { - r0 = rf(ctx, msg) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, interface{}) error); ok { - r1 = rf(ctx, msg) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// PendingCodeAt provides a mock function with given fields: ctx, account -func (_m *MockChainClientRPC) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { - ret := _m.Called(ctx, account) - - if len(ret) == 0 { - panic("no return value specified for PendingCodeAt") - } - - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Address) ([]byte, error)); ok { - return rf(ctx, account) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Address) []byte); ok { - r0 = rf(ctx, account) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { - r1 = rf(ctx, account) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// PendingSequenceAt provides a mock function with given fields: ctx, addr -func (_m *MockChainClientRPC) PendingSequenceAt(ctx context.Context, addr common.Address) (types.Nonce, error) { - ret := _m.Called(ctx, addr) - - if len(ret) == 0 { - panic("no return value specified for PendingSequenceAt") - } - - var r0 types.Nonce - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Address) (types.Nonce, error)); ok { - return rf(ctx, addr) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Address) types.Nonce); ok { - r0 = rf(ctx, addr) - } else { - r0 = ret.Get(0).(types.Nonce) - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { - r1 = rf(ctx, addr) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Ping provides a mock function with given fields: _a0 -func (_m *MockChainClientRPC) Ping(_a0 context.Context) error { - ret := _m.Called(_a0) - - if len(ret) == 0 { - panic("no return value specified for Ping") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SendEmptyTransaction provides a mock function with given fields: ctx, newTxAttempt, seq, gasLimit, fee, fromAddress -func (_m *MockChainClientRPC) SendEmptyTransaction(ctx context.Context, newTxAttempt func(types.Nonce, uint32, *evmassets.Wei, common.Address) (interface{}, error), seq types.Nonce, gasLimit uint32, fee *evmassets.Wei, fromAddress common.Address) (string, error) { - ret := _m.Called(ctx, newTxAttempt, seq, gasLimit, fee, fromAddress) - - if len(ret) == 0 { - panic("no return value specified for SendEmptyTransaction") - } - - var r0 string - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, func(types.Nonce, uint32, *evmassets.Wei, common.Address) (interface{}, error), types.Nonce, uint32, *evmassets.Wei, common.Address) (string, error)); ok { - return rf(ctx, newTxAttempt, seq, gasLimit, fee, fromAddress) - } - if rf, ok := ret.Get(0).(func(context.Context, func(types.Nonce, uint32, *evmassets.Wei, common.Address) (interface{}, error), types.Nonce, uint32, *evmassets.Wei, common.Address) string); ok { - r0 = rf(ctx, newTxAttempt, seq, gasLimit, fee, fromAddress) - } else { - r0 = ret.Get(0).(string) - } - - if rf, ok := ret.Get(1).(func(context.Context, func(types.Nonce, uint32, *evmassets.Wei, common.Address) (interface{}, error), types.Nonce, uint32, *evmassets.Wei, common.Address) error); ok { - r1 = rf(ctx, newTxAttempt, seq, gasLimit, fee, fromAddress) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// SendTransaction provides a mock function with given fields: ctx, tx -func (_m *MockChainClientRPC) SendTransaction(ctx context.Context, tx *coretypes.Transaction) error { - ret := _m.Called(ctx, tx) - - if len(ret) == 0 { - panic("no return value specified for SendTransaction") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *coretypes.Transaction) error); ok { - r0 = rf(ctx, tx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SequenceAt provides a mock function with given fields: ctx, accountAddress, blockNumber -func (_m *MockChainClientRPC) SequenceAt(ctx context.Context, accountAddress common.Address, blockNumber *big.Int) (types.Nonce, error) { - ret := _m.Called(ctx, accountAddress, blockNumber) - - if len(ret) == 0 { - panic("no return value specified for SequenceAt") - } - - var r0 types.Nonce - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) (types.Nonce, error)); ok { - return rf(ctx, accountAddress, blockNumber) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) types.Nonce); ok { - r0 = rf(ctx, accountAddress, blockNumber) - } else { - r0 = ret.Get(0).(types.Nonce) - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { - r1 = rf(ctx, accountAddress, blockNumber) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// SetAliveLoopSub provides a mock function with given fields: _a0 -func (_m *MockChainClientRPC) SetAliveLoopSub(_a0 commontypes.Subscription) { - _m.Called(_a0) -} - -// SimulateTransaction provides a mock function with given fields: ctx, tx -func (_m *MockChainClientRPC) SimulateTransaction(ctx context.Context, tx *coretypes.Transaction) error { - ret := _m.Called(ctx, tx) - - if len(ret) == 0 { - panic("no return value specified for SimulateTransaction") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *coretypes.Transaction) error); ok { - r0 = rf(ctx, tx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SubscribeFilterLogs provides a mock function with given fields: ctx, q, ch -func (_m *MockChainClientRPC) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- coretypes.Log) (ethereum.Subscription, error) { - ret := _m.Called(ctx, q, ch) - - if len(ret) == 0 { - panic("no return value specified for SubscribeFilterLogs") - } - - var r0 ethereum.Subscription - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- coretypes.Log) (ethereum.Subscription, error)); ok { - return rf(ctx, q, ch) - } - if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- coretypes.Log) ethereum.Subscription); ok { - r0 = rf(ctx, q, ch) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(ethereum.Subscription) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery, chan<- coretypes.Log) error); ok { - r1 = rf(ctx, q, ch) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// SubscribeToFinalizedHeads provides a mock function with given fields: ctx -func (_m *MockChainClientRPC) SubscribeToFinalizedHeads(ctx context.Context) (<-chan *types.Head, commontypes.Subscription, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for SubscribeToFinalizedHeads") - } - - var r0 <-chan *types.Head - var r1 commontypes.Subscription - var r2 error - if rf, ok := ret.Get(0).(func(context.Context) (<-chan *types.Head, commontypes.Subscription, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) <-chan *types.Head); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan *types.Head) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) commontypes.Subscription); ok { - r1 = rf(ctx) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(commontypes.Subscription) - } - } - - if rf, ok := ret.Get(2).(func(context.Context) error); ok { - r2 = rf(ctx) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// SubscribeToHeads provides a mock function with given fields: ctx -func (_m *MockChainClientRPC) SubscribeToHeads(ctx context.Context) (<-chan *types.Head, commontypes.Subscription, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for SubscribeToHeads") - } - - var r0 <-chan *types.Head - var r1 commontypes.Subscription - var r2 error - if rf, ok := ret.Get(0).(func(context.Context) (<-chan *types.Head, commontypes.Subscription, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) <-chan *types.Head); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan *types.Head) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) commontypes.Subscription); ok { - r1 = rf(ctx) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(commontypes.Subscription) - } - } - - if rf, ok := ret.Get(2).(func(context.Context) error); ok { - r2 = rf(ctx) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// SubscribersCount provides a mock function with given fields: -func (_m *MockChainClientRPC) SubscribersCount() int32 { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for SubscribersCount") - } - - var r0 int32 - if rf, ok := ret.Get(0).(func() int32); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(int32) - } - - return r0 -} - -// SuggestGasPrice provides a mock function with given fields: ctx -func (_m *MockChainClientRPC) SuggestGasPrice(ctx context.Context) (*big.Int, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for SuggestGasPrice") - } - - var r0 *big.Int - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// SuggestGasTipCap provides a mock function with given fields: ctx -func (_m *MockChainClientRPC) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for SuggestGasTipCap") - } - - var r0 *big.Int - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// TokenBalance provides a mock function with given fields: ctx, accountAddress, tokenAddress -func (_m *MockChainClientRPC) TokenBalance(ctx context.Context, accountAddress common.Address, tokenAddress common.Address) (*big.Int, error) { - ret := _m.Called(ctx, accountAddress, tokenAddress) - - if len(ret) == 0 { - panic("no return value specified for TokenBalance") - } - - var r0 *big.Int - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Address, common.Address) (*big.Int, error)); ok { - return rf(ctx, accountAddress, tokenAddress) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Address, common.Address) *big.Int); ok { - r0 = rf(ctx, accountAddress, tokenAddress) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Address, common.Address) error); ok { - r1 = rf(ctx, accountAddress, tokenAddress) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// TransactionByHash provides a mock function with given fields: ctx, txHash -func (_m *MockChainClientRPC) TransactionByHash(ctx context.Context, txHash common.Hash) (*coretypes.Transaction, error) { - ret := _m.Called(ctx, txHash) - - if len(ret) == 0 { - panic("no return value specified for TransactionByHash") - } - - var r0 *coretypes.Transaction - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*coretypes.Transaction, error)); ok { - return rf(ctx, txHash) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *coretypes.Transaction); ok { - r0 = rf(ctx, txHash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.Transaction) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, txHash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// TransactionReceipt provides a mock function with given fields: ctx, txHash -func (_m *MockChainClientRPC) TransactionReceipt(ctx context.Context, txHash common.Hash) (*coretypes.Receipt, error) { - ret := _m.Called(ctx, txHash) - - if len(ret) == 0 { - panic("no return value specified for TransactionReceipt") - } - - var r0 *coretypes.Receipt - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*coretypes.Receipt, error)); ok { - return rf(ctx, txHash) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *coretypes.Receipt); ok { - r0 = rf(ctx, txHash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.Receipt) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, txHash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// TransactionReceiptGeth provides a mock function with given fields: ctx, txHash -func (_m *MockChainClientRPC) TransactionReceiptGeth(ctx context.Context, txHash common.Hash) (*coretypes.Receipt, error) { - ret := _m.Called(ctx, txHash) - - if len(ret) == 0 { - panic("no return value specified for TransactionReceiptGeth") - } - - var r0 *coretypes.Receipt - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*coretypes.Receipt, error)); ok { - return rf(ctx, txHash) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *coretypes.Receipt); ok { - r0 = rf(ctx, txHash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.Receipt) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, txHash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// UnsubscribeAllExcept provides a mock function with given fields: subs -func (_m *MockChainClientRPC) UnsubscribeAllExcept(subs ...commontypes.Subscription) { - _va := make([]interface{}, len(subs)) - for _i := range subs { - _va[_i] = subs[_i] - } - var _ca []interface{} - _ca = append(_ca, _va...) - _m.Called(_ca...) -} - -// UnsubscribeAllExceptAliveLoop provides a mock function with given fields: -func (_m *MockChainClientRPC) UnsubscribeAllExceptAliveLoop() { - _m.Called() -} - -// NewMockChainClientRPC creates a new instance of MockChainClientRPC. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewMockChainClientRPC(t interface { - mock.TestingT - Cleanup(func()) -}) *MockChainClientRPC { - mock := &MockChainClientRPC{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index fc427b2993a..661860aa3dc 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -34,47 +34,6 @@ import ( ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" ) -//go:generate mockery --quiet --name ChainClientRPC --structname MockChainClientRPC --filename "mock_chain_client_rpc_test.go" --inpackage --case=underscore -type ChainClientRPC interface { - commonclient.RPCClient[*big.Int, *evmtypes.Head] - BalanceAt(ctx context.Context, accountAddress common.Address, blockNumber *big.Int) (*big.Int, error) - BatchCallContext(ctx context.Context, b []rpc.BatchElem) error - BlockByHash(ctx context.Context, hash common.Hash) (*evmtypes.Head, error) - BlockByHashGeth(ctx context.Context, hash common.Hash) (*types.Block, error) - BlockByNumber(ctx context.Context, number *big.Int) (*evmtypes.Head, error) - BlockByNumberGeth(ctx context.Context, number *big.Int) (*types.Block, error) - CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error - CallContract(ctx context.Context, msg interface{}, blockNumber *big.Int) ([]byte, error) - ClientVersion(_a0 context.Context) (string, error) - CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) - DialHTTP() error - DisconnectAll() - EstimateGas(ctx context.Context, call interface{}) (uint64, error) - FilterEvents(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) - HeaderByHash(ctx context.Context, h common.Hash) (*types.Header, error) - HeaderByNumber(ctx context.Context, n *big.Int) (*types.Header, error) - LINKBalance(ctx context.Context, accountAddress common.Address, linkAddress common.Address) (*commonassets.Link, error) - LatestBlockHeight(_a0 context.Context) (*big.Int, error) - LatestFinalizedBlock(ctx context.Context) (*evmtypes.Head, error) - PendingCallContract(ctx context.Context, msg interface{}) ([]byte, error) - PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) - PendingSequenceAt(ctx context.Context, addr common.Address) (evmtypes.Nonce, error) - SendEmptyTransaction(ctx context.Context, newTxAttempt func(evmtypes.Nonce, uint32, *assets.Wei, common.Address) (interface{}, error), seq evmtypes.Nonce, gasLimit uint32, fee *assets.Wei, fromAddress common.Address) (string, error) - SendTransaction(ctx context.Context, tx *types.Transaction) error - SequenceAt(ctx context.Context, accountAddress common.Address, blockNumber *big.Int) (evmtypes.Nonce, error) - SetAliveLoopSub(_a0 commontypes.Subscription) - SimulateTransaction(ctx context.Context, tx *types.Transaction) error - SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) - SubscribersCount() int32 - SuggestGasPrice(ctx context.Context) (*big.Int, error) - SuggestGasTipCap(ctx context.Context) (*big.Int, error) - TokenBalance(ctx context.Context, accountAddress common.Address, tokenAddress common.Address) (*big.Int, error) - TransactionByHash(ctx context.Context, txHash common.Hash) (*types.Transaction, error) - TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) - TransactionReceiptGeth(ctx context.Context, txHash common.Hash) (*types.Receipt, error) - UnsubscribeAllExceptAliveLoop() -} - var ( promEVMPoolRPCNodeDials = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "evm_pool_rpc_node_dials_total", @@ -191,7 +150,7 @@ func NewRPCClient( id int32, chainID *big.Int, tier commonclient.NodeTier, -) ChainClientRPC { +) *RpcClient { r := new(RpcClient) r.cfg = cfg r.name = name From 71a2b7f7ca8b9f98c83d541d3a1f85efa189ddc2 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Fri, 28 Jun 2024 11:52:46 -0400 Subject: [PATCH 45/58] Remove unneeded test --- core/chains/evm/client/chain_client_test.go | 98 --------------------- 1 file changed, 98 deletions(-) diff --git a/core/chains/evm/client/chain_client_test.go b/core/chains/evm/client/chain_client_test.go index 24c16b28752..e3ec9b16183 100644 --- a/core/chains/evm/client/chain_client_test.go +++ b/core/chains/evm/client/chain_client_test.go @@ -742,104 +742,6 @@ func TestEthClient_SubscribeNewHead(t *testing.T) { sub.Unsubscribe() } -/* -func TestEthClient_BatchCallContext(t *testing.T) { - t.Parallel() - - // Set up the WebSocket server - wsServer := testutils.NewWSServer(t, testutils.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { - switch method { - case "eth_subscribe": - resp.Result = `"0x00"` - return - case "eth_unsubscribe": - resp.Result = "true" - return - } - return - }) - defer wsServer.Close() - wsURL := wsServer.WSURL().String() - - // Set up the HTTP mock server - handler := func(w http.ResponseWriter, r *http.Request) { - var requests []rpc.BatchElem - decoder := json.NewDecoder(r.Body) - err := decoder.Decode(&requests) - require.NoError(t, err) - - responses := make([]map[string]interface{}, len(requests)) - for i, req := range requests { - resp := map[string]interface{}{ - "jsonrpc": "2.0", - "id": req.ID, - } - - switch req.Method { - case "eth_getBlockByNumber": - block := map[string]interface{}{ - "number": "0x2a", // 42 in hex - "hash": "0x123", - "transactions": []interface{}{}, - "uncles": []interface{}{}, - } - resp["result"] = block - case "eth_blockNumber": - resp["result"] = "0x2a" // 42 in hex - default: - resp["error"] = map[string]interface{}{ - "code": -32601, - "message": "Method not found", - } - } - responses[i] = resp - } - - encoder := json.NewEncoder(w) - err = encoder.Encode(responses) - require.NoError(t, err) - } - - httpServer := httptest.NewServer(http.HandlerFunc(handler)) - defer httpServer.Close() - - parsedHttpURL, err := url.Parse(httpServer.URL) - require.NoError(t, err) - - // Create a client and connect to the mock servers - cfg := client.TestNodePoolConfig{ - NodeSelectionMode: commonclient.NodeSelectionModeRoundRobin, - } - c, err := client.NewChainClientWithTestNode(t, cfg, time.Second*0, cfg.NodeLeaseDuration, wsURL, parsedHttpURL, nil, 42, testutils.FixtureChainID) - require.NoError(t, err) - require.NoError(t, c.Dial(context.Background())) - - // Prepare batch requests - blockNum := hexutil.EncodeBig(big.NewInt(42)) - batch := []rpc.BatchElem{ - { - Method: "eth_getBlockByNumber", - Args: []interface{}{blockNum, false}, - Result: new(types.Block), - }, - { - Method: "eth_blockNumber", - Result: new(hexutil.Big), - }, - } - - // Execute batch call - err = c.BatchCallContext(context.Background(), batch) - require.NoError(t, err) - - // Verify responses - block := batch[0].Result.(*types.Block) - assert.Equal(t, big.NewInt(42), block.Number()) - assert.Equal(t, common.HexToHash("0x123"), block.Hash()) - assert.Equal(t, big.NewInt(42), (*big.Int)(batch[1].Result.(*hexutil.Big))) -} -*/ - func TestEthClient_ErroringClient(t *testing.T) { t.Parallel() ctx := tests.Context(t) From 181a38b59414a2a4d98e0382402761d11959410f Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Fri, 28 Jun 2024 12:08:02 -0400 Subject: [PATCH 46/58] Generate mocks --- core/chains/evm/client/mocks/client.go | 30 -------------------------- 1 file changed, 30 deletions(-) diff --git a/core/chains/evm/client/mocks/client.go b/core/chains/evm/client/mocks/client.go index 555bc331227..df5e91a1674 100644 --- a/core/chains/evm/client/mocks/client.go +++ b/core/chains/evm/client/mocks/client.go @@ -208,36 +208,6 @@ func (_m *Client) CallContract(ctx context.Context, msg ethereum.CallMsg, blockN return r0, r1 } -// ChainID provides a mock function with given fields: -func (_m *Client) ChainID() (*big.Int, error) { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for ChainID") - } - - var r0 *big.Int - var r1 error - if rf, ok := ret.Get(0).(func() (*big.Int, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() *big.Int); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // CheckTxValidity provides a mock function with given fields: ctx, from, to, data func (_m *Client) CheckTxValidity(ctx context.Context, from common.Address, to common.Address, data []byte) *client.SendError { ret := _m.Called(ctx, from, to, data) From 70cea08090c33c167df1a45a51af7a70de3e8077 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 4 Jul 2024 11:46:06 -0400 Subject: [PATCH 47/58] fix tests --- common/client/mock_node_test.go | 229 ++++++++++++++++++ .../mock_pool_chain_info_provider_test.go | 70 ++++++ common/client/mock_rpc_client_test.go | 35 ++- common/client/mock_rpc_test.go | 37 +-- common/client/node_fsm_test.go | 10 +- common/client/types.go | 27 ++- core/chains/evm/client/rpc_client_test.go | 2 + 7 files changed, 377 insertions(+), 33 deletions(-) create mode 100644 common/client/mock_node_test.go create mode 100644 common/client/mock_pool_chain_info_provider_test.go diff --git a/common/client/mock_node_test.go b/common/client/mock_node_test.go new file mode 100644 index 00000000000..2a051c07ec1 --- /dev/null +++ b/common/client/mock_node_test.go @@ -0,0 +1,229 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +package client + +import ( + context "context" + + types "github.com/smartcontractkit/chainlink/v2/common/types" + mock "github.com/stretchr/testify/mock" +) + +// mockNode is an autogenerated mock type for the Node type +type mockNode[CHAIN_ID types.ID, RPC interface{}] struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, RPC]) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ConfiguredChainID provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, RPC]) ConfiguredChainID() CHAIN_ID { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ConfiguredChainID") + } + + var r0 CHAIN_ID + if rf, ok := ret.Get(0).(func() CHAIN_ID); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(CHAIN_ID) + } + + return r0 +} + +// HighestUserObservations provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, RPC]) HighestUserObservations() ChainInfo { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HighestUserObservations") + } + + var r0 ChainInfo + if rf, ok := ret.Get(0).(func() ChainInfo); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(ChainInfo) + } + + return r0 +} + +// Name provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, RPC]) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Order provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, RPC]) Order() int32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Order") + } + + var r0 int32 + if rf, ok := ret.Get(0).(func() int32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int32) + } + + return r0 +} + +// RPC provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, RPC]) RPC() RPC { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for RPC") + } + + var r0 RPC + if rf, ok := ret.Get(0).(func() RPC); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(RPC) + } + + return r0 +} + +// SetPoolChainInfoProvider provides a mock function with given fields: _a0 +func (_m *mockNode[CHAIN_ID, RPC]) SetPoolChainInfoProvider(_a0 PoolChainInfoProvider) { + _m.Called(_a0) +} + +// Start provides a mock function with given fields: _a0 +func (_m *mockNode[CHAIN_ID, RPC]) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// State provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, RPC]) State() NodeState { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for State") + } + + var r0 NodeState + if rf, ok := ret.Get(0).(func() NodeState); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(NodeState) + } + + return r0 +} + +// StateAndLatest provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, RPC]) StateAndLatest() (NodeState, ChainInfo) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for StateAndLatest") + } + + var r0 NodeState + var r1 ChainInfo + if rf, ok := ret.Get(0).(func() (NodeState, ChainInfo)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() NodeState); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(NodeState) + } + + if rf, ok := ret.Get(1).(func() ChainInfo); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(ChainInfo) + } + + return r0, r1 +} + +// String provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, RPC]) String() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for String") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// UnsubscribeAllExceptAliveLoop provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, RPC]) UnsubscribeAllExceptAliveLoop() { + _m.Called() +} + +// newMockNode creates a new instance of mockNode. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newMockNode[CHAIN_ID types.ID, RPC interface{}](t interface { + mock.TestingT + Cleanup(func()) +}) *mockNode[CHAIN_ID, RPC] { + mock := &mockNode[CHAIN_ID, RPC]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/client/mock_pool_chain_info_provider_test.go b/common/client/mock_pool_chain_info_provider_test.go new file mode 100644 index 00000000000..4e4955e7381 --- /dev/null +++ b/common/client/mock_pool_chain_info_provider_test.go @@ -0,0 +1,70 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +package client + +import mock "github.com/stretchr/testify/mock" + +// mockPoolChainInfoProvider is an autogenerated mock type for the PoolChainInfoProvider type +type mockPoolChainInfoProvider struct { + mock.Mock +} + +// HighestUserObservations provides a mock function with given fields: +func (_m *mockPoolChainInfoProvider) HighestUserObservations() ChainInfo { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HighestUserObservations") + } + + var r0 ChainInfo + if rf, ok := ret.Get(0).(func() ChainInfo); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(ChainInfo) + } + + return r0 +} + +// LatestChainInfo provides a mock function with given fields: +func (_m *mockPoolChainInfoProvider) LatestChainInfo() (int, ChainInfo) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LatestChainInfo") + } + + var r0 int + var r1 ChainInfo + if rf, ok := ret.Get(0).(func() (int, ChainInfo)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func() ChainInfo); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(ChainInfo) + } + + return r0, r1 +} + +// newMockPoolChainInfoProvider creates a new instance of mockPoolChainInfoProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newMockPoolChainInfoProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *mockPoolChainInfoProvider { + mock := &mockPoolChainInfoProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/client/mock_rpc_client_test.go b/common/client/mock_rpc_client_test.go index 9e89f22844d..80708c3df71 100644 --- a/common/client/mock_rpc_client_test.go +++ b/common/client/mock_rpc_client_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.2. DO NOT EDIT. +// Code generated by mockery v2.43.2. DO NOT EDIT. package client @@ -65,6 +65,39 @@ func (_m *MockRPCClient[CHAIN_ID, HEAD]) Dial(ctx context.Context) error { return r0 } +// DisconnectAll provides a mock function with given fields: +func (_m *MockRPCClient[CHAIN_ID, HEAD]) DisconnectAll() { + _m.Called() +} + +// GetInterceptedChainInfo provides a mock function with given fields: +func (_m *MockRPCClient[CHAIN_ID, HEAD]) GetInterceptedChainInfo() (ChainInfo, ChainInfo) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetInterceptedChainInfo") + } + + var r0 ChainInfo + var r1 ChainInfo + if rf, ok := ret.Get(0).(func() (ChainInfo, ChainInfo)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() ChainInfo); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(ChainInfo) + } + + if rf, ok := ret.Get(1).(func() ChainInfo); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(ChainInfo) + } + + return r0, r1 +} + // IsSyncing provides a mock function with given fields: ctx func (_m *MockRPCClient[CHAIN_ID, HEAD]) IsSyncing(ctx context.Context) (bool, error) { ret := _m.Called(ctx) diff --git a/common/client/mock_rpc_test.go b/common/client/mock_rpc_test.go index 81bac04547d..b2e65998785 100644 --- a/common/client/mock_rpc_test.go +++ b/common/client/mock_rpc_test.go @@ -665,34 +665,43 @@ func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS return r0 } -// SubscribeNewHead provides a mock function with given fields: ctx, channel -func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) SubscribeNewHead(ctx context.Context, channel chan<- HEAD) (types.Subscription, error) { - ret := _m.Called(ctx, channel) +// SubscribeNewHead provides a mock function with given fields: ctx +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, BATCH_ELEM]) SubscribeNewHead(ctx context.Context) (<-chan HEAD, types.Subscription, error) { + ret := _m.Called(ctx) if len(ret) == 0 { panic("no return value specified for SubscribeNewHead") } - var r0 types.Subscription - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, chan<- HEAD) (types.Subscription, error)); ok { - return rf(ctx, channel) + var r0 <-chan HEAD + var r1 types.Subscription + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (<-chan HEAD, types.Subscription, error)); ok { + return rf(ctx) } - if rf, ok := ret.Get(0).(func(context.Context, chan<- HEAD) types.Subscription); ok { - r0 = rf(ctx, channel) + if rf, ok := ret.Get(0).(func(context.Context) <-chan HEAD); ok { + r0 = rf(ctx) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(types.Subscription) + r0 = ret.Get(0).(<-chan HEAD) } } - if rf, ok := ret.Get(1).(func(context.Context, chan<- HEAD) error); ok { - r1 = rf(ctx, channel) + if rf, ok := ret.Get(1).(func(context.Context) types.Subscription); ok { + r1 = rf(ctx) } else { - r1 = ret.Error(1) + if ret.Get(1) != nil { + r1 = ret.Get(1).(types.Subscription) + } } - return r0, r1 + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 } // SubscribersCount provides a mock function with given fields: diff --git a/common/client/node_fsm_test.go b/common/client/node_fsm_test.go index a32a551183d..f6855dab548 100644 --- a/common/client/node_fsm_test.go +++ b/common/client/node_fsm_test.go @@ -53,33 +53,33 @@ func TestUnit_Node_StateTransitions(t *testing.T) { const destinationState = NodeStateOutOfSync allowedStates := []NodeState{NodeStateAlive} rpc := NewMockRPCClient[types.ID, Head](t) - rpc.On("UnsubscribeAllExcept", nil, nil).Once() + rpc.On("DisconnectAll").Once() testTransition(t, rpc, testNode.transitionToOutOfSync, destinationState, allowedStates...) }) t.Run("transitionToUnreachable", func(t *testing.T) { const destinationState = NodeStateUnreachable allowedStates := []NodeState{NodeStateUndialed, NodeStateDialed, NodeStateAlive, NodeStateOutOfSync, NodeStateInvalidChainID, NodeStateSyncing} rpc := NewMockRPCClient[types.ID, Head](t) - rpc.On("UnsubscribeAllExcept", nil, nil).Times(len(allowedStates)) + rpc.On("DisconnectAll") testTransition(t, rpc, testNode.transitionToUnreachable, destinationState, allowedStates...) }) t.Run("transitionToInvalidChain", func(t *testing.T) { const destinationState = NodeStateInvalidChainID allowedStates := []NodeState{NodeStateDialed, NodeStateOutOfSync, NodeStateSyncing} rpc := NewMockRPCClient[types.ID, Head](t) - rpc.On("UnsubscribeAllExcept", nil, nil).Times(len(allowedStates)) + rpc.On("DisconnectAll") testTransition(t, rpc, testNode.transitionToInvalidChainID, destinationState, allowedStates...) }) t.Run("transitionToSyncing", func(t *testing.T) { const destinationState = NodeStateSyncing allowedStates := []NodeState{NodeStateDialed, NodeStateOutOfSync, NodeStateInvalidChainID} rpc := NewMockRPCClient[types.ID, Head](t) - rpc.On("UnsubscribeAllExcept", nil, nil).Times(len(allowedStates)) + rpc.On("DisconnectAll") testTransition(t, rpc, testNode.transitionToSyncing, destinationState, allowedStates...) }) t.Run("transitionToSyncing panics if nodeIsSyncing is disabled", func(t *testing.T) { rpc := NewMockRPCClient[types.ID, Head](t) - rpc.On("UnsubscribeAllExcept", nil, nil).Once() + rpc.On("DisconnectAll").Once() node := newTestNode(t, testNodeOpts{rpc: rpc}) node.setState(NodeStateDialed) fn := new(fnMock) diff --git a/common/client/types.go b/common/client/types.go index 00781726ee6..a8113d1b05c 100644 --- a/common/client/types.go +++ b/common/client/types.go @@ -10,18 +10,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/common/types" ) -// PoolChainInfoProvider - provides aggregation of nodes pool ChainInfo -// -//go:generate mockery --quiet --name PoolChainInfoProvider --structname mockPoolChainInfoProvider --filename "mock_pool_chain_info_provider_test.go" --inpackage --case=underscore -type PoolChainInfoProvider interface { - // LatestChainInfo - returns number of live nodes available in the pool, so we can prevent the last alive node in a pool from being. - // Return highest latest ChainInfo within the alive nodes. E.g. most recent block number and highest block number - // observed by Node A are 10 and 15; Node B - 12 and 14. This method will return 12. - LatestChainInfo() (int, ChainInfo) - // HighestChainInfo - returns highest ChainInfo ever observed by any node in the pool. - HighestChainInfo() ChainInfo -} - // RPCClient includes all the necessary generalized RPC methods along with any additional chain-specific methods. // //go:generate mockery --quiet --name RPCClient --structname MockRPCClient --filename "mock_rpc_client_test.go" --inpackage --case=underscore @@ -43,8 +31,21 @@ type RPCClient[ IsSyncing(ctx context.Context) (bool, error) // UnsubscribeAllExcept - close all subscriptions except `subs` UnsubscribeAllExcept(subs ...types.Subscription) + // DisconnectAll - cancels all inflight requests, terminates all subscriptions and resets latest ChainInfo. + DisconnectAll() // Close - closes all subscriptions and aborts all RPC calls Close() + // GetInterceptedChainInfo - returns latest and highest observed by application layer ChainInfo. + // latest ChainInfo is the most recent value received within a NodeClient's current lifecycle between Dial and DisconnectAll. + // highestUserObservations ChainInfo is the highest ChainInfo observed excluding health checks calls. + // Its values must not be reset. + // The results of corresponding calls, to get the most recent head and the latest finalized head, must be + // intercepted and reflected in ChainInfo before being returned to a caller. Otherwise, MultiNode is not able to + // provide repeatable read guarantee. + // DisconnectAll must reset latest ChainInfo to default value. + // Ensure implementation does not have a race condition when values are reset before request completion and as + // a result latest ChainInfo contains information from the previous cycle. + GetInterceptedChainInfo() (latest, highestUserObservations ChainInfo) } // RPC includes all the necessary methods for a multi-node client to interact directly with any RPC endpoint. @@ -192,7 +193,7 @@ type connection[ ] interface { ChainID(ctx context.Context) (CHAIN_ID, error) Dial(ctx context.Context) error - SubscribeNewHead(ctx context.Context, channel chan<- HEAD) (types.Subscription, error) + SubscribeNewHead(ctx context.Context) (<-chan HEAD, types.Subscription, error) } // PoolChainInfoProvider - provides aggregation of nodes pool ChainInfo diff --git a/core/chains/evm/client/rpc_client_test.go b/core/chains/evm/client/rpc_client_test.go index 682c4352457..024dac17b14 100644 --- a/core/chains/evm/client/rpc_client_test.go +++ b/core/chains/evm/client/rpc_client_test.go @@ -1,5 +1,6 @@ package client_test +/* TODO: Implement tests for RPCClient using new interface import ( "context" "encoding/json" @@ -298,3 +299,4 @@ func TestRPCClient_LatestFinalizedBlock(t *testing.T) { assert.Equal(t, int64(0), latest.BlockNumber) assert.Equal(t, int64(0), latest.FinalizedBlockNumber) } +*/ From c9a6c12a6ae857889e817b0fc861e1ace116594c Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 4 Jul 2024 12:33:44 -0400 Subject: [PATCH 48/58] Use UnsubscribeAllExcept --- common/client/node.go | 7 -- common/client/node_fsm.go | 8 +- common/client/node_fsm_test.go | 12 +-- common/client/node_lifecycle_test.go | 85 ++++++++++----------- common/client/types.go | 2 - core/chains/evm/client/chain_client_test.go | 56 -------------- core/chains/evm/client/rpc_client.go | 66 ---------------- 7 files changed, 53 insertions(+), 183 deletions(-) diff --git a/common/client/node.go b/common/client/node.go index 547faa0de76..9720ecdd109 100644 --- a/common/client/node.go +++ b/common/client/node.go @@ -313,13 +313,6 @@ func (n *node[CHAIN_ID, HEAD, RPC]) verifyConn(ctx context.Context, lggr logger. return NodeStateAlive } -// disconnectAll disconnects all clients connected to the node -// WARNING: NOT THREAD-SAFE -// This must be called from within the n.stateMu lock -func (n *node[CHAIN_ID, HEAD, RPC]) disconnectAll() { - n.rpc.DisconnectAll() -} - func (n *node[CHAIN_ID, HEAD, RPC]) Order() int32 { return n.order } diff --git a/common/client/node_fsm.go b/common/client/node_fsm.go index f1b5e838dec..fc7894a1964 100644 --- a/common/client/node_fsm.go +++ b/common/client/node_fsm.go @@ -257,7 +257,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToOutOfSync(fn func()) { } switch n.state { case NodeStateAlive: - n.disconnectAll() + n.UnsubscribeAllExceptAliveLoop() n.state = NodeStateOutOfSync default: panic(transitionFail(n.state, NodeStateOutOfSync)) @@ -282,7 +282,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToUnreachable(fn func()) { } switch n.state { case NodeStateUndialed, NodeStateDialed, NodeStateAlive, NodeStateOutOfSync, NodeStateInvalidChainID, NodeStateSyncing: - n.disconnectAll() + n.UnsubscribeAllExceptAliveLoop() n.state = NodeStateUnreachable default: panic(transitionFail(n.state, NodeStateUnreachable)) @@ -325,7 +325,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToInvalidChainID(fn func()) { } switch n.state { case NodeStateDialed, NodeStateOutOfSync, NodeStateSyncing: - n.disconnectAll() + n.UnsubscribeAllExceptAliveLoop() n.state = NodeStateInvalidChainID default: panic(transitionFail(n.state, NodeStateInvalidChainID)) @@ -350,7 +350,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToSyncing(fn func()) { } switch n.state { case NodeStateDialed, NodeStateOutOfSync, NodeStateInvalidChainID: - n.disconnectAll() + n.UnsubscribeAllExceptAliveLoop() n.state = NodeStateSyncing default: panic(transitionFail(n.state, NodeStateSyncing)) diff --git a/common/client/node_fsm_test.go b/common/client/node_fsm_test.go index f6855dab548..973d40e6ba7 100644 --- a/common/client/node_fsm_test.go +++ b/common/client/node_fsm_test.go @@ -5,6 +5,8 @@ import ( "strconv" "testing" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/assert" "github.com/smartcontractkit/chainlink/v2/common/types" @@ -53,33 +55,33 @@ func TestUnit_Node_StateTransitions(t *testing.T) { const destinationState = NodeStateOutOfSync allowedStates := []NodeState{NodeStateAlive} rpc := NewMockRPCClient[types.ID, Head](t) - rpc.On("DisconnectAll").Once() + rpc.On("UnsubscribeAllExcept", mock.Anything) testTransition(t, rpc, testNode.transitionToOutOfSync, destinationState, allowedStates...) }) t.Run("transitionToUnreachable", func(t *testing.T) { const destinationState = NodeStateUnreachable allowedStates := []NodeState{NodeStateUndialed, NodeStateDialed, NodeStateAlive, NodeStateOutOfSync, NodeStateInvalidChainID, NodeStateSyncing} rpc := NewMockRPCClient[types.ID, Head](t) - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) testTransition(t, rpc, testNode.transitionToUnreachable, destinationState, allowedStates...) }) t.Run("transitionToInvalidChain", func(t *testing.T) { const destinationState = NodeStateInvalidChainID allowedStates := []NodeState{NodeStateDialed, NodeStateOutOfSync, NodeStateSyncing} rpc := NewMockRPCClient[types.ID, Head](t) - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) testTransition(t, rpc, testNode.transitionToInvalidChainID, destinationState, allowedStates...) }) t.Run("transitionToSyncing", func(t *testing.T) { const destinationState = NodeStateSyncing allowedStates := []NodeState{NodeStateDialed, NodeStateOutOfSync, NodeStateInvalidChainID} rpc := NewMockRPCClient[types.ID, Head](t) - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) testTransition(t, rpc, testNode.transitionToSyncing, destinationState, allowedStates...) }) t.Run("transitionToSyncing panics if nodeIsSyncing is disabled", func(t *testing.T) { rpc := NewMockRPCClient[types.ID, Head](t) - rpc.On("DisconnectAll").Once() + rpc.On("UnsubscribeAllExcept", mock.Anything) node := newTestNode(t, testNodeOpts{rpc: rpc}) node.setState(NodeStateDialed) fn := new(fnMock) diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go index 7021a001a6b..980b1482325 100644 --- a/common/client/node_lifecycle_test.go +++ b/common/client/node_lifecycle_test.go @@ -49,7 +49,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { expectedError := errors.New("failed to subscribe to rpc") rpc.On("SubscribeToHeads", mock.Anything).Return(nil, nil, expectedError).Once() - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -74,7 +74,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { close(errChan) sub.On("Err").Return((<-chan error)(errChan)).Once() sub.On("Unsubscribe").Once() - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) rpc.On("SubscribeToHeads", mock.Anything).Return(nil, sub, nil).Once() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() @@ -122,7 +122,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(node.chainID, nil) @@ -172,7 +172,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { // disconnects all on transfer to unreachable // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) node.declareAlive() tests.AssertLogCountEventually(t, observedLogs, fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", node.String()), pollFailureThreshold) tests.AssertEventually(t, func() bool { @@ -221,7 +221,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() rpc.On("Ping", mock.Anything).Return(nil) - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) const mostRecentBlock = 20 rpc.On("GetInterceptedChainInfo").Return(ChainInfo{BlockNumber: mostRecentBlock}, ChainInfo{BlockNumber: 30}) poolInfo := newMockPoolChainInfoProvider(t) @@ -295,7 +295,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { t.Parallel() rpc := NewMockRPCClient[types.ID, Head](t) rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) node := newSubscribedNode(t, testNodeOpts{ config: testNodeConfig{}, chainConfig: clientMocks.ChainConfig{ @@ -346,7 +346,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { t.Run("rpc closed head channel", func(t *testing.T) { t.Parallel() rpc := NewMockRPCClient[types.ID, Head](t) - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) sub.On("Unsubscribe").Once() @@ -414,7 +414,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { expectedError := errors.New("failed to subscribe to finalized heads") rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return(nil, sub, expectedError).Once() rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Maybe() - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ config: testNodeConfig{ @@ -572,7 +572,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) outOfSyncSubscription := mocks.NewSubscription(t) outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) @@ -602,7 +602,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { expectedError := errors.New("failed to dial rpc") // might be called again in unreachable loop, so no need to set once rpc.On("Dial", mock.Anything).Return(expectedError) - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) node.declareOutOfSync(stubIsOutOfSync) tests.AssertEventually(t, func() bool { return node.State() == NodeStateUnreachable @@ -620,7 +620,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() // for unreachable rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) expectedError := errors.New("failed to get chain ID") // might be called multiple times rpc.On("ChainID", mock.Anything).Return(types.NewIDFromInt(0), expectedError) @@ -642,7 +642,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { // one for out-of-sync & one for invalid chainID rpc.On("Dial", mock.Anything).Return(nil).Twice() - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) // might be called multiple times rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) node.declareOutOfSync(stubIsOutOfSync) @@ -663,7 +663,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) // might be called multiple times rpc.On("IsSyncing", mock.Anything).Return(true, nil) node.declareOutOfSync(stubIsOutOfSync) @@ -684,7 +684,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { // one for out-of-sync rpc.On("Dial", mock.Anything).Return(nil).Once() - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) // for unreachable rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() @@ -710,7 +710,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { expectedError := errors.New("failed to subscribe") rpc.On("SubscribeToHeads", mock.Anything).Return(nil, nil, expectedError).Once() rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) node.declareOutOfSync(stubIsOutOfSync) tests.AssertEventually(t, func() bool { return node.State() == NodeStateUnreachable @@ -730,8 +730,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() - rpc.On("DisconnectAll") - + rpc.On("UnsubscribeAllExcept", mock.Anything) sub := mocks.NewSubscription(t) errChan := make(chan error, 1) errChan <- errors.New("subscription was terminate") @@ -759,7 +758,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) @@ -790,7 +789,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) outOfSyncSubscription := mocks.NewSubscription(t) outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) @@ -836,7 +835,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) outOfSyncSubscription := mocks.NewSubscription(t) outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) @@ -883,7 +882,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")) - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) node.declareUnreachable() tests.AssertLogCountEventually(t, observedLogs, "Failed to redial RPC node; still unreachable", 2) }) @@ -900,7 +899,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(nil) - rpc.On("DisconnectAll").Once() + rpc.On("UnsubscribeAllExcept", mock.Anything).Once() rpc.On("ChainID", mock.Anything).Run(func(_ mock.Arguments) { assert.Equal(t, NodeStateDialed, node.State()) }).Return(nodeChainID, errors.New("failed to get chain id")) @@ -920,7 +919,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) node.declareUnreachable() tests.AssertEventually(t, func() bool { @@ -941,7 +940,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(nil) - rpc.On("DisconnectAll").Once() + rpc.On("UnsubscribeAllExcept", mock.Anything).Once() rpc.On("ChainID", mock.Anything).Run(func(_ mock.Arguments) { assert.Equal(t, NodeStateDialed, node.State()) }).Return(nodeChainID, nil) @@ -963,7 +962,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) rpc.On("IsSyncing", mock.Anything).Return(true, nil) - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) setupRPCForAliveLoop(t, rpc) @@ -990,7 +989,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { sub.On("Err").Return(nil) sub.On("Unsubscribe").Once() rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() - rpc.On("DisconnectAll").Once() + rpc.On("UnsubscribeAllExcept", mock.Anything).Once() setupRPCForAliveLoop(t, rpc) @@ -1011,7 +1010,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) - rpc.On("DisconnectAll").Once() + rpc.On("UnsubscribeAllExcept", mock.Anything).Once() setupRPCForAliveLoop(t, rpc) @@ -1049,7 +1048,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")) - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) node.declareInvalidChainID() tests.AssertEventually(t, func() bool { @@ -1072,7 +1071,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { // once for chainID and maybe another one for unreachable rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) node.declareInvalidChainID() tests.AssertLogEventually(t, observedLogs, "Failed to verify chain ID for node") @@ -1095,7 +1094,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) node.declareInvalidChainID() tests.AssertLogCountEventually(t, observedLogs, "Failed to verify RPC node; remote endpoint returned the wrong chain ID", 2) @@ -1123,7 +1122,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { rpc.On("SubscribeToHeads", mock.Anything).Return(headCh, sub, nil) rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() - rpc.On("DisconnectAll").Once() + rpc.On("UnsubscribeAllExcept", mock.Anything).Once() setupRPCForAliveLoop(t, rpc) @@ -1148,7 +1147,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() rpc.On("IsSyncing", mock.Anything).Return(false, nil).Once() - rpc.On("DisconnectAll").Once() + rpc.On("UnsubscribeAllExcept", mock.Anything).Once() sub := mocks.NewSubscription(t) sub.On("Err").Return(nil) sub.On("Unsubscribe").Once() @@ -1187,7 +1186,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")) // disconnects all on transfer to unreachable - rpc.On("DisconnectAll").Once() + rpc.On("UnsubscribeAllExcept", mock.Anything).Once() err := node.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertLogEventually(t, observedLogs, "Dial failed: Node is unreachable") @@ -1208,7 +1207,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(nil) - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) rpc.On("ChainID", mock.Anything).Run(func(_ mock.Arguments) { assert.Equal(t, NodeStateDialed, node.State()) }).Return(nodeChainID, errors.New("failed to get chain id")) @@ -1232,7 +1231,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(nil) - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) // disconnects all on transfer to unreachable err := node.Start(tests.Context(t)) @@ -1255,7 +1254,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(nil).Once() - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) rpc.On("ChainID", mock.Anything).Run(func(_ mock.Arguments) { assert.Equal(t, NodeStateDialed, node.State()) }).Return(nodeChainID, nil).Once() @@ -1282,7 +1281,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(nil) - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) rpc.On("IsSyncing", mock.Anything).Return(true, nil) // disconnects all on transfer to unreachable @@ -1514,7 +1513,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")) - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) node.declareSyncing() tests.AssertEventually(t, func() bool { return node.State() == NodeStateUnreachable @@ -1533,7 +1532,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("ChainID", mock.Anything).Return(nodeChainID, errors.New("failed to get chain id")) - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) // once for syncing and maybe another one for unreachable rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() @@ -1557,7 +1556,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(nil).Twice() - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) node.declareSyncing() tests.AssertLogCountEventually(t, observedLogs, "Failed to verify RPC node; remote endpoint returned the wrong chain ID", 2) @@ -1583,7 +1582,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { rpc.On("IsSyncing", mock.Anything).Return(false, errors.New("failed to check if syncing")).Once() rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) node.declareSyncing() tests.AssertLogEventually(t, observedLogs, "Unexpected error while verifying RPC node synchronization status") tests.AssertEventually(t, func() bool { @@ -1605,7 +1604,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() rpc.On("IsSyncing", mock.Anything).Return(true, nil) rpc.On("Dial", mock.Anything).Return(nil).Once() - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) node.declareSyncing() tests.AssertLogCountEventually(t, observedLogs, "Verification failed: Node is syncing", 2) tests.AssertEventually(t, func() bool { @@ -1623,7 +1622,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(nil).Once() - rpc.On("DisconnectAll") + rpc.On("UnsubscribeAllExcept", mock.Anything) rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() rpc.On("IsSyncing", mock.Anything).Return(true, nil).Once() rpc.On("IsSyncing", mock.Anything).Return(false, nil).Once() diff --git a/common/client/types.go b/common/client/types.go index a8113d1b05c..06f43984ba3 100644 --- a/common/client/types.go +++ b/common/client/types.go @@ -31,8 +31,6 @@ type RPCClient[ IsSyncing(ctx context.Context) (bool, error) // UnsubscribeAllExcept - close all subscriptions except `subs` UnsubscribeAllExcept(subs ...types.Subscription) - // DisconnectAll - cancels all inflight requests, terminates all subscriptions and resets latest ChainInfo. - DisconnectAll() // Close - closes all subscriptions and aborts all RPC calls Close() // GetInterceptedChainInfo - returns latest and highest observed by application layer ChainInfo. diff --git a/core/chains/evm/client/chain_client_test.go b/core/chains/evm/client/chain_client_test.go index 3ea52495f9c..09f402389eb 100644 --- a/core/chains/evm/client/chain_client_test.go +++ b/core/chains/evm/client/chain_client_test.go @@ -743,62 +743,6 @@ func TestEthClient_SubscribeNewHead(t *testing.T) { sub.Unsubscribe() } -/* -TODO: Do we need this? - - func newMockRpc(t *testing.T) *mocks.RPCClient { - mockRpc := mocks.NewRPCClient(t) - mockRpc.On("Dial", mock.Anything).Return(nil).Once() - mockRpc.On("Close").Return(nil).Once() - mockRpc.On("ChainID", mock.Anything).Return(testutils.FixtureChainID, nil).Once() - // node does not always manage to fully setup aliveLoop, so we have to make calls optional to avoid flakes - mockRpc.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(client.NewMockSubscription(), nil).Maybe() - mockRpc.On("SetAliveLoopSub", mock.Anything).Return().Maybe() - return mockRpc - } - - func TestChainClient_BatchCallContext(t *testing.T) { - t.Parallel() - - t.Run("batch requests return errors", func(t *testing.T) { - ctx := tests.Context(t) - rpcError := errors.New("something went wrong") - blockNumResp := "" - blockNum := hexutil.EncodeBig(big.NewInt(42)) - b := []rpc.BatchElem{ - { - Method: "eth_getBlockByNumber", - Args: []interface{}{blockNum, true}, - Result: &types.Block{}, - }, - { - Method: "eth_blockNumber", - Result: &blockNumResp, - }, - } - - mockRpc := newMockRpc(t) - mockRpc.On("GetInterceptedChainInfo").Return(commonclient.ChainInfo{}, commonclient.ChainInfo{}).Maybe() - mockRpc.On("BatchCallContext", mock.Anything, b).Run(func(args mock.Arguments) { - reqs := args.Get(1).([]rpc.BatchElem) - for i := 0; i < len(reqs); i++ { - elem := &reqs[i] - elem.Error = rpcError - } - }).Return(nil).Once() - - client := client.NewChainClientWithMockedRpc(t, commonclient.NodeSelectionModeRoundRobin, time.Second*0, time.Second*0, testutils.FixtureChainID, mockRpc) - err := client.Dial(ctx) - require.NoError(t, err) - - err = client.BatchCallContext(ctx, b) - require.NoError(t, err) - for _, elem := range b { - require.ErrorIs(t, rpcError, elem.Error) - } - }) - } -*/ func TestEthClient_ErroringClient(t *testing.T) { t.Parallel() ctx := tests.Context(t) diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index b0761af55ed..a221fa1c781 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -351,27 +351,6 @@ func (r *RpcClient) registerSub(sub ethereum.Subscription, stopInFLightCh chan s return nil } -// DisconnectAll disconnects all clients connected to the rpcClient -func (r *RpcClient) DisconnectAll() { - r.stateMu.Lock() - defer r.stateMu.Unlock() - if r.ws.rpc != nil { - r.ws.rpc.Close() - } - r.cancelInflightRequests() - r.unsubscribeAll() - r.latestChainInfo = commonclient.ChainInfo{} -} - -// unsubscribeAll unsubscribes all subscriptions -func (r *RpcClient) unsubscribeAll() { - r.stateMu.Lock() - defer r.stateMu.Unlock() - for _, sub := range r.subs { - sub.Unsubscribe() - } - r.subs = nil -} func (r *RpcClient) SetAliveLoopSub(sub commontypes.Subscription) { r.stateMu.Lock() defer r.stateMu.Unlock() @@ -385,19 +364,6 @@ func (r *RpcClient) SubscribersCount() int32 { return int32(len(r.subs)) } -// UnsubscribeAllExceptAliveLoop disconnects all subscriptions to the node except the alive loop subscription -// while holding the n.stateMu lock -func (r *RpcClient) UnsubscribeAllExceptAliveLoop() { - r.stateMu.Lock() - defer r.stateMu.Unlock() - - for _, s := range r.subs { - if s != r.aliveLoopSub { - s.Unsubscribe() - } - } -} - // RPC wrappers // CallContext implementation @@ -467,38 +433,6 @@ func (r *RpcClient) subscribe(ctx context.Context, channel chan<- *evmtypes.Head return sub, r.wrapWS(err) } -// TODO: Remove this -func (r *RpcClient) SubscribeNewHead(ctx context.Context, channel chan<- *evmtypes.Head) (_ commontypes.Subscription, err error) { - ctx, cancel, chStopInFlight, ws, _ := r.acquireQueryCtx(ctx) - defer cancel() - args := []interface{}{"newHeads"} - lggr := r.newRqLggr().With("args", args) - - lggr.Debug("RPC call: evmclient.Client#EthSubscribe") - start := time.Now() - defer func() { - duration := time.Since(start) - r.logResult(lggr, err, duration, r.getRPCDomain(), "EthSubscribe") - err = r.wrapWS(err) - }() - subForwarder := newSubForwarder(channel, func(head *evmtypes.Head) *evmtypes.Head { - head.EVMChainID = ubig.New(r.chainID) - r.onNewHead(ctx, chStopInFlight, head) - return head - }, r.wrapRPCClientError) - err = subForwarder.start(ws.rpc.EthSubscribe(ctx, subForwarder.srcCh, args...)) - if err != nil { - return - } - - err = r.registerSub(subForwarder, chStopInFlight) - if err != nil { - return - } - - return subForwarder, nil -} - // GethClient wrappers func (r *RpcClient) TransactionReceipt(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) { From d5a1a8cffd3f722d95325774f0e5d6dc517e9e93 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 4 Jul 2024 12:51:17 -0400 Subject: [PATCH 49/58] Fix rpc client tests --- core/chains/evm/client/rpc_client.go | 8 +++- core/chains/evm/client/rpc_client_test.go | 46 +++++++++++------------ 2 files changed, 29 insertions(+), 25 deletions(-) diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index a221fa1c781..9ce0f7d95f6 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -151,11 +151,15 @@ func NewRPCClient( } func (r *RpcClient) SubscribeToHeads(ctx context.Context) (<-chan *evmtypes.Head, commontypes.Subscription, error) { + ctx, cancel, chStopInFlight, _, _ := r.acquireQueryCtx(ctx) + defer cancel() + newChainIDSubForwarder := func(chainID *big.Int, ch chan<- *evmtypes.Head) *subForwarder[*evmtypes.Head] { return newSubForwarder(ch, func(head *evmtypes.Head) *evmtypes.Head { head.EVMChainID = ubig.New(chainID) + r.onNewHead(ctx, chStopInFlight, head) return head - }, nil) + }, r.wrapRPCClientError) } ch := make(chan *evmtypes.Head) @@ -213,6 +217,8 @@ func (r *RpcClient) UnsubscribeAllExcept(subs ...commontypes.Subscription) { sub.Unsubscribe() } } + // TODO: Reset latest? + r.latestChainInfo = commonclient.ChainInfo{} } // Not thread-safe, pure dial. diff --git a/core/chains/evm/client/rpc_client_test.go b/core/chains/evm/client/rpc_client_test.go index 024dac17b14..dae625f1897 100644 --- a/core/chains/evm/client/rpc_client_test.go +++ b/core/chains/evm/client/rpc_client_test.go @@ -1,6 +1,5 @@ package client_test -/* TODO: Implement tests for RPCClient using new interface import ( "context" "encoding/json" @@ -8,6 +7,7 @@ import ( "math/big" "net/url" "testing" + "time" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/core/types" @@ -16,10 +16,8 @@ import ( "github.com/tidwall/gjson" "go.uber.org/zap" - "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" - "github.com/smartcontractkit/chainlink-common/pkg/logger" - + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" commonclient "github.com/smartcontractkit/chainlink/v2/common/client" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/testutils" @@ -42,6 +40,10 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { chainId := big.NewInt(123456) lggr := logger.Test(t) + nodePoolCfg := client.TestNodePoolConfig{ + NodeFinalizedBlockPollInterval: 1 * time.Second, + } + serverCallBack := func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { if method == "eth_unsubscribe" { resp.Result = "true" @@ -57,7 +59,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { server := testutils.NewWSServer(t, chainId, serverCallBack) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(nodePoolCfg, lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) // set to default values @@ -69,8 +71,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { assert.Equal(t, int64(0), highestUserObservations.FinalizedBlockNumber) assert.Nil(t, highestUserObservations.TotalDifficulty) - ch := make(chan *evmtypes.Head) - sub, err := rpc.SubscribeNewHead(tests.Context(t), ch) + ch, sub, err := rpc.SubscribeToHeads(tests.Context(t)) require.NoError(t, err) defer sub.Unsubscribe() go server.MustWriteBinaryMessageSync(t, makeNewHeadWSMessage(&evmtypes.Head{Number: 256, TotalDifficulty: big.NewInt(1000)})) @@ -93,8 +94,8 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { assertHighestUserObservations(highestUserObservations) - // DisconnectAll resets latest - rpc.DisconnectAll() + // UnsubscribeAllExcept resets latest + rpc.UnsubscribeAllExcept() latest, highestUserObservations = rpc.GetInterceptedChainInfo() assert.Equal(t, int64(0), latest.BlockNumber) @@ -107,11 +108,10 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { server := testutils.NewWSServer(t, chainId, serverCallBack) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(nodePoolCfg, lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) - ch := make(chan *evmtypes.Head) - sub, err := rpc.SubscribeNewHead(commonclient.CtxAddHealthCheckFlag(tests.Context(t)), ch) + ch, sub, err := rpc.SubscribeToHeads(commonclient.CtxAddHealthCheckFlag(tests.Context(t))) require.NoError(t, err) defer sub.Unsubscribe() go server.MustWriteBinaryMessageSync(t, makeNewHeadWSMessage(&evmtypes.Head{Number: 256, TotalDifficulty: big.NewInt(1000)})) @@ -130,11 +130,10 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { t.Run("Block's chain ID matched configured", func(t *testing.T) { server := testutils.NewWSServer(t, chainId, serverCallBack) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(nodePoolCfg, lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) - ch := make(chan *evmtypes.Head) - sub, err := rpc.SubscribeNewHead(tests.Context(t), ch) + ch, sub, err := rpc.SubscribeToHeads(tests.Context(t)) require.NoError(t, err) defer sub.Unsubscribe() go server.MustWriteBinaryMessageSync(t, makeNewHeadWSMessage(&evmtypes.Head{Number: 256})) @@ -147,20 +146,20 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { }) wsURL := server.WSURL() observedLggr, observed := logger.TestObserved(t, zap.DebugLevel) - rpc := client.NewRPCClient(observedLggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(nodePoolCfg, observedLggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) require.NoError(t, rpc.Dial(ctx)) server.Close() - _, err := rpc.SubscribeNewHead(ctx, make(chan *evmtypes.Head)) + _, _, err := rpc.SubscribeToHeads(ctx) require.ErrorContains(t, err, "RPCClient returned error (rpc)") tests.AssertLogEventually(t, observed, "evmclient.Client#EthSubscribe RPC call failure") }) t.Run("Subscription error is properly wrapper", func(t *testing.T) { server := testutils.NewWSServer(t, chainId, serverCallBack) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(nodePoolCfg, lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) - sub, err := rpc.SubscribeNewHead(ctx, make(chan *evmtypes.Head)) + _, sub, err := rpc.SubscribeToHeads(ctx) require.NoError(t, err) go server.MustWriteBinaryMessageSync(t, "invalid msg") select { @@ -185,7 +184,7 @@ func TestRPCClient_SubscribeFilterLogs(t *testing.T) { }) wsURL := server.WSURL() observedLggr, observed := logger.TestObserved(t, zap.DebugLevel) - rpc := client.NewRPCClient(observedLggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(client.TestNodePoolConfig{}, observedLggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) require.NoError(t, rpc.Dial(ctx)) server.Close() _, err := rpc.SubscribeFilterLogs(ctx, ethereum.FilterQuery{}, make(chan types.Log)) @@ -202,7 +201,7 @@ func TestRPCClient_SubscribeFilterLogs(t *testing.T) { return resp }) wsURL := server.WSURL() - rpc := client.NewRPCClient(lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(client.TestNodePoolConfig{}, lggr, *wsURL, nil, "rpc", 1, chainId, commonclient.Primary) defer rpc.Close() require.NoError(t, rpc.Dial(ctx)) sub, err := rpc.SubscribeFilterLogs(ctx, ethereum.FilterQuery{}, make(chan types.Log)) @@ -251,7 +250,7 @@ func TestRPCClient_LatestFinalizedBlock(t *testing.T) { } server := createRPCServer() - rpc := client.NewRPCClient(lggr, *server.URL, nil, "rpc", 1, chainId, commonclient.Primary) + rpc := client.NewRPCClient(client.TestNodePoolConfig{}, lggr, *server.URL, nil, "rpc", 1, chainId, commonclient.Primary) require.NoError(t, rpc.Dial(ctx)) defer rpc.Close() server.Head = &evmtypes.Head{Number: 128} @@ -291,7 +290,7 @@ func TestRPCClient_LatestFinalizedBlock(t *testing.T) { assert.Equal(t, int64(256), latest.FinalizedBlockNumber) // DisconnectAll resets latest ChainInfo - rpc.DisconnectAll() + rpc.UnsubscribeAllExcept() latest, highestUserObservations = rpc.GetInterceptedChainInfo() assert.Equal(t, int64(0), highestUserObservations.BlockNumber) assert.Equal(t, int64(128), highestUserObservations.FinalizedBlockNumber) @@ -299,4 +298,3 @@ func TestRPCClient_LatestFinalizedBlock(t *testing.T) { assert.Equal(t, int64(0), latest.BlockNumber) assert.Equal(t, int64(0), latest.FinalizedBlockNumber) } -*/ From a50bb2210374da44199871955b1d2999c3744637 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 4 Jul 2024 13:31:07 -0400 Subject: [PATCH 50/58] Address comments --- common/client/multi_node.go | 48 ++++++++++----------- common/client/node_lifecycle_test.go | 27 ++---------- common/client/node_selector.go | 2 +- common/client/node_selector_highest_head.go | 4 +- core/chains/evm/client/rpc_client.go | 9 +--- 5 files changed, 32 insertions(+), 58 deletions(-) diff --git a/common/client/multi_node.go b/common/client/multi_node.go index d929283af03..bb7ff808365 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -35,15 +35,15 @@ var ( // It also handles multiple node RPC connections simultaneously. type MultiNode[ CHAIN_ID types.ID, - RPC_CLIENT any, + RPC any, ] struct { services.StateMachine - primaryNodes []Node[CHAIN_ID, RPC_CLIENT] - sendOnlyNodes []SendOnlyNode[CHAIN_ID, RPC_CLIENT] + primaryNodes []Node[CHAIN_ID, RPC] + sendOnlyNodes []SendOnlyNode[CHAIN_ID, RPC] chainID CHAIN_ID lggr logger.SugaredLogger selectionMode string - nodeSelector NodeSelector[CHAIN_ID, RPC_CLIENT] + nodeSelector NodeSelector[CHAIN_ID, RPC] leaseDuration time.Duration leaseTicker *time.Ticker chainFamily string @@ -51,7 +51,7 @@ type MultiNode[ deathDeclarationDelay time.Duration activeMu sync.RWMutex - activeNode Node[CHAIN_ID, RPC_CLIENT] + activeNode Node[CHAIN_ID, RPC] chStop services.StopChan wg sync.WaitGroup @@ -59,22 +59,22 @@ type MultiNode[ func NewMultiNode[ CHAIN_ID types.ID, - RPC_CLIENT any, + RPC any, ]( lggr logger.Logger, selectionMode string, // type of the "best" RPC selector (e.g HighestHead, RoundRobin, etc.) leaseDuration time.Duration, // defines interval on which new "best" RPC should be selected - primaryNodes []Node[CHAIN_ID, RPC_CLIENT], - sendOnlyNodes []SendOnlyNode[CHAIN_ID, RPC_CLIENT], + primaryNodes []Node[CHAIN_ID, RPC], + sendOnlyNodes []SendOnlyNode[CHAIN_ID, RPC], chainID CHAIN_ID, // configured chain ID (used to verify that passed primaryNodes belong to the same chain) chainFamily string, // name of the chain family - used in the metrics deathDeclarationDelay time.Duration, -) *MultiNode[CHAIN_ID, RPC_CLIENT] { +) *MultiNode[CHAIN_ID, RPC] { nodeSelector := newNodeSelector(selectionMode, primaryNodes) // Prometheus' default interval is 15s, set this to under 7.5s to avoid // aliasing (see: https://en.wikipedia.org/wiki/Nyquist_frequency) const reportInterval = 6500 * time.Millisecond - c := &MultiNode[CHAIN_ID, RPC_CLIENT]{ + c := &MultiNode[CHAIN_ID, RPC]{ primaryNodes: primaryNodes, sendOnlyNodes: sendOnlyNodes, chainID: chainID, @@ -93,11 +93,11 @@ func NewMultiNode[ return c } -func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) ChainID() CHAIN_ID { +func (c *MultiNode[CHAIN_ID, RPC]) ChainID() CHAIN_ID { return c.chainID } -func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) DoAll(ctx context.Context, do func(ctx context.Context, rpc RPC_CLIENT, isSendOnly bool)) error { +func (c *MultiNode[CHAIN_ID, RPC]) DoAll(ctx context.Context, do func(ctx context.Context, rpc RPC, isSendOnly bool)) error { callsCompleted := 0 for _, n := range c.primaryNodes { if ctx.Err() != nil { @@ -125,7 +125,7 @@ func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) DoAll(ctx context.Context, do func(ctx return nil } -func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) NodeStates() map[string]NodeState { +func (c *MultiNode[CHAIN_ID, RPC]) NodeStates() map[string]NodeState { states := map[string]NodeState{} for _, n := range c.primaryNodes { states[n.String()] = n.State() @@ -137,7 +137,7 @@ func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) NodeStates() map[string]NodeState { } // HighestChainInfo - returns highest ChainInfo ever observed by any node in the pool. -func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) HighestChainInfo() ChainInfo { +func (c *MultiNode[CHAIN_ID, RPC]) HighestChainInfo() ChainInfo { ch := ChainInfo{ TotalDifficulty: big.NewInt(0), } @@ -154,7 +154,7 @@ func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) HighestChainInfo() ChainInfo { // // Nodes handle their own redialing and runloops, so this function does not // return any error if the nodes aren't available -func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) Start(ctx context.Context) error { +func (c *MultiNode[CHAIN_ID, RPC]) Start(ctx context.Context) error { return c.StartOnce("MultiNode", func() (merr error) { if len(c.primaryNodes) == 0 { return fmt.Errorf("no available nodes for chain %s", c.chainID.String()) @@ -194,7 +194,7 @@ func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) Start(ctx context.Context) error { } // Close tears down the MultiNode and closes all nodes -func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) Close() error { +func (c *MultiNode[CHAIN_ID, RPC]) Close() error { return c.StopOnce("MultiNode", func() error { close(c.chStop) c.wg.Wait() @@ -205,7 +205,7 @@ func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) Close() error { // SelectRPC returns an RPC of an active node. If there are no active nodes it returns an error. // Call this method from your chain-specific client implementation to access any chain-specific rpc calls. -func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) SelectRPC() (rpc RPC_CLIENT, err error) { +func (c *MultiNode[CHAIN_ID, RPC]) SelectRPC() (rpc RPC, err error) { n, err := c.selectNode() if err != nil { return rpc, err @@ -214,7 +214,7 @@ func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) SelectRPC() (rpc RPC_CLIENT, err error } // selectNode returns the active Node, if it is still NodeStateAlive, otherwise it selects a new one from the NodeSelector. -func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) selectNode() (node Node[CHAIN_ID, RPC_CLIENT], err error) { +func (c *MultiNode[CHAIN_ID, RPC]) selectNode() (node Node[CHAIN_ID, RPC], err error) { c.activeMu.RLock() node = c.activeNode c.activeMu.RUnlock() @@ -248,7 +248,7 @@ func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) selectNode() (node Node[CHAIN_ID, RPC_ // LatestChainInfo - returns number of live nodes available in the pool, so we can prevent the last alive node in a pool from being marked as out-of-sync. // Return highest ChainInfo most recently received by the alive nodes. // E.g. If Node A's the most recent block is 10 and highest 15 and for Node B it's - 12 and 14. This method will return 12. -func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) LatestChainInfo() (int, ChainInfo) { +func (c *MultiNode[CHAIN_ID, RPC]) LatestChainInfo() (int, ChainInfo) { var nLiveNodes int ch := ChainInfo{ TotalDifficulty: big.NewInt(0), @@ -265,7 +265,7 @@ func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) LatestChainInfo() (int, ChainInfo) { } // HighestUserObservations - returns highest ChainInfo ever observed by any user of the MultiNode -func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) HighestUserObservations() ChainInfo { +func (c *MultiNode[CHAIN_ID, RPC]) HighestUserObservations() ChainInfo { ch := ChainInfo{ TotalDifficulty: big.NewInt(0), } @@ -278,7 +278,7 @@ func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) HighestUserObservations() ChainInfo { return ch } -func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) checkLease() { +func (c *MultiNode[CHAIN_ID, RPC]) checkLease() { bestNode := c.nodeSelector.Select() for _, n := range c.primaryNodes { // Terminate client subscriptions. Services are responsible for reconnecting, which will be routed to the new @@ -299,7 +299,7 @@ func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) checkLease() { } } -func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) checkLeaseLoop() { +func (c *MultiNode[CHAIN_ID, RPC]) checkLeaseLoop() { defer c.wg.Done() c.leaseTicker = time.NewTicker(c.leaseDuration) defer c.leaseTicker.Stop() @@ -314,7 +314,7 @@ func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) checkLeaseLoop() { } } -func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) runLoop() { +func (c *MultiNode[CHAIN_ID, RPC]) runLoop() { defer c.wg.Done() nodeStates := make([]nodeWithState, len(c.primaryNodes)) @@ -347,7 +347,7 @@ type nodeWithState struct { DeadSince *time.Time } -func (c *MultiNode[CHAIN_ID, RPC_CLIENT]) report(nodesStateInfo []nodeWithState) { +func (c *MultiNode[CHAIN_ID, RPC]) report(nodesStateInfo []nodeWithState) { start := time.Now() var dead int counts := make(map[NodeState]int) diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go index 980b1482325..7f62f540e4a 100644 --- a/common/client/node_lifecycle_test.go +++ b/common/client/node_lifecycle_test.go @@ -460,7 +460,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { node.declareAlive() tests.AssertLogEventually(t, observedLogs, "Latest finalized block is not valid") }) - t.Run("If finality tag and finalized block polling are enabled updates latest finalized block metric", func(t *testing.T) { + t.Run("If finality tag is enabled updates latest finalized block metric", func(t *testing.T) { t.Parallel() rpc := NewMockRPCClient[types.ID, Head](t) const expectedBlock = 1101 @@ -469,7 +469,6 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { sub.On("Err").Return(nil) sub.On("Unsubscribe") ch := make(chan Head, 1) - // I think it has to in case finality tag doesn't exist? rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() rpc.On("SubscribeToFinalizedHeads", mock.Anything).Run(func(args mock.Arguments) { go writeHeads(t, ch, head{BlockNumber: expectedBlock - 1}, head{BlockNumber: expectedBlock}) @@ -495,7 +494,6 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { require.NoError(t, err) var m = &prom.Metric{} require.NoError(t, metric.Write(m)) - fmt.Println("Val:", m.Gauge.GetValue()) return float64(expectedBlock) == m.Gauge.GetValue() }) }) @@ -531,7 +529,7 @@ func setupRPCForAliveLoop(t *testing.T, rpc *MockRPCClient[types.ID, Head]) { aliveSubscription.On("Err").Return(nil).Maybe() aliveSubscription.On("Unsubscribe").Maybe() rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), aliveSubscription, nil).Maybe() - rpc.On("UnsubscribeAllExcept", nil, nil).Maybe() + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Maybe() rpc.On("SetAliveLoopSub", mock.Anything).Maybe() rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Maybe() } @@ -1113,18 +1111,9 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() - headCh := make(<-chan Head) - sub := mocks.NewSubscription(t) - sub.On("Err").Return(nil) - sub.On("Unsubscribe").Once() - - rpc.On("Dial", mock.Anything).Return(nil).Once() - rpc.On("SubscribeToHeads", mock.Anything).Return(headCh, sub, nil) + setupRPCForAliveLoop(t, rpc) rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() - rpc.On("UnsubscribeAllExcept", mock.Anything).Once() - - setupRPCForAliveLoop(t, rpc) node.declareInvalidChainID() tests.AssertEventually(t, func() bool { @@ -1143,15 +1132,9 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() - rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() rpc.On("IsSyncing", mock.Anything).Return(false, nil).Once() - rpc.On("UnsubscribeAllExcept", mock.Anything).Once() - sub := mocks.NewSubscription(t) - sub.On("Err").Return(nil) - sub.On("Unsubscribe").Once() - rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() setupRPCForAliveLoop(t, rpc) @@ -1167,7 +1150,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { newNode := func(t *testing.T, opts testNodeOpts) testNode { node := newTestNode(t, opts) - opts.rpc.On("UnsubscribeAllExcept", nil, nil).Maybe() + opts.rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Maybe() opts.rpc.On("Close").Return(nil).Once() return node @@ -1185,8 +1168,6 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")) - // disconnects all on transfer to unreachable - rpc.On("UnsubscribeAllExcept", mock.Anything).Once() err := node.Start(tests.Context(t)) assert.NoError(t, err) tests.AssertLogEventually(t, observedLogs, "Dial failed: Node is unreachable") diff --git a/common/client/node_selector.go b/common/client/node_selector.go index d62fac9a1e5..d1bb58c6273 100644 --- a/common/client/node_selector.go +++ b/common/client/node_selector.go @@ -27,7 +27,7 @@ type NodeSelector[ func newNodeSelector[ CHAIN_ID types.ID, - RPC any, //RPCClient[CHAIN_ID, HEAD], + RPC any, ](selectionMode string, nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, RPC] { switch selectionMode { case NodeSelectionModeHighestHead: diff --git a/common/client/node_selector_highest_head.go b/common/client/node_selector_highest_head.go index 9fcd627644a..dbf402c7062 100644 --- a/common/client/node_selector_highest_head.go +++ b/common/client/node_selector_highest_head.go @@ -8,12 +8,12 @@ import ( type highestHeadNodeSelector[ CHAIN_ID types.ID, - RPC any, //RPCClient[CHAIN_ID, HEAD], + RPC any, ] []Node[CHAIN_ID, RPC] func NewHighestHeadNodeSelector[ CHAIN_ID types.ID, - RPC any, //RPCClient[CHAIN_ID, HEAD], + RPC any, ](nodes []Node[CHAIN_ID, RPC]) NodeSelector[CHAIN_ID, RPC] { return highestHeadNodeSelector[CHAIN_ID, RPC](nodes) } diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index 9ce0f7d95f6..1b77b86f31a 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -217,7 +217,6 @@ func (r *RpcClient) UnsubscribeAllExcept(subs ...commontypes.Subscription) { sub.Unsubscribe() } } - // TODO: Reset latest? r.latestChainInfo = commonclient.ChainInfo{} } @@ -357,12 +356,6 @@ func (r *RpcClient) registerSub(sub ethereum.Subscription, stopInFLightCh chan s return nil } -func (r *RpcClient) SetAliveLoopSub(sub commontypes.Subscription) { - r.stateMu.Lock() - defer r.stateMu.Unlock() - r.aliveLoopSub = sub -} - // SubscribersCount returns the number of client subscribed to the node func (r *RpcClient) SubscribersCount() int32 { r.stateMu.RLock() @@ -441,7 +434,7 @@ func (r *RpcClient) subscribe(ctx context.Context, channel chan<- *evmtypes.Head // GethClient wrappers -func (r *RpcClient) TransactionReceipt(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) { +func (r *RpcClient) TransactionReceipt(ctx context.Context, txHash common.Hash) (receipt *evmtypes.Receipt, err error) { err = r.CallContext(ctx, &receipt, "eth_getTransactionReceipt", txHash, false) if err != nil { return nil, err From 21c1c64950469a095fe11eb93725ee1e444b399c Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 4 Jul 2024 13:41:14 -0400 Subject: [PATCH 51/58] Remove unused code --- common/client/models.go | 8 -------- common/client/node.go | 2 -- common/client/node_lifecycle.go | 2 -- 3 files changed, 12 deletions(-) diff --git a/common/client/models.go b/common/client/models.go index 16f27e2eb36..6a6afe431e3 100644 --- a/common/client/models.go +++ b/common/client/models.go @@ -22,14 +22,6 @@ const ( sendTxReturnCodeLen // tracks the number of errors. Must always be last ) -// sendTxSevereErrors - error codes which signal that transaction would never be accepted in its current form by the node -// TODO: Implement Transaction Sending -//var sendTxSevereErrors = []SendTxReturnCode{Fatal, Underpriced, Unsupported, ExceedsMaxFee, FeeOutOfValidRange, Unknown} - -// sendTxSuccessfulCodes - error codes which signal that transaction was accepted by the node -// TODO: Implement Transaction Sending -//var sendTxSuccessfulCodes = []SendTxReturnCode{Successful, TransactionAlreadyKnown} - func (c SendTxReturnCode) String() string { switch c { case Successful: diff --git a/common/client/node.go b/common/client/node.go index 9720ecdd109..d6f71a6718e 100644 --- a/common/client/node.go +++ b/common/client/node.go @@ -171,8 +171,6 @@ func (n *node[CHAIN_ID, HEAD, RPC]) ConfiguredChainID() (chainID CHAIN_ID) { } func (n *node[CHAIN_ID, HEAD, RPC]) Name() string { - n.stateMu.RLock() - defer n.stateMu.RUnlock() return n.name } diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index 11916a74bee..1d5e955c6f0 100644 --- a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -440,8 +440,6 @@ func (n *node[CHAIN_ID, HEAD, RPC]) invalidChainIDLoop() { } } - fmt.Println("invalidChainIDLoop") - invalidAt := time.Now() lggr := logger.Named(n.lfcLog, "InvalidChainID") From 15e3fa70a904dbc967e88f3cbb5c82e9560e2117 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 4 Jul 2024 14:01:55 -0400 Subject: [PATCH 52/58] Generate private mock --- common/client/mock_rpc_client_test.go | 35 ++++----- common/client/node_fsm_test.go | 16 ++-- common/client/node_lifecycle_test.go | 108 +++++++++++++------------- common/client/node_test.go | 2 +- common/client/types.go | 2 +- 5 files changed, 79 insertions(+), 84 deletions(-) diff --git a/common/client/mock_rpc_client_test.go b/common/client/mock_rpc_client_test.go index 80708c3df71..c1204ca5914 100644 --- a/common/client/mock_rpc_client_test.go +++ b/common/client/mock_rpc_client_test.go @@ -9,13 +9,13 @@ import ( mock "github.com/stretchr/testify/mock" ) -// MockRPCClient is an autogenerated mock type for the RPCClient type -type MockRPCClient[CHAIN_ID types.ID, HEAD Head] struct { +// mockRPCClient is an autogenerated mock type for the RPCClient type +type mockRPCClient[CHAIN_ID types.ID, HEAD Head] struct { mock.Mock } // ChainID provides a mock function with given fields: ctx -func (_m *MockRPCClient[CHAIN_ID, HEAD]) ChainID(ctx context.Context) (CHAIN_ID, error) { +func (_m *mockRPCClient[CHAIN_ID, HEAD]) ChainID(ctx context.Context) (CHAIN_ID, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -43,12 +43,12 @@ func (_m *MockRPCClient[CHAIN_ID, HEAD]) ChainID(ctx context.Context) (CHAIN_ID, } // Close provides a mock function with given fields: -func (_m *MockRPCClient[CHAIN_ID, HEAD]) Close() { +func (_m *mockRPCClient[CHAIN_ID, HEAD]) Close() { _m.Called() } // Dial provides a mock function with given fields: ctx -func (_m *MockRPCClient[CHAIN_ID, HEAD]) Dial(ctx context.Context) error { +func (_m *mockRPCClient[CHAIN_ID, HEAD]) Dial(ctx context.Context) error { ret := _m.Called(ctx) if len(ret) == 0 { @@ -65,13 +65,8 @@ func (_m *MockRPCClient[CHAIN_ID, HEAD]) Dial(ctx context.Context) error { return r0 } -// DisconnectAll provides a mock function with given fields: -func (_m *MockRPCClient[CHAIN_ID, HEAD]) DisconnectAll() { - _m.Called() -} - // GetInterceptedChainInfo provides a mock function with given fields: -func (_m *MockRPCClient[CHAIN_ID, HEAD]) GetInterceptedChainInfo() (ChainInfo, ChainInfo) { +func (_m *mockRPCClient[CHAIN_ID, HEAD]) GetInterceptedChainInfo() (ChainInfo, ChainInfo) { ret := _m.Called() if len(ret) == 0 { @@ -99,7 +94,7 @@ func (_m *MockRPCClient[CHAIN_ID, HEAD]) GetInterceptedChainInfo() (ChainInfo, C } // IsSyncing provides a mock function with given fields: ctx -func (_m *MockRPCClient[CHAIN_ID, HEAD]) IsSyncing(ctx context.Context) (bool, error) { +func (_m *mockRPCClient[CHAIN_ID, HEAD]) IsSyncing(ctx context.Context) (bool, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -127,7 +122,7 @@ func (_m *MockRPCClient[CHAIN_ID, HEAD]) IsSyncing(ctx context.Context) (bool, e } // Ping provides a mock function with given fields: _a0 -func (_m *MockRPCClient[CHAIN_ID, HEAD]) Ping(_a0 context.Context) error { +func (_m *mockRPCClient[CHAIN_ID, HEAD]) Ping(_a0 context.Context) error { ret := _m.Called(_a0) if len(ret) == 0 { @@ -145,7 +140,7 @@ func (_m *MockRPCClient[CHAIN_ID, HEAD]) Ping(_a0 context.Context) error { } // SubscribeToFinalizedHeads provides a mock function with given fields: ctx -func (_m *MockRPCClient[CHAIN_ID, HEAD]) SubscribeToFinalizedHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) { +func (_m *mockRPCClient[CHAIN_ID, HEAD]) SubscribeToFinalizedHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -184,7 +179,7 @@ func (_m *MockRPCClient[CHAIN_ID, HEAD]) SubscribeToFinalizedHeads(ctx context.C } // SubscribeToHeads provides a mock function with given fields: ctx -func (_m *MockRPCClient[CHAIN_ID, HEAD]) SubscribeToHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) { +func (_m *mockRPCClient[CHAIN_ID, HEAD]) SubscribeToHeads(ctx context.Context) (<-chan HEAD, types.Subscription, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -223,7 +218,7 @@ func (_m *MockRPCClient[CHAIN_ID, HEAD]) SubscribeToHeads(ctx context.Context) ( } // UnsubscribeAllExcept provides a mock function with given fields: subs -func (_m *MockRPCClient[CHAIN_ID, HEAD]) UnsubscribeAllExcept(subs ...types.Subscription) { +func (_m *mockRPCClient[CHAIN_ID, HEAD]) UnsubscribeAllExcept(subs ...types.Subscription) { _va := make([]interface{}, len(subs)) for _i := range subs { _va[_i] = subs[_i] @@ -233,13 +228,13 @@ func (_m *MockRPCClient[CHAIN_ID, HEAD]) UnsubscribeAllExcept(subs ...types.Subs _m.Called(_ca...) } -// NewMockRPCClient creates a new instance of MockRPCClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// newMockRPCClient creates a new instance of mockRPCClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. -func NewMockRPCClient[CHAIN_ID types.ID, HEAD Head](t interface { +func newMockRPCClient[CHAIN_ID types.ID, HEAD Head](t interface { mock.TestingT Cleanup(func()) -}) *MockRPCClient[CHAIN_ID, HEAD] { - mock := &MockRPCClient[CHAIN_ID, HEAD]{} +}) *mockRPCClient[CHAIN_ID, HEAD] { + mock := &mockRPCClient[CHAIN_ID, HEAD]{} mock.Mock.Test(t) t.Cleanup(func() { mock.AssertExpectations(t) }) diff --git a/common/client/node_fsm_test.go b/common/client/node_fsm_test.go index 973d40e6ba7..6b8b9afd758 100644 --- a/common/client/node_fsm_test.go +++ b/common/client/node_fsm_test.go @@ -41,46 +41,46 @@ func TestUnit_Node_StateTransitions(t *testing.T) { t.Run("transitionToAlive", func(t *testing.T) { const destinationState = NodeStateAlive allowedStates := []NodeState{NodeStateDialed, NodeStateInvalidChainID, NodeStateSyncing} - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) testTransition(t, rpc, testNode.transitionToAlive, destinationState, allowedStates...) }) t.Run("transitionToInSync", func(t *testing.T) { const destinationState = NodeStateAlive allowedStates := []NodeState{NodeStateOutOfSync, NodeStateSyncing} - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) testTransition(t, rpc, testNode.transitionToInSync, destinationState, allowedStates...) }) t.Run("transitionToOutOfSync", func(t *testing.T) { const destinationState = NodeStateOutOfSync allowedStates := []NodeState{NodeStateAlive} - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) rpc.On("UnsubscribeAllExcept", mock.Anything) testTransition(t, rpc, testNode.transitionToOutOfSync, destinationState, allowedStates...) }) t.Run("transitionToUnreachable", func(t *testing.T) { const destinationState = NodeStateUnreachable allowedStates := []NodeState{NodeStateUndialed, NodeStateDialed, NodeStateAlive, NodeStateOutOfSync, NodeStateInvalidChainID, NodeStateSyncing} - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) rpc.On("UnsubscribeAllExcept", mock.Anything) testTransition(t, rpc, testNode.transitionToUnreachable, destinationState, allowedStates...) }) t.Run("transitionToInvalidChain", func(t *testing.T) { const destinationState = NodeStateInvalidChainID allowedStates := []NodeState{NodeStateDialed, NodeStateOutOfSync, NodeStateSyncing} - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) rpc.On("UnsubscribeAllExcept", mock.Anything) testTransition(t, rpc, testNode.transitionToInvalidChainID, destinationState, allowedStates...) }) t.Run("transitionToSyncing", func(t *testing.T) { const destinationState = NodeStateSyncing allowedStates := []NodeState{NodeStateDialed, NodeStateOutOfSync, NodeStateInvalidChainID} - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) rpc.On("UnsubscribeAllExcept", mock.Anything) testTransition(t, rpc, testNode.transitionToSyncing, destinationState, allowedStates...) }) t.Run("transitionToSyncing panics if nodeIsSyncing is disabled", func(t *testing.T) { - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) rpc.On("UnsubscribeAllExcept", mock.Anything) node := newTestNode(t, testNodeOpts{rpc: rpc}) node.setState(NodeStateDialed) @@ -92,7 +92,7 @@ func TestUnit_Node_StateTransitions(t *testing.T) { }) } -func testTransition(t *testing.T, rpc *MockRPCClient[types.ID, Head], transition func(node testNode, fn func()), destinationState NodeState, allowedStates ...NodeState) { +func testTransition(t *testing.T, rpc *mockRPCClient[types.ID, Head], transition func(node testNode, fn func()), destinationState NodeState, allowedStates ...NodeState) { node := newTestNode(t, testNodeOpts{rpc: rpc, config: testNodeConfig{nodeIsSyncingEnabled: true}}) for _, allowedState := range allowedStates { m := new(fnMock) diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go index 7f62f540e4a..c59179ca04a 100644 --- a/common/client/node_lifecycle_test.go +++ b/common/client/node_lifecycle_test.go @@ -41,7 +41,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("if initial subscribe fails, transitions to unreachable", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) node := newDialedNode(t, testNodeOpts{ rpc: rpc, }) @@ -59,7 +59,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("if remote RPC connection is closed transitions to unreachable", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) node := newDialedNode(t, testNodeOpts{ @@ -92,7 +92,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { } t.Run("Stays alive and waits for signal", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newSubscribedNode(t, testNodeOpts{ @@ -108,7 +108,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("stays alive while below pollFailureThreshold and resets counter on success", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) const pollFailureThreshold = 3 @@ -154,7 +154,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("with threshold poll failures, transitions to unreachable", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) const pollFailureThreshold = 3 @@ -181,7 +181,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("with threshold poll failures, but we are the last node alive, forcibly keeps it alive", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) const pollFailureThreshold = 3 node := newSubscribedNode(t, testNodeOpts{ @@ -207,7 +207,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("when behind more than SyncThreshold, transitions to out of sync", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) const syncThreshold = 10 node := newSubscribedNode(t, testNodeOpts{ @@ -245,7 +245,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("when behind more than SyncThreshold but we are the last live node, forcibly stays alive", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) const syncThreshold = 10 node := newSubscribedNode(t, testNodeOpts{ @@ -272,7 +272,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("when behind but SyncThreshold=0, stay alive", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newSubscribedNode(t, testNodeOpts{ config: testNodeConfig{ @@ -293,7 +293,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("when no new heads received for threshold, transitions to out of sync", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() rpc.On("UnsubscribeAllExcept", mock.Anything) node := newSubscribedNode(t, testNodeOpts{ @@ -321,7 +321,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("when no new heads received for threshold but we are the last live node, forcibly stays alive", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newSubscribedNode(t, testNodeOpts{ @@ -345,7 +345,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("rpc closed head channel", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) rpc.On("UnsubscribeAllExcept", mock.Anything) sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) @@ -374,7 +374,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("If finality tag is not enabled updates finalized block metric using finality depth and latest head", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) sub := mocks.NewSubscription(t) sub.On("Err").Return(nil) sub.On("Unsubscribe").Once() @@ -406,7 +406,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("Logs warning if failed to subscrive to latest finalized blocks", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) sub := mocks.NewSubscription(t) sub.On("Err").Return(nil).Maybe() sub.On("Unsubscribe") @@ -432,7 +432,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("Logs warning if latest finalized block is not valid", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) sub := mocks.NewSubscription(t) sub.On("Err").Return(nil) sub.On("Unsubscribe") @@ -462,7 +462,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) t.Run("If finality tag is enabled updates latest finalized block metric", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) const expectedBlock = 1101 const finalityDepth = 10 sub := mocks.NewSubscription(t) @@ -523,7 +523,7 @@ func writeHeads(t *testing.T, ch chan<- Head, heads ...head) { } } -func setupRPCForAliveLoop(t *testing.T, rpc *MockRPCClient[types.ID, Head]) { +func setupRPCForAliveLoop(t *testing.T, rpc *mockRPCClient[types.ID, Head]) { rpc.On("Dial", mock.Anything).Return(nil).Maybe() aliveSubscription := mocks.NewSubscription(t) aliveSubscription.On("Err").Return(nil).Maybe() @@ -558,7 +558,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("on old blocks stays outOfSync and returns on close", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newAliveNode(t, testNodeOpts{ @@ -591,7 +591,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("if initial dial fails, transitions to unreachable", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) node := newAliveNode(t, testNodeOpts{ rpc: rpc, }) @@ -608,7 +608,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("if fail to get chainID, transitions to unreachable", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) node := newAliveNode(t, testNodeOpts{ rpc: rpc, }) @@ -629,7 +629,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("if chainID does not match, transitions to invalidChainID", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) rpcChainID := types.NewIDFromInt(11) node := newAliveNode(t, testNodeOpts{ @@ -650,7 +650,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("if syncing, transitions to syncing", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) node := newAliveNode(t, testNodeOpts{ rpc: rpc, @@ -671,7 +671,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("if fails to fetch syncing status, transitions to unreachable", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) node := newAliveNode(t, testNodeOpts{ rpc: rpc, @@ -695,7 +695,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("if fails to subscribe, becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() node := newAliveNode(t, testNodeOpts{ rpc: rpc, @@ -716,7 +716,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("on subscription termination becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) node := newAliveNode(t, testNodeOpts{ @@ -744,7 +744,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("becomes unreachable if head channel is closed", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) node := newAliveNode(t, testNodeOpts{ @@ -775,7 +775,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { t.Run("becomes alive if it receives a newer head", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newAliveNode(t, testNodeOpts{ @@ -811,7 +811,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { }) t.Run("becomes alive if there is no other nodes", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newAliveNode(t, testNodeOpts{ @@ -869,7 +869,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { }) t.Run("on failed redial, keeps trying", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newAliveNode(t, testNodeOpts{ @@ -886,7 +886,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { }) t.Run("on failed chainID verification, keep trying", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newAliveNode(t, testNodeOpts{ @@ -906,7 +906,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { }) t.Run("on chain ID mismatch transitions to invalidChainID", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) rpcChainID := types.NewIDFromInt(11) node := newAliveNode(t, testNodeOpts{ @@ -926,7 +926,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { }) t.Run("on syncing status check failure, keeps trying", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newAliveNode(t, testNodeOpts{ @@ -948,7 +948,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { }) t.Run("on syncing, transitions to syncing state", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() node := newAliveNode(t, testNodeOpts{ rpc: rpc, @@ -971,7 +971,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { }) t.Run("on successful verification becomes alive", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() node := newAliveNode(t, testNodeOpts{ rpc: rpc, @@ -998,7 +998,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { }) t.Run("on successful verification without isSyncing becomes alive", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() node := newAliveNode(t, testNodeOpts{ rpc: rpc, @@ -1037,7 +1037,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { }) t.Run("on invalid dial becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() node := newDialedNode(t, testNodeOpts{ rpc: rpc, @@ -1055,7 +1055,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { }) t.Run("on failed chainID call becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ @@ -1079,7 +1079,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { }) t.Run("on chainID mismatch keeps trying", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) rpcChainID := types.NewIDFromInt(11) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) @@ -1102,7 +1102,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { }) t.Run("on successful verification without isSyncing becomes alive", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) rpcChainID := types.NewIDFromInt(11) node := newDialedNode(t, testNodeOpts{ @@ -1122,7 +1122,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { }) t.Run("on successful verification becomes alive", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) rpcChainID := types.NewIDFromInt(11) node := newDialedNode(t, testNodeOpts{ @@ -1157,7 +1157,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { } t.Run("if fails on initial dial, becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newNode(t, testNodeOpts{ @@ -1177,7 +1177,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }) t.Run("if chainID verification fails, becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newNode(t, testNodeOpts{ @@ -1202,7 +1202,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }) t.Run("on chain ID mismatch transitions to invalidChainID", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) rpcChainID := types.NewIDFromInt(11) node := newNode(t, testNodeOpts{ @@ -1223,7 +1223,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }) t.Run("if syncing verification fails, becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newNode(t, testNodeOpts{ @@ -1252,7 +1252,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }) t.Run("on isSyncing transitions to syncing", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) node := newNode(t, testNodeOpts{ rpc: rpc, @@ -1274,7 +1274,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }) t.Run("on successful verification becomes alive", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() node := newNode(t, testNodeOpts{ rpc: rpc, @@ -1300,7 +1300,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }) t.Run("on successful verification without isSyncing becomes alive", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() node := newNode(t, testNodeOpts{ rpc: rpc, @@ -1485,7 +1485,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { }) t.Run("on invalid dial becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() node := newDialedNode(t, testNodeOpts{ rpc: rpc, @@ -1502,7 +1502,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { }) t.Run("on failed chainID call becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ @@ -1525,7 +1525,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { }) t.Run("on chainID mismatch transitions to invalidChainID", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.NewIDFromInt(10) rpcChainID := types.NewIDFromInt(11) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) @@ -1547,7 +1547,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { }) t.Run("on failed Syncing check - becomes unreachable", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ @@ -1572,7 +1572,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { }) t.Run("on IsSyncing - keeps trying", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ @@ -1594,7 +1594,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { }) t.Run("on successful verification becomes alive", func(t *testing.T) { t.Parallel() - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) nodeChainID := types.RandomID() node := newDialedNode(t, testNodeOpts{ rpc: rpc, @@ -1695,7 +1695,7 @@ func TestNode_State(t *testing.T) { } for _, tc := range testCases { t.Run(tc.Name, func(t *testing.T) { - rpc := NewMockRPCClient[types.ID, Head](t) + rpc := newMockRPCClient[types.ID, Head](t) rpc.On("GetInterceptedChainInfo").Return(tc.NodeChainInfo, tc.PoolChainInfo).Once() node := newTestNode(t, testNodeOpts{ config: testNodeConfig{ diff --git a/common/client/node_test.go b/common/client/node_test.go index da7885ea45b..87f3b589e12 100644 --- a/common/client/node_test.go +++ b/common/client/node_test.go @@ -68,7 +68,7 @@ type testNodeOpts struct { id int32 chainID types.ID nodeOrder int32 - rpc *MockRPCClient[types.ID, Head] + rpc *mockRPCClient[types.ID, Head] chainFamily string } diff --git a/common/client/types.go b/common/client/types.go index 06f43984ba3..fd0d4e85397 100644 --- a/common/client/types.go +++ b/common/client/types.go @@ -12,7 +12,7 @@ import ( // RPCClient includes all the necessary generalized RPC methods along with any additional chain-specific methods. // -//go:generate mockery --quiet --name RPCClient --structname MockRPCClient --filename "mock_rpc_client_test.go" --inpackage --case=underscore +//go:generate mockery --quiet --name RPCClient --structname mockRPCClient --filename "mock_rpc_client_test.go" --inpackage --case=underscore type RPCClient[ CHAIN_ID types.ID, HEAD Head, From f4ebec02779561172b6c123dcac863d133e662a4 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Thu, 4 Jul 2024 14:30:54 -0400 Subject: [PATCH 53/58] lint --- common/client/node_lifecycle.go | 3 +++ core/chains/evm/client/rpc_client.go | 3 --- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index 1d5e955c6f0..06557ffa8d1 100644 --- a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -145,7 +145,10 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } defer finalizedHeadSub.Unsubscribe() } + + n.stateMu.Lock() n.finalizedBlockSub = finalizedHeadSub + n.stateMu.Unlock() localHighestChainInfo, _ := n.rpc.GetInterceptedChainInfo() var pollFailures uint32 diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index 1b77b86f31a..8e0c1829002 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -101,9 +101,6 @@ type RpcClient struct { // close the underlying subscription subs []ethereum.Subscription - // Need to track the aliveLoop subscription, so we do not cancel it when checking lease on the MultiNode - aliveLoopSub ethereum.Subscription - // chStopInFlight can be closed to immediately cancel all in-flight requests on // this RpcClient. Closing and replacing should be serialized through // stateMu since it can happen on state transitions as well as RpcClient Close. From f3e0ec1bade4151dfdaf7b0d516e28e7bb64a151 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Fri, 5 Jul 2024 15:11:02 -0400 Subject: [PATCH 54/58] Fix locks and unsubscribing --- common/client/multi_node.go | 14 ---- common/client/node.go | 16 ++++- common/client/node_fsm.go | 8 +-- common/client/node_fsm_test.go | 10 +-- common/client/node_lifecycle.go | 26 +++++--- common/client/node_lifecycle_test.go | 78 +++++++++++------------ core/chains/evm/client/rpc_client.go | 73 ++++++++------------- core/chains/evm/client/rpc_client_test.go | 2 +- 8 files changed, 107 insertions(+), 120 deletions(-) diff --git a/common/client/multi_node.go b/common/client/multi_node.go index 3887edf1b2c..3bfa74c4393 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -129,20 +129,6 @@ func (c *MultiNode[CHAIN_ID, RPC]) NodeStates() map[string]NodeState { return states } -// HighestChainInfo - returns highest ChainInfo ever observed by any node in the pool. -func (c *MultiNode[CHAIN_ID, RPC]) HighestChainInfo() ChainInfo { - ch := ChainInfo{ - TotalDifficulty: big.NewInt(0), - } - for _, n := range c.primaryNodes { - _, nodeChainInfo := n.StateAndLatest() - ch.BlockNumber = max(ch.BlockNumber, nodeChainInfo.BlockNumber) - ch.FinalizedBlockNumber = max(ch.FinalizedBlockNumber, nodeChainInfo.FinalizedBlockNumber) - ch.TotalDifficulty = nodeChainInfo.TotalDifficulty - } - return ch -} - // Start starts every node in the pool // // Nodes handle their own redialing and runloops, so this function does not diff --git a/common/client/node.go b/common/client/node.go index d6f71a6718e..300a0be7f95 100644 --- a/common/client/node.go +++ b/common/client/node.go @@ -110,7 +110,9 @@ type node[ stopCh services.StopChan // wg waits for subsidiary goroutines - wg sync.WaitGroup + wg sync.WaitGroup + + subsMu sync.Mutex aliveLoopSub types.Subscription finalizedBlockSub types.Subscription } @@ -178,8 +180,18 @@ func (n *node[CHAIN_ID, HEAD, RPC]) RPC() RPC { return n.rpc } +// unsubscribeAllExceptAliveLoop is not thread-safe; it should only be called +// while holding the stateMu lock. +func (n *node[CHAIN_ID, HEAD, RPC]) unsubscribeAllExceptAliveLoop() { + aliveLoopSub := n.aliveLoopSub + finalizedBlockSub := n.finalizedBlockSub + n.rpc.UnsubscribeAllExcept(aliveLoopSub, finalizedBlockSub) +} + func (n *node[CHAIN_ID, HEAD, RPC]) UnsubscribeAllExceptAliveLoop() { - n.rpc.UnsubscribeAllExcept(n.aliveLoopSub) + n.stateMu.Lock() + defer n.stateMu.Unlock() + n.unsubscribeAllExceptAliveLoop() } func (n *node[CHAIN_ID, HEAD, RPC]) Close() error { diff --git a/common/client/node_fsm.go b/common/client/node_fsm.go index fc7894a1964..734a15e4be7 100644 --- a/common/client/node_fsm.go +++ b/common/client/node_fsm.go @@ -257,7 +257,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToOutOfSync(fn func()) { } switch n.state { case NodeStateAlive: - n.UnsubscribeAllExceptAliveLoop() + n.unsubscribeAllExceptAliveLoop() n.state = NodeStateOutOfSync default: panic(transitionFail(n.state, NodeStateOutOfSync)) @@ -282,7 +282,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToUnreachable(fn func()) { } switch n.state { case NodeStateUndialed, NodeStateDialed, NodeStateAlive, NodeStateOutOfSync, NodeStateInvalidChainID, NodeStateSyncing: - n.UnsubscribeAllExceptAliveLoop() + n.unsubscribeAllExceptAliveLoop() n.state = NodeStateUnreachable default: panic(transitionFail(n.state, NodeStateUnreachable)) @@ -325,7 +325,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToInvalidChainID(fn func()) { } switch n.state { case NodeStateDialed, NodeStateOutOfSync, NodeStateSyncing: - n.UnsubscribeAllExceptAliveLoop() + n.unsubscribeAllExceptAliveLoop() n.state = NodeStateInvalidChainID default: panic(transitionFail(n.state, NodeStateInvalidChainID)) @@ -350,7 +350,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) transitionToSyncing(fn func()) { } switch n.state { case NodeStateDialed, NodeStateOutOfSync, NodeStateInvalidChainID: - n.UnsubscribeAllExceptAliveLoop() + n.unsubscribeAllExceptAliveLoop() n.state = NodeStateSyncing default: panic(transitionFail(n.state, NodeStateSyncing)) diff --git a/common/client/node_fsm_test.go b/common/client/node_fsm_test.go index 6b8b9afd758..62a5264b32e 100644 --- a/common/client/node_fsm_test.go +++ b/common/client/node_fsm_test.go @@ -55,33 +55,33 @@ func TestUnit_Node_StateTransitions(t *testing.T) { const destinationState = NodeStateOutOfSync allowedStates := []NodeState{NodeStateAlive} rpc := newMockRPCClient[types.ID, Head](t) - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) testTransition(t, rpc, testNode.transitionToOutOfSync, destinationState, allowedStates...) }) t.Run("transitionToUnreachable", func(t *testing.T) { const destinationState = NodeStateUnreachable allowedStates := []NodeState{NodeStateUndialed, NodeStateDialed, NodeStateAlive, NodeStateOutOfSync, NodeStateInvalidChainID, NodeStateSyncing} rpc := newMockRPCClient[types.ID, Head](t) - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) testTransition(t, rpc, testNode.transitionToUnreachable, destinationState, allowedStates...) }) t.Run("transitionToInvalidChain", func(t *testing.T) { const destinationState = NodeStateInvalidChainID allowedStates := []NodeState{NodeStateDialed, NodeStateOutOfSync, NodeStateSyncing} rpc := newMockRPCClient[types.ID, Head](t) - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) testTransition(t, rpc, testNode.transitionToInvalidChainID, destinationState, allowedStates...) }) t.Run("transitionToSyncing", func(t *testing.T) { const destinationState = NodeStateSyncing allowedStates := []NodeState{NodeStateDialed, NodeStateOutOfSync, NodeStateInvalidChainID} rpc := newMockRPCClient[types.ID, Head](t) - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) testTransition(t, rpc, testNode.transitionToSyncing, destinationState, allowedStates...) }) t.Run("transitionToSyncing panics if nodeIsSyncing is disabled", func(t *testing.T) { rpc := newMockRPCClient[types.ID, Head](t) - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) node := newTestNode(t, testNodeOpts{rpc: rpc}) node.setState(NodeStateDialed) fn := new(fnMock) diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index 06557ffa8d1..26307a4f32a 100644 --- a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -104,7 +104,12 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { n.stateMu.Lock() n.aliveLoopSub = sub n.stateMu.Unlock() - defer sub.Unsubscribe() + defer func() { + defer sub.Unsubscribe() + n.stateMu.Lock() + n.aliveLoopSub = nil + n.stateMu.Unlock() + }() var outOfSyncT *time.Ticker var outOfSyncTC <-chan time.Time @@ -134,8 +139,8 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { } var finalizedHeadCh <-chan HEAD - var finalizedHeadSub types.Subscription if n.chainCfg.FinalityTagEnabled() { + var finalizedHeadSub types.Subscription lggr.Debugw("Finalized block polling enabled") finalizedHeadCh, finalizedHeadSub, err = n.rpc.SubscribeToFinalizedHeads(ctx) if err != nil { @@ -143,12 +148,17 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { n.declareUnreachable() return } - defer finalizedHeadSub.Unsubscribe() - } - n.stateMu.Lock() - n.finalizedBlockSub = finalizedHeadSub - n.stateMu.Unlock() + n.stateMu.Lock() + n.finalizedBlockSub = finalizedHeadSub + n.stateMu.Unlock() + defer func() { + finalizedHeadSub.Unsubscribe() + n.stateMu.Lock() + n.finalizedBlockSub = nil + n.stateMu.Unlock() + }() + } localHighestChainInfo, _ := n.rpc.GetInterceptedChainInfo() var pollFailures uint32 @@ -252,13 +262,11 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { continue } - n.stateMu.Lock() latestFinalizedBN := latestFinalized.BlockNumber() if latestFinalizedBN > localHighestChainInfo.FinalizedBlockNumber { promPoolRPCNodeHighestFinalizedBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(latestFinalizedBN)) localHighestChainInfo.FinalizedBlockNumber = latestFinalizedBN } - n.stateMu.Unlock() } } } diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go index c59179ca04a..17ee6c890f6 100644 --- a/common/client/node_lifecycle_test.go +++ b/common/client/node_lifecycle_test.go @@ -49,7 +49,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { expectedError := errors.New("failed to subscribe to rpc") rpc.On("SubscribeToHeads", mock.Anything).Return(nil, nil, expectedError).Once() - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() node.declareAlive() @@ -74,7 +74,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { close(errChan) sub.On("Err").Return((<-chan error)(errChan)).Once() sub.On("Unsubscribe").Once() - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) rpc.On("SubscribeToHeads", mock.Anything).Return(nil, sub, nil).Once() // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() @@ -122,7 +122,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(node.chainID, nil) @@ -172,7 +172,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { // disconnects all on transfer to unreachable // might be called in unreachable loop rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) node.declareAlive() tests.AssertLogCountEventually(t, observedLogs, fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", node.String()), pollFailureThreshold) tests.AssertEventually(t, func() bool { @@ -221,7 +221,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() rpc.On("Ping", mock.Anything).Return(nil) - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) const mostRecentBlock = 20 rpc.On("GetInterceptedChainInfo").Return(ChainInfo{BlockNumber: mostRecentBlock}, ChainInfo{BlockNumber: 30}) poolInfo := newMockPoolChainInfoProvider(t) @@ -295,7 +295,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { t.Parallel() rpc := newMockRPCClient[types.ID, Head](t) rpc.On("GetInterceptedChainInfo").Return(ChainInfo{}, ChainInfo{}).Once() - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) node := newSubscribedNode(t, testNodeOpts{ config: testNodeConfig{}, chainConfig: clientMocks.ChainConfig{ @@ -346,7 +346,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { t.Run("rpc closed head channel", func(t *testing.T) { t.Parallel() rpc := newMockRPCClient[types.ID, Head](t) - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) sub.On("Unsubscribe").Once() @@ -414,7 +414,7 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { expectedError := errors.New("failed to subscribe to finalized heads") rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return(nil, sub, expectedError).Once() rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Maybe() - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ config: testNodeConfig{ @@ -570,7 +570,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) outOfSyncSubscription := mocks.NewSubscription(t) outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) @@ -600,7 +600,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { expectedError := errors.New("failed to dial rpc") // might be called again in unreachable loop, so no need to set once rpc.On("Dial", mock.Anything).Return(expectedError) - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) node.declareOutOfSync(stubIsOutOfSync) tests.AssertEventually(t, func() bool { return node.State() == NodeStateUnreachable @@ -618,7 +618,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() // for unreachable rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) expectedError := errors.New("failed to get chain ID") // might be called multiple times rpc.On("ChainID", mock.Anything).Return(types.NewIDFromInt(0), expectedError) @@ -640,7 +640,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { // one for out-of-sync & one for invalid chainID rpc.On("Dial", mock.Anything).Return(nil).Twice() - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) // might be called multiple times rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) node.declareOutOfSync(stubIsOutOfSync) @@ -661,7 +661,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) // might be called multiple times rpc.On("IsSyncing", mock.Anything).Return(true, nil) node.declareOutOfSync(stubIsOutOfSync) @@ -682,7 +682,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { // one for out-of-sync rpc.On("Dial", mock.Anything).Return(nil).Once() - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) // for unreachable rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() @@ -708,7 +708,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { expectedError := errors.New("failed to subscribe") rpc.On("SubscribeToHeads", mock.Anything).Return(nil, nil, expectedError).Once() rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) node.declareOutOfSync(stubIsOutOfSync) tests.AssertEventually(t, func() bool { return node.State() == NodeStateUnreachable @@ -728,7 +728,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) sub := mocks.NewSubscription(t) errChan := make(chan error, 1) errChan <- errors.New("subscription was terminate") @@ -756,7 +756,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) sub := mocks.NewSubscription(t) sub.On("Err").Return((<-chan error)(nil)) @@ -787,7 +787,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) outOfSyncSubscription := mocks.NewSubscription(t) outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) @@ -833,7 +833,7 @@ func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) outOfSyncSubscription := mocks.NewSubscription(t) outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) @@ -880,7 +880,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")) - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) node.declareUnreachable() tests.AssertLogCountEventually(t, observedLogs, "Failed to redial RPC node; still unreachable", 2) }) @@ -897,7 +897,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(nil) - rpc.On("UnsubscribeAllExcept", mock.Anything).Once() + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Once() rpc.On("ChainID", mock.Anything).Run(func(_ mock.Arguments) { assert.Equal(t, NodeStateDialed, node.State()) }).Return(nodeChainID, errors.New("failed to get chain id")) @@ -917,7 +917,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) node.declareUnreachable() tests.AssertEventually(t, func() bool { @@ -938,7 +938,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(nil) - rpc.On("UnsubscribeAllExcept", mock.Anything).Once() + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Once() rpc.On("ChainID", mock.Anything).Run(func(_ mock.Arguments) { assert.Equal(t, NodeStateDialed, node.State()) }).Return(nodeChainID, nil) @@ -960,7 +960,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) rpc.On("IsSyncing", mock.Anything).Return(true, nil) - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) setupRPCForAliveLoop(t, rpc) @@ -987,7 +987,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { sub.On("Err").Return(nil) sub.On("Unsubscribe").Once() rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() - rpc.On("UnsubscribeAllExcept", mock.Anything).Once() + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Once() setupRPCForAliveLoop(t, rpc) @@ -1008,7 +1008,7 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) - rpc.On("UnsubscribeAllExcept", mock.Anything).Once() + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Once() setupRPCForAliveLoop(t, rpc) @@ -1046,7 +1046,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")) - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) node.declareInvalidChainID() tests.AssertEventually(t, func() bool { @@ -1069,7 +1069,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { // once for chainID and maybe another one for unreachable rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) node.declareInvalidChainID() tests.AssertLogEventually(t, observedLogs, "Failed to verify chain ID for node") @@ -1092,7 +1092,7 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) node.declareInvalidChainID() tests.AssertLogCountEventually(t, observedLogs, "Failed to verify RPC node; remote endpoint returned the wrong chain ID", 2) @@ -1188,7 +1188,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(nil) - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) rpc.On("ChainID", mock.Anything).Run(func(_ mock.Arguments) { assert.Equal(t, NodeStateDialed, node.State()) }).Return(nodeChainID, errors.New("failed to get chain id")) @@ -1212,7 +1212,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(nil) - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) // disconnects all on transfer to unreachable err := node.Start(tests.Context(t)) @@ -1235,7 +1235,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(nil).Once() - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) rpc.On("ChainID", mock.Anything).Run(func(_ mock.Arguments) { assert.Equal(t, NodeStateDialed, node.State()) }).Return(nodeChainID, nil).Once() @@ -1262,7 +1262,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(nil) - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) rpc.On("IsSyncing", mock.Anything).Return(true, nil) // disconnects all on transfer to unreachable @@ -1494,7 +1494,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")) - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) node.declareSyncing() tests.AssertEventually(t, func() bool { return node.State() == NodeStateUnreachable @@ -1513,7 +1513,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("ChainID", mock.Anything).Return(nodeChainID, errors.New("failed to get chain id")) - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) // once for syncing and maybe another one for unreachable rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() @@ -1537,7 +1537,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(nil).Twice() - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) node.declareSyncing() tests.AssertLogCountEventually(t, observedLogs, "Failed to verify RPC node; remote endpoint returned the wrong chain ID", 2) @@ -1563,7 +1563,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { rpc.On("IsSyncing", mock.Anything).Return(false, errors.New("failed to check if syncing")).Once() rpc.On("Dial", mock.Anything).Return(nil).Once() rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) node.declareSyncing() tests.AssertLogEventually(t, observedLogs, "Unexpected error while verifying RPC node synchronization status") tests.AssertEventually(t, func() bool { @@ -1585,7 +1585,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() rpc.On("IsSyncing", mock.Anything).Return(true, nil) rpc.On("Dial", mock.Anything).Return(nil).Once() - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) node.declareSyncing() tests.AssertLogCountEventually(t, observedLogs, "Verification failed: Node is syncing", 2) tests.AssertEventually(t, func() bool { @@ -1603,7 +1603,7 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { defer func() { assert.NoError(t, node.close()) }() rpc.On("Dial", mock.Anything).Return(nil).Once() - rpc.On("UnsubscribeAllExcept", mock.Anything) + rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() rpc.On("IsSyncing", mock.Anything).Return(true, nil).Once() rpc.On("IsSyncing", mock.Anything).Return(false, nil).Once() diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index 8e0c1829002..9301e92d71b 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -147,34 +147,39 @@ func NewRPCClient( return r } -func (r *RpcClient) SubscribeToHeads(ctx context.Context) (<-chan *evmtypes.Head, commontypes.Subscription, error) { - ctx, cancel, chStopInFlight, _, _ := r.acquireQueryCtx(ctx) +func (r *RpcClient) SubscribeToHeads(ctx context.Context) (ch <-chan *evmtypes.Head, sub commontypes.Subscription, err error) { + ctx, cancel, chStopInFlight, ws, _ := r.acquireQueryCtx(ctx) defer cancel() - newChainIDSubForwarder := func(chainID *big.Int, ch chan<- *evmtypes.Head) *subForwarder[*evmtypes.Head] { - return newSubForwarder(ch, func(head *evmtypes.Head) *evmtypes.Head { - head.EVMChainID = ubig.New(chainID) - r.onNewHead(ctx, chStopInFlight, head) - return head - }, r.wrapRPCClientError) - } + args := []interface{}{rpcSubscriptionMethodNewHeads} + start := time.Now() + lggr := r.newRqLggr().With("args", args) - ch := make(chan *evmtypes.Head) - forwarder := newChainIDSubForwarder(r.chainID, ch) + lggr.Debug("RPC call: evmclient.Client#EthSubscribe") + defer func() { + duration := time.Since(start) + r.logResult(lggr, err, duration, r.getRPCDomain(), "EthSubscribe") + err = r.wrapWS(err) + }() - sub, err := r.subscribe(ctx, forwarder.srcCh, rpcSubscriptionMethodNewHeads) + channel := make(chan *evmtypes.Head) + forwarder := newSubForwarder(channel, func(head *evmtypes.Head) *evmtypes.Head { + head.EVMChainID = ubig.New(r.chainID) + r.onNewHead(ctx, chStopInFlight, head) + return head + }, r.wrapRPCClientError) - err = forwarder.start(sub, err) + err = forwarder.start(ws.rpc.EthSubscribe(ctx, forwarder.srcCh, args...)) if err != nil { return nil, nil, err } - err = r.registerSub(forwarder, r.chStopInFlight) + err = r.registerSub(forwarder, chStopInFlight) if err != nil { return nil, nil, err } - return ch, forwarder, err + return channel, forwarder, err } func (r *RpcClient) SubscribeToFinalizedHeads(_ context.Context) (<-chan *evmtypes.Head, commontypes.Subscription, error) { @@ -202,15 +207,14 @@ func (r *RpcClient) Ping(ctx context.Context) error { func (r *RpcClient) UnsubscribeAllExcept(subs ...commontypes.Subscription) { r.stateMu.Lock() defer r.stateMu.Unlock() + + keepSubs := map[commontypes.Subscription]struct{}{} + for _, sub := range subs { + keepSubs[sub] = struct{}{} + } + for _, sub := range r.subs { - var keep bool - for _, s := range subs { - if sub == s { - keep = true - break - } - } - if !keep { + if _, keep := keepSubs[sub]; !keep { sub.Unsubscribe() } } @@ -406,29 +410,6 @@ func (r *RpcClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) err return err } -func (r *RpcClient) subscribe(ctx context.Context, channel chan<- *evmtypes.Head, args ...interface{}) (commontypes.Subscription, error) { - ctx, cancel, ws, _ := r.makeLiveQueryCtxAndSafeGetClients(ctx) - defer cancel() - lggr := r.newRqLggr().With("args", args) - - lggr.Debug("RPC call: evmclient.Client#EthSubscribe") - start := time.Now() - var sub commontypes.Subscription - sub, err := ws.rpc.EthSubscribe(ctx, channel, args...) - if err == nil { - err = r.registerSub(sub, r.chStopInFlight) - if err != nil { - sub.Unsubscribe() - return nil, err - } - } - duration := time.Since(start) - - r.logResult(lggr, err, duration, r.getRPCDomain(), "EthSubscribe") - - return sub, r.wrapWS(err) -} - // GethClient wrappers func (r *RpcClient) TransactionReceipt(ctx context.Context, txHash common.Hash) (receipt *evmtypes.Receipt, err error) { diff --git a/core/chains/evm/client/rpc_client_test.go b/core/chains/evm/client/rpc_client_test.go index dae625f1897..91651c94210 100644 --- a/core/chains/evm/client/rpc_client_test.go +++ b/core/chains/evm/client/rpc_client_test.go @@ -140,7 +140,7 @@ func TestRPCClient_SubscribeNewHead(t *testing.T) { head := <-ch require.Equal(t, chainId, head.ChainID()) }) - t.Run("Failed SubscribeNewHead returns and logs proper error", func(t *testing.T) { + t.Run("Failed SubscribeToHeads returns and logs proper error", func(t *testing.T) { server := testutils.NewWSServer(t, chainId, func(reqMethod string, reqParams gjson.Result) (resp testutils.JSONRPCResponse) { return resp }) From 62e5f55ca03a0c5e50842c0cf96eb1ae0e4f510b Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Fri, 5 Jul 2024 15:18:33 -0400 Subject: [PATCH 55/58] Update node.go --- common/client/node.go | 1 - 1 file changed, 1 deletion(-) diff --git a/common/client/node.go b/common/client/node.go index 300a0be7f95..7ef0460e538 100644 --- a/common/client/node.go +++ b/common/client/node.go @@ -112,7 +112,6 @@ type node[ // wg waits for subsidiary goroutines wg sync.WaitGroup - subsMu sync.Mutex aliveLoopSub types.Subscription finalizedBlockSub types.Subscription } From 8308ecedac758afc9871bc76019020217ebfa2b9 Mon Sep 17 00:00:00 2001 From: Dmytro Haidashenko Date: Mon, 8 Jul 2024 13:46:42 +0200 Subject: [PATCH 56/58] fixed flaky headtracker tests --- core/chains/evm/headtracker/head_tracker_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/core/chains/evm/headtracker/head_tracker_test.go b/core/chains/evm/headtracker/head_tracker_test.go index 0bc82403cac..35fd0e8499c 100644 --- a/core/chains/evm/headtracker/head_tracker_test.go +++ b/core/chains/evm/headtracker/head_tracker_test.go @@ -68,7 +68,7 @@ func TestHeadTracker_New(t *testing.T) { } ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). Maybe(). - Return(mockEth.NewSub(t), nil) + Return(nil, mockEth.NewSub(t), nil) orm := headtracker.NewORM(*testutils.FixtureChainID, db) assert.Nil(t, orm.IdempotentInsertHead(tests.Context(t), testutils.Head(1))) @@ -243,7 +243,7 @@ func TestHeadTracker_Start(t *testing.T) { ethClient := evmtest.NewEthClientMockWithDefaultChain(t) mockEth := &testutils.MockEth{EthClient: ethClient} sub := mockEth.NewSub(t) - ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil).Maybe() + ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(nil, sub, nil).Maybe() return createHeadTracker(t, ethClient, config.EVM(), config.EVM().HeadTracker(), orm) } t.Run("Starts even if failed to get initialHead", func(t *testing.T) { @@ -271,7 +271,7 @@ func TestHeadTracker_Start(t *testing.T) { head := testutils.Head(1000) ht.ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(head, nil).Once() ht.ethClient.On("LatestFinalizedBlock", mock.Anything).Return(nil, nil).Once() - ht.ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(nil, errors.New("failed to connect")).Maybe() + ht.ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(nil, nil, errors.New("failed to connect")).Maybe() ht.Start(t) tests.AssertLogEventually(t, ht.observer, "Error handling initial head") }) @@ -286,7 +286,7 @@ func TestHeadTracker_Start(t *testing.T) { ht.ethClient.On("LatestFinalizedBlock", mock.Anything).Return(finalizedHead, nil).Once() // on backfill ht.ethClient.On("LatestFinalizedBlock", mock.Anything).Return(nil, errors.New("backfill call to finalized failed")).Maybe() - ht.ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(nil, errors.New("failed to connect")).Maybe() + ht.ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(nil, nil, errors.New("failed to connect")).Maybe() ht.Start(t) tests.AssertLogEventually(t, ht.observer, "Loaded chain from DB") }) @@ -300,7 +300,7 @@ func TestHeadTracker_Start(t *testing.T) { require.NoError(t, ht.orm.IdempotentInsertHead(ctx, testutils.Head(finalizedHead.Number-1))) // on backfill ht.ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(nil, errors.New("backfill call to finalized failed")).Maybe() - ht.ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(nil, errors.New("failed to connect")).Maybe() + ht.ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(nil, nil, errors.New("failed to connect")).Maybe() ht.Start(t) tests.AssertLogEventually(t, ht.observer, "Loaded chain from DB") } From 80ddd26231b86bc547fc6540c5562967806cd7cd Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Mon, 8 Jul 2024 09:46:01 -0400 Subject: [PATCH 57/58] Update node_lifecycle_test.go --- common/client/node_lifecycle_test.go | 25 +------------------------ 1 file changed, 1 insertion(+), 24 deletions(-) diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go index 17ee6c890f6..d97625b78e1 100644 --- a/common/client/node_lifecycle_test.go +++ b/common/client/node_lifecycle_test.go @@ -122,16 +122,12 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() - rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) - rpc.On("Dial", mock.Anything).Return(nil) - rpc.On("ChainID", mock.Anything).Return(node.chainID, nil) - pollError := errors.New("failed to get ClientVersion") // 1. Return error several times, but below threshold rpc.On("Ping", mock.Anything).Return(pollError).Run(func(_ mock.Arguments) { // stays healthy while below threshold assert.Equal(t, NodeStateAlive, node.State()) - }).Times(pollFailureThreshold) + }).Times(pollFailureThreshold - 1) // 2. Successful call that is expected to reset counter rpc.On("Ping", mock.Anything).Return(nil).Once() // 3. Return error. If we have not reset the timer, we'll transition to nonAliveState @@ -414,7 +410,6 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { expectedError := errors.New("failed to subscribe to finalized heads") rpc.On("SubscribeToFinalizedHeads", mock.Anything).Return(nil, sub, expectedError).Once() rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Maybe() - rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything) lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) node := newDialedNode(t, testNodeOpts{ config: testNodeConfig{ @@ -980,15 +975,8 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() - rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) rpc.On("IsSyncing", mock.Anything).Return(false, nil) - sub := mocks.NewSubscription(t) - sub.On("Err").Return(nil) - sub.On("Unsubscribe").Once() - rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() - rpc.On("UnsubscribeAllExcept", mock.Anything, mock.Anything).Once() - setupRPCForAliveLoop(t, rpc) node.declareUnreachable() @@ -1283,13 +1271,8 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() - rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) rpc.On("IsSyncing", mock.Anything).Return(false, nil) - sub := mocks.NewSubscription(t) - sub.On("Err").Return(nil) - sub.On("Unsubscribe").Once() - rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() setupRPCForAliveLoop(t, rpc) err := node.Start(tests.Context(t)) @@ -1308,13 +1291,7 @@ func TestUnit_NodeLifecycle_start(t *testing.T) { }) defer func() { assert.NoError(t, node.close()) }() - rpc.On("Dial", mock.Anything).Return(nil) rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) - sub := mocks.NewSubscription(t) - sub.On("Err").Return(nil) - sub.On("Unsubscribe").Once() - rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() - setupRPCForAliveLoop(t, rpc) err := node.Start(tests.Context(t)) From 169e44be0a1a830bf81085d856567574cf6495b6 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Mon, 8 Jul 2024 10:04:32 -0400 Subject: [PATCH 58/58] Update node_lifecycle_test.go --- common/client/node_lifecycle_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go index d97625b78e1..081c8374090 100644 --- a/common/client/node_lifecycle_test.go +++ b/common/client/node_lifecycle_test.go @@ -86,8 +86,8 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { newSubscribedNode := func(t *testing.T, opts testNodeOpts) testNode { sub := mocks.NewSubscription(t) sub.On("Err").Return(nil) - sub.On("Unsubscribe") - opts.rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil) + sub.On("Unsubscribe").Once() + opts.rpc.On("SubscribeToHeads", mock.Anything).Return(make(<-chan Head), sub, nil).Once() return newDialedNode(t, opts) } t.Run("Stays alive and waits for signal", func(t *testing.T) {