From 09f8c7fcd63bd4cc11a193b49f808f42e3d1f37a Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Mon, 6 May 2024 18:40:29 +0200 Subject: [PATCH 1/9] [TT-849] Move TestConfig's common parts to CTF (#13046) * use latest Seth * move test config to CTF * use latest CTF that fixes eth2 genesis generation * use latest Seth * use tagged CTF version --- integration-tests/actions/private_network.go | 8 ++--- .../actions/vrf/vrfv2/setup_steps.go | 2 +- .../actions/vrf/vrfv2plus/setup_steps.go | 2 +- .../docker/cmd/internal/commands.go | 2 +- integration-tests/docker/test_env/test_env.go | 31 ++++++++++--------- .../docker/test_env/test_env_builder.go | 14 ++++----- .../docker/test_env/test_env_config.go | 12 +++---- integration-tests/go.mod | 2 +- integration-tests/go.sum | 4 +-- .../load/functions/onchain_monitoring.go | 4 +-- integration-tests/load/functions/setup.go | 4 +-- integration-tests/load/go.mod | 4 +-- integration-tests/load/go.sum | 8 ++--- .../migration/upgrade_version_test.go | 2 +- integration-tests/smoke/automation_test.go | 4 +-- integration-tests/smoke/cron_test.go | 4 +-- integration-tests/smoke/flux_test.go | 2 +- integration-tests/smoke/forwarder_ocr_test.go | 2 +- .../smoke/forwarders_ocr2_test.go | 2 +- integration-tests/smoke/keeper_test.go | 2 +- integration-tests/smoke/ocr2_test.go | 2 +- integration-tests/smoke/ocr_test.go | 2 +- integration-tests/smoke/runlog_test.go | 2 +- integration-tests/smoke/vrf_test.go | 2 +- integration-tests/testconfig/testconfig.go | 31 ++----------------- .../testconfig/testconfig_test.go | 16 +++++----- integration-tests/types/testconfigs.go | 19 ++++++------ .../universal/log_poller/helpers.go | 2 +- 28 files changed, 84 insertions(+), 107 deletions(-) diff --git a/integration-tests/actions/private_network.go b/integration-tests/actions/private_network.go index 01a084b66d8..70239a60060 100644 --- a/integration-tests/actions/private_network.go +++ b/integration-tests/actions/private_network.go @@ -3,17 +3,17 @@ package actions import ( "github.com/rs/zerolog" + ctf_config "github.com/smartcontractkit/chainlink-testing-framework/config" ctf_test_env "github.com/smartcontractkit/chainlink-testing-framework/docker/test_env" - tc "github.com/smartcontractkit/chainlink/integration-tests/testconfig" ) -func EthereumNetworkConfigFromConfig(l zerolog.Logger, config tc.GlobalTestConfig) (network ctf_test_env.EthereumNetwork, err error) { +func EthereumNetworkConfigFromConfig(l zerolog.Logger, config ctf_config.GlobalTestConfig) (network ctf_test_env.EthereumNetwork, err error) { if config.GetPrivateEthereumNetworkConfig() == nil { l.Warn().Msg("No TOML private ethereum network config found, will use old geth") ethBuilder := ctf_test_env.NewEthereumNetworkBuilder() network, err = ethBuilder. - WithEthereumVersion(ctf_test_env.EthereumVersion_Eth1). - WithExecutionLayer(ctf_test_env.ExecutionLayer_Geth). + WithEthereumVersion(ctf_config.EthereumVersion_Eth1). + WithExecutionLayer(ctf_config.ExecutionLayer_Geth). Build() return diff --git a/integration-tests/actions/vrf/vrfv2/setup_steps.go b/integration-tests/actions/vrf/vrfv2/setup_steps.go index bd41fb33e4e..ca85bdb5f19 100644 --- a/integration-tests/actions/vrf/vrfv2/setup_steps.go +++ b/integration-tests/actions/vrf/vrfv2/setup_steps.go @@ -359,7 +359,7 @@ func SetupVRFV2ForNewEnv( env, err := test_env.NewCLTestEnvBuilder(). WithTestInstance(t). WithTestConfig(&testConfig). - WithPrivateEthereumNetwork(network). + WithPrivateEthereumNetwork(network.EthereumNetworkConfig). 
WithCLNodes(len(newEnvConfig.NodesToCreate)). WithFunding(big.NewFloat(*testConfig.Common.ChainlinkNodeFunding)). WithCustomCleanup(cleanupFn). diff --git a/integration-tests/actions/vrf/vrfv2plus/setup_steps.go b/integration-tests/actions/vrf/vrfv2plus/setup_steps.go index 0b7be600cc2..ed81935fa2b 100644 --- a/integration-tests/actions/vrf/vrfv2plus/setup_steps.go +++ b/integration-tests/actions/vrf/vrfv2plus/setup_steps.go @@ -402,7 +402,7 @@ func SetupVRFV2PlusForNewEnv( env, err := test_env.NewCLTestEnvBuilder(). WithTestInstance(t). WithTestConfig(&testConfig). - WithPrivateEthereumNetwork(network). + WithPrivateEthereumNetwork(network.EthereumNetworkConfig). WithCLNodes(len(newEnvConfig.NodesToCreate)). WithFunding(big.NewFloat(*testConfig.Common.ChainlinkNodeFunding)). WithCustomCleanup(cleanupFn). diff --git a/integration-tests/docker/cmd/internal/commands.go b/integration-tests/docker/cmd/internal/commands.go index 074cfb8083d..e05e5d89fac 100644 --- a/integration-tests/docker/cmd/internal/commands.go +++ b/integration-tests/docker/cmd/internal/commands.go @@ -43,7 +43,7 @@ var StartNodesCmd = &cobra.Command{ _, err = test_env.NewCLTestEnvBuilder(). WithTestConfig(&config). - WithPrivateEthereumNetwork(network). + WithPrivateEthereumNetwork(network.EthereumNetworkConfig). WithMockAdapter(). WithCLNodes(nodeCount). WithoutCleanup(). diff --git a/integration-tests/docker/test_env/test_env.go b/integration-tests/docker/test_env/test_env.go index fc0ba355556..fd1555ec055 100644 --- a/integration-tests/docker/test_env/test_env.go +++ b/integration-tests/docker/test_env/test_env.go @@ -18,6 +18,7 @@ import ( tc "github.com/testcontainers/testcontainers-go" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + ctf_config "github.com/smartcontractkit/chainlink-testing-framework/config" "github.com/smartcontractkit/chainlink-testing-framework/docker" "github.com/smartcontractkit/chainlink-testing-framework/docker/test_env" "github.com/smartcontractkit/chainlink-testing-framework/logging" @@ -29,7 +30,6 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" d "github.com/smartcontractkit/chainlink/integration-tests/docker" - core_testconfig "github.com/smartcontractkit/chainlink/integration-tests/testconfig" ) var ( @@ -40,7 +40,7 @@ type CLClusterTestEnv struct { Cfg *TestEnvConfig DockerNetwork *tc.DockerNetwork LogStream *logstream.LogStream - TestConfig core_testconfig.GlobalTestConfig + TestConfig ctf_config.GlobalTestConfig /* components */ ClCluster *ClCluster @@ -49,7 +49,7 @@ type CLClusterTestEnv struct { sethClients map[int64]*seth.Client ContractDeployer contracts.ContractDeployer ContractLoader contracts.ContractLoader - PrivateEthereumConfigs []*test_env.EthereumNetwork // new approach to private chains, supporting eth1 and eth2 + PrivateEthereumConfigs []*ctf_config.EthereumNetworkConfig EVMNetworks []*blockchain.EVMNetwork rpcProviders map[int64]*test_env.RpcProvider l zerolog.Logger @@ -95,18 +95,11 @@ func (te *CLClusterTestEnv) ParallelTransactions(enabled bool) { } } -func (te *CLClusterTestEnv) StartEthereumNetwork(cfg *test_env.EthereumNetwork) (blockchain.EVMNetwork, test_env.RpcProvider, error) { +func (te *CLClusterTestEnv) StartEthereumNetwork(cfg *ctf_config.EthereumNetworkConfig) (blockchain.EVMNetwork, test_env.RpcProvider, error) { // if environment is being restored from a previous state, use the existing config // this might fail terribly if temporary 
folders with chain data on the host machine were removed - if te.Cfg != nil && te.Cfg.EthereumNetwork != nil { - builder := test_env.NewEthereumNetworkBuilder() - c, err := builder.WithExistingConfig(*te.Cfg.EthereumNetwork). - WithTest(te.t). - Build() - if err != nil { - return blockchain.EVMNetwork{}, test_env.RpcProvider{}, err - } - cfg = &c + if te.Cfg != nil && te.Cfg.EthereumNetworkConfig != nil { + cfg = te.Cfg.EthereumNetworkConfig } te.l.Info(). @@ -115,7 +108,15 @@ func (te *CLClusterTestEnv) StartEthereumNetwork(cfg *test_env.EthereumNetwork) Str("Custom Docker Images", fmt.Sprintf("%v", cfg.CustomDockerImages)). Msg("Starting Ethereum network") - n, rpc, err := cfg.Start() + builder := test_env.NewEthereumNetworkBuilder() + c, err := builder.WithExistingConfig(*cfg). + WithTest(te.t). + Build() + if err != nil { + return blockchain.EVMNetwork{}, test_env.RpcProvider{}, err + } + + n, rpc, err := c.Start() if err != nil { return blockchain.EVMNetwork{}, test_env.RpcProvider{}, err @@ -129,7 +130,7 @@ func (te *CLClusterTestEnv) StartMockAdapter() error { } // pass config here -func (te *CLClusterTestEnv) StartClCluster(nodeConfig *chainlink.Config, count int, secretsConfig string, testconfig core_testconfig.GlobalTestConfig, opts ...ClNodeOption) error { +func (te *CLClusterTestEnv) StartClCluster(nodeConfig *chainlink.Config, count int, secretsConfig string, testconfig ctf_config.GlobalTestConfig, opts ...ClNodeOption) error { if te.Cfg != nil && te.Cfg.ClCluster != nil { te.ClCluster = te.Cfg.ClCluster } else { diff --git a/integration-tests/docker/test_env/test_env_builder.go b/integration-tests/docker/test_env/test_env_builder.go index c2aa07c8fa9..c8b4ac8e734 100644 --- a/integration-tests/docker/test_env/test_env_builder.go +++ b/integration-tests/docker/test_env/test_env_builder.go @@ -12,6 +12,7 @@ import ( "github.com/smartcontractkit/seth" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + ctf_config "github.com/smartcontractkit/chainlink-testing-framework/config" "github.com/smartcontractkit/chainlink-testing-framework/docker/test_env" "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/logstream" @@ -23,7 +24,6 @@ import ( actions_seth "github.com/smartcontractkit/chainlink/integration-tests/actions/seth" "github.com/smartcontractkit/chainlink/integration-tests/contracts" - tc "github.com/smartcontractkit/chainlink/integration-tests/testconfig" "github.com/smartcontractkit/chainlink/integration-tests/types/config/node" "github.com/smartcontractkit/chainlink/integration-tests/utils" ) @@ -56,8 +56,8 @@ type CLTestEnvBuilder struct { cleanUpCustomFn func() chainOptionsFn []ChainOption evmClientNetworkOption []EVMClientNetworkOption - privateEthereumNetworks []*test_env.EthereumNetwork - testConfig tc.GlobalTestConfig + privateEthereumNetworks []*ctf_config.EthereumNetworkConfig + testConfig ctf_config.GlobalTestConfig /* funding */ ETHFunds *big.Float @@ -120,7 +120,7 @@ func (b *CLTestEnvBuilder) WithCLNodes(clNodesCount int) *CLTestEnvBuilder { return b } -func (b *CLTestEnvBuilder) WithTestConfig(cfg tc.GlobalTestConfig) *CLTestEnvBuilder { +func (b *CLTestEnvBuilder) WithTestConfig(cfg ctf_config.GlobalTestConfig) *CLTestEnvBuilder { b.testConfig = cfg return b } @@ -146,12 +146,12 @@ func (b *CLTestEnvBuilder) WithSeth() *CLTestEnvBuilder { return b } -func (b *CLTestEnvBuilder) WithPrivateEthereumNetwork(en test_env.EthereumNetwork) *CLTestEnvBuilder { +func (b 
*CLTestEnvBuilder) WithPrivateEthereumNetwork(en ctf_config.EthereumNetworkConfig) *CLTestEnvBuilder { b.privateEthereumNetworks = append(b.privateEthereumNetworks, &en) return b } -func (b *CLTestEnvBuilder) WithPrivateEthereumNetworks(ens []*test_env.EthereumNetwork) *CLTestEnvBuilder { +func (b *CLTestEnvBuilder) WithPrivateEthereumNetworks(ens []*ctf_config.EthereumNetworkConfig) *CLTestEnvBuilder { b.privateEthereumNetworks = ens return b } @@ -300,7 +300,7 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) { if err != nil { return nil, err } - b.privateEthereumNetworks[i] = &netWithLs + b.privateEthereumNetworks[i] = &netWithLs.EthereumNetworkConfig } } diff --git a/integration-tests/docker/test_env/test_env_config.go b/integration-tests/docker/test_env/test_env_config.go index 0902deb0c2d..9aefa9615c9 100644 --- a/integration-tests/docker/test_env/test_env_config.go +++ b/integration-tests/docker/test_env/test_env_config.go @@ -3,16 +3,16 @@ package test_env import ( "encoding/json" - cte "github.com/smartcontractkit/chainlink-testing-framework/docker/test_env" + ctf_config "github.com/smartcontractkit/chainlink-testing-framework/config" env "github.com/smartcontractkit/chainlink/integration-tests/types/envcommon" ) type TestEnvConfig struct { - Networks []string `json:"networks"` - Geth GethConfig `json:"geth"` - MockAdapter MockAdapterConfig `json:"mock_adapter"` - ClCluster *ClCluster `json:"clCluster"` - EthereumNetwork *cte.EthereumNetwork `json:"private_ethereum_config"` + Networks []string `json:"networks"` + Geth GethConfig `json:"geth"` + MockAdapter MockAdapterConfig `json:"mock_adapter"` + ClCluster *ClCluster `json:"clCluster"` + EthereumNetworkConfig *ctf_config.EthereumNetworkConfig `json:"private_ethereum_config"` } type MockAdapterConfig struct { diff --git a/integration-tests/go.mod b/integration-tests/go.mod index 3d76a656be5..babf82a7d96 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -26,7 +26,7 @@ require ( github.com/slack-go/slack v0.12.2 github.com/smartcontractkit/chainlink-automation v1.0.3 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240429120925-907b29311feb - github.com/smartcontractkit/chainlink-testing-framework v1.28.7 + github.com/smartcontractkit/chainlink-testing-framework v1.28.8 github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868 github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000 github.com/smartcontractkit/libocr v0.0.0-20240419185742-fd3cab206b2c diff --git a/integration-tests/go.sum b/integration-tests/go.sum index 61fef05bbad..95f686a6d28 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -1529,8 +1529,8 @@ github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240422172640-59d47c73ba5 github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240422172640-59d47c73ba58/go.mod h1:oV5gIuSKrPEcjQ6uB6smBsm5kXHxyydVLNyAs4V9CoQ= github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240325075535-0f7eb05ee595 h1:y6ks0HsSOhPUueOmTcoxDQ50RCS1XINlRDTemZyHjFw= github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240325075535-0f7eb05ee595/go.mod h1:vV6WfnVIbK5Q1JsIru4YcTG0T1uRpLJm6t2BgCnCSsg= -github.com/smartcontractkit/chainlink-testing-framework v1.28.7 h1:Yr93tBl5jVx1cfKywt0C0cbuObDPJ6JIU4FIsZ6bZlM= -github.com/smartcontractkit/chainlink-testing-framework v1.28.7/go.mod h1:x1zDOz8zcLjEvs9fNA9y/DMguLam/2+CJdpxX0+rM8A= +github.com/smartcontractkit/chainlink-testing-framework v1.28.8 
h1:EaxNwB/16wpISzaUn2WJ4bE3TawD3joEekIlQuWNRGo= +github.com/smartcontractkit/chainlink-testing-framework v1.28.8/go.mod h1:x1zDOz8zcLjEvs9fNA9y/DMguLam/2+CJdpxX0+rM8A= github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868 h1:FFdvEzlYwcuVHkdZ8YnZR/XomeMGbz5E2F2HZI3I3w8= github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868/go.mod h1:Kn1Hape05UzFZ7bOUnm3GVsHzP0TNrVmpfXYNHdqGGs= github.com/smartcontractkit/go-plugin v0.0.0-20240208201424-b3b91517de16 h1:TFe+FvzxClblt6qRfqEhUfa4kFQx5UobuoFGO2W4mMo= diff --git a/integration-tests/load/functions/onchain_monitoring.go b/integration-tests/load/functions/onchain_monitoring.go index 12a10ce0042..31ca8752dd3 100644 --- a/integration-tests/load/functions/onchain_monitoring.go +++ b/integration-tests/load/functions/onchain_monitoring.go @@ -7,7 +7,7 @@ import ( "github.com/rs/zerolog/log" "github.com/smartcontractkit/wasp" - tc "github.com/smartcontractkit/chainlink/integration-tests/testconfig" + ctf_config "github.com/smartcontractkit/chainlink-testing-framework/config" ) /* Monitors on-chain stats of LoadConsumer and pushes them to Loki every second */ @@ -25,7 +25,7 @@ type LoadStats struct { Empty uint32 } -func MonitorLoadStats(t *testing.T, ft *FunctionsTest, labels map[string]string, config tc.GlobalTestConfig) { +func MonitorLoadStats(t *testing.T, ft *FunctionsTest, labels map[string]string, config ctf_config.GlobalTestConfig) { go func() { updatedLabels := make(map[string]string) for k, v := range labels { diff --git a/integration-tests/load/functions/setup.go b/integration-tests/load/functions/setup.go index 4e353ff93a9..190dbbd8692 100644 --- a/integration-tests/load/functions/setup.go +++ b/integration-tests/load/functions/setup.go @@ -16,8 +16,8 @@ import ( "github.com/smartcontractkit/chainlink-testing-framework/networks" + ctf_config "github.com/smartcontractkit/chainlink-testing-framework/config" "github.com/smartcontractkit/chainlink/integration-tests/contracts" - tc "github.com/smartcontractkit/chainlink/integration-tests/testconfig" "github.com/smartcontractkit/chainlink/integration-tests/types" "github.com/smartcontractkit/chainlink/integration-tests/utils" chainlinkutils "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" @@ -50,7 +50,7 @@ type S4SecretsCfg struct { S4SetPayload string } -func SetupLocalLoadTestEnv(globalConfig tc.GlobalTestConfig, functionsConfig types.FunctionsTestConfig) (*FunctionsTest, error) { +func SetupLocalLoadTestEnv(globalConfig ctf_config.GlobalTestConfig, functionsConfig types.FunctionsTestConfig) (*FunctionsTest, error) { selectedNetwork := networks.MustGetSelectedNetworkConfig(globalConfig.GetNetworkConfig())[0] readSethCfg := globalConfig.GetSethConfig() sethCfg, err := utils.MergeSethAndEvmNetworkConfigs(selectedNetwork, *readSethCfg) diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod index c8fde175cab..c179a3619f7 100644 --- a/integration-tests/load/go.mod +++ b/integration-tests/load/go.mod @@ -17,11 +17,11 @@ require ( github.com/slack-go/slack v0.12.2 github.com/smartcontractkit/chainlink-automation v1.0.3 github.com/smartcontractkit/chainlink-common v0.1.7-0.20240429120925-907b29311feb - github.com/smartcontractkit/chainlink-testing-framework v1.28.7 + github.com/smartcontractkit/chainlink-testing-framework v1.28.8 github.com/smartcontractkit/chainlink/integration-tests v0.0.0-20240214231432-4ad5eb95178c github.com/smartcontractkit/chainlink/v2 v2.9.0-beta0.0.20240216210048-da02459ddad8 
github.com/smartcontractkit/libocr v0.0.0-20240419185742-fd3cab206b2c - github.com/smartcontractkit/seth v0.1.6 + github.com/smartcontractkit/seth v0.1.6-0.20240429143720-cacb8160ecec github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 github.com/smartcontractkit/wasp v0.4.6 github.com/stretchr/testify v1.9.0 diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum index 46646e6b289..2c31f0fa335 100644 --- a/integration-tests/load/go.sum +++ b/integration-tests/load/go.sum @@ -1512,8 +1512,8 @@ github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240422172640-59d47c73ba5 github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240422172640-59d47c73ba58/go.mod h1:oV5gIuSKrPEcjQ6uB6smBsm5kXHxyydVLNyAs4V9CoQ= github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240325075535-0f7eb05ee595 h1:y6ks0HsSOhPUueOmTcoxDQ50RCS1XINlRDTemZyHjFw= github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240325075535-0f7eb05ee595/go.mod h1:vV6WfnVIbK5Q1JsIru4YcTG0T1uRpLJm6t2BgCnCSsg= -github.com/smartcontractkit/chainlink-testing-framework v1.28.7 h1:Yr93tBl5jVx1cfKywt0C0cbuObDPJ6JIU4FIsZ6bZlM= -github.com/smartcontractkit/chainlink-testing-framework v1.28.7/go.mod h1:x1zDOz8zcLjEvs9fNA9y/DMguLam/2+CJdpxX0+rM8A= +github.com/smartcontractkit/chainlink-testing-framework v1.28.8 h1:EaxNwB/16wpISzaUn2WJ4bE3TawD3joEekIlQuWNRGo= +github.com/smartcontractkit/chainlink-testing-framework v1.28.8/go.mod h1:x1zDOz8zcLjEvs9fNA9y/DMguLam/2+CJdpxX0+rM8A= github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240227164431-18a7065e23ea h1:ZdLmNAfKRjH8AYUvjiiDGUgiWQfq/7iNpxyTkvjx/ko= github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240227164431-18a7065e23ea/go.mod h1:gCKC9w6XpNk6jm+XIk2psrkkfxhi421N9NSiFceXW88= github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868 h1:FFdvEzlYwcuVHkdZ8YnZR/XomeMGbz5E2F2HZI3I3w8= @@ -1524,8 +1524,8 @@ github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f h1:hgJ github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f/go.mod h1:MvMXoufZAtqExNexqi4cjrNYE9MefKddKylxjS+//n0= github.com/smartcontractkit/libocr v0.0.0-20240419185742-fd3cab206b2c h1:lIyMbTaF2H0Q71vkwZHX/Ew4KF2BxiKhqEXwF8rn+KI= github.com/smartcontractkit/libocr v0.0.0-20240419185742-fd3cab206b2c/go.mod h1:fb1ZDVXACvu4frX3APHZaEBp0xi1DIm34DcA0CwTsZM= -github.com/smartcontractkit/seth v0.1.6 h1:exU96KiKM/gxvp7OR8KkOXnTgbtFNepdhMBvyobFKCw= -github.com/smartcontractkit/seth v0.1.6/go.mod h1:2TMOZQ8WTAw7rR1YBbXpnad6VmT/+xDd/nXLmB7Eero= +github.com/smartcontractkit/seth v0.1.6-0.20240429143720-cacb8160ecec h1:BT1loU6TT2YqMenD7XE+aw7IeeTiC25+r1TLKAySVIg= +github.com/smartcontractkit/seth v0.1.6-0.20240429143720-cacb8160ecec/go.mod h1:2TMOZQ8WTAw7rR1YBbXpnad6VmT/+xDd/nXLmB7Eero= github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 h1:yiKnypAqP8l0OX0P3klzZ7SCcBUxy5KqTAKZmQOvSQE= github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1/go.mod h1:q6f4fe39oZPdsh1i57WznEZgxd8siidMaSFq3wdPmVg= github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 h1:Dai1bn+Q5cpeGMQwRdjOdVjG8mmFFROVkSKuUgBErRQ= diff --git a/integration-tests/migration/upgrade_version_test.go b/integration-tests/migration/upgrade_version_test.go index 47761c09e50..f89644eb815 100644 --- a/integration-tests/migration/upgrade_version_test.go +++ b/integration-tests/migration/upgrade_version_test.go @@ 
-30,7 +30,7 @@ func TestVersionUpgrade(t *testing.T) { WithTestConfig(&config). WithTestInstance(t). WithStandardCleanup(). - WithPrivateEthereumNetwork(privateNetwork). + WithPrivateEthereumNetwork(privateNetwork.EthereumNetworkConfig). WithCLNodes(1). WithStandardCleanup(). WithSeth(). diff --git a/integration-tests/smoke/automation_test.go b/integration-tests/smoke/automation_test.go index 73a7749c4e1..81d18139122 100644 --- a/integration-tests/smoke/automation_test.go +++ b/integration-tests/smoke/automation_test.go @@ -1160,7 +1160,7 @@ func setupAutomationTestDocker( env, err = test_env.NewCLTestEnvBuilder(). WithTestInstance(t). WithTestConfig(automationTestConfig). - WithPrivateEthereumNetwork(privateNetwork). + WithPrivateEthereumNetwork(privateNetwork.EthereumNetworkConfig). WithMockAdapter(). WithFunding(big.NewFloat(*automationTestConfig.GetCommonConfig().ChainlinkNodeFunding)). WithStandardCleanup(). @@ -1200,7 +1200,7 @@ func setupAutomationTestDocker( env, err = test_env.NewCLTestEnvBuilder(). WithTestInstance(t). WithTestConfig(automationTestConfig). - WithPrivateEthereumNetwork(privateNetwork). + WithPrivateEthereumNetwork(privateNetwork.EthereumNetworkConfig). WithMockAdapter(). WithCLNodes(clNodesCount). WithCLNodeConfig(clNodeConfig). diff --git a/integration-tests/smoke/cron_test.go b/integration-tests/smoke/cron_test.go index 218727b7d66..e281824f0bb 100644 --- a/integration-tests/smoke/cron_test.go +++ b/integration-tests/smoke/cron_test.go @@ -32,7 +32,7 @@ func TestCronBasic(t *testing.T) { env, err := test_env.NewCLTestEnvBuilder(). WithTestInstance(t). WithTestConfig(&config). - WithPrivateEthereumNetwork(privateNetwork). + WithPrivateEthereumNetwork(privateNetwork.EthereumNetworkConfig). WithMockAdapter(). WithCLNodes(1). WithStandardCleanup(). @@ -88,7 +88,7 @@ func TestCronJobReplacement(t *testing.T) { env, err := test_env.NewCLTestEnvBuilder(). WithTestInstance(t). WithTestConfig(&config). - WithPrivateEthereumNetwork(privateNetwork). + WithPrivateEthereumNetwork(privateNetwork.EthereumNetworkConfig). WithMockAdapter(). WithCLNodes(1). WithStandardCleanup(). diff --git a/integration-tests/smoke/flux_test.go b/integration-tests/smoke/flux_test.go index 023dd9dae89..4165e9b79b7 100644 --- a/integration-tests/smoke/flux_test.go +++ b/integration-tests/smoke/flux_test.go @@ -39,7 +39,7 @@ func TestFluxBasic(t *testing.T) { env, err := test_env.NewCLTestEnvBuilder(). WithTestInstance(t). WithTestConfig(&config). - WithPrivateEthereumNetwork(privateNetwork). + WithPrivateEthereumNetwork(privateNetwork.EthereumNetworkConfig). WithMockAdapter(). WithCLNodes(3). WithStandardCleanup(). diff --git a/integration-tests/smoke/forwarder_ocr_test.go b/integration-tests/smoke/forwarder_ocr_test.go index 5a8e51f871f..1ff132f09ab 100644 --- a/integration-tests/smoke/forwarder_ocr_test.go +++ b/integration-tests/smoke/forwarder_ocr_test.go @@ -35,7 +35,7 @@ func TestForwarderOCRBasic(t *testing.T) { env, err := test_env.NewCLTestEnvBuilder(). WithTestInstance(t). WithTestConfig(&config). - WithPrivateEthereumNetwork(privateNetwork). + WithPrivateEthereumNetwork(privateNetwork.EthereumNetworkConfig). WithMockAdapter(). WithForwarders(). WithCLNodes(6). 
diff --git a/integration-tests/smoke/forwarders_ocr2_test.go b/integration-tests/smoke/forwarders_ocr2_test.go index ee86e8cc4b6..d3aa9e85ce6 100644 --- a/integration-tests/smoke/forwarders_ocr2_test.go +++ b/integration-tests/smoke/forwarders_ocr2_test.go @@ -38,7 +38,7 @@ func TestForwarderOCR2Basic(t *testing.T) { env, err := test_env.NewCLTestEnvBuilder(). WithTestInstance(t). WithTestConfig(&config). - WithPrivateEthereumNetwork(privateNetwork). + WithPrivateEthereumNetwork(privateNetwork.EthereumNetworkConfig). WithMockAdapter(). WithCLNodeConfig(node.NewConfig(node.NewBaseConfig(), node.WithOCR2(), diff --git a/integration-tests/smoke/keeper_test.go b/integration-tests/smoke/keeper_test.go index 7f2183faeac..fbfe4c73c89 100644 --- a/integration-tests/smoke/keeper_test.go +++ b/integration-tests/smoke/keeper_test.go @@ -1166,7 +1166,7 @@ func setupKeeperTest(l zerolog.Logger, t *testing.T, config *tc.TestConfig) ( env, err := test_env.NewCLTestEnvBuilder(). WithTestInstance(t). WithTestConfig(config). - WithPrivateEthereumNetwork(privateNetwork). + WithPrivateEthereumNetwork(privateNetwork.EthereumNetworkConfig). WithCLNodes(5). WithCLNodeConfig(clNodeConfig). WithFunding(big.NewFloat(.5)). diff --git a/integration-tests/smoke/ocr2_test.go b/integration-tests/smoke/ocr2_test.go index d4f7d1e7ffd..d2df0c858c0 100644 --- a/integration-tests/smoke/ocr2_test.go +++ b/integration-tests/smoke/ocr2_test.go @@ -138,7 +138,7 @@ func prepareORCv2SmokeTestEnv(t *testing.T, l zerolog.Logger, firstRoundResult i env, err := test_env.NewCLTestEnvBuilder(). WithTestInstance(t). WithTestConfig(&config). - WithPrivateEthereumNetwork(privateNetwork). + WithPrivateEthereumNetwork(privateNetwork.EthereumNetworkConfig). WithMockAdapter(). WithCLNodeConfig(node.NewConfig(node.NewBaseConfig(), node.WithOCR2(), diff --git a/integration-tests/smoke/ocr_test.go b/integration-tests/smoke/ocr_test.go index 29e633beb15..bef08493962 100644 --- a/integration-tests/smoke/ocr_test.go +++ b/integration-tests/smoke/ocr_test.go @@ -91,7 +91,7 @@ func prepareORCv1SmokeTestEnv(t *testing.T, l zerolog.Logger, firstRoundResult i env, err := test_env.NewCLTestEnvBuilder(). WithTestInstance(t). WithTestConfig(&config). - WithPrivateEthereumNetwork(network). + WithPrivateEthereumNetwork(network.EthereumNetworkConfig). WithMockAdapter(). WithCLNodes(6). WithFunding(big.NewFloat(.5)). diff --git a/integration-tests/smoke/runlog_test.go b/integration-tests/smoke/runlog_test.go index d255fe07235..b01c5a019b1 100644 --- a/integration-tests/smoke/runlog_test.go +++ b/integration-tests/smoke/runlog_test.go @@ -36,7 +36,7 @@ func TestRunLogBasic(t *testing.T) { env, err := test_env.NewCLTestEnvBuilder(). WithTestInstance(t). WithTestConfig(&config). - WithPrivateEthereumNetwork(privateNetwork). + WithPrivateEthereumNetwork(privateNetwork.EthereumNetworkConfig). WithMockAdapter(). WithCLNodes(1). WithFunding(big.NewFloat(.1)). diff --git a/integration-tests/smoke/vrf_test.go b/integration-tests/smoke/vrf_test.go index 3a28c14be00..ed8f756396f 100644 --- a/integration-tests/smoke/vrf_test.go +++ b/integration-tests/smoke/vrf_test.go @@ -192,7 +192,7 @@ func prepareVRFtestEnv(t *testing.T, l zerolog.Logger) (*test_env.CLClusterTestE env, err := test_env.NewCLTestEnvBuilder(). WithTestInstance(t). WithTestConfig(&config). - WithPrivateEthereumNetwork(privateNetwork). + WithPrivateEthereumNetwork(privateNetwork.EthereumNetworkConfig). WithCLNodes(1). WithFunding(big.NewFloat(.1)). WithStandardCleanup(). 
diff --git a/integration-tests/testconfig/testconfig.go b/integration-tests/testconfig/testconfig.go index 30a795e1881..fbaf80a2c88 100644 --- a/integration-tests/testconfig/testconfig.go +++ b/integration-tests/testconfig/testconfig.go @@ -19,8 +19,6 @@ import ( "github.com/smartcontractkit/seth" ctf_config "github.com/smartcontractkit/chainlink-testing-framework/config" - "github.com/smartcontractkit/chainlink-testing-framework/docker/test_env" - ctf_test_env "github.com/smartcontractkit/chainlink-testing-framework/docker/test_env" k8s_config "github.com/smartcontractkit/chainlink-testing-framework/k8s/config" "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/utils/osutil" @@ -35,15 +33,6 @@ import ( vrfv2plus_config "github.com/smartcontractkit/chainlink/integration-tests/testconfig/vrfv2plus" ) -type GlobalTestConfig interface { - GetChainlinkImageConfig() *ctf_config.ChainlinkImageConfig - GetLoggingConfig() *ctf_config.LoggingConfig - GetNetworkConfig() *ctf_config.NetworkConfig - GetPrivateEthereumNetworkConfig() *test_env.EthereumNetwork - GetPyroscopeConfig() *ctf_config.PyroscopeConfig - SethConfig -} - type UpgradeableChainlinkTestConfig interface { GetChainlinkUpgradeImageConfig() *ctf_config.ChainlinkImageConfig } @@ -80,24 +69,8 @@ type Ocr2TestConfig interface { GetOCR2Config() *ocr2_config.Config } -type NamedConfiguration interface { - GetConfigurationName() string -} - -type SethConfig interface { - GetSethConfig() *seth.Config -} - type TestConfig struct { - ChainlinkImage *ctf_config.ChainlinkImageConfig `toml:"ChainlinkImage"` - ChainlinkUpgradeImage *ctf_config.ChainlinkImageConfig `toml:"ChainlinkUpgradeImage"` - Logging *ctf_config.LoggingConfig `toml:"Logging"` - Network *ctf_config.NetworkConfig `toml:"Network"` - Pyroscope *ctf_config.PyroscopeConfig `toml:"Pyroscope"` - PrivateEthereumNetwork *ctf_test_env.EthereumNetwork `toml:"PrivateEthereumNetwork"` - WaspConfig *ctf_config.WaspAutoBuildConfig `toml:"WaspAutoBuild"` - - Seth *seth.Config `toml:"Seth"` + ctf_config.TestConfig Common *Common `toml:"Common"` Automation *a_config.Config `toml:"Automation"` @@ -182,7 +155,7 @@ func (c TestConfig) GetChainlinkImageConfig() *ctf_config.ChainlinkImageConfig { return c.ChainlinkImage } -func (c TestConfig) GetPrivateEthereumNetworkConfig() *ctf_test_env.EthereumNetwork { +func (c TestConfig) GetPrivateEthereumNetworkConfig() *ctf_config.EthereumNetworkConfig { return c.PrivateEthereumNetwork } diff --git a/integration-tests/testconfig/testconfig_test.go b/integration-tests/testconfig/testconfig_test.go index 4a9dbdaade3..fd5230dac2d 100644 --- a/integration-tests/testconfig/testconfig_test.go +++ b/integration-tests/testconfig/testconfig_test.go @@ -57,13 +57,15 @@ func TestBase64ConfigRead(t *testing.T) { }, }, }, - Network: &ctf_config.NetworkConfig{ - SelectedNetworks: []string{"OPTIMISM_GOERLI"}, - RpcHttpUrls: map[string][]string{ - "OPTIMISM_GOERLI": {"http://localhost:8545"}, - }, - WalletKeys: map[string][]string{ - "OPTIMISM_GOERLI": {"0x3333333333333333333333333333333333333333"}, + TestConfig: ctf_config.TestConfig{ + Network: &ctf_config.NetworkConfig{ + SelectedNetworks: []string{"OPTIMISM_GOERLI"}, + RpcHttpUrls: map[string][]string{ + "OPTIMISM_GOERLI": {"http://localhost:8545"}, + }, + WalletKeys: map[string][]string{ + "OPTIMISM_GOERLI": {"0x3333333333333333333333333333333333333333"}, + }, }, }, } diff --git a/integration-tests/types/testconfigs.go 
b/integration-tests/types/testconfigs.go index cfebf0a3c7a..58eb1a7c8cf 100644 --- a/integration-tests/types/testconfigs.go +++ b/integration-tests/types/testconfigs.go @@ -1,52 +1,53 @@ package types import ( + ctf_config "github.com/smartcontractkit/chainlink-testing-framework/config" "github.com/smartcontractkit/chainlink-testing-framework/testreporters" tc "github.com/smartcontractkit/chainlink/integration-tests/testconfig" ) type VRFv2TestConfig interface { tc.CommonTestConfig - tc.GlobalTestConfig + ctf_config.GlobalTestConfig tc.VRFv2TestConfig } type VRFv2PlusTestConfig interface { tc.CommonTestConfig - tc.GlobalTestConfig + ctf_config.GlobalTestConfig tc.VRFv2PlusTestConfig } type FunctionsTestConfig interface { tc.CommonTestConfig - tc.GlobalTestConfig + ctf_config.GlobalTestConfig tc.FunctionsTestConfig } type AutomationTestConfig interface { - tc.GlobalTestConfig + ctf_config.GlobalTestConfig tc.CommonTestConfig tc.UpgradeableChainlinkTestConfig tc.AutomationTestConfig } type KeeperBenchmarkTestConfig interface { - tc.GlobalTestConfig + ctf_config.GlobalTestConfig tc.CommonTestConfig tc.KeeperTestConfig - tc.NamedConfiguration + ctf_config.NamedConfiguration testreporters.GrafanaURLProvider } type OcrTestConfig interface { - tc.GlobalTestConfig + ctf_config.GlobalTestConfig tc.CommonTestConfig tc.OcrTestConfig - tc.SethConfig + ctf_config.SethConfig } type Ocr2TestConfig interface { - tc.GlobalTestConfig + ctf_config.GlobalTestConfig tc.CommonTestConfig tc.Ocr2TestConfig } diff --git a/integration-tests/universal/log_poller/helpers.go b/integration-tests/universal/log_poller/helpers.go index 4759818d11c..fe3732cb47b 100644 --- a/integration-tests/universal/log_poller/helpers.go +++ b/integration-tests/universal/log_poller/helpers.go @@ -1131,7 +1131,7 @@ func SetupLogPollerTestDocker( env, err = test_env.NewCLTestEnvBuilder(). WithTestConfig(testConfig). WithTestInstance(t). - WithPrivateEthereumNetwork(privateNetwork). + WithPrivateEthereumNetwork(privateNetwork.EthereumNetworkConfig). WithCLNodes(clNodesCount). WithCLNodeConfig(clNodeConfig). WithFunding(big.NewFloat(chainlinkNodeFunding)). 
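The recurring change in this first patch is a call-site migration to the types that now live in chainlink-testing-framework/config: the local TestConfig embeds ctf_config.TestConfig, builder methods accept ctf_config.GlobalTestConfig, and tests pass the embedded EthereumNetworkConfig instead of the old test_env.EthereumNetwork. The following is a minimal illustrative sketch of the resulting test setup, not code from the patch; the helper name setupTestEnv, the node count, and the funding amount are placeholders, while the builder calls and the actions.EthereumNetworkConfigFromConfig signature are taken from the hunks above.

package smoke

import (
	"math/big"
	"testing"

	"github.com/smartcontractkit/chainlink-testing-framework/logging"

	"github.com/smartcontractkit/chainlink/integration-tests/actions"
	"github.com/smartcontractkit/chainlink/integration-tests/docker/test_env"
	tc "github.com/smartcontractkit/chainlink/integration-tests/testconfig"
)

// setupTestEnv is an illustrative helper (not part of the patch) showing the
// post-migration wiring: TestConfig embeds ctf_config.TestConfig, so &config
// still satisfies ctf_config.GlobalTestConfig, and the builder receives the
// embedded EthereumNetworkConfig rather than the full EthereumNetwork wrapper.
func setupTestEnv(t *testing.T, config tc.TestConfig) (*test_env.CLClusterTestEnv, error) {
	l := logging.GetTestLogger(t)

	// After this patch the helper accepts any ctf_config.GlobalTestConfig,
	// which the integration-tests TestConfig implements.
	privateNetwork, err := actions.EthereumNetworkConfigFromConfig(l, &config)
	if err != nil {
		return nil, err
	}

	return test_env.NewCLTestEnvBuilder().
		WithTestInstance(t).
		WithTestConfig(&config).
		// Pass the embedded config struct, not the EthereumNetwork itself.
		WithPrivateEthereumNetwork(privateNetwork.EthereumNetworkConfig).
		WithMockAdapter().
		WithCLNodes(1).
		WithFunding(big.NewFloat(0.1)).
		WithStandardCleanup().
		Build()
}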
From 0955d4657113e3e069429391783bd15bf92040b3 Mon Sep 17 00:00:00 2001 From: Cedric Date: Mon, 6 May 2024 17:49:09 +0100 Subject: [PATCH 2/9] [KS-90] Add database-backed store (#13045) * [KS-90] Add database-backed store * [KS-90] Add database-backed store --- core/services/chainlink/application.go | 4 + core/services/workflows/delegate.go | 7 +- core/services/workflows/engine.go | 236 +++++++---- core/services/workflows/engine_test.go | 258 +++++++++--- core/services/workflows/models.go | 3 +- core/services/workflows/state.go | 81 ++-- core/services/workflows/state_test.go | 109 ++--- core/services/workflows/store.go | 70 ---- core/services/workflows/store/models.go | 41 ++ core/services/workflows/store/store.go | 16 + core/services/workflows/store/store_db.go | 382 ++++++++++++++++++ .../services/workflows/store/store_db_test.go | 215 ++++++++++ core/services/workflows/store/store_memory.go | 86 ++++ .../migrations/0235_add_workflow_models.sql | 47 +++ 14 files changed, 1249 insertions(+), 306 deletions(-) delete mode 100644 core/services/workflows/store.go create mode 100644 core/services/workflows/store/models.go create mode 100644 core/services/workflows/store/store.go create mode 100644 core/services/workflows/store/store_db.go create mode 100644 core/services/workflows/store/store_db_test.go create mode 100644 core/services/workflows/store/store_memory.go create mode 100644 core/store/migrate/migrations/0235_add_workflow_models.sql diff --git a/core/services/chainlink/application.go b/core/services/chainlink/application.go index ae3db2e7a73..ef4b0d870dd 100644 --- a/core/services/chainlink/application.go +++ b/core/services/chainlink/application.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/google/uuid" "github.com/grafana/pyroscope-go" + "github.com/jonboulle/clockwork" "github.com/pkg/errors" "go.uber.org/multierr" "go.uber.org/zap/zapcore" @@ -63,6 +64,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/vrf" "github.com/smartcontractkit/chainlink/v2/core/services/webhook" "github.com/smartcontractkit/chainlink/v2/core/services/workflows" + workflowstore "github.com/smartcontractkit/chainlink/v2/core/services/workflows/store" "github.com/smartcontractkit/chainlink/v2/core/sessions" "github.com/smartcontractkit/chainlink/v2/core/sessions/ldapauth" "github.com/smartcontractkit/chainlink/v2/core/sessions/localauth" @@ -319,6 +321,7 @@ func NewApplication(opts ApplicationOpts) (Application, error) { jobORM = job.NewORM(opts.DS, pipelineORM, bridgeORM, keyStore, globalLogger) txmORM = txmgr.NewTxStore(opts.DS, globalLogger) streamRegistry = streams.NewRegistry(globalLogger, pipelineRunner) + workflowORM = workflowstore.NewDBStore(opts.DS, clockwork.NewRealClock()) ) for _, chain := range legacyEVMChains.Slice() { @@ -388,6 +391,7 @@ func NewApplication(opts ApplicationOpts) (Application, error) { globalLogger, registry, legacyEVMChains, + workflowORM, func() *p2ptypes.PeerID { if externalPeerWrapper == nil { return nil diff --git a/core/services/workflows/delegate.go b/core/services/workflows/delegate.go index 8dc440da477..9db802f9a2f 100644 --- a/core/services/workflows/delegate.go +++ b/core/services/workflows/delegate.go @@ -15,6 +15,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/job" p2ptypes "github.com/smartcontractkit/chainlink/v2/core/services/p2p/types" + "github.com/smartcontractkit/chainlink/v2/core/services/workflows/store" ) type Delegate 
struct { @@ -22,6 +23,7 @@ type Delegate struct { logger logger.Logger legacyEVMChains legacyevm.LegacyChainContainer peerID func() *p2ptypes.PeerID + store store.Store } var _ job.Delegate = (*Delegate)(nil) @@ -58,6 +60,7 @@ func (d *Delegate) ServicesForSpec(ctx context.Context, spec job.Job) ([]job.Ser Registry: d.registry, DONInfo: dinfo, PeerID: d.peerID, + Store: d.store, } engine, err := NewEngine(cfg) if err != nil { @@ -103,8 +106,8 @@ func initializeDONInfo(lggr logger.Logger) (*capabilities.DON, error) { }, nil } -func NewDelegate(logger logger.Logger, registry core.CapabilitiesRegistry, legacyEVMChains legacyevm.LegacyChainContainer, peerID func() *p2ptypes.PeerID) *Delegate { - return &Delegate{logger: logger, registry: registry, legacyEVMChains: legacyEVMChains, peerID: peerID} +func NewDelegate(logger logger.Logger, registry core.CapabilitiesRegistry, legacyEVMChains legacyevm.LegacyChainContainer, store store.Store, peerID func() *p2ptypes.PeerID) *Delegate { + return &Delegate{logger: logger, registry: registry, legacyEVMChains: legacyEVMChains, store: store, peerID: peerID} } func ValidatedWorkflowSpec(tomlString string) (job.Job, error) { diff --git a/core/services/workflows/engine.go b/core/services/workflows/engine.go index e405102e123..292ad9c6468 100644 --- a/core/services/workflows/engine.go +++ b/core/services/workflows/engine.go @@ -8,18 +8,20 @@ import ( "sync" "time" + "github.com/jonboulle/clockwork" + "github.com/smartcontractkit/chainlink-common/pkg/capabilities" "github.com/smartcontractkit/chainlink-common/pkg/services" "github.com/smartcontractkit/chainlink-common/pkg/types/core" "github.com/smartcontractkit/chainlink-common/pkg/values" "github.com/smartcontractkit/chainlink/v2/core/logger" p2ptypes "github.com/smartcontractkit/chainlink/v2/core/services/p2p/types" + "github.com/smartcontractkit/chainlink/v2/core/services/workflows/store" ) const ( // NOTE: max 32 bytes per ID - consider enforcing exactly 32 bytes? - mockedTriggerID = "cccccccccc0000000000000000000000" - mockedWorkflowID = "15c631d295ef5e32deb99a10ee6804bc4af1385568f9b3363f6552ac6dbb2cef" + mockedTriggerID = "cccccccccc0000000000000000000000" ) type donInfo struct { @@ -30,18 +32,19 @@ type donInfo struct { // Engine handles the lifecycle of a single workflow and its executions. type Engine struct { services.StateMachine - logger logger.Logger - registry core.CapabilitiesRegistry - workflow *workflow - donInfo donInfo - executionStates *inMemoryStore - pendingStepRequests chan stepRequest - triggerEvents chan capabilities.CapabilityResponse - newWorkerCh chan struct{} - stepUpdateCh chan stepState - wg sync.WaitGroup - stopCh services.StopChan - newWorkerTimeout time.Duration + logger logger.Logger + registry core.CapabilitiesRegistry + workflow *workflow + donInfo donInfo + executionStates store.Store + pendingStepRequests chan stepRequest + triggerEvents chan capabilities.CapabilityResponse + newWorkerCh chan struct{} + stepUpdateCh chan store.WorkflowExecutionStep + wg sync.WaitGroup + stopCh services.StopChan + newWorkerTimeout time.Duration + maxExecutionDuration time.Duration // testing lifecycle hook to signal when an execution is finished. onExecutionFinished func(string) @@ -53,6 +56,8 @@ type Engine struct { // Used for testing to control the retry interval // when initializing the engine. 
retryMs int + + clock clockwork.Clock } func (e *Engine) Start(ctx context.Context) error { @@ -183,7 +188,13 @@ func (e *Engine) init(ctx context.Context) { return } - e.logger.Debug("capabilities resolved, registering triggers") + e.logger.Debug("capabilities resolved, resuming in-progress workflows") + err := e.resumeInProgressExecutions(ctx) + if err != nil { + e.logger.Errorf("failed to resume workflows: %w", err) + } + + e.logger.Debug("registering triggers") for _, t := range e.workflow.triggers { err := e.registerTrigger(ctx, t) if err != nil { @@ -195,6 +206,55 @@ func (e *Engine) init(ctx context.Context) { e.afterInit(true) } +var ( + defaultOffset, defaultLimit = 0, 1_000 +) + +func (e *Engine) resumeInProgressExecutions(ctx context.Context) error { + wipExecutions, err := e.executionStates.GetUnfinished(ctx, defaultOffset, defaultLimit) + if err != nil { + return err + } + + // TODO: paginate properly + if len(wipExecutions) >= defaultLimit { + e.logger.Warnf("possible execution overflow during resumption") + } + + // Cache the dependents associated with a step. + // We may have to reprocess many executions, but should only + // need to calculate the dependents of a step once since + // they won't change. + refToDeps := map[string][]*step{} + for _, execution := range wipExecutions { + for _, step := range execution.Steps { + // NOTE: In order to determine what tasks need to be enqueued, + // we look at any completed steps, and for each dependent, + // check if they are ready to be enqueued. + // This will also handle an execution that has stalled immediately on creation, + // since we always create an execution with an initially completed trigger step. + if step.Status != store.StatusCompleted { + continue + } + + sds, ok := refToDeps[step.Ref] + if !ok { + s, err := e.workflow.dependents(step.Ref) + if err != nil { + return err + } + + sds = s + } + + for _, sd := range sds { + e.queueIfReady(execution, sd) + } + } + } + return nil +} + // initializeExecutionStrategy for `step`. // Broadly speaking, we'll use `immediateExecution` for non-target steps // and `scheduledExecution` for targets. If we don't have the necessary @@ -341,12 +401,12 @@ func (e *Engine) loop(ctx context.Context) { // Wait for a new worker to be available before dispatching a new one. // We'll do this up to newWorkerTimeout. If this expires, we'll put the // message back on the queue and keep going. - t := time.NewTimer(e.newWorkerTimeout) + t := e.clock.NewTimer(e.newWorkerTimeout) select { case <-e.newWorkerCh: e.wg.Add(1) go e.workerForStepRequest(ctx, pendingStepRequest) - case <-t.C: + case <-t.Chan(): e.logger.Errorf("timed out when spinning off worker for pending step request %+v", pendingStepRequest) e.pendingStepRequests <- pendingStepRequest } @@ -379,21 +439,23 @@ func generateExecutionID(workflowID, eventID string) (string, error) { // startExecution kicks off a new workflow execution when a trigger event is received. 
func (e *Engine) startExecution(ctx context.Context, executionID string, event values.Value) error { e.logger.Debugw("executing on a trigger event", "event", event, "executionID", executionID) - ec := &executionState{ - steps: map[string]*stepState{ + ec := &store.WorkflowExecution{ + Steps: map[string]*store.WorkflowExecutionStep{ keywordTrigger: { - outputs: &stepOutput{ - value: event, + Outputs: &store.StepOutput{ + Value: event, }, - status: statusCompleted, + Status: store.StatusCompleted, + ExecutionID: executionID, + Ref: keywordTrigger, }, }, - workflowID: e.workflow.id, - executionID: executionID, - status: statusStarted, + WorkflowID: e.workflow.id, + ExecutionID: executionID, + Status: store.StatusStarted, } - err := e.executionStates.add(ctx, ec) + err := e.executionStates.Add(ctx, ec) if err != nil { return err } @@ -413,26 +475,25 @@ func (e *Engine) startExecution(ctx context.Context, executionID string, event v return nil } -func (e *Engine) handleStepUpdate(ctx context.Context, stepUpdate stepState) error { - state, err := e.executionStates.updateStep(ctx, &stepUpdate) +func (e *Engine) handleStepUpdate(ctx context.Context, stepUpdate store.WorkflowExecutionStep) error { + state, err := e.executionStates.UpsertStep(ctx, &stepUpdate) if err != nil { return err } - switch stepUpdate.status { - case statusCompleted: - stepDependents, err := e.workflow.dependents(stepUpdate.ref) + switch stepUpdate.Status { + case store.StatusCompleted: + stepDependents, err := e.workflow.dependents(stepUpdate.Ref) if err != nil { return err } // There are no steps left to process in the current path, so let's check if // we've completed the workflow. - // If not, we'll check for any dependents that are ready to process. if len(stepDependents) == 0 { workflowCompleted := true err := e.workflow.walkDo(keywordTrigger, func(s *step) error { - step, ok := state.steps[s.Ref] + step, ok := state.Steps[s.Ref] // The step is missing from the state, // which means it hasn't been processed yet. // Let's mark `workflowCompleted` = false, and @@ -442,8 +503,8 @@ func (e *Engine) handleStepUpdate(ctx context.Context, stepUpdate stepState) err return nil } - switch step.status { - case statusCompleted, statusErrored: + switch step.Status { + case store.StatusCompleted, store.StatusErrored: default: workflowCompleted = false } @@ -454,18 +515,23 @@ func (e *Engine) handleStepUpdate(ctx context.Context, stepUpdate stepState) err } if workflowCompleted { - err := e.finishExecution(ctx, state.executionID, statusCompleted) - if err != nil { - return err - } + return e.finishExecution(ctx, state.ExecutionID, store.StatusCompleted) } } + // We haven't completed the workflow, but should we continue? + // If we've been executing for too long, let's time the workflow out and stop here. + if state.CreatedAt != nil && e.clock.Since(*state.CreatedAt) > e.maxExecutionDuration { + return e.finishExecution(ctx, state.ExecutionID, store.StatusTimeout) + } + + // Finally, since the workflow hasn't timed out or completed, let's + // check for any dependents that are ready to process. 
for _, sd := range stepDependents { e.queueIfReady(state, sd) } - case statusErrored: - err := e.finishExecution(ctx, state.executionID, statusErrored) + case store.StatusErrored: + err := e.finishExecution(ctx, state.ExecutionID, store.StatusErrored) if err != nil { return err } @@ -474,11 +540,11 @@ func (e *Engine) handleStepUpdate(ctx context.Context, stepUpdate stepState) err return nil } -func (e *Engine) queueIfReady(state executionState, step *step) { +func (e *Engine) queueIfReady(state store.WorkflowExecution, step *step) { // Check if all dependencies are completed for the current step var waitingOnDependencies bool for _, dr := range step.dependencies { - stepState, ok := state.steps[dr] + stepState, ok := state.Steps[dr] if !ok { waitingOnDependencies = true continue @@ -489,7 +555,7 @@ func (e *Engine) queueIfReady(state executionState, step *step) { // This includes cases where one of the dependent // steps has errored, since that means we shouldn't // schedule the step for execution. - if stepState.status != statusCompleted { + if stepState.Status != store.StatusCompleted { waitingOnDependencies = true } } @@ -506,7 +572,7 @@ func (e *Engine) queueIfReady(state executionState, step *step) { func (e *Engine) finishExecution(ctx context.Context, executionID string, status string) error { e.logger.Infow("finishing execution", "executionID", executionID, "status", status) - err := e.executionStates.updateStatus(ctx, executionID, status) + err := e.executionStates.UpdateStatus(ctx, executionID, status) if err != nil { return err } @@ -521,27 +587,27 @@ func (e *Engine) workerForStepRequest(ctx context.Context, msg stepRequest) { // Instantiate a child logger; in addition to the WorkflowID field the workflow // logger will already have, this adds the `stepRef` and `executionID` - l := e.logger.With("stepRef", msg.stepRef, "executionID", msg.state.executionID) + l := e.logger.With("stepRef", msg.stepRef, "executionID", msg.state.ExecutionID) l.Debugw("executing on a step event") - stepState := &stepState{ - outputs: &stepOutput{}, - executionID: msg.state.executionID, - ref: msg.stepRef, + stepState := &store.WorkflowExecutionStep{ + Outputs: &store.StepOutput{}, + ExecutionID: msg.state.ExecutionID, + Ref: msg.stepRef, } inputs, outputs, err := e.executeStep(ctx, l, msg) if err != nil { l.Errorf("error executing step request: %s", err) - stepState.outputs.err = err - stepState.status = statusErrored + stepState.Outputs.Err = err + stepState.Status = store.StatusErrored } else { l.Infow("step executed successfully", "outputs", outputs) - stepState.outputs.value = outputs - stepState.status = statusCompleted + stepState.Outputs.Value = outputs + stepState.Status = store.StatusCompleted } - stepState.inputs = inputs + stepState.Inputs = inputs // Let's try and emit the stepUpdate. // If the context is canceled, we'll just drop the update. 
@@ -577,8 +643,8 @@ func (e *Engine) executeStep(ctx context.Context, l logger.Logger, msg stepReque Inputs: inputs, Config: step.config, Metadata: capabilities.RequestMetadata{ - WorkflowID: msg.state.workflowID, - WorkflowExecutionID: msg.state.executionID, + WorkflowID: msg.state.WorkflowID, + WorkflowExecutionID: msg.state.ExecutionID, }, } @@ -670,27 +736,31 @@ func (e *Engine) Close() error { } type Config struct { - Spec string - WorkflowID string - Lggr logger.Logger - Registry core.CapabilitiesRegistry - MaxWorkerLimit int - QueueSize int - NewWorkerTimeout time.Duration - DONInfo *capabilities.DON - PeerID func() *p2ptypes.PeerID + Spec string + WorkflowID string + Lggr logger.Logger + Registry core.CapabilitiesRegistry + MaxWorkerLimit int + QueueSize int + NewWorkerTimeout time.Duration + MaxExecutionDuration time.Duration + DONInfo *capabilities.DON + PeerID func() *p2ptypes.PeerID + Store store.Store // For testing purposes only maxRetries int retryMs int afterInit func(success bool) onExecutionFinished func(weid string) + clock clockwork.Clock } const ( - defaultWorkerLimit = 100 - defaultQueueSize = 100000 - defaultNewWorkerTimeout = 2 * time.Second + defaultWorkerLimit = 100 + defaultQueueSize = 100000 + defaultNewWorkerTimeout = 2 * time.Second + defaultMaxExecutionDuration = 10 * time.Minute ) func NewEngine(cfg Config) (engine *Engine, err error) { @@ -706,6 +776,14 @@ func NewEngine(cfg Config) (engine *Engine, err error) { cfg.NewWorkerTimeout = defaultNewWorkerTimeout } + if cfg.MaxExecutionDuration == 0 { + cfg.MaxExecutionDuration = defaultMaxExecutionDuration + } + + if cfg.Store == nil { + cfg.Store = store.NewInMemoryStore() + } + if cfg.retryMs == 0 { cfg.retryMs = 5000 } @@ -718,6 +796,10 @@ func NewEngine(cfg Config) (engine *Engine, err error) { cfg.onExecutionFinished = func(weid string) {} } + if cfg.clock == nil { + cfg.clock = clockwork.NewRealClock() + } + // TODO: validation of the workflow spec // We'll need to check, among other things: // - that there are no step `ref` called `trigger` as this is reserved for any triggers @@ -747,18 +829,20 @@ func NewEngine(cfg Config) (engine *Engine, err error) { DON: cfg.DONInfo, PeerID: cfg.PeerID, }, - executionStates: newInMemoryStore(), - pendingStepRequests: make(chan stepRequest, cfg.QueueSize), - newWorkerCh: newWorkerCh, - stepUpdateCh: make(chan stepState), - triggerEvents: make(chan capabilities.CapabilityResponse), - stopCh: make(chan struct{}), - newWorkerTimeout: cfg.NewWorkerTimeout, + executionStates: cfg.Store, + pendingStepRequests: make(chan stepRequest, cfg.QueueSize), + newWorkerCh: newWorkerCh, + stepUpdateCh: make(chan store.WorkflowExecutionStep), + triggerEvents: make(chan capabilities.CapabilityResponse), + stopCh: make(chan struct{}), + newWorkerTimeout: cfg.NewWorkerTimeout, + maxExecutionDuration: cfg.MaxExecutionDuration, onExecutionFinished: cfg.onExecutionFinished, afterInit: cfg.afterInit, maxRetries: cfg.maxRetries, retryMs: cfg.retryMs, + clock: cfg.clock, } return engine, nil } diff --git a/core/services/workflows/engine_test.go b/core/services/workflows/engine_test.go index ff4c5682129..212ad37367e 100644 --- a/core/services/workflows/engine_test.go +++ b/core/services/workflows/engine_test.go @@ -4,7 +4,9 @@ import ( "context" "errors" "testing" + "time" + "github.com/jonboulle/clockwork" "github.com/shopspring/decimal" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -13,8 +15,10 @@ import ( 
"github.com/smartcontractkit/chainlink-common/pkg/values" coreCap "github.com/smartcontractkit/chainlink/v2/core/capabilities" "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" "github.com/smartcontractkit/chainlink/v2/core/logger" p2ptypes "github.com/smartcontractkit/chainlink/v2/core/services/p2p/types" + "github.com/smartcontractkit/chainlink/v2/core/services/workflows/store" ) const hardcodedWorkflow = ` @@ -71,7 +75,7 @@ type testHooks struct { } // newTestEngine creates a new engine with some test defaults. -func newTestEngine(t *testing.T, reg *coreCap.Registry, spec string) (*Engine, *testHooks) { +func newTestEngine(t *testing.T, reg *coreCap.Registry, spec string, opts ...func(c *Config)) (*Engine, *testHooks) { peerID := p2ptypes.PeerID{} initFailed := make(chan struct{}) executionFinished := make(chan string, 100) @@ -91,6 +95,10 @@ func newTestEngine(t *testing.T, reg *coreCap.Registry, spec string) (*Engine, * onExecutionFinished: func(weid string) { executionFinished <- weid }, + clock: clockwork.NewFakeClock(), + } + for _, o := range opts { + o(&cfg) } eng, err := NewEngine(cfg) require.NoError(t, err) @@ -152,14 +160,16 @@ func (m *mockCapability) UnregisterFromWorkflow(ctx context.Context, request cap type mockTriggerCapability struct { capabilities.CapabilityInfo - triggerEvent capabilities.CapabilityResponse + triggerEvent *capabilities.CapabilityResponse ch chan capabilities.CapabilityResponse } var _ capabilities.TriggerCapability = (*mockTriggerCapability)(nil) func (m *mockTriggerCapability) RegisterTrigger(ctx context.Context, req capabilities.CapabilityRequest) (<-chan capabilities.CapabilityResponse, error) { - m.ch <- m.triggerEvent + if m.triggerEvent != nil { + m.ch <- *m.triggerEvent + } return m.ch, nil } @@ -169,47 +179,70 @@ func (m *mockTriggerCapability) UnregisterTrigger(ctx context.Context, req capab func TestEngineWithHardcodedWorkflow(t *testing.T) { t.Parallel() - ctx := testutils.Context(t) - reg := coreCap.NewRegistry(logger.TestLogger(t)) - trigger, cr := mockTrigger(t) - - require.NoError(t, reg.Add(ctx, trigger)) - require.NoError(t, reg.Add(ctx, mockConsensus())) - target1 := mockTarget() - require.NoError(t, reg.Add(ctx, target1)) - - target2 := newMockCapability( - capabilities.MustNewCapabilityInfo( - "write_ethereum-testnet-sepolia", - capabilities.CapabilityTypeTarget, - "a write capability targeting ethereum sepolia testnet", - "v1.0.0", - nil, - ), - func(req capabilities.CapabilityRequest) (capabilities.CapabilityResponse, error) { - m := req.Inputs.Underlying["report"].(*values.Map) - return capabilities.CapabilityResponse{ - Value: m, - }, nil + testCases := []struct { + name string + store store.Store + }{ + { + name: "db-engine", + store: store.NewDBStore(pgtest.NewSqlxDB(t), clockwork.NewFakeClock()), }, - ) - require.NoError(t, reg.Add(ctx, target2)) - - eng, hooks := newTestEngine(t, reg, hardcodedWorkflow) - - err := eng.Start(ctx) - require.NoError(t, err) - defer eng.Close() - - eid := getExecutionId(t, eng, hooks) - assert.Equal(t, cr, <-target1.response) - assert.Equal(t, cr, <-target2.response) - - state, err := eng.executionStates.get(ctx, eid) - require.NoError(t, err) - - assert.Equal(t, state.status, statusCompleted) + { + name: "in-memory-engine", + store: store.NewInMemoryStore(), + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := testutils.Context(t) + reg := 
coreCap.NewRegistry(logger.TestLogger(t)) + + trigger, cr := mockTrigger(t) + + require.NoError(t, reg.Add(ctx, trigger)) + require.NoError(t, reg.Add(ctx, mockConsensus())) + target1 := mockTarget() + require.NoError(t, reg.Add(ctx, target1)) + + target2 := newMockCapability( + capabilities.MustNewCapabilityInfo( + "write_ethereum-testnet-sepolia", + capabilities.CapabilityTypeTarget, + "a write capability targeting ethereum sepolia testnet", + "v1.0.0", + nil, + ), + func(req capabilities.CapabilityRequest) (capabilities.CapabilityResponse, error) { + m := req.Inputs.Underlying["report"].(*values.Map) + return capabilities.CapabilityResponse{ + Value: m, + }, nil + }, + ) + require.NoError(t, reg.Add(ctx, target2)) + + eng, testHooks := newTestEngine( + t, + reg, + hardcodedWorkflow, + func(c *Config) { c.Store = tc.store }, + ) + + err := eng.Start(ctx) + require.NoError(t, err) + defer eng.Close() + + eid := getExecutionId(t, eng, testHooks) + assert.Equal(t, cr, <-target1.response) + assert.Equal(t, cr, <-target2.response) + + state, err := eng.executionStates.Get(ctx, eid) + require.NoError(t, err) + + assert.Equal(t, state.Status, store.StatusCompleted) + }) + } } const ( @@ -275,10 +308,24 @@ func mockTrigger(t *testing.T) (capabilities.TriggerCapability, capabilities.Cap cr := capabilities.CapabilityResponse{ Value: resp, } - mt.triggerEvent = cr + mt.triggerEvent = &cr return mt, cr } +func mockNoopTrigger(t *testing.T) capabilities.TriggerCapability { + mt := &mockTriggerCapability{ + CapabilityInfo: capabilities.MustNewCapabilityInfo( + "mercury-trigger", + capabilities.CapabilityTypeTrigger, + "issues a trigger when a mercury report is received.", + "v1.0.0", + nil, + ), + ch: make(chan capabilities.CapabilityResponse, 10), + } + return mt +} + func mockFailingConsensus() *mockCapability { return newMockCapability( capabilities.MustNewCapabilityInfo( @@ -357,12 +404,12 @@ func TestEngine_ErrorsTheWorkflowIfAStepErrors(t *testing.T) { defer eng.Close() eid := getExecutionId(t, eng, hooks) - state, err := eng.executionStates.get(ctx, eid) + state, err := eng.executionStates.Get(ctx, eid) require.NoError(t, err) - assert.Equal(t, state.status, statusErrored) + assert.Equal(t, state.Status, store.StatusErrored) // evm_median is the ref of our failing consensus step - assert.Equal(t, state.steps["evm_median"].status, statusErrored) + assert.Equal(t, state.Steps["evm_median"].Status, store.StatusErrored) } const ( @@ -455,14 +502,14 @@ func TestEngine_MultiStepDependencies(t *testing.T) { defer eng.Close() eid := getExecutionId(t, eng, hooks) - state, err := eng.executionStates.get(ctx, eid) + state, err := eng.executionStates.Get(ctx, eid) require.NoError(t, err) - assert.Equal(t, state.status, statusCompleted) + assert.Equal(t, state.Status, store.StatusCompleted) // The inputs to the consensus step should // be the outputs of the two dependents. 
- inputs := state.steps["evm_median"].inputs + inputs := state.Steps["evm_median"].Inputs unw, err := values.Unwrap(inputs) require.NoError(t, err) @@ -477,3 +524,116 @@ func TestEngine_MultiStepDependencies(t *testing.T) { require.NoError(t, err) assert.Equal(t, obs.([]any)[1], o) } + +func TestEngine_ResumesPendingExecutions(t *testing.T) { + t.Parallel() + ctx := testutils.Context(t) + reg := coreCap.NewRegistry(logger.TestLogger(t)) + + trigger := mockNoopTrigger(t) + resp, err := values.NewMap(map[string]any{ + "123": decimal.NewFromFloat(1.00), + "456": decimal.NewFromFloat(1.25), + "789": decimal.NewFromFloat(1.50), + }) + require.NoError(t, err) + + require.NoError(t, reg.Add(ctx, trigger)) + require.NoError(t, reg.Add(ctx, mockConsensus())) + require.NoError(t, reg.Add(ctx, mockTarget())) + + action, _ := mockAction() + require.NoError(t, reg.Add(ctx, action)) + + dbstore := store.NewDBStore(pgtest.NewSqlxDB(t), clockwork.NewFakeClock()) + ec := &store.WorkflowExecution{ + Steps: map[string]*store.WorkflowExecutionStep{ + keywordTrigger: { + Outputs: &store.StepOutput{ + Value: resp, + }, + Status: store.StatusCompleted, + ExecutionID: "", + Ref: keywordTrigger, + }, + }, + WorkflowID: "", + ExecutionID: "", + Status: store.StatusStarted, + } + err = dbstore.Add(ctx, ec) + require.NoError(t, err) + + eng, hooks := newTestEngine( + t, + reg, + multiStepWorkflow, + func(c *Config) { c.Store = dbstore }, + ) + err = eng.Start(ctx) + require.NoError(t, err) + + eid := getExecutionId(t, eng, hooks) + gotEx, err := dbstore.Get(ctx, eid) + require.NoError(t, err) + assert.Equal(t, store.StatusCompleted, gotEx.Status) +} + +func TestEngine_TimesOutOldExecutions(t *testing.T) { + t.Parallel() + ctx := testutils.Context(t) + reg := coreCap.NewRegistry(logger.TestLogger(t)) + + trigger := mockNoopTrigger(t) + resp, err := values.NewMap(map[string]any{ + "123": decimal.NewFromFloat(1.00), + "456": decimal.NewFromFloat(1.25), + "789": decimal.NewFromFloat(1.50), + }) + require.NoError(t, err) + + require.NoError(t, reg.Add(ctx, trigger)) + require.NoError(t, reg.Add(ctx, mockConsensus())) + require.NoError(t, reg.Add(ctx, mockTarget())) + + action, _ := mockAction() + require.NoError(t, reg.Add(ctx, action)) + + clock := clockwork.NewFakeClock() + dbstore := store.NewDBStore(pgtest.NewSqlxDB(t), clock) + ec := &store.WorkflowExecution{ + Steps: map[string]*store.WorkflowExecutionStep{ + keywordTrigger: { + Outputs: &store.StepOutput{ + Value: resp, + }, + Status: store.StatusCompleted, + ExecutionID: "", + Ref: keywordTrigger, + }, + }, + WorkflowID: "", + ExecutionID: "", + Status: store.StatusStarted, + } + err = dbstore.Add(ctx, ec) + require.NoError(t, err) + + eng, hooks := newTestEngine( + t, + reg, + multiStepWorkflow, + func(c *Config) { + c.Store = dbstore + c.clock = clock + }, + ) + clock.Advance(15 * time.Minute) + err = eng.Start(ctx) + require.NoError(t, err) + + _ = getExecutionId(t, eng, hooks) + gotEx, err := dbstore.Get(ctx, "") + require.NoError(t, err) + assert.Equal(t, store.StatusTimeout, gotEx.Status) +} diff --git a/core/services/workflows/models.go b/core/services/workflows/models.go index cd167403089..8dce11cabe5 100644 --- a/core/services/workflows/models.go +++ b/core/services/workflows/models.go @@ -8,11 +8,12 @@ import ( "github.com/smartcontractkit/chainlink-common/pkg/capabilities" "github.com/smartcontractkit/chainlink-common/pkg/values" + "github.com/smartcontractkit/chainlink/v2/core/services/workflows/store" ) type stepRequest struct { stepRef string - state 
executionState + state store.WorkflowExecution } // stepDefinition is the parsed representation of a step in a workflow. diff --git a/core/services/workflows/state.go b/core/services/workflows/state.go index c229b14e1dd..4026a59be0b 100644 --- a/core/services/workflows/state.go +++ b/core/services/workflows/state.go @@ -6,71 +6,44 @@ import ( "strconv" "strings" - "github.com/smartcontractkit/chainlink-common/pkg/values" -) + "github.com/smartcontractkit/chainlink/v2/core/services/workflows/store" -const ( - statusStarted = "started" - statusErrored = "errored" - statusTimeout = "timeout" - statusCompleted = "completed" + "github.com/smartcontractkit/chainlink-common/pkg/values" ) -type stepOutput struct { - err error - value values.Value -} - -type stepState struct { - executionID string - ref string - status string - - inputs *values.Map - outputs *stepOutput -} - -type executionState struct { - steps map[string]*stepState - executionID string - workflowID string - - status string -} - // copyState returns a deep copy of the input executionState -func copyState(es executionState) executionState { - steps := map[string]*stepState{} - for ref, step := range es.steps { +func copyState(es store.WorkflowExecution) store.WorkflowExecution { + steps := map[string]*store.WorkflowExecutionStep{} + for ref, step := range es.Steps { var mval *values.Map - if step.inputs != nil { - mp := values.Proto(step.inputs).GetMapValue() + if step.Inputs != nil { + mp := values.Proto(step.Inputs).GetMapValue() mval = values.FromMapValueProto(mp) } - op := values.Proto(step.outputs.value) + op := values.Proto(step.Outputs.Value) copiedov := values.FromProto(op) - newState := &stepState{ - executionID: step.executionID, - ref: step.ref, - status: step.status, + newState := &store.WorkflowExecutionStep{ + ExecutionID: step.ExecutionID, + Ref: step.Ref, + Status: step.Status, - outputs: &stepOutput{ - err: step.outputs.err, - value: copiedov, + Outputs: &store.StepOutput{ + Err: step.Outputs.Err, + Value: copiedov, }, - inputs: mval, + Inputs: mval, } steps[ref] = newState } - return executionState{ - executionID: es.executionID, - workflowID: es.workflowID, - status: es.status, - steps: steps, + return store.WorkflowExecution{ + ExecutionID: es.ExecutionID, + WorkflowID: es.WorkflowID, + Status: es.Status, + Steps: steps, } } @@ -84,7 +57,7 @@ func copyState(es executionState) executionState { // If a key has more than two parts, then we traverse the parts // to find the value we want to replace. // We support traversing both nested maps and lists and any combination of the two. 
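A minimal, self-contained sketch of the dotted-path traversal described in the comment above. It is illustrative only: it walks plain Go maps and slices rather than the values package, so none of the names below come from the patch itself.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// lookup walks a dot-separated path through nested maps and lists. In the
// real interpolateKey the first part selects a step ref and the second
// selects inputs or outputs before this kind of traversal takes over.
func lookup(root any, key string) (any, error) {
	cur := root
	for _, part := range strings.Split(key, ".") {
		switch v := cur.(type) {
		case map[string]any:
			next, ok := v[part]
			if !ok {
				return nil, fmt.Errorf("could not find key %q", part)
			}
			cur = next
		case []any:
			i, err := strconv.Atoi(part)
			if err != nil || i < 0 || i >= len(v) {
				return nil, fmt.Errorf("invalid list index %q", part)
			}
			cur = v[i]
		default:
			return nil, fmt.Errorf("cannot traverse into %T with part %q", v, part)
		}
	}
	return cur, nil
}

func main() {
	state := map[string]any{
		"evm_median": map[string]any{
			"outputs": map[string]any{
				"reports": []any{"report-0", "report-1"},
			},
		},
	}
	v, err := lookup(state, "evm_median.outputs.reports.0")
	fmt.Println(v, err) // report-0 <nil>
}

interpolateKey itself, updated to take a store.WorkflowExecution, continues below.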
-func interpolateKey(key string, state executionState) (any, error) { +func interpolateKey(key string, state store.WorkflowExecution) (any, error) { parts := strings.Split(key, ".") if len(parts) < 2 { @@ -92,7 +65,7 @@ func interpolateKey(key string, state executionState) (any, error) { } // lookup the step we want to get either input or output state from - sc, ok := state.steps[parts[0]] + sc, ok := state.Steps[parts[0]] if !ok { return "", fmt.Errorf("could not find ref `%s`", parts[0]) } @@ -100,13 +73,13 @@ func interpolateKey(key string, state executionState) (any, error) { var value values.Value switch parts[1] { case "inputs": - value = sc.inputs + value = sc.Inputs case "outputs": - if sc.outputs.err != nil { + if sc.Outputs.Err != nil { return "", fmt.Errorf("cannot interpolate ref part `%s` in `%+v`: step has errored", parts[1], sc) } - value = sc.outputs.value + value = sc.Outputs.Value default: return "", fmt.Errorf("cannot interpolate ref part `%s` in `%+v`: second part must be `inputs` or `outputs`", parts[1], sc) } @@ -153,7 +126,7 @@ var ( // identifies any values that should be replaced from `state`. // // A value `v` should be replaced if it is wrapped as follows: `$(v)`. -func findAndInterpolateAllKeys(input any, state executionState) (any, error) { +func findAndInterpolateAllKeys(input any, state store.WorkflowExecution) (any, error) { return deepMap( input, func(el string) (any, error) { diff --git a/core/services/workflows/state_test.go b/core/services/workflows/state_test.go index 0917662ccb6..ccd6cd5004d 100644 --- a/core/services/workflows/state_test.go +++ b/core/services/workflows/state_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" "github.com/smartcontractkit/chainlink-common/pkg/values" + "github.com/smartcontractkit/chainlink/v2/core/services/workflows/store" ) func TestInterpolateKey(t *testing.T) { @@ -27,18 +28,18 @@ func TestInterpolateKey(t *testing.T) { testCases := []struct { name string key string - state executionState + state store.WorkflowExecution expected any errMsg string }{ { name: "digging into a string", key: "evm_median.outputs.reports", - state: executionState{ - steps: map[string]*stepState{ + state: store.WorkflowExecution{ + Steps: map[string]*store.WorkflowExecutionStep{ "evm_median": { - outputs: &stepOutput{ - value: values.NewString(""), + Outputs: &store.StepOutput{ + Value: values.NewString(""), }, }, }, @@ -48,27 +49,27 @@ func TestInterpolateKey(t *testing.T) { { name: "ref doesn't exist", key: "evm_median.outputs.reports", - state: executionState{ - steps: map[string]*stepState{}, + state: store.WorkflowExecution{ + Steps: map[string]*store.WorkflowExecutionStep{}, }, errMsg: "could not find ref `evm_median`", }, { name: "less than 2 parts", key: "evm_median", - state: executionState{ - steps: map[string]*stepState{}, + state: store.WorkflowExecution{ + Steps: map[string]*store.WorkflowExecutionStep{}, }, errMsg: "must have at least two parts", }, { name: "second part isn't `inputs` or `outputs`", key: "evm_median.foo", - state: executionState{ - steps: map[string]*stepState{ + state: store.WorkflowExecution{ + Steps: map[string]*store.WorkflowExecutionStep{ "evm_median": { - outputs: &stepOutput{ - value: values.NewString(""), + Outputs: &store.StepOutput{ + Value: values.NewString(""), }, }, }, @@ -78,11 +79,11 @@ func TestInterpolateKey(t *testing.T) { { name: "outputs has errored", key: "evm_median.outputs", - state: executionState{ - steps: map[string]*stepState{ + state: store.WorkflowExecution{ + 
Steps: map[string]*store.WorkflowExecutionStep{ "evm_median": { - outputs: &stepOutput{ - err: errors.New("catastrophic error"), + Outputs: &store.StepOutput{ + Err: errors.New("catastrophic error"), }, }, }, @@ -92,11 +93,11 @@ func TestInterpolateKey(t *testing.T) { { name: "digging into a recursive map", key: "evm_median.outputs.reports.inner", - state: executionState{ - steps: map[string]*stepState{ + state: store.WorkflowExecution{ + Steps: map[string]*store.WorkflowExecutionStep{ "evm_median": { - outputs: &stepOutput{ - value: val, + Outputs: &store.StepOutput{ + Value: val, }, }, }, @@ -106,11 +107,11 @@ func TestInterpolateKey(t *testing.T) { { name: "missing key in map", key: "evm_median.outputs.reports.missing", - state: executionState{ - steps: map[string]*stepState{ + state: store.WorkflowExecution{ + Steps: map[string]*store.WorkflowExecutionStep{ "evm_median": { - outputs: &stepOutput{ - value: val, + Outputs: &store.StepOutput{ + Value: val, }, }, }, @@ -120,11 +121,11 @@ func TestInterpolateKey(t *testing.T) { { name: "digging into an array", key: "evm_median.outputs.reportsList.0", - state: executionState{ - steps: map[string]*stepState{ + state: store.WorkflowExecution{ + Steps: map[string]*store.WorkflowExecutionStep{ "evm_median": { - outputs: &stepOutput{ - value: val, + Outputs: &store.StepOutput{ + Value: val, }, }, }, @@ -134,11 +135,11 @@ func TestInterpolateKey(t *testing.T) { { name: "digging into an array that's too small", key: "evm_median.outputs.reportsList.2", - state: executionState{ - steps: map[string]*stepState{ + state: store.WorkflowExecution{ + Steps: map[string]*store.WorkflowExecutionStep{ "evm_median": { - outputs: &stepOutput{ - value: val, + Outputs: &store.StepOutput{ + Value: val, }, }, }, @@ -148,11 +149,11 @@ func TestInterpolateKey(t *testing.T) { { name: "digging into an array with a string key", key: "evm_median.outputs.reportsList.notAString", - state: executionState{ - steps: map[string]*stepState{ + state: store.WorkflowExecution{ + Steps: map[string]*store.WorkflowExecutionStep{ "evm_median": { - outputs: &stepOutput{ - value: val, + Outputs: &store.StepOutput{ + Value: val, }, }, }, @@ -162,11 +163,11 @@ func TestInterpolateKey(t *testing.T) { { name: "digging into an array with a negative index", key: "evm_median.outputs.reportsList.-1", - state: executionState{ - steps: map[string]*stepState{ + state: store.WorkflowExecution{ + Steps: map[string]*store.WorkflowExecutionStep{ "evm_median": { - outputs: &stepOutput{ - value: val, + Outputs: &store.StepOutput{ + Value: val, }, }, }, @@ -176,11 +177,11 @@ func TestInterpolateKey(t *testing.T) { { name: "empty element", key: "evm_median.outputs..notAString", - state: executionState{ - steps: map[string]*stepState{ + state: store.WorkflowExecution{ + Steps: map[string]*store.WorkflowExecutionStep{ "evm_median": { - outputs: &stepOutput{ - value: val, + Outputs: &store.StepOutput{ + Value: val, }, }, }, @@ -207,7 +208,7 @@ func TestInterpolateInputsFromState(t *testing.T) { testCases := []struct { name string inputs map[string]any - state executionState + state store.WorkflowExecution expected any errMsg string }{ @@ -218,11 +219,11 @@ func TestInterpolateInputsFromState(t *testing.T) { "shouldinterpolate": "$(evm_median.outputs)", }, }, - state: executionState{ - steps: map[string]*stepState{ + state: store.WorkflowExecution{ + Steps: map[string]*store.WorkflowExecutionStep{ "evm_median": { - outputs: &stepOutput{ - value: values.NewString(""), + Outputs: &store.StepOutput{ + Value: 
values.NewString(""), }, }, }, @@ -238,11 +239,11 @@ func TestInterpolateInputsFromState(t *testing.T) { inputs: map[string]any{ "foo": "bar", }, - state: executionState{ - steps: map[string]*stepState{ + state: store.WorkflowExecution{ + Steps: map[string]*store.WorkflowExecutionStep{ "evm_median": { - outputs: &stepOutput{ - value: values.NewString(""), + Outputs: &store.StepOutput{ + Value: values.NewString(""), }, }, }, diff --git a/core/services/workflows/store.go b/core/services/workflows/store.go deleted file mode 100644 index d6ef72d39b9..00000000000 --- a/core/services/workflows/store.go +++ /dev/null @@ -1,70 +0,0 @@ -package workflows - -import ( - "context" - "fmt" - "sync" -) - -// `inMemoryStore` is a temporary in-memory -// equivalent of the database table that should persist -// workflow progress. -type inMemoryStore struct { - idToState map[string]*executionState - mu sync.RWMutex -} - -func newInMemoryStore() *inMemoryStore { - return &inMemoryStore{idToState: map[string]*executionState{}} -} - -// add adds a new execution state under the given executionID -func (s *inMemoryStore) add(ctx context.Context, state *executionState) error { - s.mu.Lock() - defer s.mu.Unlock() - _, ok := s.idToState[state.executionID] - if ok { - return fmt.Errorf("execution ID %s already exists in store", state.executionID) - } - - s.idToState[state.executionID] = state - return nil -} - -// updateStep updates a step for the given executionID -func (s *inMemoryStore) updateStep(ctx context.Context, step *stepState) (executionState, error) { - s.mu.Lock() - defer s.mu.Unlock() - state, ok := s.idToState[step.executionID] - if !ok { - return executionState{}, fmt.Errorf("could not find execution %s", step.executionID) - } - - state.steps[step.ref] = step - return *state, nil -} - -// updateStatus updates the status for the given executionID -func (s *inMemoryStore) updateStatus(ctx context.Context, executionID string, status string) error { - s.mu.Lock() - defer s.mu.Unlock() - state, ok := s.idToState[executionID] - if !ok { - return fmt.Errorf("could not find execution %s", executionID) - } - - state.status = status - return nil -} - -// get gets the state for the given executionID -func (s *inMemoryStore) get(ctx context.Context, executionID string) (executionState, error) { - s.mu.RLock() - defer s.mu.RUnlock() - state, ok := s.idToState[executionID] - if !ok { - return executionState{}, fmt.Errorf("could not find execution %s", executionID) - } - - return *state, nil -} diff --git a/core/services/workflows/store/models.go b/core/services/workflows/store/models.go new file mode 100644 index 00000000000..29a1df154de --- /dev/null +++ b/core/services/workflows/store/models.go @@ -0,0 +1,41 @@ +package store + +import ( + "time" + + "github.com/smartcontractkit/chainlink-common/pkg/values" +) + +const ( + StatusStarted = "started" + StatusErrored = "errored" + StatusTimeout = "timeout" + StatusCompleted = "completed" +) + +type StepOutput struct { + Err error + Value values.Value +} + +type WorkflowExecutionStep struct { + ExecutionID string + Ref string + Status string + + Inputs *values.Map + Outputs *StepOutput + + UpdatedAt *time.Time +} + +type WorkflowExecution struct { + Steps map[string]*WorkflowExecutionStep + ExecutionID string + WorkflowID string + + Status string + CreatedAt *time.Time + UpdatedAt *time.Time + FinishedAt *time.Time +} diff --git a/core/services/workflows/store/store.go b/core/services/workflows/store/store.go new file mode 100644 index 00000000000..e77050617ab --- 
/dev/null +++ b/core/services/workflows/store/store.go @@ -0,0 +1,16 @@ +package store + +import ( + "context" +) + +type Store interface { + Add(ctx context.Context, state *WorkflowExecution) error + UpsertStep(ctx context.Context, step *WorkflowExecutionStep) (WorkflowExecution, error) + UpdateStatus(ctx context.Context, executionID string, status string) error + Get(ctx context.Context, executionID string) (WorkflowExecution, error) + GetUnfinished(ctx context.Context, offset, limit int) ([]WorkflowExecution, error) +} + +var _ Store = (*InMemoryStore)(nil) +var _ Store = (*DBStore)(nil) diff --git a/core/services/workflows/store/store_db.go b/core/services/workflows/store/store_db.go new file mode 100644 index 00000000000..73acece5b18 --- /dev/null +++ b/core/services/workflows/store/store_db.go @@ -0,0 +1,382 @@ +package store + +import ( + "context" + "errors" + "fmt" + "time" + + "google.golang.org/protobuf/proto" + + "github.com/jmoiron/sqlx" + "github.com/jonboulle/clockwork" + + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" + "github.com/smartcontractkit/chainlink-common/pkg/values" + valuespb "github.com/smartcontractkit/chainlink-common/pkg/values/pb" +) + +// `DBStore` is a postgres-backed +// data store that persists workflow progress. +type DBStore struct { + db sqlutil.DataSource + clock clockwork.Clock +} + +// `workflowExecutionRow` describes a row +// of the `workflow_executions` table +type workflowExecutionRow struct { + ID string + WorkflowID *string + Status string + CreatedAt *time.Time + UpdatedAt *time.Time + FinishedAt *time.Time +} + +// `workflowStepRow` describes a row +// of the `workflow_steps` table +type workflowStepRow struct { + ID uint + WorkflowExecutionID string `db:"workflow_execution_id"` + Ref string + Status string + Inputs []byte + OutputErr *string `db:"output_err"` + OutputValue []byte `db:"output_value"` + UpdatedAt *time.Time `db:"updated_at"` +} + +// `UpdateStatus` updates the status of the given workflow execution +func (d *DBStore) UpdateStatus(ctx context.Context, executionID string, status string) error { + sql := `UPDATE workflow_executions SET status = $1, updated_at = $2 WHERE id = $3` + + // If we're completing the workflow execution, let's also set a finished_at timestamp. + if status != StatusStarted { + sql = "UPDATE workflow_executions SET status = $1, updated_at = $2, finished_at = $2 WHERE id = $3" + } + _, err := d.db.ExecContext(ctx, sql, status, d.clock.Now(), executionID) + return err +} + +// `UpsertStep` updates the given step. This will correspond to an insert, or an update +// depending on whether a step with the ref already exists. +func (d *DBStore) UpsertStep(ctx context.Context, stepState *WorkflowExecutionStep) (WorkflowExecution, error) { + step, err := stateToStep(stepState) + if err != nil { + return WorkflowExecution{}, err + } + + err = d.upsertSteps(ctx, []workflowStepRow{step}) + if err != nil { + return WorkflowExecution{}, err + } + + return d.Get(ctx, step.WorkflowExecutionID) +} + +// `Get` fetches the ExecutionState from the database. 
+func (d *DBStore) Get(ctx context.Context, executionID string) (WorkflowExecution, error) { + wex := &workflowExecutionRow{} + err := d.db.GetContext(ctx, wex, `SELECT * FROM workflow_executions WHERE id = $1`, executionID) + if err != nil { + return WorkflowExecution{}, err + } + + ws := []workflowStepRow{} + err = d.db.SelectContext(ctx, &ws, `SELECT * FROM workflow_steps WHERE workflow_execution_id = $1`, wex.ID) + if err != nil { + return WorkflowExecution{}, err + } + + refToStep := map[string]*WorkflowExecutionStep{} + for _, s := range ws { + ss, err := stepToState(s) + if err != nil { + return WorkflowExecution{}, err + } + + refToStep[s.Ref] = ss + } + + var workflowID string + if wex.WorkflowID != nil { + workflowID = *wex.WorkflowID + } + + es := WorkflowExecution{ + ExecutionID: wex.ID, + WorkflowID: workflowID, + Status: wex.Status, + Steps: refToStep, + CreatedAt: wex.CreatedAt, + UpdatedAt: wex.UpdatedAt, + FinishedAt: wex.FinishedAt, + } + return es, nil +} + +func stepToState(step workflowStepRow) (*WorkflowExecutionStep, error) { + var inputs *values.Map + if len(step.Inputs) > 0 { + vmProto := &valuespb.Map{} + err := proto.Unmarshal(step.Inputs, vmProto) + if err != nil { + return nil, err + } + + inputs = values.FromMapValueProto(vmProto) + } + + var ( + outputErr error + outputs values.Value + ) + + if step.OutputErr != nil { + outputErr = errors.New(*step.OutputErr) + } + + if len(step.OutputValue) != 0 { + vProto := &valuespb.Value{} + err := proto.Unmarshal(step.OutputValue, vProto) + if err != nil { + return nil, err + } + + outputs = values.FromProto(vProto) + } + + var so *StepOutput + if outputErr != nil || outputs != nil { + so = &StepOutput{ + Err: outputErr, + Value: outputs, + } + } + + return &WorkflowExecutionStep{ + ExecutionID: step.WorkflowExecutionID, + Ref: step.Ref, + Status: step.Status, + Inputs: inputs, + Outputs: so, + }, nil +} + +func stateToStep(state *WorkflowExecutionStep) (workflowStepRow, error) { + var inpb []byte + if state.Inputs != nil { + p := values.Proto(state.Inputs).GetMapValue() + ib, err := proto.Marshal(p) + if err != nil { + return workflowStepRow{}, err + } + inpb = ib + } + + wsr := workflowStepRow{ + WorkflowExecutionID: state.ExecutionID, + Ref: state.Ref, + Status: state.Status, + Inputs: inpb, + } + + if state.Outputs == nil { + return wsr, nil + } + + if state.Outputs.Value != nil { + p := values.Proto(state.Outputs.Value) + ob, err := proto.Marshal(p) + if err != nil { + return workflowStepRow{}, err + } + + wsr.OutputValue = ob + } + + if state.Outputs.Err != nil { + errs := state.Outputs.Err.Error() + wsr.OutputErr = &errs + } + return wsr, nil +} + +// `Add` creates the relevant workflow_execution and workflow_step entries +// to persist the passed in ExecutionState. 
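Taken together with UpsertStep and UpdateStatus above, a caller drives the write path roughly as sketched here. This is a hedged illustration against the Store API introduced in this patch only; ds, clk, the "exec-1" id and the "evm_median" ref are stand-ins rather than values used anywhere in the change.

package sketch

import (
	"context"

	"github.com/jonboulle/clockwork"

	"github.com/smartcontractkit/chainlink-common/pkg/sqlutil"

	"github.com/smartcontractkit/chainlink/v2/core/services/workflows/store"
)

// persistExecution records one execution end to end: create it, persist a
// finished step, then mark the whole execution completed.
func persistExecution(ctx context.Context, ds sqlutil.DataSource, clk clockwork.Clock) error {
	s := store.NewDBStore(ds, clk)

	ex := &store.WorkflowExecution{
		ExecutionID: "exec-1",
		Status:      store.StatusStarted,
		Steps:       map[string]*store.WorkflowExecutionStep{},
	}
	if err := s.Add(ctx, ex); err != nil {
		return err
	}

	// UpsertStep returns the merged execution state after the step is written.
	if _, err := s.UpsertStep(ctx, &store.WorkflowExecutionStep{
		ExecutionID: ex.ExecutionID,
		Ref:         "evm_median",
		Status:      store.StatusCompleted,
	}); err != nil {
		return err
	}

	return s.UpdateStatus(ctx, ex.ExecutionID, store.StatusCompleted)
}

The concrete Add implementation follows.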
+func (d *DBStore) Add(ctx context.Context, state *WorkflowExecution) error { + return d.transact(ctx, func(db *DBStore) error { + var wid *string + if state.WorkflowID != "" { + wid = &state.WorkflowID + } + wex := &workflowExecutionRow{ + ID: state.ExecutionID, + WorkflowID: wid, + Status: state.Status, + } + err := db.insertWorkflowExecution(ctx, wex) + if err != nil { + return fmt.Errorf("could not insert workflow execution %s: %w", state.ExecutionID, err) + } + + ws := []workflowStepRow{} + for _, step := range state.Steps { + step, err := stateToStep(step) + if err != nil { + return err + } + ws = append(ws, step) + } + if len(ws) > 0 { + return db.upsertSteps(ctx, ws) + } + return nil + }) +} + +func (d *DBStore) upsertSteps(ctx context.Context, steps []workflowStepRow) error { + for _, s := range steps { + now := d.clock.Now() + s.UpdatedAt = &now + } + + sql := ` + INSERT INTO + workflow_steps(workflow_execution_id, ref, status, inputs, output_err, output_value, updated_at) + VALUES (:workflow_execution_id, :ref, :status, :inputs, :output_err, :output_value, :updated_at) + ON CONFLICT ON CONSTRAINT uniq_workflow_execution_id_ref + DO UPDATE SET + workflow_execution_id = EXCLUDED.workflow_execution_id, + ref = EXCLUDED.ref, + status = EXCLUDED.status, + inputs = EXCLUDED.inputs, + output_err = EXCLUDED.output_err, + output_value = EXCLUDED.output_value, + updated_at = EXCLUDED.updated_at; + ` + stmt, args, err := sqlx.Named(sql, steps) + if err != nil { + return err + } + stmt = d.db.Rebind(stmt) + _, err = d.db.ExecContext(ctx, stmt, args...) + return err +} + +func (d *DBStore) insertWorkflowExecution(ctx context.Context, execution *workflowExecutionRow) error { + sql := ` + INSERT INTO + workflow_executions(id, workflow_id, status, created_at) + VALUES ($1, $2, $3, $4) + ` + _, err := d.db.ExecContext(ctx, sql, execution.ID, execution.WorkflowID, execution.Status, d.clock.Now()) + return err +} + +func (d *DBStore) transact(ctx context.Context, fn func(*DBStore) error) error { + return sqlutil.Transact( + ctx, + func(ds sqlutil.DataSource) *DBStore { + return &DBStore{db: ds, clock: d.clock} + }, + d.db, + nil, + fn, + ) +} + +func (d *DBStore) GetUnfinished(ctx context.Context, offset, limit int) ([]WorkflowExecution, error) { + sql := ` + SELECT + workflow_steps.workflow_execution_id AS ws_workflow_execution_id, + workflow_steps.ref AS ws_ref, + workflow_steps.status AS ws_status, + workflow_steps.inputs AS ws_inputs, + workflow_steps.output_err AS ws_output_err, + workflow_steps.output_value AS ws_output_value, + workflow_steps.updated_at AS ws_updated_at, + workflow_executions.id AS we_id, + workflow_executions.workflow_id AS we_workflow_id, + workflow_executions.status AS we_status, + workflow_executions.created_at AS we_created_at, + workflow_executions.updated_at AS we_updated_at, + workflow_executions.finished_at AS we_finished_at + FROM workflow_executions + JOIN workflow_steps + ON workflow_steps.workflow_execution_id = workflow_executions.id + WHERE workflow_executions.status = $1 + ORDER BY workflow_executions.created_at DESC + LIMIT $2 + OFFSET $3 + ` + joinRecords := []struct { + // WorkflowExecutionStep fields + WSWorkflowExecutionID string `db:"ws_workflow_execution_id"` + WSRef string `db:"ws_ref"` + WSStatus string `db:"ws_status"` + WSInputs []byte `db:"ws_inputs"` + WSOutputErr *string `db:"ws_output_err"` + WSOutputValue []byte `db:"ws_output_value"` + WSUpdatedAt *time.Time `db:"ws_updated_at"` + + // WorkflowExecution fields + WEID string `db:"we_id"` + 
WEWorkflowID *string `db:"we_workflow_id"` + WEStatus string `db:"we_status"` + WECreatedAt *time.Time `db:"we_created_at"` + WEUpdatedAt *time.Time `db:"we_updated_at"` + WEFinishedAt *time.Time `db:"we_finished_at"` + }{} + err := d.db.SelectContext(ctx, &joinRecords, sql, StatusStarted, limit, offset) + if err != nil { + return []WorkflowExecution{}, err + } + + idToExecutionState := map[string]*WorkflowExecution{} + for _, jr := range joinRecords { + var wid string + if jr.WEWorkflowID != nil { + wid = *jr.WEWorkflowID + } + if _, ok := idToExecutionState[jr.WEID]; !ok { + idToExecutionState[jr.WEID] = &WorkflowExecution{ + ExecutionID: jr.WEID, + WorkflowID: wid, + Status: jr.WEStatus, + Steps: map[string]*WorkflowExecutionStep{}, + CreatedAt: jr.WECreatedAt, + UpdatedAt: jr.WEUpdatedAt, + FinishedAt: jr.WEFinishedAt, + } + } + + state, err := stepToState(workflowStepRow{ + WorkflowExecutionID: jr.WSWorkflowExecutionID, + Ref: jr.WSRef, + OutputErr: jr.WSOutputErr, + OutputValue: jr.WSOutputValue, + Inputs: jr.WSInputs, + Status: jr.WSStatus, + UpdatedAt: jr.WSUpdatedAt, + }) + if err != nil { + return nil, err + } + + es := idToExecutionState[jr.WEID] + es.Steps[state.Ref] = state + } + + states := []WorkflowExecution{} + for _, s := range idToExecutionState { + states = append(states, *s) + } + + return states, nil +} + +func NewDBStore(ds sqlutil.DataSource, clock clockwork.Clock) *DBStore { + return &DBStore{db: ds, clock: clock} +} diff --git a/core/services/workflows/store/store_db_test.go b/core/services/workflows/store/store_db_test.go new file mode 100644 index 00000000000..e41f4857363 --- /dev/null +++ b/core/services/workflows/store/store_db_test.go @@ -0,0 +1,215 @@ +package store + +import ( + "crypto/rand" + "encoding/hex" + "errors" + "testing" + + "github.com/jonboulle/clockwork" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" + "github.com/smartcontractkit/chainlink-common/pkg/values" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" +) + +func randomID() string { + b := make([]byte, 32) + _, err := rand.Read(b) + if err != nil { + panic(err) + } + return hex.EncodeToString(b) +} + +func Test_StoreDB(t *testing.T) { + db := pgtest.NewSqlxDB(t) + store := &DBStore{db: db, clock: clockwork.NewFakeClock()} + + id := randomID() + es := WorkflowExecution{ + Steps: map[string]*WorkflowExecutionStep{ + "step1": { + ExecutionID: id, + Ref: "step1", + Status: "completed", + }, + "step2": { + ExecutionID: id, + Ref: "step2", + Status: "started", + }, + }, + ExecutionID: id, + Status: "started", + } + + err := store.Add(tests.Context(t), &es) + require.NoError(t, err) + + gotEs, err := store.Get(tests.Context(t), es.ExecutionID) + // Zero out the created at timestamp; this isn't present on `es` + // but is added by the db store. 
+ gotEs.CreatedAt = nil + require.NoError(t, err) + assert.Equal(t, es, gotEs) +} + +func Test_StoreDB_DuplicateEntry(t *testing.T) { + db := pgtest.NewSqlxDB(t) + store := &DBStore{db: db, clock: clockwork.NewFakeClock()} + + id := randomID() + es := WorkflowExecution{ + Steps: map[string]*WorkflowExecutionStep{ + "step1": { + ExecutionID: id, + Ref: "step1", + Status: "completed", + }, + "step2": { + ExecutionID: id, + Ref: "step2", + Status: "started", + }, + }, + ExecutionID: id, + Status: "started", + } + + err := store.Add(tests.Context(t), &es) + require.NoError(t, err) + + err = store.Add(tests.Context(t), &es) + assert.ErrorContains(t, err, "duplicate key value violates") +} + +func Test_StoreDB_UpdateStatus(t *testing.T) { + db := pgtest.NewSqlxDB(t) + store := &DBStore{db: db, clock: clockwork.NewFakeClock()} + + id := randomID() + es := WorkflowExecution{ + Steps: map[string]*WorkflowExecutionStep{ + "step1": { + ExecutionID: id, + Ref: "step1", + Status: "completed", + }, + "step2": { + ExecutionID: id, + Ref: "step2", + Status: "started", + }, + }, + ExecutionID: id, + Status: "started", + } + + err := store.Add(tests.Context(t), &es) + require.NoError(t, err) + + completedStatus := "completed" + err = store.UpdateStatus(tests.Context(t), es.ExecutionID, "completed") + require.NoError(t, err) + + gotEs, err := store.Get(tests.Context(t), es.ExecutionID) + require.NoError(t, err) + + assert.Equal(t, gotEs.Status, completedStatus) +} + +func Test_StoreDB_UpdateStep(t *testing.T) { + db := pgtest.NewSqlxDB(t) + store := &DBStore{db: db, clock: clockwork.NewFakeClock()} + + id := randomID() + stepOne := &WorkflowExecutionStep{ + ExecutionID: id, + Ref: "step1", + Status: "completed", + } + stepTwo := &WorkflowExecutionStep{ + ExecutionID: id, + Ref: "step2", + Status: "started", + } + es := WorkflowExecution{ + Steps: map[string]*WorkflowExecutionStep{ + "step1": stepOne, + "step2": stepTwo, + }, + ExecutionID: id, + Status: "started", + } + + err := store.Add(tests.Context(t), &es) + require.NoError(t, err) + + stepOne.Status = "completed" + nm, err := values.NewMap(map[string]any{"hello": "world"}) + require.NoError(t, err) + + stepOne.Inputs = nm + stepOne.Outputs = &StepOutput{Err: errors.New("some error")} + + es, err = store.UpsertStep(tests.Context(t), stepOne) + require.NoError(t, err) + + gotStep := es.Steps[stepOne.Ref] + assert.Equal(t, stepOne, gotStep) + + stepTwo.Outputs = &StepOutput{Value: nm} + es, err = store.UpsertStep(tests.Context(t), stepTwo) + require.NoError(t, err) + + gotStep = es.Steps[stepTwo.Ref] + assert.Equal(t, stepTwo, gotStep) +} + +func Test_StoreDB_GetUnfinishedSteps(t *testing.T) { + db := pgtest.NewSqlxDB(t) + store := &DBStore{db: db, clock: clockwork.NewFakeClock()} + + id := randomID() + stepOne := &WorkflowExecutionStep{ + ExecutionID: id, + Ref: "step1", + Status: "completed", + } + stepTwo := &WorkflowExecutionStep{ + ExecutionID: id, + Ref: "step2", + Status: "started", + } + es := WorkflowExecution{ + Steps: map[string]*WorkflowExecutionStep{ + "step1": stepOne, + "step2": stepTwo, + }, + ExecutionID: id, + Status: "started", + } + + err := store.Add(tests.Context(t), &es) + require.NoError(t, err) + + id = randomID() + esTwo := WorkflowExecution{ + ExecutionID: id, + Status: "completed", + Steps: map[string]*WorkflowExecutionStep{}, + } + err = store.Add(tests.Context(t), &esTwo) + require.NoError(t, err) + + states, err := store.GetUnfinished(tests.Context(t), 0, 100) + require.NoError(t, err) + + assert.Len(t, states, 1) + // Zero 
out the completedAt timestamp + states[0].CreatedAt = nil + assert.Equal(t, es, states[0]) +} diff --git a/core/services/workflows/store/store_memory.go b/core/services/workflows/store/store_memory.go new file mode 100644 index 00000000000..7c8226c5d9c --- /dev/null +++ b/core/services/workflows/store/store_memory.go @@ -0,0 +1,86 @@ +package store + +import ( + "context" + "fmt" + "sync" +) + +// `InMemoryStore` is a temporary in-memory +// equivalent of the database table that should persist +// workflow progress. +type InMemoryStore struct { + idToState map[string]*WorkflowExecution + mu sync.RWMutex +} + +func NewInMemoryStore() *InMemoryStore { + return &InMemoryStore{idToState: map[string]*WorkflowExecution{}} +} + +// Add adds a new execution state under the given executionID +func (s *InMemoryStore) Add(ctx context.Context, state *WorkflowExecution) error { + s.mu.Lock() + defer s.mu.Unlock() + _, ok := s.idToState[state.ExecutionID] + if ok { + return fmt.Errorf("execution ID %s already exists in store", state.ExecutionID) + } + + s.idToState[state.ExecutionID] = state + return nil +} + +// UpsertStep updates a step for the given executionID +func (s *InMemoryStore) UpsertStep(ctx context.Context, step *WorkflowExecutionStep) (WorkflowExecution, error) { + s.mu.Lock() + defer s.mu.Unlock() + state, ok := s.idToState[step.ExecutionID] + if !ok { + return WorkflowExecution{}, fmt.Errorf("could not find execution %s", step.ExecutionID) + } + + state.Steps[step.Ref] = step + return *state, nil +} + +// UpdateStatus updates the status for the given executionID +func (s *InMemoryStore) UpdateStatus(ctx context.Context, executionID string, status string) error { + s.mu.Lock() + defer s.mu.Unlock() + state, ok := s.idToState[executionID] + if !ok { + return fmt.Errorf("could not find execution %s", executionID) + } + + state.Status = status + return nil +} + +// Get gets the state for the given executionID +func (s *InMemoryStore) Get(ctx context.Context, executionID string) (WorkflowExecution, error) { + s.mu.RLock() + defer s.mu.RUnlock() + state, ok := s.idToState[executionID] + if !ok { + return WorkflowExecution{}, fmt.Errorf("could not find execution %s", executionID) + } + + return *state, nil +} + +// GetUnfinished gets the states for execution that are in a started state +// Offset and limit are ignored for the in-memory store. 
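The DB-backed store, in contrast, does apply offset and limit in its query, so a caller that wants to resume work can ask for a bounded batch of unfinished executions. A small sketch against the Store interface only; the batch size of 100 and the resume logging are illustrative, not part of the patch.

package sketch

import (
	"context"
	"fmt"

	"github.com/smartcontractkit/chainlink/v2/core/services/workflows/store"
)

// resumeStarted fetches executions still marked as started and hands them
// back to whatever will finish them; here it only logs them.
func resumeStarted(ctx context.Context, s store.Store) error {
	unfinished, err := s.GetUnfinished(ctx, 0, 100) // offset 0, first 100 matches
	if err != nil {
		return err
	}
	for _, ex := range unfinished {
		fmt.Printf("resuming execution %s with %d recorded steps\n", ex.ExecutionID, len(ex.Steps))
	}
	return nil
}

The in-memory implementation below simply filters its map and ignores the two arguments.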
+func (s *InMemoryStore) GetUnfinished(ctx context.Context, offset, limit int) ([]WorkflowExecution, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + states := []WorkflowExecution{} + for _, s := range s.idToState { + if s.Status == StatusStarted { + states = append(states, *s) + } + } + + return states, nil +} diff --git a/core/store/migrate/migrations/0235_add_workflow_models.sql b/core/store/migrate/migrations/0235_add_workflow_models.sql new file mode 100644 index 00000000000..bd159b3a9d2 --- /dev/null +++ b/core/store/migrate/migrations/0235_add_workflow_models.sql @@ -0,0 +1,47 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TYPE workflow_status AS ENUM ( + 'started', + 'errored', + 'timeout', + 'completed' +); + +ALTER TABLE workflow_specs + ADD CONSTRAINT fk_unique_workflow_id unique(workflow_id); + +CREATE TABLE workflow_executions ( + id varchar(64) PRIMARY KEY, + workflow_id varchar(64) references workflow_specs(workflow_id), + status workflow_status NOT NULL, + created_at timestamp with time zone, + updated_at timestamp with time zone, + finished_at timestamp with time zone +); + +CREATE TABLE workflow_steps ( + id SERIAL PRIMARY KEY, + workflow_execution_id varchar(64) references workflow_executions(id) NOT NULL, + ref text NOT NULL, + status workflow_status NOT NULL, + inputs bytea, + output_err text, + output_value bytea, + updated_at timestamp with time zone +); + +ALTER TABLE workflow_steps + ADD CONSTRAINT uniq_workflow_execution_id_ref unique(workflow_execution_id, ref); + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +ALTER TABLE workflow_steps + DROP CONSTRAINT uniq_workflow_execution_id_ref; +DROP TABLE workflow_steps; +DROP TABLE workflow_executions; +ALTER TABLE workflow_specs + DROP CONSTRAINT fk_unique_workflow_id; +DROP TYPE workflow_status; +-- +goose StatementEnd From 466d1617607712263657029adf7ff4dd9713b3b3 Mon Sep 17 00:00:00 2001 From: Dylan Tinianov Date: Mon, 6 May 2024 13:30:35 -0400 Subject: [PATCH 3/9] Fix race condition in Poller tests (#13110) * Create polling channel * Update poller_test.go --- common/client/poller.go | 8 +++++--- common/client/poller_test.go | 40 ++++++++---------------------------- 2 files changed, 14 insertions(+), 34 deletions(-) diff --git a/common/client/poller.go b/common/client/poller.go index ebdcbd66283..d6080722c5c 100644 --- a/common/client/poller.go +++ b/common/client/poller.go @@ -27,10 +27,11 @@ type Poller[T any] struct { wg sync.WaitGroup } -// NewPoller creates a new Poller instance +// NewPoller creates a new Poller instance and returns a channel to receive the polled data func NewPoller[ T any, -](pollingInterval time.Duration, pollingFunc func(ctx context.Context) (T, error), pollingTimeout time.Duration, channel chan<- T, logger logger.Logger) Poller[T] { +](pollingInterval time.Duration, pollingFunc func(ctx context.Context) (T, error), pollingTimeout time.Duration, logger logger.Logger) (Poller[T], <-chan T) { + channel := make(chan T) return Poller[T]{ pollingInterval: pollingInterval, pollingFunc: pollingFunc, @@ -39,7 +40,7 @@ func NewPoller[ logger: logger, errCh: make(chan error), stopCh: make(chan struct{}), - } + }, channel } var _ types.Subscription = &Poller[any]{} @@ -58,6 +59,7 @@ func (p *Poller[T]) Unsubscribe() { close(p.stopCh) p.wg.Wait() close(p.errCh) + close(p.channel) return nil }) } diff --git a/common/client/poller_test.go b/common/client/poller_test.go index 3f11c759adb..82a05b5dfc7 100644 --- a/common/client/poller_test.go +++ b/common/client/poller_test.go 
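For orientation before the test updates below: with the new signature the channel is created by NewPoller and closed by Unsubscribe, so a consumer only ranges over it. The following sketch assumes it sits alongside the Poller in the same package, with that package's existing context, time and logger imports; Head, pollFunc, lggr and process stand in for whatever the caller already has, and the intervals are arbitrary.

// consumeHeads shows the intended call pattern against the new NewPoller signature.
func consumeHeads(pollFunc func(ctx context.Context) (Head, error), lggr logger.Logger, process func(Head)) (stop func(), err error) {
	// the channel is owned by the poller: created here, closed by Unsubscribe
	poller, heads := NewPoller[Head](time.Second, pollFunc, 10*time.Second, lggr)
	if err := poller.Start(); err != nil {
		return nil, err
	}
	go func() {
		// terminates once Unsubscribe closes the channel
		for head := range heads {
			process(head)
		}
	}()
	return poller.Unsubscribe, nil
}

The poller_test.go hunks below adapt the existing tests to this ownership change.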
@@ -23,10 +23,7 @@ func Test_Poller(t *testing.T) { return nil, nil } - channel := make(chan Head, 1) - defer close(channel) - - poller := NewPoller[Head](time.Millisecond, pollFunc, time.Second, channel, lggr) + poller, _ := NewPoller[Head](time.Millisecond, pollFunc, time.Second, lggr) err := poller.Start() require.NoError(t, err) @@ -50,12 +47,8 @@ func Test_Poller(t *testing.T) { return h.ToMockHead(t), nil } - // data channel to receive updates from the poller - channel := make(chan Head, 1) - defer close(channel) - // Create poller and start to receive data - poller := NewPoller[Head](time.Millisecond, pollFunc, time.Second, channel, lggr) + poller, channel := NewPoller[Head](time.Millisecond, pollFunc, time.Second, lggr) require.NoError(t, poller.Start()) defer poller.Unsubscribe() @@ -79,14 +72,10 @@ func Test_Poller(t *testing.T) { return nil, fmt.Errorf("polling error %d", pollNumber) } - // data channel to receive updates from the poller - channel := make(chan Head, 1) - defer close(channel) - olggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) // Create poller and subscribe to receive data - poller := NewPoller[Head](time.Millisecond, pollFunc, time.Second, channel, olggr) + poller, _ := NewPoller[Head](time.Millisecond, pollFunc, time.Second, olggr) require.NoError(t, poller.Start()) defer poller.Unsubscribe() @@ -114,14 +103,10 @@ func Test_Poller(t *testing.T) { // Set instant timeout pollingTimeout := time.Duration(0) - // data channel to receive updates from the poller - channel := make(chan Head, 1) - defer close(channel) - olggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) // Create poller and subscribe to receive data - poller := NewPoller[Head](time.Millisecond, pollFunc, pollingTimeout, channel, olggr) + poller, _ := NewPoller[Head](time.Millisecond, pollFunc, pollingTimeout, olggr) require.NoError(t, poller.Start()) defer poller.Unsubscribe() @@ -146,14 +131,10 @@ func Test_Poller(t *testing.T) { // Set long timeout pollingTimeout := time.Minute - // data channel to receive updates from the poller - channel := make(chan Head, 1) - defer close(channel) - olggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) // Create poller and subscribe to receive data - poller := NewPoller[Head](time.Millisecond, pollFunc, pollingTimeout, channel, olggr) + poller, _ := NewPoller[Head](time.Millisecond, pollFunc, pollingTimeout, olggr) require.NoError(t, poller.Start()) // Unsubscribe while blocked in polling function @@ -184,8 +165,7 @@ func Test_Poller_Unsubscribe(t *testing.T) { } t.Run("Test multiple unsubscribe", func(t *testing.T) { - channel := make(chan Head, 1) - poller := NewPoller[Head](time.Millisecond, pollFunc, time.Second, channel, lggr) + poller, channel := NewPoller[Head](time.Millisecond, pollFunc, time.Second, lggr) err := poller.Start() require.NoError(t, err) @@ -194,14 +174,12 @@ func Test_Poller_Unsubscribe(t *testing.T) { poller.Unsubscribe() }) - t.Run("Test unsubscribe with closed channel", func(t *testing.T) { - channel := make(chan Head, 1) - poller := NewPoller[Head](time.Millisecond, pollFunc, time.Second, channel, lggr) + t.Run("Read channel after unsubscribe", func(t *testing.T) { + poller, channel := NewPoller[Head](time.Millisecond, pollFunc, time.Second, lggr) err := poller.Start() require.NoError(t, err) - <-channel - close(channel) poller.Unsubscribe() + require.Equal(t, <-channel, nil) }) } From 80590662bd9956d3c93449ca4703a2430e0613b7 Mon Sep 17 00:00:00 2001 From: HenryNguyen5 
<6404866+HenryNguyen5@users.noreply.github.com> Date: Mon, 6 May 2024 15:49:38 -0400 Subject: [PATCH 4/9] Normalize ref regex (#13112) --- .changeset/ten-dodos-run.md | 5 +++++ core/services/workflows/models_yaml.go | 2 +- .../testdata/fixtures/workflows/workflow_schema.json | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 .changeset/ten-dodos-run.md diff --git a/.changeset/ten-dodos-run.md b/.changeset/ten-dodos-run.md new file mode 100644 index 00000000000..42ab8ec58b2 --- /dev/null +++ b/.changeset/ten-dodos-run.md @@ -0,0 +1,5 @@ +--- +"chainlink": patch +--- + +#internal Normalize keystone workflow ref regex property to match id regex diff --git a/core/services/workflows/models_yaml.go b/core/services/workflows/models_yaml.go index 5ed7941f84a..74ed8ee466d 100644 --- a/core/services/workflows/models_yaml.go +++ b/core/services/workflows/models_yaml.go @@ -211,7 +211,7 @@ type stepDefinitionYaml struct { // - “ref” has a circular reference. // // NOTE: Should introduce a custom validator to cover trigger case - Ref string `json:"ref,omitempty" jsonschema:"pattern=^[a-z0-9_]+$"` + Ref string `json:"ref,omitempty" jsonschema:"pattern=^[a-z0-9_-]+$"` // Capabilities can specify an additional optional ”inputs” property. It allows specifying a dependency on the result of one or more other capabilities. These are always runtime values that cannot be provided upfront. It takes a map of the argument name internal to the capability and an explicit reference to the values. // diff --git a/core/services/workflows/testdata/fixtures/workflows/workflow_schema.json b/core/services/workflows/testdata/fixtures/workflows/workflow_schema.json index 7f257f7798d..f9f9fd88646 100644 --- a/core/services/workflows/testdata/fixtures/workflows/workflow_schema.json +++ b/core/services/workflows/testdata/fixtures/workflows/workflow_schema.json @@ -48,7 +48,7 @@ }, "ref": { "type": "string", - "pattern": "^[a-z0-9_]+$" + "pattern": "^[a-z0-9_-]+$" }, "inputs": { "$ref": "#/$defs/mapping" From c671a5c731b7028e36ecfac1b60b990f65d78fa2 Mon Sep 17 00:00:00 2001 From: frank zhu Date: Mon, 6 May 2024 14:04:12 -0700 Subject: [PATCH 5/9] Revert "fix: ci-core print-races to slack conditionals (#13086)" (#13115) This reverts commit fbbadfb6a1ef746aff9a98178e6186e12f4a4f54. --- .github/workflows/ci-core.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-core.yml b/.github/workflows/ci-core.yml index a3ea68380f9..1d7b58820b0 100644 --- a/.github/workflows/ci-core.yml +++ b/.github/workflows/ci-core.yml @@ -170,7 +170,7 @@ jobs: env: OUTPUT_FILE: ./output.txt USE_TEE: false - CL_DATABASE_URL: ${{ env.DB_URL }} + CL_DATABASE_URL: ${{ env.DB_URL }} run: ./tools/bin/${{ matrix.type.cmd }} ./... 
- name: Print Filtered Test Results if: ${{ failure() && matrix.type.cmd == 'go_core_tests' && needs.filter.outputs.changes == 'true' }} @@ -203,7 +203,7 @@ jobs: ./coverage.txt ./postgres_logs.txt - name: Notify Slack - if: ${{ failure() && steps.print-races.outputs.post_to_slack == 'true' && matrix.type.cmd == 'go_core_race_tests' && (github.event_name == 'merge_group' || github.base_ref == 'develop') && needs.filter.outputs.changes == 'true' }} + if: ${{ failure() && steps.print-races.outputs.post_to_slack == 'true' && matrix.type.cmd == 'go_core_race_tests' && (github.event_name == 'merge_group' || github.event.branch == 'develop') && needs.filter.outputs.changes == 'true' }} uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 env: SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} From 7572a50a78a270188344786937f68233df82f65b Mon Sep 17 00:00:00 2001 From: FelixFan1992 Date: Mon, 6 May 2024 18:29:06 -0400 Subject: [PATCH 6/9] AUTO-10213 & AUTO-10214 & AUTO-10236: compare current gas price with user-defined max gas price in registry 2.1 pipeline (#12952) * AUTO-10213: pass an gas estimator to registry 2.1 pipeline * update tests and add changeset * update changeset * AUTO-10214: compare max gas price with current gas price in simulation process (#12955) * AUTO-10214: compare max gas price with current gas price in simulation process * refactor and add tests * linting (#12960) * linting * 2 * fix linting * create opts with latest block * AUTO-10236: add integration tests for max gas price check (#12974) * AUTO-10214: compare max gas price with current gas price in simulation process * refactor and add tests * linting (#12960) * linting * 2 * fix linting * AUTO-10236 * fix go mod * update test json * improve max gas price integration tests * AUTO-10214: compare max gas price with current gas price in simulation process * refactor and add tests * linting (#12960) * linting * 2 * fix linting * create opts with latest block * add some logs * fix bug and update logs * update * update * update logs * fix --- .changeset/funny-tips-promise.md | 6 + .changeset/neat-pianos-argue.md | 6 + .changeset/witty-weeks-kneel.md | 5 + .../evmregistry/v21/encoding/interface.go | 1 + .../evmregistry/v21/gasprice/gasprice.go | 70 ++++++++++ .../evmregistry/v21/gasprice/gasprice_test.go | 128 ++++++++++++++++++ .../ocr2keeper/evmregistry/v21/registry.go | 13 ++ .../v21/registry_check_pipeline.go | 16 ++- .../v21/registry_check_pipeline_test.go | 11 ++ .../contracts/ethereum_keeper_contracts.go | 41 ++++++ integration-tests/go.mod | 2 +- integration-tests/smoke/automation_test.go | 120 +++++++++++++++- .../smoke/automation_test.go_test_list.json | 4 + 13 files changed, 418 insertions(+), 5 deletions(-) create mode 100644 .changeset/funny-tips-promise.md create mode 100644 .changeset/neat-pianos-argue.md create mode 100644 .changeset/witty-weeks-kneel.md create mode 100644 core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/gasprice/gasprice.go create mode 100644 core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/gasprice/gasprice_test.go diff --git a/.changeset/funny-tips-promise.md b/.changeset/funny-tips-promise.md new file mode 100644 index 00000000000..16fd0a9fc33 --- /dev/null +++ b/.changeset/funny-tips-promise.md @@ -0,0 +1,6 @@ +--- +"chainlink": patch +--- + +#added +compare user-defined max gas price with current gas price in automation simulation pipeline diff --git a/.changeset/neat-pianos-argue.md b/.changeset/neat-pianos-argue.md new file mode 100644 index 
00000000000..f65c19584db --- /dev/null +++ b/.changeset/neat-pianos-argue.md @@ -0,0 +1,6 @@ +--- +"chainlink": patch +--- + +#added +pass a gas estimator to registry 2.1 pipeline diff --git a/.changeset/witty-weeks-kneel.md b/.changeset/witty-weeks-kneel.md new file mode 100644 index 00000000000..d638d037081 --- /dev/null +++ b/.changeset/witty-weeks-kneel.md @@ -0,0 +1,5 @@ +--- +"chainlink": patch +--- + +#added an integration test for max gas price check diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding/interface.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding/interface.go index e942078fe54..39d738fa7c6 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding/interface.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding/interface.go @@ -31,6 +31,7 @@ const ( UpkeepFailureReasonInvalidRevertDataInput UpkeepFailureReason = 34 UpkeepFailureReasonSimulationFailed UpkeepFailureReason = 35 UpkeepFailureReasonTxHashReorged UpkeepFailureReason = 36 + UpkeepFailureReasonGasPriceTooHigh UpkeepFailureReason = 37 // pipeline execution error NoPipelineError PipelineExecutionState = 0 diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/gasprice/gasprice.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/gasprice/gasprice.go new file mode 100644 index 00000000000..2c376443fa5 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/gasprice/gasprice.go @@ -0,0 +1,70 @@ +package gasprice + +import ( + "context" + "math/big" + + "github.com/smartcontractkit/chainlink/v2/core/cbor" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding" +) + +const ( + // feeLimit is a placeholder when getting current price from gas estimator. it does not impact gas price calculation + feeLimit = uint64(1_000_000) + // maxFeePrice is a placeholder when getting current price from gas estimator. it caps the returned gas price from + // the estimator. it's set to a very high value because the gas price will be compared with user-defined gas price + // later. 
+ maxFeePrice = 1_000_000_000_000_000 +) + +type UpkeepOffchainConfig struct { + MaxGasPrice *big.Int `json:"maxGasPrice" cbor:"maxGasPrice"` +} + +// CheckGasPrice retrieves the current gas price and compare against the max gas price configured in upkeep's offchain config +// any errors in offchain config decoding will result in max gas price check disabled +func CheckGasPrice(ctx context.Context, upkeepId *big.Int, offchainConfigBytes []byte, ge gas.EvmFeeEstimator, lggr logger.Logger) encoding.UpkeepFailureReason { + if len(offchainConfigBytes) == 0 { + return encoding.UpkeepFailureReasonNone + } + + var offchainConfig UpkeepOffchainConfig + if err := cbor.ParseDietCBORToStruct(offchainConfigBytes, &offchainConfig); err != nil { + lggr.Errorw("failed to parse upkeep offchain config, gas price check is disabled", "upkeepId", upkeepId.String(), "err", err) + return encoding.UpkeepFailureReasonNone + } + if offchainConfig.MaxGasPrice == nil || offchainConfig.MaxGasPrice.Int64() <= 0 { + lggr.Warnw("maxGasPrice is not configured or incorrectly configured in upkeep offchain config, gas price check is disabled", "upkeepId", upkeepId.String()) + return encoding.UpkeepFailureReasonNone + } + lggr.Debugf("successfully decode offchain config for %s, max gas price is %s", upkeepId.String(), offchainConfig.MaxGasPrice.String()) + + fee, _, err := ge.GetFee(ctx, []byte{}, feeLimit, assets.NewWei(big.NewInt(maxFeePrice))) + if err != nil { + lggr.Errorw("failed to get fee, gas price check is disabled", "upkeepId", upkeepId.String(), "err", err) + return encoding.UpkeepFailureReasonNone + } + + if fee.ValidDynamic() { + lggr.Debugf("current gas price EIP-1559 is fee cap %s, tip cap %s", fee.DynamicFeeCap.String(), fee.DynamicTipCap.String()) + if fee.DynamicFeeCap.Cmp(assets.NewWei(offchainConfig.MaxGasPrice)) > 0 { + // current gas price is higher than max gas price + lggr.Warnf("maxGasPrice %s for %s is LOWER than current gas price %d", offchainConfig.MaxGasPrice.String(), upkeepId.String(), fee.DynamicFeeCap.Int64()) + return encoding.UpkeepFailureReasonGasPriceTooHigh + } + lggr.Debugf("maxGasPrice %s for %s is HIGHER than current gas price %d", offchainConfig.MaxGasPrice.String(), upkeepId.String(), fee.DynamicFeeCap.Int64()) + } else { + lggr.Debugf("current gas price legacy is %s", fee.Legacy.String()) + if fee.Legacy.Cmp(assets.NewWei(offchainConfig.MaxGasPrice)) > 0 { + // current gas price is higher than max gas price + lggr.Warnf("maxGasPrice %s for %s is LOWER than current gas price %d", offchainConfig.MaxGasPrice.String(), upkeepId.String(), fee.Legacy.Int64()) + return encoding.UpkeepFailureReasonGasPriceTooHigh + } + lggr.Debugf("maxGasPrice %s for %s is HIGHER than current gas price %d", offchainConfig.MaxGasPrice.String(), upkeepId.String(), fee.Legacy.Int64()) + } + + return encoding.UpkeepFailureReasonNone +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/gasprice/gasprice_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/gasprice/gasprice_test.go new file mode 100644 index 00000000000..9b5640051df --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/gasprice/gasprice_test.go @@ -0,0 +1,128 @@ +package gasprice + +import ( + "math/big" + "testing" + + "github.com/fxamacker/cbor/v2" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" + gasMocks 
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas/mocks" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding" +) + +type WrongOffchainConfig struct { + MaxGasPrice1 []int `json:"maxGasPrice1" cbor:"maxGasPrice1"` +} + +func TestGasPrice_Check(t *testing.T) { + lggr := logger.TestLogger(t) + uid, _ := new(big.Int).SetString("1843548457736589226156809205796175506139185429616502850435279853710366065936", 10) + + tests := []struct { + Name string + MaxGasPrice *big.Int + CurrentLegacyGasPrice *big.Int + CurrentDynamicGasPrice *big.Int + ExpectedResult encoding.UpkeepFailureReason + FailedToGetFee bool + NotConfigured bool + ParsingFailed bool + }{ + { + Name: "no offchain config", + ExpectedResult: encoding.UpkeepFailureReasonNone, + }, + { + Name: "maxGasPrice not configured in offchain config", + NotConfigured: true, + ExpectedResult: encoding.UpkeepFailureReasonNone, + }, + { + Name: "fail to parse offchain config", + ParsingFailed: true, + MaxGasPrice: big.NewInt(10_000_000_000), + ExpectedResult: encoding.UpkeepFailureReasonNone, + }, + { + Name: "fail to retrieve current gas price", + MaxGasPrice: big.NewInt(8_000_000_000), + FailedToGetFee: true, + ExpectedResult: encoding.UpkeepFailureReasonNone, + }, + { + Name: "current gas price is too high - legacy", + MaxGasPrice: big.NewInt(10_000_000_000), + CurrentLegacyGasPrice: big.NewInt(18_000_000_000), + ExpectedResult: encoding.UpkeepFailureReasonGasPriceTooHigh, + }, + { + Name: "current gas price is too high - dynamic", + MaxGasPrice: big.NewInt(10_000_000_000), + CurrentDynamicGasPrice: big.NewInt(15_000_000_000), + ExpectedResult: encoding.UpkeepFailureReasonGasPriceTooHigh, + }, + { + Name: "current gas price is less than user's max gas price - legacy", + MaxGasPrice: big.NewInt(8_000_000_000), + CurrentLegacyGasPrice: big.NewInt(5_000_000_000), + ExpectedResult: encoding.UpkeepFailureReasonNone, + }, + { + Name: "current gas price is less than user's max gas price - dynamic", + MaxGasPrice: big.NewInt(10_000_000_000), + CurrentDynamicGasPrice: big.NewInt(8_000_000_000), + ExpectedResult: encoding.UpkeepFailureReasonNone, + }, + } + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + ctx := testutils.Context(t) + ge := gasMocks.NewEvmFeeEstimator(t) + if test.FailedToGetFee { + ge.On("GetFee", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return( + gas.EvmFee{}, + feeLimit, + errors.New("failed to retrieve gas price"), + ) + } else if test.CurrentLegacyGasPrice != nil { + ge.On("GetFee", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return( + gas.EvmFee{ + Legacy: assets.NewWei(test.CurrentLegacyGasPrice), + }, + feeLimit, + nil, + ) + } else if test.CurrentDynamicGasPrice != nil { + ge.On("GetFee", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return( + gas.EvmFee{ + DynamicFeeCap: assets.NewWei(test.CurrentDynamicGasPrice), + DynamicTipCap: assets.NewWei(big.NewInt(1_000_000_000)), + }, + feeLimit, + nil, + ) + } + + var oc []byte + if test.ParsingFailed { + oc, _ = cbor.Marshal(WrongOffchainConfig{MaxGasPrice1: []int{1, 2, 3}}) + if len(oc) > 0 { + oc[len(oc)-1] = 0x99 + } + } else if test.NotConfigured { + oc = []byte{1, 2, 3, 4} // parsing this will set maxGasPrice field to nil + } else if test.MaxGasPrice != nil { + oc, _ = cbor.Marshal(UpkeepOffchainConfig{MaxGasPrice: 
test.MaxGasPrice}) + } + fr := CheckGasPrice(ctx, uid, oc, ge, lggr) + assert.Equal(t, test.ExpectedResult, fr) + }) + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go index 206932cf543..5a6466a8b15 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go @@ -27,6 +27,7 @@ import ( ocr2keepers "github.com/smartcontractkit/chainlink-common/pkg/types/automation" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated" @@ -113,6 +114,7 @@ func NewEvmRegistry( bs: blockSub, finalityDepth: finalityDepth, streams: streams.NewStreamsLookup(mercuryConfig, blockSub, client.Client(), registry, lggr), + ge: client.GasEstimator(), } } @@ -194,6 +196,7 @@ type EvmRegistry struct { logEventProvider logprovider.LogEventProvider finalityDepth uint32 streams streams.Lookup + ge gas.EvmFeeEstimator } func (r *EvmRegistry) Name() string { @@ -627,3 +630,13 @@ func (r *EvmRegistry) fetchTriggerConfig(id *big.Int) ([]byte, error) { } return cfg, nil } + +// fetchUpkeepOffchainConfig fetches upkeep offchain config in raw bytes for an upkeep. +func (r *EvmRegistry) fetchUpkeepOffchainConfig(id *big.Int) ([]byte, error) { + opts := r.buildCallOpts(r.ctx, nil) + ui, err := r.registry.GetUpkeep(opts, id) + if err != nil { + return []byte{}, err + } + return ui.OffchainConfig, nil +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_check_pipeline.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_check_pipeline.go index 3e935d0adf1..e341730c794 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_check_pipeline.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_check_pipeline.go @@ -16,6 +16,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/gasprice" ) const ( @@ -305,7 +306,19 @@ func (r *EvmRegistry) simulatePerformUpkeeps(ctx context.Context, checkResults [ block, _, upkeepId := r.getBlockAndUpkeepId(cr.UpkeepID, cr.Trigger) - opts := r.buildCallOpts(ctx, block) + oc, err := r.fetchUpkeepOffchainConfig(upkeepId) + if err != nil { + // this is mostly caused by RPC flakiness + r.lggr.Errorw("failed get offchain config, gas price check will be disabled", "err", err, "upkeepId", upkeepId, "block", block) + } + fr := gasprice.CheckGasPrice(ctx, upkeepId, oc, r.ge, r.lggr) + if uint8(fr) == uint8(encoding.UpkeepFailureReasonGasPriceTooHigh) { + r.lggr.Infof("upkeep %s upkeep failure reason is %d", upkeepId, fr) + checkResults[i].Eligible = false + checkResults[i].Retryable = false + checkResults[i].IneligibilityReason = uint8(fr) + continue + } // Since checkUpkeep is true, simulate perform upkeep to ensure it doesn't revert payload, err := r.abi.Pack("simulatePerformUpkeep", upkeepId, cr.PerformData) @@ -317,6 +330,7 @@ func (r *EvmRegistry) simulatePerformUpkeeps(ctx context.Context, checkResults [ 
continue } + opts := r.buildCallOpts(ctx, block) var result string performReqs = append(performReqs, rpc.BatchElem{ Method: "eth_call", diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_check_pipeline_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_check_pipeline_test.go index 330da44b71b..e74ad4821a6 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_check_pipeline_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_check_pipeline_test.go @@ -23,6 +23,7 @@ import ( ocr2keepers "github.com/smartcontractkit/chainlink-common/pkg/types/automation" evmClientMocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks" + gasMocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas/mocks" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" ac "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/i_automation_v21_plus_common" @@ -651,6 +652,13 @@ func TestRegistry_SimulatePerformUpkeeps(t *testing.T) { }).Once() e.client = client + mockReg := mocks.NewRegistry(t) + mockReg.On("GetUpkeep", mock.Anything, mock.Anything).Return( + encoding.UpkeepInfo{OffchainConfig: make([]byte, 0)}, + nil, + ).Times(2) + e.registry = mockReg + results, err := e.simulatePerformUpkeeps(testutils.Context(t), tc.inputs) assert.Equal(t, tc.results, results) assert.Equal(t, tc.err, err) @@ -670,6 +678,7 @@ func setupEVMRegistry(t *testing.T) *EvmRegistry { mockReg := mocks.NewRegistry(t) mockHttpClient := mocks.NewHttpClient(t) client := evmClientMocks.NewClient(t) + ge := gasMocks.NewEvmFeeEstimator(t) r := &EvmRegistry{ lggr: lggr, @@ -694,6 +703,8 @@ func setupEVMRegistry(t *testing.T) *EvmRegistry { AllowListCache: cache.New(defaultAllowListExpiration, cleanupInterval), }, hc: mockHttpClient, + bs: &BlockSubscriber{latestBlock: atomic.Pointer[ocr2keepers.BlockKey]{}}, + ge: ge, } return r } diff --git a/integration-tests/contracts/ethereum_keeper_contracts.go b/integration-tests/contracts/ethereum_keeper_contracts.go index 337e3009f16..8ec6a547b55 100644 --- a/integration-tests/contracts/ethereum_keeper_contracts.go +++ b/integration-tests/contracts/ethereum_keeper_contracts.go @@ -87,6 +87,7 @@ type KeeperRegistry interface { UpdateCheckData(id *big.Int, newCheckData []byte) error SetUpkeepTriggerConfig(id *big.Int, triggerConfig []byte) error SetUpkeepPrivilegeConfig(id *big.Int, privilegeConfig []byte) error + SetUpkeepOffchainConfig(id *big.Int, offchainConfig []byte) error RegistryOwnerAddress() common.Address ChainModuleAddress() common.Address ReorgProtectionEnabled() bool @@ -1225,6 +1226,46 @@ func (v *EthereumKeeperRegistry) UnpauseUpkeep(id *big.Int) error { } } +func (v *EthereumKeeperRegistry) SetUpkeepOffchainConfig(id *big.Int, offchainConfig []byte) error { + switch v.version { + case ethereum.RegistryVersion_2_0: + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + + tx, err := v.registry2_0.SetUpkeepOffchainConfig(opts, id, offchainConfig) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + case ethereum.RegistryVersion_2_1: + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + + tx, err := v.registry2_1.SetUpkeepOffchainConfig(opts, id, offchainConfig) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + case 
ethereum.RegistryVersion_2_2: + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + + tx, err := v.registry2_2.SetUpkeepOffchainConfig(opts, id, offchainConfig) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + default: + return fmt.Errorf("SetUpkeepOffchainConfig is not supported by keeper registry version %d", v.version) + } +} + // Parses upkeep performed log func (v *EthereumKeeperRegistry) ParseUpkeepPerformedLog(log *types.Log) (*UpkeepPerformedLog, error) { switch v.version { diff --git a/integration-tests/go.mod b/integration-tests/go.mod index babf82a7d96..d8210ccc123 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -9,6 +9,7 @@ require ( github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df github.com/cli/go-gh/v2 v2.0.0 github.com/ethereum/go-ethereum v1.13.8 + github.com/fxamacker/cbor/v2 v2.5.0 github.com/go-resty/resty/v2 v2.7.0 github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.6.0 @@ -174,7 +175,6 @@ require ( github.com/felixge/httpsnoop v1.0.3 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/fvbommel/sortorder v1.0.2 // indirect - github.com/fxamacker/cbor/v2 v2.5.0 // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect github.com/gagliardetto/binary v0.7.7 // indirect github.com/gagliardetto/solana-go v1.8.4 // indirect diff --git a/integration-tests/smoke/automation_test.go b/integration-tests/smoke/automation_test.go index 81d18139122..ee9541926df 100644 --- a/integration-tests/smoke/automation_test.go +++ b/integration-tests/smoke/automation_test.go @@ -11,6 +11,8 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/fxamacker/cbor/v2" "github.com/onsi/gomega" "github.com/stretchr/testify/require" @@ -34,6 +36,7 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/types/config/node" ac "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/automation_compatible_utils" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/gasprice" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams" ) @@ -190,7 +193,7 @@ func SetupAutomationBasic(t *testing.T, nodeUpgrade bool, automationTestConfig t for i := 0; i < len(upkeepIDs); i++ { counter, err := consumers[i].Counter(testcontext.Get(t)) require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) - l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep ID", i).Msg("Number of upkeeps performed") + l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep index", i).Msg("Number of upkeeps performed") g.Expect(counter.Int64()).Should(gomega.BeNumerically(">=", int64(expect)), "Expected consumer counter to be greater than %d, but got %d", expect, counter.Int64()) } @@ -631,7 +634,7 @@ func TestAutomationRegisterUpkeep(t *testing.T) { "Expected consumer counter to be greater than 0, but got %d", counter.Int64()) l.Info(). Int64("Upkeep counter", counter.Int64()). - Int64("Upkeep ID", int64(i)). + Int64("Upkeep index", int64(i)). 
Msg("Number of upkeeps performed") } }, "4m", "1s").Should(gomega.Succeed()) // ~1m for cluster setup, ~1m for performing each upkeep once, ~2m buffer @@ -657,7 +660,7 @@ func TestAutomationRegisterUpkeep(t *testing.T) { g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") l.Info(). - Int64("Upkeep ID", int64(i)). + Int64("Upkeep index", int64(i)). Int64("Upkeep counter", currentCounter.Int64()). Int64("initial counter", initialCounters[i].Int64()). Msg("Number of upkeeps performed") @@ -1120,6 +1123,117 @@ func TestUpdateCheckData(t *testing.T) { } } +func TestSetOffchainConfigWithMaxGasPrice(t *testing.T) { + t.Parallel() + registryVersions := map[string]ethereum.KeeperRegistryVersion{ + // registry20 also has upkeep offchain config but the max gas price check is not implemented + "registry_2_1": ethereum.RegistryVersion_2_1, + "registry_2_2": ethereum.RegistryVersion_2_2, + } + + for n, rv := range registryVersions { + name := n + registryVersion := rv + t.Run(name, func(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + config, err := tc.GetConfig("Smoke", tc.Automation) + if err != nil { + t.Fatal(err) + } + a := setupAutomationTestDocker( + t, registryVersion, automationDefaultRegistryConfig(config), false, false, &config, + ) + + consumers, upkeepIDs := actions.DeployConsumers( + t, + a.Registry, + a.Registrar, + a.LinkToken, + a.Deployer, + a.ChainClient, + defaultAmountOfUpkeeps, + big.NewInt(automationDefaultLinkFunds), + automationDefaultUpkeepGasLimit, + false, + false, + ) + gom := gomega.NewGomegaWithT(t) + + l.Info().Msg("waiting for all upkeeps to be performed at least once") + gom.Eventually(func(g gomega.Gomega) { + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), + "Expected consumer counter to be greater than 0, but got %d") + } + }, "3m", "1s").Should(gomega.Succeed()) // ~1m for cluster setup, ~1m for performing each upkeep once, ~2m buffer + + // set the maxGasPrice to 1 wei + uoc, _ := cbor.Marshal(gasprice.UpkeepOffchainConfig{MaxGasPrice: big.NewInt(1)}) + l.Info().Msgf("setting all upkeeps' offchain config to %s, which means maxGasPrice is 1 wei", hexutil.Encode(uoc)) + for _, uid := range upkeepIDs { + err = a.Registry.SetUpkeepOffchainConfig(uid, uoc) + require.NoError(t, err, "Error setting upkeep offchain config") + err = a.ChainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events from setting upkeep offchain config") + } + + // Store how many times each upkeep performed once their offchain config is set with maxGasPrice = 1 wei + var countersAfterSettingLowMaxGasPrice = make([]*big.Int, len(upkeepIDs)) + for i := 0; i < len(upkeepIDs); i++ { + countersAfterSettingLowMaxGasPrice[i], err = consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) + l.Info().Int64("Upkeep Performed times", countersAfterSettingLowMaxGasPrice[i].Int64()).Int("Upkeep index", i).Msg("Number of upkeeps performed") + } + + var latestCounter *big.Int + // the counters of all the upkeeps should stay constant because they are no longer getting serviced + gom.Consistently(func(g gomega.Gomega) { + for i := 0; i < len(upkeepIDs); i++ { + latestCounter, err = consumers[i].Counter(testcontext.Get(t)) + 
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i)
+				g.Expect(latestCounter.Int64()).Should(gomega.Equal(countersAfterSettingLowMaxGasPrice[i].Int64()),
+					"Expected consumer counter to remain constant at %d, but got %d",
+					countersAfterSettingLowMaxGasPrice[i].Int64(), latestCounter.Int64())
+			}
+		}, "2m", "1s").Should(gomega.Succeed())
+		l.Info().Msg("no upkeeps are performed because their max gas price is only 1 wei")
+
+		// setting offchain config with a high max gas price for the first upkeep; it should perform again while
+		// the other upkeeps should not perform
+		// set the maxGasPrice to 500 gwei for the first upkeep
+		uoc, _ = cbor.Marshal(gasprice.UpkeepOffchainConfig{MaxGasPrice: big.NewInt(500_000_000_000)})
+		l.Info().Msgf("setting the first upkeep's offchain config to %s, which means maxGasPrice is 500 gwei", hexutil.Encode(uoc))
+		err = a.Registry.SetUpkeepOffchainConfig(upkeepIDs[0], uoc)
+		require.NoError(t, err, "Error setting upkeep offchain config")
+
+		// the counters of all other upkeeps should stay constant because their max gas price remains very low
+		gom.Consistently(func(g gomega.Gomega) {
+			for i := 1; i < len(upkeepIDs); i++ {
+				latestCounter, err = consumers[i].Counter(testcontext.Get(t))
+				g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i)
+				g.Expect(latestCounter.Int64()).Should(gomega.Equal(countersAfterSettingLowMaxGasPrice[i].Int64()),
+					"Expected consumer counter to remain constant at %d, but got %d",
+					countersAfterSettingLowMaxGasPrice[i].Int64(), latestCounter.Int64())
+			}
+		}, "2m", "1s").Should(gomega.Succeed())
+		l.Info().Msg("the remaining upkeeps did not perform again because their max gas price is still 1 wei")
+
+		// the first upkeep should start performing again
+		gom.Eventually(func(g gomega.Gomega) {
+			latestCounter, err = consumers[0].Counter(testcontext.Get(t))
+			g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index 0")
+			g.Expect(latestCounter.Int64()).Should(gomega.BeNumerically(">", countersAfterSettingLowMaxGasPrice[0].Int64()),
+				"Expected consumer counter to be greater than %d, but got %d",
+				countersAfterSettingLowMaxGasPrice[0].Int64(), latestCounter.Int64())
+		}, "2m", "1s").Should(gomega.Succeed()) // ~1m for cluster setup, ~1m for performing each upkeep once, ~2m buffer
+		l.Info().Int64("Upkeep Performed times", latestCounter.Int64()).Msg("the first upkeep performed again")
+		})
+	}
+}
+
 func setupAutomationTestDocker(
 	t *testing.T,
 	registryVersion ethereum.KeeperRegistryVersion,
diff --git a/integration-tests/smoke/automation_test.go_test_list.json b/integration-tests/smoke/automation_test.go_test_list.json
index 03029c9018b..e8f0f838dfd 100644
--- a/integration-tests/smoke/automation_test.go_test_list.json
+++ b/integration-tests/smoke/automation_test.go_test_list.json
@@ -70,6 +70,10 @@
     {
       "name": "TestUpdateCheckData",
       "nodes": 3
+    },
+    {
+      "name": "TestSetOffchainConfigWithMaxGasPrice",
+      "nodes": 2
     }
   ]
 }
\ No newline at end of file

From ac893364e6c6ede08e9bf04da7dc64e0da94ab6e Mon Sep 17 00:00:00 2001
From: Chunkai Yang
Date: Tue, 7 May 2024 01:21:07 -0400
Subject: [PATCH 7/9] Feature ORM for CCIP in-db prices (#12813)

* define CCIP migration sql
* POC orm
* use context as opposed to pg opts
* add CCIP ORM test
* pass ORM tests
* add change set
* update changeset
* fix lint and goimports
* address comments
* replace big.Int with assets.Wei for scanner/valuer type
* inline table names * use named exec to insert multiple rows * bump migration index --- .changeset/mighty-flies-breathe.md | 5 + core/services/ccip/mocks/orm.go | 164 +++++++++ core/services/ccip/orm.go | 163 +++++++++ core/services/ccip/orm_test.go | 346 ++++++++++++++++++ .../migrations/0236_ccip_prices_cache.sql | 36 ++ 5 files changed, 714 insertions(+) create mode 100644 .changeset/mighty-flies-breathe.md create mode 100644 core/services/ccip/mocks/orm.go create mode 100644 core/services/ccip/orm.go create mode 100644 core/services/ccip/orm_test.go create mode 100644 core/store/migrate/migrations/0236_ccip_prices_cache.sql diff --git a/.changeset/mighty-flies-breathe.md b/.changeset/mighty-flies-breathe.md new file mode 100644 index 00000000000..d983aad7086 --- /dev/null +++ b/.changeset/mighty-flies-breathe.md @@ -0,0 +1,5 @@ +--- +"chainlink": patch +--- + +#added ORM and corresponding tables for CCIP gas prices and token prices diff --git a/core/services/ccip/mocks/orm.go b/core/services/ccip/mocks/orm.go new file mode 100644 index 00000000000..b9afc6c8695 --- /dev/null +++ b/core/services/ccip/mocks/orm.go @@ -0,0 +1,164 @@ +// Code generated by mockery v2.42.2. DO NOT EDIT. + +package mocks + +import ( + context "context" + + ccip "github.com/smartcontractkit/chainlink/v2/core/services/ccip" + + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// ORM is an autogenerated mock type for the ORM type +type ORM struct { + mock.Mock +} + +// ClearGasPricesByDestChain provides a mock function with given fields: ctx, destChainSelector, to +func (_m *ORM) ClearGasPricesByDestChain(ctx context.Context, destChainSelector uint64, to time.Time) error { + ret := _m.Called(ctx, destChainSelector, to) + + if len(ret) == 0 { + panic("no return value specified for ClearGasPricesByDestChain") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, time.Time) error); ok { + r0 = rf(ctx, destChainSelector, to) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ClearTokenPricesByDestChain provides a mock function with given fields: ctx, destChainSelector, to +func (_m *ORM) ClearTokenPricesByDestChain(ctx context.Context, destChainSelector uint64, to time.Time) error { + ret := _m.Called(ctx, destChainSelector, to) + + if len(ret) == 0 { + panic("no return value specified for ClearTokenPricesByDestChain") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, time.Time) error); ok { + r0 = rf(ctx, destChainSelector, to) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetGasPricesByDestChain provides a mock function with given fields: ctx, destChainSelector +func (_m *ORM) GetGasPricesByDestChain(ctx context.Context, destChainSelector uint64) ([]ccip.GasPrice, error) { + ret := _m.Called(ctx, destChainSelector) + + if len(ret) == 0 { + panic("no return value specified for GetGasPricesByDestChain") + } + + var r0 []ccip.GasPrice + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) ([]ccip.GasPrice, error)); ok { + return rf(ctx, destChainSelector) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) []ccip.GasPrice); ok { + r0 = rf(ctx, destChainSelector) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ccip.GasPrice) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, destChainSelector) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTokenPricesByDestChain provides a mock function with given fields: ctx, destChainSelector +func (_m 
*ORM) GetTokenPricesByDestChain(ctx context.Context, destChainSelector uint64) ([]ccip.TokenPrice, error) { + ret := _m.Called(ctx, destChainSelector) + + if len(ret) == 0 { + panic("no return value specified for GetTokenPricesByDestChain") + } + + var r0 []ccip.TokenPrice + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) ([]ccip.TokenPrice, error)); ok { + return rf(ctx, destChainSelector) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) []ccip.TokenPrice); ok { + r0 = rf(ctx, destChainSelector) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ccip.TokenPrice) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, destChainSelector) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// InsertGasPricesForDestChain provides a mock function with given fields: ctx, destChainSelector, jobId, gasPrices +func (_m *ORM) InsertGasPricesForDestChain(ctx context.Context, destChainSelector uint64, jobId int32, gasPrices []ccip.GasPriceUpdate) error { + ret := _m.Called(ctx, destChainSelector, jobId, gasPrices) + + if len(ret) == 0 { + panic("no return value specified for InsertGasPricesForDestChain") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, int32, []ccip.GasPriceUpdate) error); ok { + r0 = rf(ctx, destChainSelector, jobId, gasPrices) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// InsertTokenPricesForDestChain provides a mock function with given fields: ctx, destChainSelector, jobId, tokenPrices +func (_m *ORM) InsertTokenPricesForDestChain(ctx context.Context, destChainSelector uint64, jobId int32, tokenPrices []ccip.TokenPriceUpdate) error { + ret := _m.Called(ctx, destChainSelector, jobId, tokenPrices) + + if len(ret) == 0 { + panic("no return value specified for InsertTokenPricesForDestChain") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, int32, []ccip.TokenPriceUpdate) error); ok { + r0 = rf(ctx, destChainSelector, jobId, tokenPrices) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewORM creates a new instance of ORM. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewORM(t interface { + mock.TestingT + Cleanup(func()) +}) *ORM { + mock := &ORM{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ccip/orm.go b/core/services/ccip/orm.go new file mode 100644 index 00000000000..8af7762b18d --- /dev/null +++ b/core/services/ccip/orm.go @@ -0,0 +1,163 @@ +package ccip + +import ( + "context" + "fmt" + "time" + + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" +) + +type GasPrice struct { + SourceChainSelector uint64 + GasPrice *assets.Wei + CreatedAt time.Time +} + +type GasPriceUpdate struct { + SourceChainSelector uint64 + GasPrice *assets.Wei +} + +type TokenPrice struct { + TokenAddr string + TokenPrice *assets.Wei + CreatedAt time.Time +} + +type TokenPriceUpdate struct { + TokenAddr string + TokenPrice *assets.Wei +} + +//go:generate mockery --quiet --name ORM --output ./mocks/ --case=underscore +type ORM interface { + GetGasPricesByDestChain(ctx context.Context, destChainSelector uint64) ([]GasPrice, error) + GetTokenPricesByDestChain(ctx context.Context, destChainSelector uint64) ([]TokenPrice, error) + + InsertGasPricesForDestChain(ctx context.Context, destChainSelector uint64, jobId int32, gasPrices []GasPriceUpdate) error + InsertTokenPricesForDestChain(ctx context.Context, destChainSelector uint64, jobId int32, tokenPrices []TokenPriceUpdate) error + + ClearGasPricesByDestChain(ctx context.Context, destChainSelector uint64, to time.Time) error + ClearTokenPricesByDestChain(ctx context.Context, destChainSelector uint64, to time.Time) error +} + +type orm struct { + ds sqlutil.DataSource +} + +var _ ORM = (*orm)(nil) + +func NewORM(ds sqlutil.DataSource) (ORM, error) { + if ds == nil { + return nil, fmt.Errorf("datasource to CCIP NewORM cannot be nil") + } + + return &orm{ + ds: ds, + }, nil +} + +func (o *orm) GetGasPricesByDestChain(ctx context.Context, destChainSelector uint64) ([]GasPrice, error) { + var gasPrices []GasPrice + stmt := ` + SELECT DISTINCT ON (source_chain_selector) + source_chain_selector, gas_price, created_at + FROM ccip.observed_gas_prices + WHERE chain_selector = $1 + ORDER BY source_chain_selector, created_at DESC; + ` + err := o.ds.SelectContext(ctx, &gasPrices, stmt, destChainSelector) + if err != nil { + return nil, err + } + + return gasPrices, nil +} + +func (o *orm) GetTokenPricesByDestChain(ctx context.Context, destChainSelector uint64) ([]TokenPrice, error) { + var tokenPrices []TokenPrice + stmt := ` + SELECT DISTINCT ON (token_addr) + token_addr, token_price, created_at + FROM ccip.observed_token_prices + WHERE chain_selector = $1 + ORDER BY token_addr, created_at DESC; + ` + err := o.ds.SelectContext(ctx, &tokenPrices, stmt, destChainSelector) + if err != nil { + return nil, err + } + + return tokenPrices, nil +} + +func (o *orm) InsertGasPricesForDestChain(ctx context.Context, destChainSelector uint64, jobId int32, gasPrices []GasPriceUpdate) error { + if len(gasPrices) == 0 { + return nil + } + + now := time.Now() + insertData := make([]map[string]interface{}, 0, len(gasPrices)) + for _, price := range gasPrices { + insertData = append(insertData, map[string]interface{}{ + "chain_selector": destChainSelector, + "job_id": jobId, + "source_chain_selector": price.SourceChainSelector, + "gas_price": price.GasPrice, + "created_at": now, + }) + } + + stmt := `INSERT INTO ccip.observed_gas_prices (chain_selector, job_id, source_chain_selector, gas_price, 
created_at) + VALUES (:chain_selector, :job_id, :source_chain_selector, :gas_price, :created_at);` + _, err := o.ds.NamedExecContext(ctx, stmt, insertData) + if err != nil { + err = fmt.Errorf("error inserting gas prices for job %d: %w", jobId, err) + } + + return err +} + +func (o *orm) InsertTokenPricesForDestChain(ctx context.Context, destChainSelector uint64, jobId int32, tokenPrices []TokenPriceUpdate) error { + if len(tokenPrices) == 0 { + return nil + } + + now := time.Now() + insertData := make([]map[string]interface{}, 0, len(tokenPrices)) + for _, price := range tokenPrices { + insertData = append(insertData, map[string]interface{}{ + "chain_selector": destChainSelector, + "job_id": jobId, + "token_addr": price.TokenAddr, + "token_price": price.TokenPrice, + "created_at": now, + }) + } + + stmt := `INSERT INTO ccip.observed_token_prices (chain_selector, job_id, token_addr, token_price, created_at) + VALUES (:chain_selector, :job_id, :token_addr, :token_price, :created_at);` + _, err := o.ds.NamedExecContext(ctx, stmt, insertData) + if err != nil { + err = fmt.Errorf("error inserting token prices for job %d: %w", jobId, err) + } + + return err +} + +func (o *orm) ClearGasPricesByDestChain(ctx context.Context, destChainSelector uint64, to time.Time) error { + stmt := `DELETE FROM ccip.observed_gas_prices WHERE chain_selector = $1 AND created_at < $2` + + _, err := o.ds.ExecContext(ctx, stmt, destChainSelector, to) + return err +} + +func (o *orm) ClearTokenPricesByDestChain(ctx context.Context, destChainSelector uint64, to time.Time) error { + stmt := `DELETE FROM ccip.observed_token_prices WHERE chain_selector = $1 AND created_at < $2` + + _, err := o.ds.ExecContext(ctx, stmt, destChainSelector, to) + return err +} diff --git a/core/services/ccip/orm_test.go b/core/services/ccip/orm_test.go new file mode 100644 index 00000000000..741cf4b5b38 --- /dev/null +++ b/core/services/ccip/orm_test.go @@ -0,0 +1,346 @@ +package ccip + +import ( + "math/big" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" +) + +func setupORM(t *testing.T) (ORM, sqlutil.DataSource) { + t.Helper() + + db := pgtest.NewSqlxDB(t) + orm, err := NewORM(db) + + require.NoError(t, err) + + return orm, db +} + +func generateChainSelectors(n int) []uint64 { + selectors := make([]uint64, n) + for i := 0; i < n; i++ { + selectors[i] = rand.Uint64() + } + + return selectors +} + +func generateGasPriceUpdates(chainSelector uint64, n int) []GasPriceUpdate { + updates := make([]GasPriceUpdate, n) + for i := 0; i < n; i++ { + // gas prices can take up whole range of uint256 + uint256Max := new(big.Int).Sub(new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil), big.NewInt(1)) + row := GasPriceUpdate{ + SourceChainSelector: chainSelector, + GasPrice: assets.NewWei(new(big.Int).Sub(uint256Max, big.NewInt(int64(i)))), + } + updates[i] = row + } + + return updates +} + +func generateTokenAddresses(n int) []string { + addrs := make([]string, n) + for i := 0; i < n; i++ { + addrs[i] = utils.RandomAddress().Hex() + } + + return addrs +} + +func generateTokenPriceUpdates(tokenAddr string, n int) []TokenPriceUpdate { + updates := 
make([]TokenPriceUpdate, n) + for i := 0; i < n; i++ { + row := TokenPriceUpdate{ + TokenAddr: tokenAddr, + TokenPrice: assets.NewWei(new(big.Int).Mul(big.NewInt(1e18), big.NewInt(int64(i)))), + } + updates[i] = row + } + + return updates +} + +func getGasTableRowCount(t *testing.T, ds sqlutil.DataSource) int { + var count int + stmt := `SELECT COUNT(*) FROM ccip.observed_gas_prices;` + err := ds.QueryRowxContext(testutils.Context(t), stmt).Scan(&count) + require.NoError(t, err) + + return count +} + +func getTokenTableRowCount(t *testing.T, ds sqlutil.DataSource) int { + var count int + stmt := `SELECT COUNT(*) FROM ccip.observed_token_prices;` + err := ds.QueryRowxContext(testutils.Context(t), stmt).Scan(&count) + require.NoError(t, err) + + return count +} + +func TestInitORM(t *testing.T) { + t.Parallel() + + orm, _ := setupORM(t) + assert.NotNil(t, orm) +} + +func TestORM_EmptyGasPrices(t *testing.T) { + t.Parallel() + ctx := testutils.Context(t) + + orm, _ := setupORM(t) + + prices, err := orm.GetGasPricesByDestChain(ctx, 1) + assert.Equal(t, 0, len(prices)) + assert.NoError(t, err) +} + +func TestORM_EmptyTokenPrices(t *testing.T) { + t.Parallel() + ctx := testutils.Context(t) + + orm, _ := setupORM(t) + + prices, err := orm.GetTokenPricesByDestChain(ctx, 1) + assert.Equal(t, 0, len(prices)) + assert.NoError(t, err) +} + +func TestORM_InsertAndGetGasPrices(t *testing.T) { + t.Parallel() + ctx := testutils.Context(t) + + orm, db := setupORM(t) + + numJobs := 5 + numSourceChainSelectors := 10 + numUpdatesPerSourceSelector := 20 + destSelector := uint64(1) + + sourceSelectors := generateChainSelectors(numSourceChainSelectors) + + updates := make(map[uint64][]GasPriceUpdate) + for _, selector := range sourceSelectors { + updates[selector] = generateGasPriceUpdates(selector, numUpdatesPerSourceSelector) + } + + // 5 jobs, each inserting prices for 10 chains, with 20 updates per chain. 
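+	// Note: each InsertGasPricesForDestChain call stamps all of its rows with a single created_at
+	// (time.Now() captured once), so the newest row for every source selector is whatever batch was
+	// inserted last. GetGasPricesByDestChain selects DISTINCT ON (source_chain_selector) ordered by
+	// created_at DESC, so only the final update recorded in expectedPrices below should come back per selector.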
+ expectedPrices := make(map[uint64]GasPriceUpdate) + for i := 0; i < numJobs; i++ { + for selector, updatesPerSelector := range updates { + lastIndex := len(updatesPerSelector) - 1 + + err := orm.InsertGasPricesForDestChain(ctx, destSelector, int32(i), updatesPerSelector[:lastIndex]) + assert.NoError(t, err) + err = orm.InsertGasPricesForDestChain(ctx, destSelector, int32(i), updatesPerSelector[lastIndex:]) + assert.NoError(t, err) + + expectedPrices[selector] = updatesPerSelector[lastIndex] + } + } + + // verify number of rows inserted + numRows := getGasTableRowCount(t, db) + assert.Equal(t, numJobs*numSourceChainSelectors*numUpdatesPerSourceSelector, numRows) + + prices, err := orm.GetGasPricesByDestChain(ctx, destSelector) + assert.NoError(t, err) + // should return 1 price per source chain selector + assert.Equal(t, numSourceChainSelectors, len(prices)) + + // verify getGasPrices returns prices of latest timestamp + for _, price := range prices { + selector := price.SourceChainSelector + assert.Equal(t, expectedPrices[selector].GasPrice, price.GasPrice) + } + + // after the initial inserts, insert new round of prices, 1 price per selector this time + var combinedUpdates []GasPriceUpdate + for selector, updatesPerSelector := range updates { + combinedUpdates = append(combinedUpdates, updatesPerSelector[0]) + expectedPrices[selector] = updatesPerSelector[0] + } + + err = orm.InsertGasPricesForDestChain(ctx, destSelector, 1, combinedUpdates) + assert.NoError(t, err) + assert.Equal(t, numJobs*numSourceChainSelectors*numUpdatesPerSourceSelector+numSourceChainSelectors, getGasTableRowCount(t, db)) + + prices, err = orm.GetGasPricesByDestChain(ctx, destSelector) + assert.NoError(t, err) + assert.Equal(t, numSourceChainSelectors, len(prices)) + + for _, price := range prices { + selector := price.SourceChainSelector + assert.Equal(t, expectedPrices[selector].GasPrice, price.GasPrice) + } +} + +func TestORM_InsertAndDeleteGasPrices(t *testing.T) { + t.Parallel() + ctx := testutils.Context(t) + + orm, db := setupORM(t) + + numSourceChainSelectors := 10 + numUpdatesPerSourceSelector := 20 + destSelector := uint64(1) + + sourceSelectors := generateChainSelectors(numSourceChainSelectors) + + updates := make(map[uint64][]GasPriceUpdate) + for _, selector := range sourceSelectors { + updates[selector] = generateGasPriceUpdates(selector, numUpdatesPerSourceSelector) + } + + for _, updatesPerSelector := range updates { + err := orm.InsertGasPricesForDestChain(ctx, destSelector, 1, updatesPerSelector) + assert.NoError(t, err) + } + + interimTimeStamp := time.Now() + + // insert for the 2nd time after interimTimeStamp + for _, updatesPerSelector := range updates { + err := orm.InsertGasPricesForDestChain(ctx, destSelector, 1, updatesPerSelector) + assert.NoError(t, err) + } + + assert.Equal(t, 2*numSourceChainSelectors*numUpdatesPerSourceSelector, getGasTableRowCount(t, db)) + + // clear by interimTimeStamp should delete rows inserted before it + err := orm.ClearGasPricesByDestChain(ctx, destSelector, interimTimeStamp) + assert.NoError(t, err) + assert.Equal(t, numSourceChainSelectors*numUpdatesPerSourceSelector, getGasTableRowCount(t, db)) + + // clear by Now() should delete all rows + err = orm.ClearGasPricesByDestChain(ctx, destSelector, time.Now()) + assert.NoError(t, err) + assert.Equal(t, 0, getGasTableRowCount(t, db)) +} + +func TestORM_InsertAndGetTokenPrices(t *testing.T) { + t.Parallel() + ctx := testutils.Context(t) + + orm, db := setupORM(t) + + numJobs := 5 + numAddresses := 10 + 
numUpdatesPerAddress := 20 + destSelector := uint64(1) + + addrs := generateTokenAddresses(numAddresses) + + updates := make(map[string][]TokenPriceUpdate) + for _, addr := range addrs { + updates[addr] = generateTokenPriceUpdates(addr, numUpdatesPerAddress) + } + + // 5 jobs, each inserting prices for 10 chains, with 20 updates per chain. + expectedPrices := make(map[string]TokenPriceUpdate) + for i := 0; i < numJobs; i++ { + for addr, updatesPerAddr := range updates { + lastIndex := len(updatesPerAddr) - 1 + + err := orm.InsertTokenPricesForDestChain(ctx, destSelector, int32(i), updatesPerAddr[:lastIndex]) + assert.NoError(t, err) + err = orm.InsertTokenPricesForDestChain(ctx, destSelector, int32(i), updatesPerAddr[lastIndex:]) + assert.NoError(t, err) + + expectedPrices[addr] = updatesPerAddr[lastIndex] + } + } + + // verify number of rows inserted + numRows := getTokenTableRowCount(t, db) + assert.Equal(t, numJobs*numAddresses*numUpdatesPerAddress, numRows) + + prices, err := orm.GetTokenPricesByDestChain(ctx, destSelector) + assert.NoError(t, err) + // should return 1 price per source chain selector + assert.Equal(t, numAddresses, len(prices)) + + // verify getTokenPrices returns prices of latest timestamp + for _, price := range prices { + addr := price.TokenAddr + assert.Equal(t, expectedPrices[addr].TokenPrice, price.TokenPrice) + } + + // after the initial inserts, insert new round of prices, 1 price per selector this time + var combinedUpdates []TokenPriceUpdate + for addr, updatesPerAddr := range updates { + combinedUpdates = append(combinedUpdates, updatesPerAddr[0]) + expectedPrices[addr] = updatesPerAddr[0] + } + + err = orm.InsertTokenPricesForDestChain(ctx, destSelector, 1, combinedUpdates) + assert.NoError(t, err) + assert.Equal(t, numJobs*numAddresses*numUpdatesPerAddress+numAddresses, getTokenTableRowCount(t, db)) + + prices, err = orm.GetTokenPricesByDestChain(ctx, destSelector) + assert.NoError(t, err) + assert.Equal(t, numAddresses, len(prices)) + + for _, price := range prices { + addr := price.TokenAddr + assert.Equal(t, expectedPrices[addr].TokenPrice, price.TokenPrice) + } +} + +func TestORM_InsertAndDeleteTokenPrices(t *testing.T) { + t.Parallel() + ctx := testutils.Context(t) + + orm, db := setupORM(t) + + numAddresses := 10 + numUpdatesPerAddress := 20 + destSelector := uint64(1) + + addrs := generateTokenAddresses(numAddresses) + + updates := make(map[string][]TokenPriceUpdate) + for _, addr := range addrs { + updates[addr] = generateTokenPriceUpdates(addr, numUpdatesPerAddress) + } + + for _, updatesPerAddr := range updates { + err := orm.InsertTokenPricesForDestChain(ctx, destSelector, 1, updatesPerAddr) + assert.NoError(t, err) + } + + interimTimeStamp := time.Now() + + // insert for the 2nd time after interimTimeStamp + for _, updatesPerAddr := range updates { + err := orm.InsertTokenPricesForDestChain(ctx, destSelector, 1, updatesPerAddr) + assert.NoError(t, err) + } + + assert.Equal(t, 2*numAddresses*numUpdatesPerAddress, getTokenTableRowCount(t, db)) + + // clear by interimTimeStamp should delete rows inserted before it + err := orm.ClearTokenPricesByDestChain(ctx, destSelector, interimTimeStamp) + assert.NoError(t, err) + assert.Equal(t, numAddresses*numUpdatesPerAddress, getTokenTableRowCount(t, db)) + + // clear by Now() should delete all rows + err = orm.ClearTokenPricesByDestChain(ctx, destSelector, time.Now()) + assert.NoError(t, err) + assert.Equal(t, 0, getTokenTableRowCount(t, db)) +} diff --git 
a/core/store/migrate/migrations/0236_ccip_prices_cache.sql b/core/store/migrate/migrations/0236_ccip_prices_cache.sql new file mode 100644 index 00000000000..e88b68e5575 --- /dev/null +++ b/core/store/migrate/migrations/0236_ccip_prices_cache.sql @@ -0,0 +1,36 @@ +-- +goose Up +-- +goose StatementBegin +CREATE SCHEMA ccip; + +CREATE TABLE ccip.observed_gas_prices( + chain_selector NUMERIC(20,0) NOT NULL, + job_id INTEGER NOT NULL, + source_chain_selector NUMERIC(20,0) NOT NULL, + gas_price NUMERIC(78,0) NOT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE TABLE ccip.observed_token_prices( + chain_selector NUMERIC(20,0) NOT NULL, + job_id INTEGER NOT NULL, + token_addr BYTEA NOT NULL, + token_price NUMERIC(78,0) NOT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX idx_ccip_gas_prices_chain_gas_price_timestamp ON ccip.observed_gas_prices (chain_selector, source_chain_selector, created_at DESC); +CREATE INDEX idx_ccip_token_prices_token_price_timestamp ON ccip.observed_token_prices (chain_selector, token_addr, created_at DESC); + +-- +goose StatementEnd + + +-- +goose Down +-- +goose StatementBegin +DROP INDEX IF EXISTS idx_ccip_token_prices_token_value; +DROP INDEX IF EXISTS idx_ccip_gas_prices_chain_value; + +DROP TABLE ccip.observed_token_prices; +DROP TABLE ccip.observed_gas_prices; + +DROP SCHEMA ccip; +-- +goose StatementEnd From e407400aab0850ce36ab26079cd24212764838e1 Mon Sep 17 00:00:00 2001 From: Jordan Krage Date: Tue, 7 May 2024 05:03:45 -0500 Subject: [PATCH 8/9] golangci-lint: add whitespace (#13030) --- .golangci.yml | 1 + common/client/multi_node.go | 3 --- common/client/multi_node_test.go | 3 --- common/client/node_fsm_test.go | 1 - common/client/node_lifecycle.go | 2 -- common/client/node_lifecycle_test.go | 9 --------- common/internal/utils/utils.go | 1 - common/txmgr/broadcaster.go | 1 - common/txmgr/confirmer.go | 3 --- core/capabilities/registry.go | 1 - core/capabilities/targets/write_target_test.go | 1 - core/chains/evm/assets/wei.go | 2 -- core/chains/evm/client/client_test.go | 2 -- core/chains/evm/client/errors_test.go | 1 - core/chains/evm/client/helpers_test.go | 2 -- core/chains/evm/client/node.go | 2 -- core/chains/evm/client/node_lifecycle_test.go | 1 - core/chains/evm/client/pool_test.go | 1 - core/chains/evm/client/sub_error_wrapper_test.go | 1 - core/chains/evm/forwarders/forwarder_manager.go | 1 - core/chains/evm/gas/block_history_estimator_test.go | 2 -- core/chains/evm/gas/fixed_price_estimator.go | 1 - core/chains/evm/headtracker/head_saver_test.go | 1 - core/chains/evm/headtracker/heads_test.go | 2 -- core/chains/evm/log/eth_subscriber.go | 2 -- core/chains/evm/log/helpers_test.go | 3 --- core/chains/evm/log/integration_test.go | 1 - core/chains/evm/log/registrations.go | 2 -- core/chains/evm/logpoller/log_poller_internal_test.go | 1 - core/chains/evm/logpoller/log_poller_test.go | 2 -- core/chains/evm/logpoller/orm.go | 1 - core/chains/evm/logpoller/orm_test.go | 1 - core/chains/evm/monitor/balance.go | 1 - core/chains/evm/testutils/config_test.go | 1 - core/chains/evm/txmgr/attempts.go | 1 - core/chains/evm/txmgr/broadcaster_test.go | 2 -- core/chains/evm/txmgr/confirmer_test.go | 6 ------ core/chains/evm/txmgr/evm_tx_store.go | 2 -- core/chains/evm/txmgr/evm_tx_store_test.go | 1 - core/chains/evm/txmgr/nonce_tracker.go | 1 - core/chains/evm/txmgr/nonce_tracker_test.go | 4 ---- core/chains/evm/txmgr/transmitchecker.go | 1 - core/chains/evm/types/address.go | 1 - 
core/chains/evm/types/block_json_benchmark_test.go | 1 - core/chains/evm/types/head_test.go | 1 - core/chains/evm/types/models.go | 3 --- core/chains/evm/types/models_test.go | 2 -- core/chains/evm/utils/ethabi_test.go | 3 --- core/chains/evm/utils/utils.go | 1 - core/chains/legacyevm/chain_test.go | 1 - core/cmd/app.go | 1 - core/cmd/cosmos_keys_commands_test.go | 1 - core/cmd/evm_transaction_commands_test.go | 1 - core/cmd/ocr2_keys_commands_test.go | 1 - core/cmd/shell.go | 1 - core/cmd/shell_local_test.go | 2 -- core/cmd/shell_remote_test.go | 2 -- core/cmd/shell_test.go | 4 ---- core/cmd/solana_keys_commands_test.go | 1 - core/cmd/starknet_keys_commands_test.go | 1 - core/config/toml/types.go | 3 --- core/gethwrappers/versions.go | 1 - core/internal/cltest/cltest.go | 3 --- core/internal/cltest/mocks.go | 1 - core/internal/testutils/evmtest/evmtest.go | 1 - core/logger/audit/audit_logger.go | 1 - core/scripts/chaincli/handler/keeper.go | 1 - core/scripts/chaincli/handler/ocr2_config.go | 1 - core/scripts/chaincli/handler/report.go | 3 --- core/scripts/common/helpers.go | 3 --- core/scripts/common/helpers_test.go | 1 - core/scripts/common/vrf/setup-envs/main.go | 2 -- core/scripts/functions/src/fetch_keys.go | 1 - core/scripts/functions/src/files_test.go | 1 - core/scripts/ocr2vrf/setup_ocr2vrf.go | 1 - core/scripts/vrfv2plus/testnet/proofs.go | 1 - core/services/blockhashstore/delegate.go | 3 --- core/services/blockheaderfeeder/delegate.go | 3 --- core/services/chainlink/config_general_test.go | 1 - core/services/chainlink/config_p2p.go | 1 - core/services/chainlink/config_pyroscope_test.go | 1 - core/services/chainlink/config_web_server_test.go | 1 - core/services/chainlink/relayer_chain_interoperators.go | 3 --- .../chainlink/relayer_chain_interoperators_test.go | 5 ----- core/services/chainlink/relayer_factory.go | 6 ------ core/services/feeds/service.go | 2 -- core/services/fluxmonitorv2/flux_monitor.go | 1 - core/services/fluxmonitorv2/integrations_test.go | 1 - core/services/fluxmonitorv2/orm.go | 1 - core/services/fluxmonitorv2/poll_manager.go | 1 - core/services/fluxmonitorv2/poll_manager_test.go | 1 - core/services/functions/connector_handler_test.go | 7 ------- core/services/functions/external_adapter_client_test.go | 1 - core/services/functions/orm_test.go | 1 - .../handlers/functions/allowlist/allowlist_test.go | 2 -- core/services/job/kv_orm.go | 1 - core/services/job/models.go | 1 - core/services/job/orm.go | 1 - core/services/job/runner_integration_test.go | 1 - core/services/job/spawner.go | 2 -- core/services/keeper/validate_test.go | 1 - core/services/keystore/eth_test.go | 1 - core/services/keystore/keys/exportutils.go | 2 -- core/services/keystore/keys/vrfkey/public_key_test.go | 1 - core/services/keystore/models_test.go | 1 - core/services/keystore/ocr2_test.go | 1 - core/services/keystore/starknet.go | 1 - core/services/keystore/starknet_test.go | 1 - core/services/llo/orm_test.go | 1 - core/services/nurse.go | 2 -- core/services/nurse_test.go | 1 - core/services/ocr/config_overrider.go | 1 - core/services/ocr2/delegate.go | 1 - core/services/ocr2/delegate_test.go | 1 - core/services/ocr2/plugins/generic/relayerset.go | 3 --- core/services/ocr2/plugins/generic/relayerset_test.go | 2 -- core/services/ocr2/plugins/llo/config/config.go | 1 - core/services/ocr2/plugins/mercury/config/config_test.go | 1 - .../ocr2/plugins/ocr2keeper/evmregistry/v20/abi_test.go | 1 - .../plugins/ocr2keeper/evmregistry/v20/encoder_test.go | 1 - 
.../ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go | 1 - .../evmregistry/v21/logprovider/integration_test.go | 1 - .../ocr2keeper/evmregistry/v21/logprovider/provider.go | 1 - .../evmregistry/v21/logprovider/provider_test.go | 1 - .../ocr2/plugins/ocr2keeper/evmregistry/v21/services.go | 1 - .../services/ocr2/plugins/ocr2keeper/integration_test.go | 1 - .../ocr2/plugins/ocr2vrf/coordinator/coordinator.go | 4 ---- .../ocr2/plugins/ocr2vrf/coordinator/coordinator_test.go | 1 - .../ocr2/plugins/ocr2vrf/coordinator/ocr_cache.go | 4 ---- .../ocr2/plugins/ocr2vrf/coordinator/ocr_cache_test.go | 3 --- .../plugins/ocr2vrf/internal/ocr2vrf_integration_test.go | 1 - .../ocr2vrf/reportserializer/report_serializer.go | 1 - core/services/ocr2/plugins/s4/plugin_test.go | 1 - core/services/ocrcommon/adapters_test.go | 1 - core/services/ocrcommon/block_translator_test.go | 1 - core/services/ocrcommon/data_source.go | 1 - core/services/ocrcommon/data_source_test.go | 1 - core/services/ocrcommon/discoverer_database_test.go | 1 - core/services/ocrcommon/peer_wrapper_test.go | 1 - core/services/ocrcommon/telemetry_test.go | 4 ---- core/services/ocrcommon/transmitter.go | 2 -- core/services/periodicbackup/backup_test.go | 1 - core/services/pg/connection_test.go | 1 - core/services/pg/stats.go | 1 - core/services/pg/stats_test.go | 1 - core/services/pipeline/common_eth.go | 1 - core/services/pipeline/common_http.go | 1 - core/services/pipeline/common_test.go | 1 - core/services/pipeline/graph_test.go | 1 - core/services/pipeline/orm.go | 1 - core/services/pipeline/orm_test.go | 2 -- core/services/pipeline/runner.go | 1 - core/services/pipeline/runner_test.go | 2 -- core/services/pipeline/scheduler.go | 1 - core/services/pipeline/scheduler_test.go | 1 - core/services/pipeline/task.bridge_test.go | 5 ----- core/services/pipeline/task.eth_tx_test.go | 2 -- core/services/pipeline/task.http_test.go | 1 - core/services/pipeline/task_object_params.go | 1 - core/services/pipeline/task_params.go | 1 - core/services/pipeline/task_params_test.go | 1 - core/services/relay/evm/config_poller_test.go | 1 - core/services/relay/evm/evm.go | 4 ---- .../services/relay/evm/functions/contract_transmitter.go | 1 - core/services/relay/evm/mercury/orm_test.go | 1 - core/services/relay/evm/mercury/wsrpc/pool.go | 1 - core/services/relay/evm/ocr2keeper.go | 1 - core/services/relay/evm/ocr2vrf.go | 2 -- core/services/relay/evm/relayer_extender.go | 1 - core/services/relay/evm/relayer_extender_test.go | 1 - core/services/relay/evm/types/size_helper_test.go | 2 -- core/services/signatures/ethdss/ethdss_test.go | 1 - core/services/signatures/secp256k1/field_test.go | 1 - .../services/synchronization/telemetry_ingress_client.go | 1 - .../synchronization/telemetry_ingress_client_test.go | 1 - core/services/telemetry/manager.go | 2 -- core/services/telemetry/manager_test.go | 4 ---- core/services/vrf/v2/coordinator_v2x_interface.go | 1 - core/services/vrf/v2/reverted_txns.go | 4 ---- core/services/workflows/engine_test.go | 1 - core/services/workflows/models_yaml_test.go | 1 - core/store/migrate/migrate_test.go | 3 --- core/store/models/errors.go | 1 - core/utils/collection_test.go | 2 -- core/utils/deferable_write_closer.go | 2 -- core/utils/deferable_write_closer_test.go | 1 - core/utils/utils.go | 1 - core/utils/utils_test.go | 1 - core/web/common.go | 1 - core/web/cors_test.go | 1 - core/web/cosmos_chains_controller_test.go | 1 - core/web/dkgencrypt_keys_controller_test.go | 1 - core/web/dkgsign_keys_controller_test.go | 1 - 
core/web/eth_keys_controller.go | 2 -- core/web/eth_keys_controller_test.go | 2 -- core/web/evm_chains_controller_test.go | 1 - core/web/evm_forwarders_controller_test.go | 1 - core/web/jobs_controller_test.go | 1 - core/web/lca_controller.go | 1 - core/web/log_controller_test.go | 2 -- core/web/loop_registry.go | 1 - core/web/resolver/chain_test.go | 3 --- core/web/resolver/helpers.go | 1 - core/web/resolver/job_proposal_spec_test.go | 2 -- core/web/resolver/node_test.go | 1 - core/web/router.go | 1 - tools/flakeytests/runner.go | 2 -- 207 files changed, 1 insertion(+), 332 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 7f127e9524d..2902503ed20 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -14,6 +14,7 @@ linters: - sqlclosecheck - noctx - depguard + - whitespace linters-settings: exhaustive: default-signifies-exhaustive: true diff --git a/common/client/multi_node.go b/common/client/multi_node.go index fa413df91aa..af8ff03f534 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -239,7 +239,6 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP return rpc, err } return n.RPC(), nil - } // selectNode returns the active Node, if it is still nodeStateAlive, otherwise it selects a new one from the NodeSelector. @@ -641,7 +640,6 @@ loop: // ignore critical error as it's reported in reportSendTxAnomalies result, _ := aggregateTxResults(errorsByCode) return result - } func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT, BATCH_ELEM]) reportSendTxAnomalies(tx TX, txResults <-chan sendTxResult) { @@ -759,7 +757,6 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP c.wg.Add(1) go c.reportSendTxAnomalies(tx, txResultsToReport) - }) if !ok { return fmt.Errorf("aborted while broadcasting tx - multiNode is stopped: %w", context.Canceled) diff --git a/common/client/multi_node_test.go b/common/client/multi_node_test.go index 9f6904fcaf2..d602fa30afd 100644 --- a/common/client/multi_node_test.go +++ b/common/client/multi_node_test.go @@ -373,7 +373,6 @@ func TestMultiNode_selectNode(t *testing.T) { newActiveNode, err := mn.selectNode() require.NoError(t, err) require.Equal(t, prevActiveNode.String(), newActiveNode.String()) - }) t.Run("Updates node if active is not healthy", func(t *testing.T) { t.Parallel() @@ -399,7 +398,6 @@ func TestMultiNode_selectNode(t *testing.T) { newActiveNode, err := mn.selectNode() require.NoError(t, err) require.Equal(t, newBest.String(), newActiveNode.String()) - }) t.Run("No active nodes - reports critical error", func(t *testing.T) { t.Parallel() @@ -418,7 +416,6 @@ func TestMultiNode_selectNode(t *testing.T) { require.EqualError(t, err, ErroringNodeError.Error()) require.Nil(t, node) tests.RequireLogMessage(t, observedLogs, "No live RPC nodes available") - }) } diff --git a/common/client/node_fsm_test.go b/common/client/node_fsm_test.go index 36cee65e09e..dc0ca0e7de8 100644 --- a/common/client/node_fsm_test.go +++ b/common/client/node_fsm_test.go @@ -118,7 +118,6 @@ func testTransition(t *testing.T, rpc *mockNodeClient[types.ID, Head], transitio }, "Expected transition from `%s` to `%s` to panic", nodeState, destinationState) m.AssertNotCalled(t) assert.Equal(t, nodeState, node.State(), "Expected node to remain in initial state on invalid transition") - } } diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index 4707a60426f..fa6397580c8 100644 --- 
a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -259,7 +259,6 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { n.stateLatestFinalizedBlockNumber = latestFinalizedBN } } - } } @@ -524,6 +523,5 @@ func (n *node[CHAIN_ID, HEAD, RPC]) syncingLoop() { n.declareAlive() return } - } } diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go index b3c09b35000..4bdfd698f7a 100644 --- a/common/client/node_lifecycle_test.go +++ b/common/client/node_lifecycle_test.go @@ -39,7 +39,6 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { node.setState(nodeStateClosed) node.wg.Add(1) node.aliveLoop() - }) t.Run("if initial subscribe fails, transitions to unreachable", func(t *testing.T) { t.Parallel() @@ -58,7 +57,6 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { tests.AssertEventually(t, func() bool { return node.State() == nodeStateUnreachable }) - }) t.Run("if remote RPC connection is closed transitions to unreachable", func(t *testing.T) { t.Parallel() @@ -150,7 +148,6 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { tests.AssertLogCountEventually(t, observedLogs, fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", node.String()), pollFailureThreshold) tests.AssertLogCountEventually(t, observedLogs, "Version poll successful", 2) assert.True(t, ensuredAlive.Load(), "expected to ensure that node was alive") - }) t.Run("with threshold poll failures, transitions to unreachable", func(t *testing.T) { t.Parallel() @@ -356,7 +353,6 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { node.declareAlive() tests.AssertLogEventually(t, observedLogs, "Subscription channel unexpectedly closed") assert.Equal(t, nodeStateUnreachable, node.State()) - }) t.Run("updates block number and difficulty on new head", func(t *testing.T) { t.Parallel() @@ -859,7 +855,6 @@ func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { node.setState(nodeStateClosed) node.wg.Add(1) node.unreachableLoop() - }) t.Run("on failed redial, keeps trying", func(t *testing.T) { t.Parallel() @@ -1017,7 +1012,6 @@ func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { node.setState(nodeStateClosed) node.wg.Add(1) node.invalidChainIDLoop() - }) t.Run("on invalid dial becomes unreachable", func(t *testing.T) { t.Parallel() @@ -1380,7 +1374,6 @@ func TestUnit_NodeLifecycle_syncStatus(t *testing.T) { } } } - }) t.Run("total difficulty selection mode", func(t *testing.T) { const syncThreshold = 10 @@ -1432,7 +1425,6 @@ func TestUnit_NodeLifecycle_syncStatus(t *testing.T) { }) } } - }) } @@ -1453,7 +1445,6 @@ func TestUnit_NodeLifecycle_SyncingLoop(t *testing.T) { node.setState(nodeStateClosed) node.wg.Add(1) node.syncingLoop() - }) t.Run("on invalid dial becomes unreachable", func(t *testing.T) { t.Parallel() diff --git a/common/internal/utils/utils.go b/common/internal/utils/utils.go index 1e285868c53..aeaad34a142 100644 --- a/common/internal/utils/utils.go +++ b/common/internal/utils/utils.go @@ -17,7 +17,6 @@ func NewRedialBackoff() backoff.Backoff { Max: 15 * time.Second, Jitter: true, } - } // MinFunc returns the minimum value of the given element array with respect diff --git a/common/txmgr/broadcaster.go b/common/txmgr/broadcaster.go index 1651f6417bf..2a9c1231d7b 100644 --- a/common/txmgr/broadcaster.go +++ b/common/txmgr/broadcaster.go @@ -598,7 +598,6 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) hand // trying to send the transaction over again. 
return fmt.Errorf("retryable error while sending transaction %s (tx ID %d): %w", attempt.Hash.String(), etx.ID, err), true } - } // Finds next transaction in the queue, assigns a sequence, and moves it to "in_progress" state ready for broadcast. diff --git a/common/txmgr/confirmer.go b/common/txmgr/confirmer.go index d61f9a3dddd..dd98df0a8fe 100644 --- a/common/txmgr/confirmer.go +++ b/common/txmgr/confirmer.go @@ -814,7 +814,6 @@ func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) bum func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) handleInProgressAttempt(ctx context.Context, lggr logger.SugaredLogger, etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], attempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], blockHeight int64) error { if attempt.State != txmgrtypes.TxAttemptInProgress { - return fmt.Errorf("invariant violation: expected tx_attempt %v to be in_progress, it was %s", attempt.ID, attempt.State) } @@ -1049,7 +1048,6 @@ func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) For ec.lggr.Infof("ForceRebroadcast: will rebroadcast transactions for all sequences between %v and %v", seqs[0], seqs[len(seqs)-1]) for _, seq := range seqs { - etx, err := ec.txStore.FindTxWithSequence(ctx, address, seq) if err != nil { return fmt.Errorf("ForceRebroadcast failed: %w", err) @@ -1098,7 +1096,6 @@ func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) sen // ResumePendingTaskRuns issues callbacks to task runs that are pending waiting for receipts func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) ResumePendingTaskRuns(ctx context.Context, head types.Head[BLOCK_HASH]) error { - receiptsPlus, err := ec.txStore.FindTxesPendingCallback(ctx, head.BlockNumber(), ec.chainID) if err != nil { diff --git a/core/capabilities/registry.go b/core/capabilities/registry.go index 4865116196e..3c7bdf2c971 100644 --- a/core/capabilities/registry.go +++ b/core/capabilities/registry.go @@ -147,7 +147,6 @@ func (r *Registry) Add(ctx context.Context, c capabilities.BaseCapability) error r.m[id] = c r.lggr.Infow("capability added", "id", id, "type", info.CapabilityType, "description", info.Description, "version", info.Version) return nil - } // NewRegistry returns a new Registry. 
diff --git a/core/capabilities/targets/write_target_test.go b/core/capabilities/targets/write_target_test.go index 744fcd9d2e7..96da502f10d 100644 --- a/core/capabilities/targets/write_target_test.go +++ b/core/capabilities/targets/write_target_test.go @@ -79,7 +79,6 @@ func TestEvmWrite(t *testing.T) { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, // len = 3 0x1, 0x2, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // elements [1, 2, 3] zero padded }, payload["data"]) - }) ch, err := capability.Execute(ctx, req) diff --git a/core/chains/evm/assets/wei.go b/core/chains/evm/assets/wei.go index 3621e4492a4..4fcdc15146b 100644 --- a/core/chains/evm/assets/wei.go +++ b/core/chains/evm/assets/wei.go @@ -117,7 +117,6 @@ func (w *Wei) text(suf string, exp int32) string { return "0" } return fmt.Sprintf("%s %s", d, suf) - } const u64Eth = 1_000_000_000_000_000_000 @@ -201,7 +200,6 @@ func (w *Wei) UnmarshalText(b []byte) error { } *w = (Wei)(*d.BigInt()) return nil - } // unrecognized or missing suffix d, err := decimal.NewFromString(s) diff --git a/core/chains/evm/client/client_test.go b/core/chains/evm/client/client_test.go index 62acf146e48..99053bd5aec 100644 --- a/core/chains/evm/client/client_test.go +++ b/core/chains/evm/client/client_test.go @@ -308,7 +308,6 @@ func TestEthClient_GetERC20Balance(t *testing.T) { assert.Equal(t, strings.ToLower(contractAddress.Hex()), callArgs.Get("to").String()) && assert.Equal(t, hexutil.Encode(txData), callArgs.Get("data").String()) && assert.Equal(t, "latest", arr[1].String()) { - resp.Result = `"` + hexutil.EncodeBig(test.balance) + `"` } return @@ -907,7 +906,6 @@ func TestEthClient_ErroringClient(t *testing.T) { _, err = erroringClient.TransactionReceipt(ctx, common.Hash{}) require.Equal(t, err, commonclient.ErroringNodeError) - } const headResult = client.HeadResult diff --git a/core/chains/evm/client/errors_test.go b/core/chains/evm/client/errors_test.go index 744abb89f60..bdf7bfbe726 100644 --- a/core/chains/evm/client/errors_test.go +++ b/core/chains/evm/client/errors_test.go @@ -94,7 +94,6 @@ func Test_Eth_Errors(t *testing.T) { }) t.Run("IsReplacementUnderpriced", func(t *testing.T) { - tests := []errorCase{ {"replacement transaction underpriced", true, "geth"}, {"Replacement transaction underpriced", true, "Besu"}, diff --git a/core/chains/evm/client/helpers_test.go b/core/chains/evm/client/helpers_test.go index 1db8958443c..7e2771a67d5 100644 --- a/core/chains/evm/client/helpers_test.go +++ b/core/chains/evm/client/helpers_test.go @@ -196,7 +196,6 @@ func NewChainClientWithEmptyNode( noNewHeadsThreshold time.Duration, chainID *big.Int, ) Client { - lggr := logger.Test(t) var chainType commonconfig.ChainType @@ -213,7 +212,6 @@ func NewChainClientWithMockedRpc( chainID *big.Int, rpc RPCClient, ) Client { - lggr := logger.Test(t) var chainType commonconfig.ChainType diff --git a/core/chains/evm/client/node.go b/core/chains/evm/client/node.go index 474ff2700b4..92b7a8301e5 100644 --- a/core/chains/evm/client/node.go +++ b/core/chains/evm/client/node.go @@ -828,7 +828,6 @@ func (n *node) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumb ) return - } func (n *node) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) (val []byte, err error) { @@ -855,7 +854,6 @@ func (n *node) PendingCallContract(ctx 
context.Context, msg ethereum.CallMsg) (v ) return - } func (n *node) BlockByNumber(ctx context.Context, number *big.Int) (b *types.Block, err error) { diff --git a/core/chains/evm/client/node_lifecycle_test.go b/core/chains/evm/client/node_lifecycle_test.go index 0fcaf54ae3d..878ecabe600 100644 --- a/core/chains/evm/client/node_lifecycle_test.go +++ b/core/chains/evm/client/node_lifecycle_test.go @@ -502,7 +502,6 @@ func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { state, num, _ := n.StateAndLatest() assert.Equal(t, NodeStateAlive, state) assert.Equal(t, int64(stall), num) - }) } diff --git a/core/chains/evm/client/pool_test.go b/core/chains/evm/client/pool_test.go index 462aeed43ee..5f614b7ed24 100644 --- a/core/chains/evm/client/pool_test.go +++ b/core/chains/evm/client/pool_test.go @@ -392,5 +392,4 @@ func TestUnit_Pool_LeaseDuration(t *testing.T) { nodeSwitch.isAlive = true nodeSwitch.mu.Unlock() testutils.WaitForLogMessage(t, observedLogs, `Switching to best node from "n2" to "n1"`) - } diff --git a/core/chains/evm/client/sub_error_wrapper_test.go b/core/chains/evm/client/sub_error_wrapper_test.go index 457d392a50e..5dd81069572 100644 --- a/core/chains/evm/client/sub_error_wrapper_test.go +++ b/core/chains/evm/client/sub_error_wrapper_test.go @@ -70,6 +70,5 @@ func TestSubscriptionErrorWrapper(t *testing.T) { _, ok = <-wrapper.Err() return !ok }) - }) } diff --git a/core/chains/evm/forwarders/forwarder_manager.go b/core/chains/evm/forwarders/forwarder_manager.go index 7a7a274127f..3f09d9b7679 100644 --- a/core/chains/evm/forwarders/forwarder_manager.go +++ b/core/chains/evm/forwarders/forwarder_manager.go @@ -189,7 +189,6 @@ func (f *FwdMgr) initForwardersCache(ctx context.Context, fwdrs []Forwarder) { continue } f.setCachedSenders(fwdr.Address, senders) - } } diff --git a/core/chains/evm/gas/block_history_estimator_test.go b/core/chains/evm/gas/block_history_estimator_test.go index 43f42c69203..1eeedf43896 100644 --- a/core/chains/evm/gas/block_history_estimator_test.go +++ b/core/chains/evm/gas/block_history_estimator_test.go @@ -1599,7 +1599,6 @@ func TestBlockHistoryEstimator_EffectiveGasPrice(t *testing.T) { res := bhe.EffectiveGasPrice(eipblock, tx) assert.Nil(t, res) }) - } func TestBlockHistoryEstimator_Block_Unmarshal(t *testing.T) { @@ -2343,7 +2342,6 @@ func TestBlockHistoryEstimator_CheckConnectivity(t *testing.T) { assert.Contains(t, err.Error(), fmt.Sprintf("transaction %s has tip cap of 10 wei, which is above percentile=60%% (percentile tip cap: 6 wei) for blocks 3 thru 3 (checking 1 blocks)", attempts[0].TxHash)) require.ErrorIs(t, err, commonfee.ErrConnectivity) }) - }) t.Run("in EIP-1559 mode", func(t *testing.T) { diff --git a/core/chains/evm/gas/fixed_price_estimator.go b/core/chains/evm/gas/fixed_price_estimator.go index f4749b093a1..7ac086bf067 100644 --- a/core/chains/evm/gas/fixed_price_estimator.go +++ b/core/chains/evm/gas/fixed_price_estimator.go @@ -118,7 +118,6 @@ func (f *fixedPriceEstimator) BumpDynamicFee( maxGasPriceWei *assets.Wei, _ []EvmPriorAttempt, ) (bumped DynamicFee, err error) { - return BumpDynamicFeeOnly( f.config, f.bhConfig.EIP1559FeeCapBufferBlocks(), diff --git a/core/chains/evm/headtracker/head_saver_test.go b/core/chains/evm/headtracker/head_saver_test.go index e53ea0cd629..78058efa560 100644 --- a/core/chains/evm/headtracker/head_saver_test.go +++ b/core/chains/evm/headtracker/head_saver_test.go @@ -143,5 +143,4 @@ func TestHeadSaver_Load(t *testing.T) { uncleChain := saver.Chain(h2Uncle.Hash) require.NotNil(t, uncleChain) 
require.Equal(t, uint32(2), uncleChain.ChainLength()) // h2Uncle -> h1 - } diff --git a/core/chains/evm/headtracker/heads_test.go b/core/chains/evm/headtracker/heads_test.go index 4241b462363..2f468e0f541 100644 --- a/core/chains/evm/headtracker/heads_test.go +++ b/core/chains/evm/headtracker/heads_test.go @@ -154,10 +154,8 @@ func TestHeads_MarkFinalized(t *testing.T) { require.True(t, heads.HeadByHash(head.Hash).IsFinalized, "expected h3 and all ancestors to be finalized", head.BlockNumber()) } require.False(t, heads.HeadByHash(h2Uncle.Hash).IsFinalized, "expected uncle block not to be marked as finalized") - } t.Run("blocks were correctly marked as finalized", ensureProperFinalization) heads.AddHeads(h0, h1, h2, h2Uncle, h3, h4, h5) t.Run("blocks remain finalized after re adding them to the Heads", ensureProperFinalization) - } diff --git a/core/chains/evm/log/eth_subscriber.go b/core/chains/evm/log/eth_subscriber.go index e5ba202dbf2..3d251a331a3 100644 --- a/core/chains/evm/log/eth_subscriber.go +++ b/core/chains/evm/log/eth_subscriber.go @@ -104,7 +104,6 @@ func (sub *ethSubscriber) backfillLogs(fromBlockOverride sql.NullInt64, addresse // On ethereum its 15MB [https://github.com/ethereum/go-ethereum/blob/master/rpc/websocket.go#L40] batchSize := int64(sub.config.LogBackfillBatchSize()) for from := q.FromBlock.Int64(); from <= latestHeight; from += batchSize { - to := from + batchSize - 1 if to > latestHeight { to = latestHeight @@ -204,7 +203,6 @@ func (sub *ethSubscriber) createSubscription(addresses []common.Address, topics defer cancel() utils.RetryWithBackoff(ctx, func() (retry bool) { - filterQuery := ethereum.FilterQuery{ Addresses: addresses, Topics: [][]common.Hash{topics}, diff --git a/core/chains/evm/log/helpers_test.go b/core/chains/evm/log/helpers_test.go index 13aeb8d2338..7f216e8545e 100644 --- a/core/chains/evm/log/helpers_test.go +++ b/core/chains/evm/log/helpers_test.go @@ -146,7 +146,6 @@ func (helper *broadcasterHelper) registerWithTopics(listener log.Listener, contr func (helper *broadcasterHelper) registerWithTopicValues(listener log.Listener, contract log.AbigenContract, numConfirmations uint32, topics map[common.Hash][][]log.Topic) { - unsubscribe := helper.lb.Register(listener, log.ListenerOpts{ Contract: contract.Address(), ParseLog: contract.ParseLog, @@ -328,10 +327,8 @@ func (listener *simpleLogListener) handleLogBroadcast(ctx context.Context, lb lo return false } if !consumed && !listener.skipMarkingConsumed.Load() { - err = listener.MarkConsumed(ctx, lb) if assert.NoError(t, err) { - consumed2, err := listener.WasAlreadyConsumed(ctx, lb) if assert.NoError(t, err) { assert.True(t, consumed2) diff --git a/core/chains/evm/log/integration_test.go b/core/chains/evm/log/integration_test.go index e34533b3cfb..6c06be93dbe 100644 --- a/core/chains/evm/log/integration_test.go +++ b/core/chains/evm/log/integration_test.go @@ -241,7 +241,6 @@ func TestBroadcaster_ReplaysLogs(t *testing.T) { <-cltest.SimulateIncomingHeads(t, blocks.Slice(12, 13), helper.lb) require.Eventually(t, func() bool { return len(listener.getUniqueLogs()) == 4 }, testutils.WaitTimeout(t), time.Second, "expected unique logs to be 4 but was %d", len(listener.getUniqueLogs())) - }() require.Eventually(t, func() bool { return helper.mockEth.UnsubscribeCallCount() >= 1 }, testutils.WaitTimeout(t), time.Second) diff --git a/core/chains/evm/log/registrations.go b/core/chains/evm/log/registrations.go index c82fee43b6e..68dd93b9d88 100644 --- a/core/chains/evm/log/registrations.go +++ 
b/core/chains/evm/log/registrations.go @@ -225,7 +225,6 @@ func (r *registrations) sendLogs(ctx context.Context, logsToSend []logsOnBlock, for _, logsPerBlock := range logsToSend { for numConfirmations, handlers := range r.handlersByConfs { - if numConfirmations != 0 && latestBlockNumber < uint64(numConfirmations) { // Skipping send because the block is definitely too young continue @@ -392,7 +391,6 @@ func (r *handler) sendLog(ctx context.Context, log types.Log, latestHead evmtype broadcasts map[LogBroadcastAsKey]bool, bc broadcastCreator, logger logger.Logger) { - topic := log.Topics[0] latestBlockNumber := uint64(latestHead.Number) diff --git a/core/chains/evm/logpoller/log_poller_internal_test.go b/core/chains/evm/logpoller/log_poller_internal_test.go index 4236f0b8ef1..b7dbb074568 100644 --- a/core/chains/evm/logpoller/log_poller_internal_test.go +++ b/core/chains/evm/logpoller/log_poller_internal_test.go @@ -268,7 +268,6 @@ func mockBatchCallContext(t *testing.T, ec *evmclimocks.Client) { } result := e.Result.(*evmtypes.Head) *result = evmtypes.Head{Number: num, Hash: utils.NewHash()} - } }) } diff --git a/core/chains/evm/logpoller/log_poller_test.go b/core/chains/evm/logpoller/log_poller_test.go index cb211043a4c..097c6f9eef2 100644 --- a/core/chains/evm/logpoller/log_poller_test.go +++ b/core/chains/evm/logpoller/log_poller_test.go @@ -75,7 +75,6 @@ func populateDatabase(t testing.TB, o logpoller.ORM, chainID *big.Int) (common.H Data: logpoller.EvmWord(uint64(i + 1000*j)).Bytes(), CreatedAt: blockTimestamp, }) - } require.NoError(t, o.InsertLogs(ctx, logs)) require.NoError(t, o.InsertBlock(ctx, utils.RandomHash(), int64((j+1)*1000-1), startDate.Add(time.Duration(j*1000)*time.Hour), 0)) @@ -1956,7 +1955,6 @@ func TestFindLCA(t *testing.T) { }).Once() _, err := lp.FindLCA(lCtx) require.ErrorContains(t, err, "aborted, FindLCA request cancelled") - }) t.Run("Fails, if RPC returns an error", func(t *testing.T) { expectedError := fmt.Errorf("failed to call RPC") diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index d065553886e..5084e0329a7 100644 --- a/core/chains/evm/logpoller/orm.go +++ b/core/chains/evm/logpoller/orm.go @@ -154,7 +154,6 @@ func (o *DSORM) DeleteFilter(ctx context.Context, name string) error { `DELETE FROM evm.log_poller_filters WHERE name = $1 AND evm_chain_id = $2`, name, ubig.New(o.chainID)) return err - } // LoadFilters returns all filters for this chain diff --git a/core/chains/evm/logpoller/orm_test.go b/core/chains/evm/logpoller/orm_test.go index 2a1be62dd5b..c89a39aa6b4 100644 --- a/core/chains/evm/logpoller/orm_test.go +++ b/core/chains/evm/logpoller/orm_test.go @@ -1728,7 +1728,6 @@ func Benchmark_DeleteExpiredLogs(b *testing.B) { for j := 0; j < 5; j++ { var dbLogs []logpoller.Log for i := 0; i < numberOfReports; i++ { - dbLogs = append(dbLogs, logpoller.Log{ EvmChainId: ubig.New(chainId), LogIndex: int64(i + 1), diff --git a/core/chains/evm/monitor/balance.go b/core/chains/evm/monitor/balance.go index 28bcdd9abdf..5ef41b63be1 100644 --- a/core/chains/evm/monitor/balance.go +++ b/core/chains/evm/monitor/balance.go @@ -103,7 +103,6 @@ func (bm *balanceMonitor) OnNewLongestChain(_ context.Context, head *evmtypes.He if !ok { bm.logger.Debugw("BalanceMonitor: ignoring OnNewLongestChain call, balance monitor is not started", "state", bm.State()) } - } func (bm *balanceMonitor) checkBalance(head *evmtypes.Head) { diff --git a/core/chains/evm/testutils/config_test.go b/core/chains/evm/testutils/config_test.go index 
0cbcc5eb63b..1f9d7be4445 100644 --- a/core/chains/evm/testutils/config_test.go +++ b/core/chains/evm/testutils/config_test.go @@ -18,5 +18,4 @@ func TestNewTestChainScopedConfigOverride(t *testing.T) { assert.Equal(t, uint32(100), c.EVM().FinalityDepth()) // fallback.toml values assert.Equal(t, false, c.EVM().GasEstimator().EIP1559DynamicFees()) - } diff --git a/core/chains/evm/txmgr/attempts.go b/core/chains/evm/txmgr/attempts.go index aa1fa8cdeb2..bf2d9a68edf 100644 --- a/core/chains/evm/txmgr/attempts.go +++ b/core/chains/evm/txmgr/attempts.go @@ -138,7 +138,6 @@ func (c *evmTxAttemptBuilder) NewEmptyTxAttempt(ctx context.Context, nonce evmty attempt.SignedRawTx = signedTxBytes attempt.Hash = hash return attempt, nil - } func (c *evmTxAttemptBuilder) newDynamicFeeAttempt(ctx context.Context, etx Tx, fee gas.DynamicFee, gasLimit uint64) (attempt TxAttempt, err error) { diff --git a/core/chains/evm/txmgr/broadcaster_test.go b/core/chains/evm/txmgr/broadcaster_test.go index 20c069a46d6..c80ae781034 100644 --- a/core/chains/evm/txmgr/broadcaster_test.go +++ b/core/chains/evm/txmgr/broadcaster_test.go @@ -1094,7 +1094,6 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) { // Saved NextNonce must be the same as before because this transaction // was not accepted by the eth node and never can be require.Equal(t, int64(localNextNonce), int64(nonce)) - }) t.Run("with callback", func(t *testing.T) { @@ -1641,7 +1640,6 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) { // TEARDOWN: Clear out the unsent tx before the next test pgtest.MustExec(t, db, `DELETE FROM evm.txes WHERE nonce = $1`, localNextNonce) }) - } func TestEthBroadcaster_ProcessUnstartedEthTxs_KeystoreErrors(t *testing.T) { diff --git a/core/chains/evm/txmgr/confirmer_test.go b/core/chains/evm/txmgr/confirmer_test.go index 44449b7a44f..db2d8a9092f 100644 --- a/core/chains/evm/txmgr/confirmer_test.go +++ b/core/chains/evm/txmgr/confirmer_test.go @@ -315,7 +315,6 @@ func TestEthConfirmer_CheckForReceipts(t *testing.T) { return len(b) == 2 && cltest.BatchElemMatchesParams(b[0], attempt1_1.Hash, "eth_getTransactionReceipt") && cltest.BatchElemMatchesParams(b[1], attempt2_1.Hash, "eth_getTransactionReceipt") - })).Return(nil).Run(func(args mock.Arguments) { elems := args.Get(1).([]rpc.BatchElem) // First transaction confirmed @@ -376,7 +375,6 @@ func TestEthConfirmer_CheckForReceipts(t *testing.T) { cltest.BatchElemMatchesParams(b[2], attempt2_1.Hash, "eth_getTransactionReceipt") && cltest.BatchElemMatchesParams(b[1], attempt2_2.Hash, "eth_getTransactionReceipt") && cltest.BatchElemMatchesParams(b[0], attempt2_3.Hash, "eth_getTransactionReceipt") - })).Return(nil).Run(func(args mock.Arguments) { elems := args.Get(1).([]rpc.BatchElem) // Most expensive attempt still unconfirmed @@ -933,7 +931,6 @@ func TestEthConfirmer_CheckForReceipts_confirmed_missing_receipt(t *testing.T) { cltest.BatchElemMatchesParams(b[3], attempt1_1.Hash, "eth_getTransactionReceipt") && cltest.BatchElemMatchesParams(b[4], attempt2_1.Hash, "eth_getTransactionReceipt") && cltest.BatchElemMatchesParams(b[5], attempt3_1.Hash, "eth_getTransactionReceipt") - })).Return(nil).Run(func(args mock.Arguments) { elems := args.Get(1).([]rpc.BatchElem) // First transaction confirmed @@ -999,7 +996,6 @@ func TestEthConfirmer_CheckForReceipts_confirmed_missing_receipt(t *testing.T) { cltest.BatchElemMatchesParams(b[0], attempt1_2.Hash, "eth_getTransactionReceipt") && cltest.BatchElemMatchesParams(b[1], attempt1_1.Hash, 
"eth_getTransactionReceipt") && cltest.BatchElemMatchesParams(b[2], attempt2_1.Hash, "eth_getTransactionReceipt") - })).Return(nil).Run(func(args mock.Arguments) { elems := args.Get(1).([]rpc.BatchElem) // First transaction still unconfirmed @@ -1046,7 +1042,6 @@ func TestEthConfirmer_CheckForReceipts_confirmed_missing_receipt(t *testing.T) { return len(b) == 2 && cltest.BatchElemMatchesParams(b[0], attempt1_2.Hash, "eth_getTransactionReceipt") && cltest.BatchElemMatchesParams(b[1], attempt1_1.Hash, "eth_getTransactionReceipt") - })).Return(nil).Run(func(args mock.Arguments) { elems := args.Get(1).([]rpc.BatchElem) // Both attempts still unconfirmed @@ -1087,7 +1082,6 @@ func TestEthConfirmer_CheckForReceipts_confirmed_missing_receipt(t *testing.T) { return len(b) == 2 && cltest.BatchElemMatchesParams(b[0], attempt1_2.Hash, "eth_getTransactionReceipt") && cltest.BatchElemMatchesParams(b[1], attempt1_1.Hash, "eth_getTransactionReceipt") - })).Return(nil).Run(func(args mock.Arguments) { elems := args.Get(1).([]rpc.BatchElem) // Both attempts still unconfirmed diff --git a/core/chains/evm/txmgr/evm_tx_store.go b/core/chains/evm/txmgr/evm_tx_store.go index dedba07b594..22b9b6678fa 100644 --- a/core/chains/evm/txmgr/evm_tx_store.go +++ b/core/chains/evm/txmgr/evm_tx_store.go @@ -1236,7 +1236,6 @@ func (o *evmTxStore) SaveConfirmedMissingReceiptAttempt(ctx context.Context, tim } if _, err := orm.q.ExecContext(ctx, `UPDATE evm.txes SET state = 'confirmed_missing_receipt' WHERE id = $1`, attempt.TxID); err != nil { return pkgerrors.Wrap(err, "failed to update evm.txes") - } return nil }) @@ -1778,7 +1777,6 @@ func (o *evmTxStore) CreateTransaction(ctx context.Context, txRequest TxRequest, var dbEtx DbEthTx err = o.Transact(ctx, false, func(orm *evmTxStore) error { if txRequest.PipelineTaskRunID != nil { - err = orm.q.GetContext(ctx, &dbEtx, `SELECT * FROM evm.txes WHERE pipeline_task_run_id = $1 AND evm_chain_id = $2`, txRequest.PipelineTaskRunID, chainID.String()) // If no eth_tx matches (the common case) then continue if !errors.Is(err, sql.ErrNoRows) { diff --git a/core/chains/evm/txmgr/evm_tx_store_test.go b/core/chains/evm/txmgr/evm_tx_store_test.go index cf38b2d1275..20b39f3a83e 100644 --- a/core/chains/evm/txmgr/evm_tx_store_test.go +++ b/core/chains/evm/txmgr/evm_tx_store_test.go @@ -1027,7 +1027,6 @@ func TestORM_SaveInProgressAttempt(t *testing.T) { attemptResult, err := txStore.FindTxAttempt(ctx, attempt.Hash) require.NoError(t, err) assert.Equal(t, txmgrtypes.TxAttemptInProgress, attemptResult.State) - }) } diff --git a/core/chains/evm/txmgr/nonce_tracker.go b/core/chains/evm/txmgr/nonce_tracker.go index 6fb708ed876..941775b7e85 100644 --- a/core/chains/evm/txmgr/nonce_tracker.go +++ b/core/chains/evm/txmgr/nonce_tracker.go @@ -77,7 +77,6 @@ func (s *nonceTracker) getSequenceForAddr(ctx context.Context, address common.Ad } s.lggr.Criticalw("failed to retrieve next sequence from on-chain for address: ", "address", address.String()) return seq, err - } // syncSequence tries to sync the key sequence, retrying indefinitely until success or stop signal is sent diff --git a/core/chains/evm/txmgr/nonce_tracker_test.go b/core/chains/evm/txmgr/nonce_tracker_test.go index c6af58ab7ac..17c042e375e 100644 --- a/core/chains/evm/txmgr/nonce_tracker_test.go +++ b/core/chains/evm/txmgr/nonce_tracker_test.go @@ -72,7 +72,6 @@ func TestNonceTracker_LoadSequenceMap(t *testing.T) { require.NoError(t, err) require.Equal(t, types.Nonce(randNonce2), seq) }) - } func TestNonceTracker_syncOnChain(t *testing.T) 
{ @@ -129,7 +128,6 @@ func TestNonceTracker_syncOnChain(t *testing.T) { require.NoError(t, err) require.Equal(t, types.Nonce(nonce), seq) }) - } func TestNonceTracker_SyncSequence(t *testing.T) { @@ -196,7 +194,6 @@ func TestNonceTracker_GetNextSequence(t *testing.T) { t.Run("fails to get sequence if address doesn't exist in map", func(t *testing.T) { _, err := nonceTracker.GetNextSequence(ctx, addr) require.Error(t, err) - }) t.Run("fails to get sequence if address doesn't exist in map and is disabled", func(t *testing.T) { @@ -227,7 +224,6 @@ func TestNonceTracker_GetNextSequence(t *testing.T) { seq, err := nonceTracker.GetNextSequence(ctx, addr) require.NoError(t, err) require.Equal(t, types.Nonce(txStoreNonce+1), seq) - }) } diff --git a/core/chains/evm/txmgr/transmitchecker.go b/core/chains/evm/txmgr/transmitchecker.go index 8956f2ae626..8d36903ac6a 100644 --- a/core/chains/evm/txmgr/transmitchecker.go +++ b/core/chains/evm/txmgr/transmitchecker.go @@ -357,5 +357,4 @@ func (v *VRFV2Checker) Check( "meta", tx.Meta, "vrfRequestId", vrfRequestID) return nil - } diff --git a/core/chains/evm/types/address.go b/core/chains/evm/types/address.go index 4a77ce5f8db..3c8b0621a9d 100644 --- a/core/chains/evm/types/address.go +++ b/core/chains/evm/types/address.go @@ -92,7 +92,6 @@ func (a *EIP55Address) UnmarshalJSON(input []byte) error { // Value returns this instance serialized for database storage. func (a EIP55Address) Value() (driver.Value, error) { return a.Bytes(), nil - } // Scan reads the database value and returns an instance. diff --git a/core/chains/evm/types/block_json_benchmark_test.go b/core/chains/evm/types/block_json_benchmark_test.go index 21c58bd1987..766b9099819 100644 --- a/core/chains/evm/types/block_json_benchmark_test.go +++ b/core/chains/evm/types/block_json_benchmark_test.go @@ -73,7 +73,6 @@ func unmarshal_block(b *testing.B, block *evmtypes.Block) { func BenchmarkBlock_Small_JSONUnmarshal(b *testing.B) { unmarshal_block(b, smallBlock) - } func BenchmarkBlock_Medium_JSONUnmarshal(b *testing.B) { diff --git a/core/chains/evm/types/head_test.go b/core/chains/evm/types/head_test.go index b4f1de25c6e..97c536a3444 100644 --- a/core/chains/evm/types/head_test.go +++ b/core/chains/evm/types/head_test.go @@ -47,5 +47,4 @@ func TestHead_LatestFinalizedHead(t *testing.T) { } }) } - } diff --git a/core/chains/evm/types/models.go b/core/chains/evm/types/models.go index 1bf47f84726..7e9d41205bf 100644 --- a/core/chains/evm/types/models.go +++ b/core/chains/evm/types/models.go @@ -369,7 +369,6 @@ var ErrMissingBlock = pkgerrors.New("missing block") // UnmarshalJSON unmarshals to a Block func (b *Block) UnmarshalJSON(data []byte) error { - var h codec.Handle = new(codec.JsonHandle) bi := blocks.BlockInternal{} @@ -419,7 +418,6 @@ const LegacyTxType = blocks.TxType(0x0) // UnmarshalJSON unmarshals a Transaction func (t *Transaction) UnmarshalJSON(data []byte) error { - var h codec.Handle = new(codec.JsonHandle) ti := blocks.TransactionInternal{} @@ -443,7 +441,6 @@ func (t *Transaction) UnmarshalJSON(data []byte) error { } func (t *Transaction) MarshalJSON() ([]byte, error) { - ti := toInternalTxn(*t) buf := bytes.NewBuffer(make([]byte, 0, 256)) diff --git a/core/chains/evm/types/models_test.go b/core/chains/evm/types/models_test.go index ef355a01bda..4757ddab5e6 100644 --- a/core/chains/evm/types/models_test.go +++ b/core/chains/evm/types/models_test.go @@ -1096,7 +1096,6 @@ func TestTransaction_UnmarshalJSON(t *testing.T) { err := got.UnmarshalJSON(tt.args.data) require.NoError(t, 
err) require.Equal(t, tt.want, got) - }) } } @@ -1149,7 +1148,6 @@ func assertTxnsEqual(t *testing.T, txns1, txns2 []evmtypes.Transaction) { } } func TestTxType_JSONRoundtrip(t *testing.T) { - t.Run("non zero", func(t *testing.T) { t.Parallel() want := evmtypes.TxType(2) diff --git a/core/chains/evm/utils/ethabi_test.go b/core/chains/evm/utils/ethabi_test.go index f28a083ff01..71cf15194d7 100644 --- a/core/chains/evm/utils/ethabi_test.go +++ b/core/chains/evm/utils/ethabi_test.go @@ -275,7 +275,6 @@ func TestEVMTranscodeBool(t *testing.T) { for _, tt := range tests { test := tt t.Run(test.name, func(t *testing.T) { - out, err := EVMTranscodeBool(test.input) assert.NoError(t, err) assert.Equal(t, test.output, hexutil.Encode(out)) @@ -361,7 +360,6 @@ func TestEVMTranscodeUint256(t *testing.T) { for _, tt := range tests { test := tt t.Run(test.name, func(t *testing.T) { - out, err := EVMTranscodeUint256(test.input) if test.wantError { assert.Error(t, err) @@ -451,7 +449,6 @@ func TestEVMTranscodeInt256(t *testing.T) { for _, tt := range tests { test := tt t.Run(test.name, func(t *testing.T) { - out, err := EVMTranscodeInt256(test.input) if test.wantError { assert.Error(t, err) diff --git a/core/chains/evm/utils/utils.go b/core/chains/evm/utils/utils.go index 85ae358e597..708a0ac5ff8 100644 --- a/core/chains/evm/utils/utils.go +++ b/core/chains/evm/utils/utils.go @@ -186,7 +186,6 @@ func NewRedialBackoff() backoff.Backoff { Max: 15 * time.Second, Jitter: true, } - } // RetryWithBackoff retries the sleeper and backs off if not Done diff --git a/core/chains/legacyevm/chain_test.go b/core/chains/legacyevm/chain_test.go index c10712d4b6b..a9297aa3b68 100644 --- a/core/chains/legacyevm/chain_test.go +++ b/core/chains/legacyevm/chain_test.go @@ -28,7 +28,6 @@ func TestLegacyChains(t *testing.T) { got, err := l.Get(c.ID().String()) assert.NoError(t, err) assert.Equal(t, c, got) - } func TestChainOpts_Validate(t *testing.T) { diff --git a/core/cmd/app.go b/core/cmd/app.go index 27757ae4d24..378f7e8bd64 100644 --- a/core/cmd/app.go +++ b/core/cmd/app.go @@ -128,7 +128,6 @@ func NewApp(s *Shell) *cli.App { } return nil - } app.After = func(c *cli.Context) error { if s.CloseLogger != nil { diff --git a/core/cmd/cosmos_keys_commands_test.go b/core/cmd/cosmos_keys_commands_test.go index 7c3b4ed19f7..a0a211a1984 100644 --- a/core/cmd/cosmos_keys_commands_test.go +++ b/core/cmd/cosmos_keys_commands_test.go @@ -80,7 +80,6 @@ func TestShell_CosmosKeys(t *testing.T) { require.Equal(t, 1, len(r.Renders)) keys := *r.Renders[0].(*cmd.CosmosKeyPresenters) assert.True(t, key.PublicKeyStr() == keys[0].PubKey) - }) t.Run("CreateCosmosKey", func(tt *testing.T) { diff --git a/core/cmd/evm_transaction_commands_test.go b/core/cmd/evm_transaction_commands_test.go index 5375abbacee..da153f6884b 100644 --- a/core/cmd/evm_transaction_commands_test.go +++ b/core/cmd/evm_transaction_commands_test.go @@ -190,7 +190,6 @@ func TestShell_SendEther_From_Txm(t *testing.T) { require.NoError(t, err) require.Len(t, attempts, 1) assert.Equal(t, attempts[0].Hash, output.Hash) - } func TestShell_SendEther_From_Txm_WEI(t *testing.T) { diff --git a/core/cmd/ocr2_keys_commands_test.go b/core/cmd/ocr2_keys_commands_test.go index b0c62f01aa5..c2fab273498 100644 --- a/core/cmd/ocr2_keys_commands_test.go +++ b/core/cmd/ocr2_keys_commands_test.go @@ -135,7 +135,6 @@ func TestShell_OCR2Keys(t *testing.T) { require.Equal(t, 1, len(r.Renders)) output := *r.Renders[0].(*cmd.OCR2KeyBundlePresenter) assert.Equal(t, key.ID(), output.ID) - }) 
t.Run("ImportExportOCR2Key", func(tt *testing.T) { diff --git a/core/cmd/shell.go b/core/cmd/shell.go index adbb66ce63f..a12f80f168b 100644 --- a/core/cmd/shell.go +++ b/core/cmd/shell.go @@ -203,7 +203,6 @@ func (n ChainlinkAppFactory) NewApplication(ctx context.Context, cfg chainlink.G TOMLConfigs: cfg.StarknetConfigs(), } initOps = append(initOps, chainlink.InitStarknet(ctx, relayerFactory, starkCfg)) - } relayChainInterops, err := chainlink.NewCoreRelayerChainInteroperators(initOps...) diff --git a/core/cmd/shell_local_test.go b/core/cmd/shell_local_test.go index e7322e513ae..5fbbff4260f 100644 --- a/core/cmd/shell_local_test.go +++ b/core/cmd/shell_local_test.go @@ -56,7 +56,6 @@ func genTestEVMRelayers(t *testing.T, opts legacyevm.ChainRelayExtenderConfig, k t.Fatal(err) } return relayers - } func TestShell_RunNodeWithPasswords(t *testing.T) { @@ -491,7 +490,6 @@ func TestShell_RebroadcastTransactions_AddressCheck(t *testing.T) { app.On("GetConfig").Return(config).Once() require.NoError(t, client.RebroadcastTransactions(c)) } - }) } } diff --git a/core/cmd/shell_remote_test.go b/core/cmd/shell_remote_test.go index cdbe12d66b4..f4661a58e82 100644 --- a/core/cmd/shell_remote_test.go +++ b/core/cmd/shell_remote_test.go @@ -258,7 +258,6 @@ func TestShell_DestroyExternalInitiator_NotFound(t *testing.T) { } func TestShell_RemoteLogin(t *testing.T) { - app := startNewApplicationV2(t, nil) orm := app.AuthenticationProvider() @@ -492,7 +491,6 @@ func TestShell_Profile_InvalidSecondsParam(t *testing.T) { err = client.Profile(cli.NewContext(nil, set, nil)) wantErr := cmd.ErrProfileTooLong require.ErrorAs(t, err, &wantErr) - } func TestShell_Profile(t *testing.T) { diff --git a/core/cmd/shell_test.go b/core/cmd/shell_test.go index d9ac44b46ef..1e3b93851f3 100644 --- a/core/cmd/shell_test.go +++ b/core/cmd/shell_test.go @@ -335,7 +335,6 @@ func TestFileSessionRequestBuilder(t *testing.T) { } func TestNewUserCache(t *testing.T) { - r, err := rand.Int(rand.Reader, big.NewInt(256*1024*1024)) require.NoError(t, err) // NewUserCache owns it's Dir. 
@@ -350,7 +349,6 @@ func TestNewUserCache(t *testing.T) { }() assert.DirExists(t, c.RootDir()) - } func TestSetupSolanaRelayer(t *testing.T) { @@ -603,11 +601,9 @@ func flagSetApplyFromAction(action interface{}, flagSet *flag.FlagSet, parentCom flag.Apply(flagSet) } } - } func recursiveFindFlagsWithName(actionFuncName string, command cli.Command, parent string, foundName bool) []cli.Flag { - if command.Action != nil { if actionFuncName == getFuncName(command.Action) && foundName { return command.Flags diff --git a/core/cmd/solana_keys_commands_test.go b/core/cmd/solana_keys_commands_test.go index 897031877c1..02b06f6596a 100644 --- a/core/cmd/solana_keys_commands_test.go +++ b/core/cmd/solana_keys_commands_test.go @@ -80,7 +80,6 @@ func TestShell_SolanaKeys(t *testing.T) { require.Equal(t, 1, len(r.Renders)) keys := *r.Renders[0].(*cmd.SolanaKeyPresenters) assert.True(t, key.PublicKeyStr() == keys[0].PubKey) - }) t.Run("CreateSolanaKey", func(tt *testing.T) { diff --git a/core/cmd/starknet_keys_commands_test.go b/core/cmd/starknet_keys_commands_test.go index 5823a80b46d..4f4b0bf6b77 100644 --- a/core/cmd/starknet_keys_commands_test.go +++ b/core/cmd/starknet_keys_commands_test.go @@ -79,7 +79,6 @@ func TestShell_StarkNetKeys(t *testing.T) { require.Equal(t, 1, len(r.Renders)) keys := *r.Renders[0].(*cmd.StarkNetKeyPresenters) assert.True(t, key.StarkKeyStr() == keys[0].StarkKey) - }) t.Run("CreateStarkNetKey", func(tt *testing.T) { diff --git a/core/config/toml/types.go b/core/config/toml/types.go index ba74528b3b6..e3e49dbb18b 100644 --- a/core/config/toml/types.go +++ b/core/config/toml/types.go @@ -501,7 +501,6 @@ func (p *AuditLogger) SetFrom(f *AuditLogger) { if v := f.Headers; v != nil { p.Headers = v } - } // LogLevel replaces dpanic with crit/CRIT @@ -882,7 +881,6 @@ func (j *JobPipeline) setFrom(f *JobPipeline) { j.VerboseLogging = v } j.HTTPRequest.setFrom(&f.HTTPRequest) - } type JobPipelineHTTPRequest struct { @@ -1110,7 +1108,6 @@ func (k *Keeper) setFrom(f *Keeper) { } k.Registry.setFrom(&f.Registry) - } type KeeperRegistry struct { diff --git a/core/gethwrappers/versions.go b/core/gethwrappers/versions.go index 43a59ddbb75..2d21f3d0618 100644 --- a/core/gethwrappers/versions.go +++ b/core/gethwrappers/versions.go @@ -51,7 +51,6 @@ func versionsDBLineReader() (*bufio.Scanner, error) { return nil, pkgerrors.Wrapf(err, "could not open versions database") } return bufio.NewScanner(versionsDBFile), nil - } // ReadVersionsDB populates an IntegratedVersion with all the info in the diff --git a/core/internal/cltest/cltest.go b/core/internal/cltest/cltest.go index 58cedbb96e1..ca9c1bdfcd5 100644 --- a/core/internal/cltest/cltest.go +++ b/core/internal/cltest/cltest.go @@ -341,7 +341,6 @@ func NewApplicationWithConfig(t testing.TB, cfg chainlink.GeneralConfig, flagsAn case UseRealExternalInitiatorManager: externalInitiatorManager = webhook.NewExternalInitiatorManager(ds, clhttptest.NewTestLocalOnlyHTTPClient()) } - } } @@ -411,7 +410,6 @@ func NewApplicationWithConfig(t testing.TB, cfg chainlink.GeneralConfig, flagsAn TOMLConfigs: cfg.StarknetConfigs(), } initOps = append(initOps, chainlink.InitStarknet(testCtx, relayerFactory, starkCfg)) - } relayChainInterops, err := chainlink.NewCoreRelayerChainInteroperators(initOps...) 
if err != nil { @@ -1017,7 +1015,6 @@ func HeadWithHash(n int64, hash common.Hash) *evmtypes.Head { time := uint64(0) h = evmtypes.NewHead(big.NewInt(n), hash, evmutils.NewHash(), time, ubig.New(&FixtureChainID)) return &h - } // LegacyTransactionsFromGasPrices returns transactions matching the given gas prices diff --git a/core/internal/cltest/mocks.go b/core/internal/cltest/mocks.go index 36d10981962..2d7c0afc20a 100644 --- a/core/internal/cltest/mocks.go +++ b/core/internal/cltest/mocks.go @@ -410,7 +410,6 @@ func NewLegacyChainsWithMockChain(t testing.TB, ethClient evmclient.Client, cfg ch.On("Config").Return(scopedCfg) return NewLegacyChainsWithChain(ch, cfg) - } func NewLegacyChainsWithMockChainAndTxManager(t testing.TB, ethClient evmclient.Client, cfg legacyevm.AppConfig, txm txmgr.TxManager) legacyevm.LegacyChainContainer { diff --git a/core/internal/testutils/evmtest/evmtest.go b/core/internal/testutils/evmtest/evmtest.go index 276dea2ac5d..d435252089a 100644 --- a/core/internal/testutils/evmtest/evmtest.go +++ b/core/internal/testutils/evmtest/evmtest.go @@ -53,7 +53,6 @@ func NewChainScopedConfig(t testing.TB, cfg legacyevm.AppConfig) evmconfig.Chain } return evmconfig.NewTOMLChainScopedConfig(evmCfg, logger.TestLogger(t)) - } type TestChainOpts struct { diff --git a/core/logger/audit/audit_logger.go b/core/logger/audit/audit_logger.go index 2f96c40586f..02cee24b0b2 100644 --- a/core/logger/audit/audit_logger.go +++ b/core/logger/audit/audit_logger.go @@ -251,7 +251,6 @@ func (l *AuditLoggerService) postLogToLogService(eventID EventID, data Data) { } l.logger.Errorw("error sending log to HTTP log service", "statusCode", resp.StatusCode, "bodyString", string(bodyBytes)) return - } } diff --git a/core/scripts/chaincli/handler/keeper.go b/core/scripts/chaincli/handler/keeper.go index 1f56eb14080..29a8f5bc9e2 100644 --- a/core/scripts/chaincli/handler/keeper.go +++ b/core/scripts/chaincli/handler/keeper.go @@ -246,7 +246,6 @@ func (k *Keeper) VerifyContract(params ...string) { fmt.Println("Running command to verify contract: ", command) if err := k.runCommand(command); err != nil { log.Println("Contract verification on Explorer failed: ", err) - } } diff --git a/core/scripts/chaincli/handler/ocr2_config.go b/core/scripts/chaincli/handler/ocr2_config.go index caa96112ea8..438b96466d0 100644 --- a/core/scripts/chaincli/handler/ocr2_config.go +++ b/core/scripts/chaincli/handler/ocr2_config.go @@ -20,7 +20,6 @@ import ( ) func OCR2GetConfig(hdlr *baseHandler, registry_addr string) error { - b, err := common.ParseHexOrString(registry_addr) if err != nil { return fmt.Errorf("failed to parse address hash: %s", err) diff --git a/core/scripts/chaincli/handler/report.go b/core/scripts/chaincli/handler/report.go index 1dcbb21ee83..eb4ce5c83ac 100644 --- a/core/scripts/chaincli/handler/report.go +++ b/core/scripts/chaincli/handler/report.go @@ -250,7 +250,6 @@ func (t *OCR2Transaction) To() *common.Address { } func (t *OCR2Transaction) From() (common.Address, error) { - switch t.tx.Type() { case 2: from, err := types.Sender(types.NewLondonSigner(t.tx.ChainId()), &t.tx) @@ -296,7 +295,6 @@ type OCR2TransmitTx struct { } func (t *OCR2TransmitTx) UpkeepsInTransmit() ([]ocr2keepers20.UpkeepResult, error) { - txData := t.tx.Data() // recover Method from signature and ABI @@ -367,7 +365,6 @@ func (t *OCR2TransmitTx) SetStaticValues(elem *OCR2ReportDataElem) { } func (t *OCR2TransmitTx) BatchElem() (rpc.BatchElem, error) { - bn, err := t.BlockNumber() if err != nil { return rpc.BatchElem{}, err diff 
--git a/core/scripts/common/helpers.go b/core/scripts/common/helpers.go index 0967991e62b..a71222e8749 100644 --- a/core/scripts/common/helpers.go +++ b/core/scripts/common/helpers.go @@ -462,7 +462,6 @@ func BinarySearch(top, bottom *big.Int, test func(amount *big.Int) bool) *big.In // Makes RPC network call eth_getBlockByNumber to blockchain RPC node // to fetch header info func GetRlpHeaders(env Environment, blockNumbers []*big.Int, getParentBlocks bool) (headers [][]byte, hashes []string, err error) { - hashes = make([]string, 0) offset := big.NewInt(0) @@ -513,7 +512,6 @@ func GetRlpHeaders(env Environment, blockNumbers []*big.Int, getParentBlocks boo hashes = append(hashes, h.Hash().String()) } else if IsPolygonEdgeNetwork(env.ChainID) { - // Get child block since it's the one that has the parent hash in its header. nextBlockNum := new(big.Int).Set(blockNum).Add(blockNum, offset) var hash string @@ -523,7 +521,6 @@ func GetRlpHeaders(env Environment, blockNumbers []*big.Int, getParentBlocks boo } hashes = append(hashes, hash) - } else { // Get child block since it's the one that has the parent hash in its header. h, err2 := env.Ec.HeaderByNumber( diff --git a/core/scripts/common/helpers_test.go b/core/scripts/common/helpers_test.go index 4ca0823d811..54c5a59b3ec 100644 --- a/core/scripts/common/helpers_test.go +++ b/core/scripts/common/helpers_test.go @@ -53,7 +53,6 @@ func TestBinarySearch(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - testFunc := func(val *big.Int) bool { return val.Cmp(big.NewInt(test.result)) < 1 } diff --git a/core/scripts/common/vrf/setup-envs/main.go b/core/scripts/common/vrf/setup-envs/main.go index 55a2cb5c3c2..a5198f8abbe 100644 --- a/core/scripts/common/vrf/setup-envs/main.go +++ b/core/scripts/common/vrf/setup-envs/main.go @@ -52,7 +52,6 @@ var ( ) func main() { - vrfPrimaryNodeURL := flag.String("vrf-primary-node-url", "", "remote node URL") vrfBackupNodeURL := flag.String("vrf-backup-node-url", "", "remote node URL") bhsNodeURL := flag.String("bhs-node-url", "", "remote node URL") @@ -198,7 +197,6 @@ func main() { importVRFKeyToNodeIfSet(vrfBackupNodeURL, nodesMap, output, nodesMap[model.VRFBackupNodeName].CredsFile) if *deployContractsAndCreateJobs { - contractAddresses := model.ContractAddresses{ LinkAddress: *linkAddress, LinkEthAddress: *linkEthAddress, diff --git a/core/scripts/functions/src/fetch_keys.go b/core/scripts/functions/src/fetch_keys.go index 4c3b11a7e28..265bd152290 100644 --- a/core/scripts/functions/src/fetch_keys.go +++ b/core/scripts/functions/src/fetch_keys.go @@ -40,5 +40,4 @@ func (g *fetchKeys) Run(args []string) { panic(err) } fmt.Println("Functions OCR2 public keys have been saved to:", filepath) - } diff --git a/core/scripts/functions/src/files_test.go b/core/scripts/functions/src/files_test.go index 4f6c5aeb24c..83ceb5cd9cc 100644 --- a/core/scripts/functions/src/files_test.go +++ b/core/scripts/functions/src/files_test.go @@ -31,7 +31,6 @@ func Test_writeLines(t *testing.T) { got, err := readLines(pth) assert.NoError(t, err) assert.Equal(t, tt.args.lines, got) - }) } } diff --git a/core/scripts/ocr2vrf/setup_ocr2vrf.go b/core/scripts/ocr2vrf/setup_ocr2vrf.go index 1094b823b4e..35d529b0262 100644 --- a/core/scripts/ocr2vrf/setup_ocr2vrf.go +++ b/core/scripts/ocr2vrf/setup_ocr2vrf.go @@ -214,7 +214,6 @@ func setupOCR2VRFNodes(e helpers.Environment) { if *useForwarder { fmt.Println("Setting authorized senders...") for i, f := range forwarderAddresses { - // Convert the sending strings for 
a transmitter to addresses. var sendinKeysAddresses []common.Address sendingKeysStrings := sendingKeys[i+1] diff --git a/core/scripts/vrfv2plus/testnet/proofs.go b/core/scripts/vrfv2plus/testnet/proofs.go index ef35fd3e0ec..23ddc8ecfd6 100644 --- a/core/scripts/vrfv2plus/testnet/proofs.go +++ b/core/scripts/vrfv2plus/testnet/proofs.go @@ -54,7 +54,6 @@ var rcTemplate = `{ ` func generateProofForV2Plus(e helpers.Environment) { - deployCmd := flag.NewFlagSet("generate-proof", flag.ExitOnError) keyHashString := deployCmd.String("key-hash", "", "key hash for VRF request") diff --git a/core/services/blockhashstore/delegate.go b/core/services/blockhashstore/delegate.go index 3c5109f82c1..2181084aeec 100644 --- a/core/services/blockhashstore/delegate.go +++ b/core/services/blockhashstore/delegate.go @@ -116,7 +116,6 @@ func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) ([]job.Servi var c *v1.VRFCoordinator if c, err = v1.NewVRFCoordinator( jb.BlockhashStoreSpec.CoordinatorV1Address.Address(), chain.Client()); err != nil { - return nil, errors.Wrap(err, "building V1 coordinator") } @@ -131,7 +130,6 @@ func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) ([]job.Servi var c *v2.VRFCoordinatorV2 if c, err = v2.NewVRFCoordinatorV2( jb.BlockhashStoreSpec.CoordinatorV2Address.Address(), chain.Client()); err != nil { - return nil, errors.Wrap(err, "building V2 coordinator") } @@ -146,7 +144,6 @@ func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) ([]job.Servi var c v2plus.IVRFCoordinatorV2PlusInternalInterface if c, err = v2plus.NewIVRFCoordinatorV2PlusInternal( jb.BlockhashStoreSpec.CoordinatorV2PlusAddress.Address(), chain.Client()); err != nil { - return nil, errors.Wrap(err, "building V2Plus coordinator") } diff --git a/core/services/blockheaderfeeder/delegate.go b/core/services/blockheaderfeeder/delegate.go index d848ba7c61e..36d1d1cf895 100644 --- a/core/services/blockheaderfeeder/delegate.go +++ b/core/services/blockheaderfeeder/delegate.go @@ -114,7 +114,6 @@ func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) ([]job.Servi var c *v1.VRFCoordinator if c, err = v1.NewVRFCoordinator( jb.BlockHeaderFeederSpec.CoordinatorV1Address.Address(), chain.Client()); err != nil { - return nil, errors.Wrap(err, "building V1 coordinator") } var coord *blockhashstore.V1Coordinator @@ -128,7 +127,6 @@ func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) ([]job.Servi var c *v2.VRFCoordinatorV2 if c, err = v2.NewVRFCoordinatorV2( jb.BlockHeaderFeederSpec.CoordinatorV2Address.Address(), chain.Client()); err != nil { - return nil, errors.Wrap(err, "building V2 coordinator") } var coord *blockhashstore.V2Coordinator @@ -142,7 +140,6 @@ func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) ([]job.Servi var c v2plus.IVRFCoordinatorV2PlusInternalInterface if c, err = v2plus.NewIVRFCoordinatorV2PlusInternal( jb.BlockHeaderFeederSpec.CoordinatorV2PlusAddress.Address(), chain.Client()); err != nil { - return nil, errors.Wrap(err, "building V2 plus coordinator") } var coord *blockhashstore.V2PlusCoordinator diff --git a/core/services/chainlink/config_general_test.go b/core/services/chainlink/config_general_test.go index 444f34abcbb..29393ee0fdd 100644 --- a/core/services/chainlink/config_general_test.go +++ b/core/services/chainlink/config_general_test.go @@ -114,7 +114,6 @@ func TestValidateDB(t *testing.T) { require.Error(t, err) require.ErrorIs(t, err, ErrInvalidSecrets) }) - } func TestConfig_LogSQL(t *testing.T) { diff --git 
a/core/services/chainlink/config_p2p.go b/core/services/chainlink/config_p2p.go index 4197358b148..5f1b9b88141 100644 --- a/core/services/chainlink/config_p2p.go +++ b/core/services/chainlink/config_p2p.go @@ -69,7 +69,6 @@ func (v *p2pv2) DeltaDial() commonconfig.Duration { func (v *p2pv2) DeltaReconcile() commonconfig.Duration { if d := v.c.DeltaReconcile; d != nil { return *d - } return commonconfig.Duration{} } diff --git a/core/services/chainlink/config_pyroscope_test.go b/core/services/chainlink/config_pyroscope_test.go index fc1f7788e0c..33078f41621 100644 --- a/core/services/chainlink/config_pyroscope_test.go +++ b/core/services/chainlink/config_pyroscope_test.go @@ -19,5 +19,4 @@ func TestPyroscopeConfigTest(t *testing.T) { require.Equal(t, "pyroscope-token", pcfg.AuthToken()) require.Equal(t, "http://localhost:4040", pcfg.ServerAddress()) require.Equal(t, "tests", pcfg.Environment()) - } diff --git a/core/services/chainlink/config_web_server_test.go b/core/services/chainlink/config_web_server_test.go index 946e0b0c12b..d96e6c05d5e 100644 --- a/core/services/chainlink/config_web_server_test.go +++ b/core/services/chainlink/config_web_server_test.go @@ -45,5 +45,4 @@ func TestWebServerConfig(t *testing.T) { mf := ws.MFA() assert.Equal(t, "test-rpid", mf.RPID()) assert.Equal(t, "test-rp-origin", mf.RPOrigin()) - } diff --git a/core/services/chainlink/relayer_chain_interoperators.go b/core/services/chainlink/relayer_chain_interoperators.go index 3ed3c3242ba..32bcc9f18a4 100644 --- a/core/services/chainlink/relayer_chain_interoperators.go +++ b/core/services/chainlink/relayer_chain_interoperators.go @@ -212,7 +212,6 @@ func (rs *CoreRelayerChainInteroperators) LegacyCosmosChains() LegacyCosmosConta // ChainStatus gets [types.ChainStatus] func (rs *CoreRelayerChainInteroperators) ChainStatus(ctx context.Context, id types.RelayID) (types.ChainStatus, error) { - lr, err := rs.Get(id) if err != nil { return types.ChainStatus{}, fmt.Errorf("%w: error getting chain status: %w", chains.ErrNotFound, err) @@ -222,7 +221,6 @@ func (rs *CoreRelayerChainInteroperators) ChainStatus(ctx context.Context, id ty } func (rs *CoreRelayerChainInteroperators) ChainStatuses(ctx context.Context, offset, limit int) ([]types.ChainStatus, int, error) { - var ( stats []types.ChainStatus totalErr error @@ -332,7 +330,6 @@ func FilterRelayersByType(network string) func(id types.RelayID) bool { // A typical usage pattern to use [List] with [FilterByType] to obtain a set of [RelayerChainInteroperators] // for a given chain func (rs *CoreRelayerChainInteroperators) List(filter FilterFn) RelayerChainInteroperators { - matches := make(map[types.RelayID]loop.Relayer) rs.mu.Lock() for id, relayer := range rs.loopRelayers { diff --git a/core/services/chainlink/relayer_chain_interoperators_test.go b/core/services/chainlink/relayer_chain_interoperators_test.go index 8111c1f61b4..c6183cc1a34 100644 --- a/core/services/chainlink/relayer_chain_interoperators_test.go +++ b/core/services/chainlink/relayer_chain_interoperators_test.go @@ -33,14 +33,12 @@ import ( ) func TestCoreRelayerChainInteroperators(t *testing.T) { - evmChainID1, evmChainID2 := ubig.New(big.NewInt(1)), ubig.New(big.NewInt(2)) solanaChainID1, solanaChainID2 := "solana-id-1", "solana-id-2" starknetChainID1, starknetChainID2 := "starknet-id-1", "starknet-id-2" cosmosChainID1, cosmosChainID2 := "cosmos-id-1", "cosmos-id-2" cfg := configtest.NewGeneralConfig(t, func(c *chainlink.Config, s *chainlink.Secrets) { - cfg := evmcfg.Defaults(evmChainID1) node1_1 := 
evmcfg.Node{ Name: ptr("Test node chain1:1"), @@ -403,7 +401,6 @@ func TestCoreRelayerChainInteroperators(t *testing.T) { assert.NoError(t, err) assert.Len(t, nodesStats, expectedNodeCnt) assert.Equal(t, cnt, len(nodesStats)) - } assert.EqualValues(t, gotRelayerNetworks, tt.expectedRelayerNetworks) @@ -442,9 +439,7 @@ func TestCoreRelayerChainInteroperators(t *testing.T) { unwanted, err := cr.Get(expectedMissing) assert.Nil(t, unwanted) assert.ErrorIs(t, err, chainlink.ErrNoSuchRelayer) - }) - } t.Run("bad init func", func(t *testing.T) { diff --git a/core/services/chainlink/relayer_factory.go b/core/services/chainlink/relayer_factory.go index 8bb06538f03..0e08ed5b420 100644 --- a/core/services/chainlink/relayer_factory.go +++ b/core/services/chainlink/relayer_factory.go @@ -102,7 +102,6 @@ func (r *RelayerFactory) NewSolana(ks keystore.Solana, chainCfgs solana.TOMLConf unique := make(map[string]struct{}) // create one relayer per chain id for _, chainCfg := range chainCfgs { - relayID := types.RelayID{Network: types.NetworkSolana, ChainID: *chainCfg.ChainID} _, alreadyExists := unique[relayID.Name()] if alreadyExists { @@ -119,7 +118,6 @@ func (r *RelayerFactory) NewSolana(ks keystore.Solana, chainCfgs solana.TOMLConf lggr := solLggr.Named(relayID.ChainID) if cmdName := env.SolanaPlugin.Cmd.Get(); cmdName != "" { - // setup the solana relayer to be a LOOP cfgTOML, err := toml.Marshal(struct { Solana solana.TOMLConfig @@ -142,7 +140,6 @@ func (r *RelayerFactory) NewSolana(ks keystore.Solana, chainCfgs solana.TOMLConf } solanaRelayers[relayID] = loop.NewRelayerService(lggr, r.GRPCOpts, solCmdFn, string(cfgTOML), signer) - } else { // fallback to embedded chain opts := solana.ChainOpts{ @@ -233,7 +230,6 @@ func (r *RelayerFactory) NewStarkNet(ks keystore.StarkNet, chainCfgs config.TOML } } return starknetRelayers, nil - } type CosmosFactoryConfig struct { @@ -290,8 +286,6 @@ func (r *RelayerFactory) NewCosmos(config CosmosFactoryConfig) (map[types.RelayI } relayers[relayID] = NewCosmosLoopRelayerChain(cosmos.NewRelayer(lggr, chain), chain) - } return relayers, nil - } diff --git a/core/services/feeds/service.go b/core/services/feeds/service.go index d6032befbdc..e185fbc8c39 100644 --- a/core/services/feeds/service.go +++ b/core/services/feeds/service.go @@ -1050,7 +1050,6 @@ func (s *service) observeJobProposalCounts(ctx context.Context) error { // Set the prometheus gauge metrics. for _, status := range []JobProposalStatus{JobProposalStatusPending, JobProposalStatusApproved, JobProposalStatusCancelled, JobProposalStatusRejected, JobProposalStatusDeleted, JobProposalStatusRevoked} { - status := status promJobProposalCounts.With(prometheus.Labels{"status": string(status)}).Set(metrics[status]) @@ -1131,7 +1130,6 @@ func (s *service) generateJob(ctx context.Context, spec string) (*job.Job, error js, err = fluxmonitorv2.ValidatedFluxMonitorSpec(s.jobCfg, spec) default: return nil, errors.Errorf("unknown job type: %s", jobType) - } if err != nil { return nil, err diff --git a/core/services/fluxmonitorv2/flux_monitor.go b/core/services/fluxmonitorv2/flux_monitor.go index dd30156e15e..31db95f2626 100644 --- a/core/services/fluxmonitorv2/flux_monitor.go +++ b/core/services/fluxmonitorv2/flux_monitor.go @@ -461,7 +461,6 @@ func formatTime(at time.Time) string { // SetOracleAddress sets the oracle address which matches the node's keys. 
// If none match, it uses the first available key func (fm *FluxMonitor) SetOracleAddress() error { - // fm on deprecation path, using dangling context ctx, cancel := fm.chStop.NewCtx() defer cancel() diff --git a/core/services/fluxmonitorv2/integrations_test.go b/core/services/fluxmonitorv2/integrations_test.go index 6b9dcb99262..2dacac54281 100644 --- a/core/services/fluxmonitorv2/integrations_test.go +++ b/core/services/fluxmonitorv2/integrations_test.go @@ -376,7 +376,6 @@ func assertNoSubmission(t *testing.T, duration time.Duration, msg string, ) { - // drain the channel for len(submissionReceived) > 0 { <-submissionReceived diff --git a/core/services/fluxmonitorv2/orm.go b/core/services/fluxmonitorv2/orm.go index e090b84ed04..2f7411d1190 100644 --- a/core/services/fluxmonitorv2/orm.go +++ b/core/services/fluxmonitorv2/orm.go @@ -120,7 +120,6 @@ func (o *orm) CreateEthTransaction( gasLimit uint64, idempotencyKey *string, ) (err error) { - _, err = o.txm.CreateTransaction(ctx, txmgr.TxRequest{ IdempotencyKey: idempotencyKey, FromAddress: fromAddress, diff --git a/core/services/fluxmonitorv2/poll_manager.go b/core/services/fluxmonitorv2/poll_manager.go index 356ce96aaea..78b99aec4d5 100644 --- a/core/services/fluxmonitorv2/poll_manager.go +++ b/core/services/fluxmonitorv2/poll_manager.go @@ -260,7 +260,6 @@ func (pm *PollManager) startPollTicker() { // startIdleTimer starts the idle timer if it is enabled func (pm *PollManager) startIdleTimer(roundStartedAtUTC uint64) { - if pm.cfg.IdleTimerDisabled { pm.idleTimer.Stop() diff --git a/core/services/fluxmonitorv2/poll_manager_test.go b/core/services/fluxmonitorv2/poll_manager_test.go index be6aa9a819b..610e05000e2 100644 --- a/core/services/fluxmonitorv2/poll_manager_test.go +++ b/core/services/fluxmonitorv2/poll_manager_test.go @@ -343,7 +343,6 @@ func TestPollManager_ShouldPerformInitialPoll(t *testing.T) { assert.Equal(t, tc.want, pm.ShouldPerformInitialPoll()) }) - } } diff --git a/core/services/functions/connector_handler_test.go b/core/services/functions/connector_handler_test.go index aadc84ba96a..7e1f3ced34f 100644 --- a/core/services/functions/connector_handler_test.go +++ b/core/services/functions/connector_handler_test.go @@ -130,7 +130,6 @@ func TestFunctionsConnectorHandler(t *testing.T) { msg, ok := args[2].(*api.Message) require.True(t, ok) require.Equal(t, `{"success":true,"rows":[{"slot_id":1,"version":1,"expiration":1},{"slot_id":2,"version":2,"expiration":2}]}`, string(msg.Body.Payload)) - }).Return(nil).Once() handler.HandleGatewayMessage(ctx, "gw1", &msg) @@ -142,7 +141,6 @@ func TestFunctionsConnectorHandler(t *testing.T) { msg, ok := args[2].(*api.Message) require.True(t, ok) require.Equal(t, `{"success":false,"error_message":"Failed to list secrets: boom"}`, string(msg.Body.Payload)) - }).Return(nil).Once() handler.HandleGatewayMessage(ctx, "gw1", &msg) @@ -187,7 +185,6 @@ func TestFunctionsConnectorHandler(t *testing.T) { msg, ok := args[2].(*api.Message) require.True(t, ok) require.Equal(t, `{"success":true}`, string(msg.Body.Payload)) - }).Return(nil).Once() handler.HandleGatewayMessage(ctx, "gw1", &msg) @@ -200,7 +197,6 @@ func TestFunctionsConnectorHandler(t *testing.T) { msg, ok := args[2].(*api.Message) require.True(t, ok) require.Equal(t, `{"success":false,"error_message":"Failed to set secret: boom"}`, string(msg.Body.Payload)) - }).Return(nil).Once() handler.HandleGatewayMessage(ctx, "gw1", &msg) @@ -216,7 +212,6 @@ func TestFunctionsConnectorHandler(t *testing.T) { msg, ok := args[2].(*api.Message) 
require.True(t, ok) require.Equal(t, `{"success":false,"error_message":"Failed to set secret: wrong signature"}`, string(msg.Body.Payload)) - }).Return(nil).Once() handler.HandleGatewayMessage(ctx, "gw1", &msg) @@ -231,7 +226,6 @@ func TestFunctionsConnectorHandler(t *testing.T) { msg, ok := args[2].(*api.Message) require.True(t, ok) require.Equal(t, `{"success":false,"error_message":"Bad request to set secret: invalid character 's' looking for beginning of object key string"}`, string(msg.Body.Payload)) - }).Return(nil).Once() handler.HandleGatewayMessage(ctx, "gw1", &msg) @@ -244,7 +238,6 @@ func TestFunctionsConnectorHandler(t *testing.T) { msg, ok := args[2].(*api.Message) require.True(t, ok) require.Equal(t, `{"success":false,"error_message":"user subscription has insufficient balance"}`, string(msg.Body.Payload)) - }).Return(nil).Once() handler.HandleGatewayMessage(ctx, "gw1", &msg) diff --git a/core/services/functions/external_adapter_client_test.go b/core/services/functions/external_adapter_client_test.go index 4ce78ee3fc3..aab969a5050 100644 --- a/core/services/functions/external_adapter_client_test.go +++ b/core/services/functions/external_adapter_client_test.go @@ -215,7 +215,6 @@ func TestRunComputation_ContextRespected(t *testing.T) { } func TestRunComputationRetrial(t *testing.T) { - t.Run("OK-retry_succeeds_after_one_failure", func(t *testing.T) { counter := 0 ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { diff --git a/core/services/functions/orm_test.go b/core/services/functions/orm_test.go index 37b3a28256f..859781dada7 100644 --- a/core/services/functions/orm_test.go +++ b/core/services/functions/orm_test.go @@ -258,7 +258,6 @@ func TestORM_FindOldestEntriesByState(t *testing.T) { require.Equal(t, defaultGasLimit, *result[0].CallbackGasLimit) require.Equal(t, defaultCoordinatorContract, *result[0].CoordinatorContractAddress) require.Equal(t, defaultMetadata, result[0].OnchainMetadata) - }) t.Run("with no limit", func(t *testing.T) { diff --git a/core/services/gateway/handlers/functions/allowlist/allowlist_test.go b/core/services/gateway/handlers/functions/allowlist/allowlist_test.go index d4900627bdb..500985acc31 100644 --- a/core/services/gateway/handlers/functions/allowlist/allowlist_test.go +++ b/core/services/gateway/handlers/functions/allowlist/allowlist_test.go @@ -314,11 +314,9 @@ func TestUpdateFromContract(t *testing.T) { return allowlist.Allow(common.HexToAddress(addr1)) && !allowlist.Allow(common.HexToAddress(addr3)) }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) }) - } func TestExtractContractVersion(t *testing.T) { - type tc struct { name string versionStr string diff --git a/core/services/job/kv_orm.go b/core/services/job/kv_orm.go index 63384efc25b..ba2b8d5f9ab 100644 --- a/core/services/job/kv_orm.go +++ b/core/services/job/kv_orm.go @@ -36,7 +36,6 @@ func NewKVStore(jobID int32, ds sqlutil.DataSource, lggr logger.Logger) kVStore // Store saves []byte value by key. 
func (kv kVStore) Store(ctx context.Context, key string, val []byte) error { - sql := `INSERT INTO job_kv_store (job_id, key, val_bytea) VALUES ($1, $2, $3) ON CONFLICT (job_id, key) DO UPDATE SET diff --git a/core/services/job/models.go b/core/services/job/models.go index 3d510efa0d2..578e9e079b8 100644 --- a/core/services/job/models.go +++ b/core/services/job/models.go @@ -404,7 +404,6 @@ func (s *OCR2OracleSpec) getChainID() (string, error) { } func (s *OCR2OracleSpec) getChainIdFromRelayConfig() (string, error) { - v, exists := s.RelayConfig["chainID"] if !exists { return "", fmt.Errorf("chainID does not exist") diff --git a/core/services/job/orm.go b/core/services/job/orm.go index d87b0204263..d54d6fba522 100644 --- a/core/services/job/orm.go +++ b/core/services/job/orm.go @@ -1121,7 +1121,6 @@ func (o *orm) loadPipelineRunIDs(ctx context.Context, jobID *int32, offset, limi return } lggr.Debugw("loadPipelineRunIDs empty batch", "minId", minID, "maxID", maxID, "n", n, "len(ids)", len(ids), "limit", limit, "offset", offset, "skipped", skipped) - } } maxID = minID - 1 diff --git a/core/services/job/runner_integration_test.go b/core/services/job/runner_integration_test.go index cdfe39dd17f..0232fbadd9f 100644 --- a/core/services/job/runner_integration_test.go +++ b/core/services/job/runner_integration_test.go @@ -555,7 +555,6 @@ answer1 [type=median index=0]; } for _, tc := range testCases { - config = configtest.NewGeneralConfig(t, func(c *chainlink.Config, s *chainlink.Secrets) { c.P2P.V2.Enabled = ptr(true) c.P2P.V2.ListenAddresses = &[]string{fmt.Sprintf("127.0.0.1:%d", freeport.GetOne(t))} diff --git a/core/services/job/spawner.go b/core/services/job/spawner.go index 6bb2cdbf76b..f7cd5e2bcb3 100644 --- a/core/services/job/spawner.go +++ b/core/services/job/spawner.go @@ -107,7 +107,6 @@ func (js *spawner) Start(ctx context.Context) error { return js.StartOnce("JobSpawner", func() error { js.startAllServices(ctx) return nil - }) } @@ -116,7 +115,6 @@ func (js *spawner) Close() error { close(js.chStop) js.stopAllServices() return nil - }) } diff --git a/core/services/keeper/validate_test.go b/core/services/keeper/validate_test.go index 598bc3da4ed..cfa6a1520f5 100644 --- a/core/services/keeper/validate_test.go +++ b/core/services/keeper/validate_test.go @@ -192,5 +192,4 @@ func TestValidatedKeeperSpec(t *testing.T) { require.Equal(t, tt.want.updatedAt, got.KeeperSpec.UpdatedAt) }) } - } diff --git a/core/services/keystore/eth_test.go b/core/services/keystore/eth_test.go index 4a9c8a952ff..b27ec54956b 100644 --- a/core/services/keystore/eth_test.go +++ b/core/services/keystore/eth_test.go @@ -305,7 +305,6 @@ func Test_EthKeyStore_GetRoundRobinAddress(t *testing.T) { } { - // k2 and k4 are disabled address for SimulatedChainID so even though it's whitelisted, it will be ignored addresses := []common.Address{k4.Address, k3.Address, k1.Address, k2.Address, testutils.NewAddress()} diff --git a/core/services/keystore/keys/exportutils.go b/core/services/keystore/keys/exportutils.go index 5d75b5076e6..c8209bf5c4f 100644 --- a/core/services/keystore/keys/exportutils.go +++ b/core/services/keystore/keys/exportutils.go @@ -32,7 +32,6 @@ func FromEncryptedJSON[E Encrypted, K any]( passwordFunc func(string) string, privKeyToKey func(export E, rawPrivKey []byte) (K, error), ) (K, error) { - // unmarshal byte data to [E] Encrypted key export var export E if err := json.Unmarshal(keyJSON, &export); err != nil { @@ -64,7 +63,6 @@ func ToEncryptedJSON[E Encrypted, K any]( passwordFunc func(string) 
string, buildExport func(id string, key K, cryptoJSON keystore.CryptoJSON) E, ) (export []byte, err error) { - // encrypt data using prefixed password cryptoJSON, err := keystore.EncryptDataV3( raw, diff --git a/core/services/keystore/keys/vrfkey/public_key_test.go b/core/services/keystore/keys/vrfkey/public_key_test.go index dec6ae1a56a..70c26ae27fe 100644 --- a/core/services/keystore/keys/vrfkey/public_key_test.go +++ b/core/services/keystore/keys/vrfkey/public_key_test.go @@ -33,7 +33,6 @@ func TestValueScanIdentityPointSet(t *testing.T) { assert.Equal(t, pk, nnPk, "setting one PubliKey to another should result in equal keys") } - } // Tests that PublicKey.Hash gives the same result as the VRFCoordinator's diff --git a/core/services/keystore/models_test.go b/core/services/keystore/models_test.go index 93c0f5fcb25..25331a3b218 100644 --- a/core/services/keystore/models_test.go +++ b/core/services/keystore/models_test.go @@ -167,5 +167,4 @@ func TestKeyRing_Encrypt_Decrypt(t *testing.T) { _, err = originalKeyRing.LegacyKeys.UnloadUnsupported(nil) require.Error(t, err) }) - } diff --git a/core/services/keystore/ocr2_test.go b/core/services/keystore/ocr2_test.go index f2c8715ab4f..7288b86d1d1 100644 --- a/core/services/keystore/ocr2_test.go +++ b/core/services/keystore/ocr2_test.go @@ -61,7 +61,6 @@ func Test_OCR2KeyStore_E2E(t *testing.T) { created := map[chaintype.ChainType]bool{} for _, chain := range chaintype.SupportedChainTypes { - // validate no keys exist for chain keys, err := ks.GetAllOfType(chain) require.NoError(t, err) diff --git a/core/services/keystore/starknet.go b/core/services/keystore/starknet.go index e6655a4d3f2..19f8ccb7b3f 100644 --- a/core/services/keystore/starknet.go +++ b/core/services/keystore/starknet.go @@ -168,7 +168,6 @@ var _ loop.Keystore = &StarknetLooppSigner{} // the returned []byte is an encoded [github.com/smartcontractkit/chainlink-common/pkg/loop/adapters/starknet.Signature]. // this enables compatibility with [github.com/smartcontractkit/chainlink-starknet/relayer/pkg/chainlink/txm.NewKeystoreAdapter] func (lk *StarknetLooppSigner) Sign(ctx context.Context, id string, hash []byte) ([]byte, error) { - k, err := lk.Get(id) if err != nil { return nil, err diff --git a/core/services/keystore/starknet_test.go b/core/services/keystore/starknet_test.go index 97d4219272b..aa610124531 100644 --- a/core/services/keystore/starknet_test.go +++ b/core/services/keystore/starknet_test.go @@ -139,7 +139,6 @@ func TestStarknetSigner(t *testing.T) { // TODO BCF-2242 remove this test once we have starknet smoke/integration tests // that exercise the transaction signing. 
t.Run("keystore adapter integration", func(t *testing.T) { - adapter := starktxm.NewKeystoreAdapter(lk) baseKs.On("Get", starknetSenderAddr).Return(starkKey, nil) hash, err := curve.Curve.PedersenHash([]*big.Int{big.NewInt(42)}) diff --git a/core/services/llo/orm_test.go b/core/services/llo/orm_test.go index 63a6ac21e3b..a25a1bdea2f 100644 --- a/core/services/llo/orm_test.go +++ b/core/services/llo/orm_test.go @@ -30,7 +30,6 @@ func Test_ORM(t *testing.T) { assert.Zero(t, cd) assert.Zero(t, blockNum) - }) t.Run("loads channel definitions from database", func(t *testing.T) { expectedBlockNum := rand.Int63() diff --git a/core/services/nurse.go b/core/services/nurse.go index 1b44beea21c..a9069b5181d 100644 --- a/core/services/nurse.go +++ b/core/services/nurse.go @@ -318,7 +318,6 @@ func (n *Nurse) gatherCPU(now time.Time, wg *sync.WaitGroup) { n.log.Errorw("could not close cpu profile", "err", err) return } - } func (n *Nurse) gatherTrace(now time.Time, wg *sync.WaitGroup) { @@ -485,5 +484,4 @@ func (n *Nurse) listProfiles() ([]fs.FileInfo, error) { out = append(out, info) } return out, nil - } diff --git a/core/services/nurse_test.go b/core/services/nurse_test.go index 7521168aa3f..4597eeb456b 100644 --- a/core/services/nurse_test.go +++ b/core/services/nurse_test.go @@ -98,7 +98,6 @@ func (c mockConfig) GoroutineThreshold() int { } func TestNurse(t *testing.T) { - l := logger.TestLogger(t) nrse := NewNurse(newMockConfig(t), l) nrse.AddCheck("test", func() (bool, Meta) { return true, Meta{} }) diff --git a/core/services/ocr/config_overrider.go b/core/services/ocr/config_overrider.go index a58cb402695..435efa437c7 100644 --- a/core/services/ocr/config_overrider.go +++ b/core/services/ocr/config_overrider.go @@ -53,7 +53,6 @@ func NewConfigOverriderImpl( flags *ContractFlags, pollTicker utils.TickerBase, ) (*ConfigOverriderImpl, error) { - if !flags.ContractExists() { return nil, errors.Errorf("OCRConfigOverrider: Flags contract instance is missing, the contract does not exist: %s. 
"+ "Please create the contract or remove the OCR.TransmitterAddress configuration variable", contractAddress.Address()) diff --git a/core/services/ocr2/delegate.go b/core/services/ocr2/delegate.go index 7747cad3360..1addae25601 100644 --- a/core/services/ocr2/delegate.go +++ b/core/services/ocr2/delegate.go @@ -1426,7 +1426,6 @@ func (d *Delegate) newServicesOCR2Keepers20( cfg ocr2keeper.PluginConfig, spec *job.OCR2OracleSpec, ) ([]job.ServiceCtx, error) { - rid, err := spec.RelayID() if err != nil { return nil, ErrJobSpecNoRelayer{Err: err, PluginName: "keepers2.0"} diff --git a/core/services/ocr2/delegate_test.go b/core/services/ocr2/delegate_test.go index bae1f5f3e78..720ad308348 100644 --- a/core/services/ocr2/delegate_test.go +++ b/core/services/ocr2/delegate_test.go @@ -169,7 +169,6 @@ func TestGetEVMEffectiveTransmitterID(t *testing.T) { if !jb.ForwardingAllowed { require.Equal(t, jb.OCR2OracleSpec.TransmitterID.String, effectiveTransmitterID) } - }) } diff --git a/core/services/ocr2/plugins/generic/relayerset.go b/core/services/ocr2/plugins/generic/relayerset.go index 0586f600c50..229166dd36a 100644 --- a/core/services/ocr2/plugins/generic/relayerset.go +++ b/core/services/ocr2/plugins/generic/relayerset.go @@ -20,7 +20,6 @@ type RelayerSet struct { } func NewRelayerSet(relayGetter RelayGetter, externalJobID uuid.UUID, jobID int32, isNew bool) (*RelayerSet, error) { - wrappedRelayers := map[types.RelayID]core.Relayer{} relayers, err := relayGetter.GetIDToRelayerMap() @@ -44,7 +43,6 @@ func (r *RelayerSet) Get(_ context.Context, id types.RelayID) (core.Relayer, err } func (r *RelayerSet) List(_ context.Context, relayIDs ...types.RelayID) (map[types.RelayID]core.Relayer, error) { - if len(relayIDs) == 0 { return r.wrappedRelayers, nil } @@ -72,7 +70,6 @@ type relayerWrapper struct { } func (r relayerWrapper) NewPluginProvider(ctx context.Context, rargs core.RelayArgs, pargs core.PluginArgs) (types.PluginProvider, error) { - relayArgs := types.RelayArgs{ ExternalJobID: r.ExternalJobID, JobID: r.JobID, diff --git a/core/services/ocr2/plugins/generic/relayerset_test.go b/core/services/ocr2/plugins/generic/relayerset_test.go index 9aef7e29d78..021a15c6eff 100644 --- a/core/services/ocr2/plugins/generic/relayerset_test.go +++ b/core/services/ocr2/plugins/generic/relayerset_test.go @@ -15,7 +15,6 @@ import ( ) func TestRelayerSet_List(t *testing.T) { - testRelayersMap := map[types.RelayID]loop.Relayer{} testRelayersMap[types.RelayID{Network: "N1", ChainID: "C1"}] = &TestRelayer{} testRelayersMap[types.RelayID{Network: "N2", ChainID: "C2"}] = &TestRelayer{} @@ -43,7 +42,6 @@ func TestRelayerSet_List(t *testing.T) { } func TestRelayerSet_Get(t *testing.T) { - testRelayersMap := map[types.RelayID]loop.Relayer{} testRelayersMap[types.RelayID{Network: "N1", ChainID: "C1"}] = &TestRelayer{} testRelayersMap[types.RelayID{Network: "N2", ChainID: "C2"}] = &TestRelayer{} diff --git a/core/services/ocr2/plugins/llo/config/config.go b/core/services/ocr2/plugins/llo/config/config.go index 15bb5e816a8..1a6e528980b 100644 --- a/core/services/ocr2/plugins/llo/config/config.go +++ b/core/services/ocr2/plugins/llo/config/config.go @@ -99,7 +99,6 @@ func validateKeyBundleIDs(keyBundleIDs map[string]string) error { if !chaintype.IsSupportedChainType(chaintype.ChainType(k)) { return fmt.Errorf("llo: KeyBundleIDs: key must be a supported chain type, got: %s", k) } - } return nil } diff --git a/core/services/ocr2/plugins/mercury/config/config_test.go b/core/services/ocr2/plugins/mercury/config/config_test.go 
index cc7c6a82e36..5beba287133 100644 --- a/core/services/ocr2/plugins/mercury/config/config_test.go +++ b/core/services/ocr2/plugins/mercury/config/config_test.go @@ -134,7 +134,6 @@ func Test_PluginConfig(t *testing.T) { require.NotNil(t, mc.NativeFeedID) assert.Equal(t, "0x00026b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472", (*mc.LinkFeedID).String()) assert.Equal(t, "0x00036b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472", (*mc.NativeFeedID).String()) - }) t.Run("with invalid values", func(t *testing.T) { diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/abi_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/abi_test.go index c63c0d00f33..7bbbf4d7e32 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/abi_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/abi_test.go @@ -37,7 +37,6 @@ func TestUnpackTransmitTxInput(t *testing.T) { } func TestUnpackTransmitTxInputErrors(t *testing.T) { - tests := []struct { Name string RawData string diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/encoder_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/encoder_test.go index 76212892657..f77b823bb36 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/encoder_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/encoder_test.go @@ -218,5 +218,4 @@ func TestEVMAutomationEncoder20(t *testing.T) { assert.Errorf(t, err, "pack failed: failed to pack report data") assert.Len(t, b, 0) }) - } diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go index 9fc2d7891f2..da4dd17d96f 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go @@ -399,7 +399,6 @@ func (r *EvmRegistry) registerEvents(chainID uint64, addr common.Address) error } func (r *EvmRegistry) processUpkeepStateLog(l logpoller.Log) error { - hash := l.TxHash.String() if _, ok := r.txHashes[hash]; ok { return nil diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go index 8108f1a3466..46314dde418 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go @@ -239,7 +239,6 @@ func TestIntegration_LogEventProvider_Backfill(t *testing.T) { for _, tc := range tests { bufferVersion, limitLow := tc.bufferVersion, tc.logLimit t.Run(tc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(testutils.Context(t), time.Second*60) defer cancel() diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index e2c1a1531e2..633188c8396 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -167,7 +167,6 @@ func (p *logEventProvider) WithBufferVersion(v BufferVersion) { func (p *logEventProvider) Start(context.Context) error { return p.StartOnce(LogProviderServiceName, func() error { - readQ := make(chan []*big.Int, readJobQueueSize) p.lggr.Infow("starting log event provider", "readInterval", p.opts.ReadInterval, "readMaxBatchSize", 
readMaxBatchSize, "readers", readerThreads) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go index 57da895403e..c5bc047e8f4 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go @@ -288,7 +288,6 @@ func TestLogEventProvider_ReadLogs(t *testing.T) { }) // TODO: test rate limiting - } func newEntry(p *logEventProvider, i int, args ...string) (LogTriggerConfig, upkeepFilter) { diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/services.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/services.go index 5fe21b08724..8cdcd53ade3 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/services.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/services.go @@ -12,7 +12,6 @@ type AutomationServices interface { } func New(keyring ocrtypes.OnchainKeyring) (AutomationServices, error) { - services := new(automationServices) services.keyring = NewOnchainKeyringV3Wrapper(keyring) diff --git a/core/services/ocr2/plugins/ocr2keeper/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/integration_test.go index 1054c59dd1c..29e56460c36 100644 --- a/core/services/ocr2/plugins/ocr2keeper/integration_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/integration_test.go @@ -427,7 +427,6 @@ func setupForwarderForNode( backend *backends.SimulatedBackend, recipient common.Address, linkAddr common.Address) common.Address { - faddr, _, authorizedForwarder, err := authorized_forwarder.DeployAuthorizedForwarder(caller, backend, linkAddr, caller.From, recipient, []byte{}) require.NoError(t, err) diff --git a/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator.go b/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator.go index e7688556124..1cb2d613d31 100644 --- a/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator.go +++ b/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator.go @@ -479,7 +479,6 @@ func (c *coordinator) getBlockhashesMappingFromRequests( currentHeight uint64, recentBlockHashesStartHeight uint64, ) (blockhashesMapping map[uint64]common.Hash, err error) { - // Get all request + callback requests into a mapping. rawBlocksRequested := make(map[uint64]struct{}) for _, l := range randomnessRequestedLogs { @@ -586,7 +585,6 @@ func (c *coordinator) filterUnfulfilledCallbacks( currentHeight uint64, currentBatchGasLimit int64, ) (callbacks []ocr2vrftypes.AbstractCostedCallbackRequest) { - /** * Callback batch ordering: * - Callbacks are first ordered by beacon output + confirmation delay (ascending), in other words @@ -663,7 +661,6 @@ func (c *coordinator) filterEligibleCallbacks( currentHeight uint64, blockhashesMapping map[uint64]common.Hash, ) (callbacks []*vrf_wrapper.VRFCoordinatorRandomnessFulfillmentRequested, unfulfilled []block, err error) { - for _, r := range randomnessFulfillmentRequestedLogs { // The on-chain machinery will revert requests that specify an unsupported // confirmation delay, so this is more of a sanity check than anything else. 
@@ -711,7 +708,6 @@ func (c *coordinator) filterEligibleRandomnessRequests( currentHeight uint64, blockhashesMapping map[uint64]common.Hash, ) (unfulfilled []block, err error) { - for _, r := range randomnessRequestedLogs { // The on-chain machinery will revert requests that specify an unsupported // confirmation delay, so this is more of a sanity check than anything else. diff --git a/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator_test.go b/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator_test.go index beee01eaf7a..e5913b0a844 100644 --- a/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator_test.go +++ b/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator_test.go @@ -1298,7 +1298,6 @@ func TestCoordinator_ReportIsOnchain(t *testing.T) { assert.NoError(t, err) assert.False(t, present) }) - } func TestCoordinator_ConfirmationDelays(t *testing.T) { diff --git a/core/services/ocr2/plugins/ocr2vrf/coordinator/ocr_cache.go b/core/services/ocr2/plugins/ocr2vrf/coordinator/ocr_cache.go index bd249c6bc27..0dab659258e 100644 --- a/core/services/ocr2/plugins/ocr2vrf/coordinator/ocr_cache.go +++ b/core/services/ocr2/plugins/ocr2vrf/coordinator/ocr_cache.go @@ -27,7 +27,6 @@ type cacheItem[T any] struct { // NewBlockCache constructs a new cache. func NewBlockCache[T any](evictionWindow time.Duration) *ocrCache[T] { - // Construct cache cleaner to evict old items. cleaner := &intervalCacheCleaner[T]{ interval: evictionWindow, @@ -50,7 +49,6 @@ func NewBlockCache[T any](evictionWindow time.Duration) *ocrCache[T] { // AddItem adds an item to the cache. func (l *ocrCache[T]) CacheItem(item T, itemKey common.Hash, timeStored time.Time) { - // Construct new item to be stored. newItem := &cacheItem[T]{ item: item, @@ -72,7 +70,6 @@ func (l *ocrCache[T]) SetEvictonWindow(newWindow time.Duration) { // AddItem adds an item to the cache. func (l *ocrCache[T]) GetItem(itemKey common.Hash) (item *T) { - // Lock, and defer unlock. l.cacheMu.Lock() defer l.cacheMu.Unlock() @@ -90,7 +87,6 @@ func (l *ocrCache[T]) GetItem(itemKey common.Hash) (item *T) { // EvictExpiredItems removes all expired items stored in the cache. func (l *ocrCache[T]) EvictExpiredItems(currentTime time.Time) { - // Lock, and defer unlock. 
l.cacheMu.Lock() defer l.cacheMu.Unlock() diff --git a/core/services/ocr2/plugins/ocr2vrf/coordinator/ocr_cache_test.go b/core/services/ocr2/plugins/ocr2vrf/coordinator/ocr_cache_test.go index 57aaf1c5e03..b4be43420b4 100644 --- a/core/services/ocr2/plugins/ocr2vrf/coordinator/ocr_cache_test.go +++ b/core/services/ocr2/plugins/ocr2vrf/coordinator/ocr_cache_test.go @@ -16,7 +16,6 @@ func TestNewCache(t *testing.T) { func TestCache(t *testing.T) { t.Run("Happy path, no overwrites.", func(t *testing.T) { - now := time.Now().UTC() tests := []struct { @@ -65,7 +64,6 @@ func TestCache(t *testing.T) { }) t.Run("Happy path, override middle item.", func(t *testing.T) { - now := time.Now().UTC() tests := []struct { @@ -105,7 +103,6 @@ func TestCache(t *testing.T) { }) t.Run("Happy path, override last item.", func(t *testing.T) { - now := time.Now().UTC() tests := []struct { diff --git a/core/services/ocr2/plugins/ocr2vrf/internal/ocr2vrf_integration_test.go b/core/services/ocr2/plugins/ocr2vrf/internal/ocr2vrf_integration_test.go index 2e1e15fd058..8087591a123 100644 --- a/core/services/ocr2/plugins/ocr2vrf/internal/ocr2vrf_integration_test.go +++ b/core/services/ocr2/plugins/ocr2vrf/internal/ocr2vrf_integration_test.go @@ -713,7 +713,6 @@ linkEthFeedAddress = "%s" // First arg is the request ID, which starts at zero, second is the index into // the random words. gomega.NewWithT(t).Eventually(func() bool { - var errs []error rw1, err2 := uni.consumer.SReceivedRandomnessByRequestID(nil, redemptionRequestID, big.NewInt(0)) t.Logf("TestRedeemRandomness 1st word err: %+v", err2) diff --git a/core/services/ocr2/plugins/ocr2vrf/reportserializer/report_serializer.go b/core/services/ocr2/plugins/ocr2vrf/reportserializer/report_serializer.go index e4112b55438..b33c749589f 100644 --- a/core/services/ocr2/plugins/ocr2vrf/reportserializer/report_serializer.go +++ b/core/services/ocr2/plugins/ocr2vrf/reportserializer/report_serializer.go @@ -25,7 +25,6 @@ func NewReportSerializer(encryptionGroup kyber.Group) types.ReportSerializer { // SerializeReport serializes an abstract report into abi-encoded bytes. 
func (serializer *reportSerializer) SerializeReport(r types.AbstractReport) ([]byte, error) { - packed, err := serializer.e.SerializeReport(r) if err != nil { diff --git a/core/services/ocr2/plugins/s4/plugin_test.go b/core/services/ocr2/plugins/s4/plugin_test.go index 6321b8ce867..12fac8a070b 100644 --- a/core/services/ocr2/plugins/s4/plugin_test.go +++ b/core/services/ocr2/plugins/s4/plugin_test.go @@ -219,7 +219,6 @@ func TestPlugin_ShouldAcceptFinalizedReport(t *testing.T) { assert.False(t, should) assert.Equal(t, 10, len(ormRows)) compareRows(t, rows, ormRows) - }) t.Run("error", func(t *testing.T) { diff --git a/core/services/ocrcommon/adapters_test.go b/core/services/ocrcommon/adapters_test.go index 6c13ac85f15..669e015e7bc 100644 --- a/core/services/ocrcommon/adapters_test.go +++ b/core/services/ocrcommon/adapters_test.go @@ -111,7 +111,6 @@ type fakeContractTransmitter struct { } func (f fakeContractTransmitter) Transmit(ctx context.Context, rc ocrtypes.ReportContext, report ocrtypes.Report, s []ocrtypes.AttributedOnchainSignature) error { - if !reflect.DeepEqual(report, rwi.Report) { return fmt.Errorf("expected Report %v but got %v", rwi.Report, report) } diff --git a/core/services/ocrcommon/block_translator_test.go b/core/services/ocrcommon/block_translator_test.go index dd900681592..cd80c73898f 100644 --- a/core/services/ocrcommon/block_translator_test.go +++ b/core/services/ocrcommon/block_translator_test.go @@ -34,7 +34,6 @@ func Test_BlockTranslator(t *testing.T) { from, to := bt.NumberToQueryRange(ctx, 42) assert.Equal(t, big.NewInt(42), from) assert.Equal(t, big.NewInt(42), to) - }) t.Run("for arbitrum, returns the ArbitrumBlockTranslator", func(t *testing.T) { diff --git a/core/services/ocrcommon/data_source.go b/core/services/ocrcommon/data_source.go index cb544e01639..336074f81f8 100644 --- a/core/services/ocrcommon/data_source.go +++ b/core/services/ocrcommon/data_source.go @@ -384,7 +384,6 @@ func (ds *inMemoryDataSourceCache) Observe(ctx context.Context, timestamp ocr2ty if time.Since(ds.latestTrrs.GetTaskRunResultsFinishedAt()) >= ds.stalenessAlertThreshold { ds.lggr.Errorf("in memory cache is old and hasn't been updated for over %v, latestUpdateErr is: %v", ds.stalenessAlertThreshold, ds.latestUpdateErr) } - } return ds.parse(latestResult) } diff --git a/core/services/ocrcommon/data_source_test.go b/core/services/ocrcommon/data_source_test.go index a62852eaced..1626976b5be 100644 --- a/core/services/ocrcommon/data_source_test.go +++ b/core/services/ocrcommon/data_source_test.go @@ -190,7 +190,6 @@ func Test_InMemoryDataSourceWithProm(t *testing.T) { assert.Equal(t, jsonParseTaskValue, val.String()) // returns expected value after pipeline run assert.Equal(t, cast.ToFloat64(jsonParseTaskValue), promtestutil.ToFloat64(ocrcommon.PromOcrMedianValues)) assert.Equal(t, cast.ToFloat64(jsonParseTaskValue), promtestutil.ToFloat64(ocrcommon.PromBridgeJsonParseValues)) - } type mockSaver struct { diff --git a/core/services/ocrcommon/discoverer_database_test.go b/core/services/ocrcommon/discoverer_database_test.go index 23d5ad661a4..30fb02a8265 100644 --- a/core/services/ocrcommon/discoverer_database_test.go +++ b/core/services/ocrcommon/discoverer_database_test.go @@ -80,7 +80,6 @@ func Test_DiscovererDatabase(t *testing.T) { require.NoError(t, err) assert.Len(t, announcements, 1) assert.Equal(t, []byte{4, 5, 6}, announcements["remote1"]) - }) } diff --git a/core/services/ocrcommon/peer_wrapper_test.go b/core/services/ocrcommon/peer_wrapper_test.go index 
a47ed19ec56..8c4f26cdc03 100644 --- a/core/services/ocrcommon/peer_wrapper_test.go +++ b/core/services/ocrcommon/peer_wrapper_test.go @@ -128,7 +128,6 @@ func Test_SingletonPeerWrapper_Close(t *testing.T) { } c.P2P.V2.ListenAddresses = ptr(p2paddresses) c.P2P.V2.AnnounceAddresses = ptr(p2paddresses) - }) pw := ocrcommon.NewSingletonPeerWrapper(keyStore, cfg.P2P(), cfg.OCR(), db, logger.TestLogger(t)) diff --git a/core/services/ocrcommon/telemetry_test.go b/core/services/ocrcommon/telemetry_test.go index e7a59622d97..f764e7380f8 100644 --- a/core/services/ocrcommon/telemetry_test.go +++ b/core/services/ocrcommon/telemetry_test.go @@ -177,7 +177,6 @@ func TestParseEATelemetry(t *testing.T) { } func TestGetJsonParsedValue(t *testing.T) { - resp := getJsonParsedValue(trrs[0], &trrs) assert.Equal(t, 123456.123456789, *resp) @@ -187,7 +186,6 @@ func TestGetJsonParsedValue(t *testing.T) { resp = getJsonParsedValue(trrs[1], &trrs) assert.Nil(t, resp) - } func TestSendEATelemetry(t *testing.T) { @@ -303,7 +301,6 @@ func TestGetObservation(t *testing.T) { } obs = e.getObservation(finalResult) assert.Equal(t, obs, int64(123456)) - } func TestCollectAndSend(t *testing.T) { @@ -529,7 +526,6 @@ func TestGetPricesFromResults(t *testing.T) { } func TestShouldCollectEnhancedTelemetryMercury(t *testing.T) { - j := job.Job{ Type: job.Type(pipeline.OffchainReporting2JobType), OCR2OracleSpec: &job.OCR2OracleSpec{ diff --git a/core/services/ocrcommon/transmitter.go b/core/services/ocrcommon/transmitter.go index 423db2316a7..bba54334c97 100644 --- a/core/services/ocrcommon/transmitter.go +++ b/core/services/ocrcommon/transmitter.go @@ -46,7 +46,6 @@ func NewTransmitter( chainID *big.Int, keystore roundRobinKeystore, ) (Transmitter, error) { - // Ensure that a keystore is provided. if keystore == nil { return nil, errors.New("nil keystore provided to transmitter") @@ -65,7 +64,6 @@ func NewTransmitter( } func (t *transmitter) CreateEthTransaction(ctx context.Context, toAddress common.Address, payload []byte, txMeta *txmgr.TxMeta) error { - roundRobinFromAddress, err := t.keystore.GetRoundRobinAddress(ctx, t.chainID, t.fromAddresses...) 
if err != nil { return errors.Wrap(err, "skipped OCR transmission, error getting round-robin address") diff --git a/core/services/periodicbackup/backup_test.go b/core/services/periodicbackup/backup_test.go index 99581e62720..b9ca9476a06 100644 --- a/core/services/periodicbackup/backup_test.go +++ b/core/services/periodicbackup/backup_test.go @@ -118,7 +118,6 @@ func TestPeriodicBackup_AlternativeOutputDir(t *testing.T) { assert.Greater(t, file.Size(), int64(0)) assert.Contains(t, result.path, "/alternative/cl_backup_0.9.9.dump") - } type testConfig struct { diff --git a/core/services/pg/connection_test.go b/core/services/pg/connection_test.go index b10625a82c9..c4314bfb309 100644 --- a/core/services/pg/connection_test.go +++ b/core/services/pg/connection_test.go @@ -83,5 +83,4 @@ func Test_disallowReplica(t *testing.T) { _, err = db.Exec("SET session_replication_role= 'not_valid_role'") require.Error(t, err) - } diff --git a/core/services/pg/stats.go b/core/services/pg/stats.go index abbc920e0a4..b8b1ed68401 100644 --- a/core/services/pg/stats.go +++ b/core/services/pg/stats.go @@ -91,7 +91,6 @@ func NewStatsReporter(fn StatFn, lggr logger.Logger, opts ...StatsReporterOpt) * } func (r *StatsReporter) Start(ctx context.Context) { - startOnce := func() { r.wg.Add(1) r.lggr.Debug("Starting DB stat reporter") diff --git a/core/services/pg/stats_test.go b/core/services/pg/stats_test.go index 08d4b4221aa..76a8b426fd8 100644 --- a/core/services/pg/stats_test.go +++ b/core/services/pg/stats_test.go @@ -61,7 +61,6 @@ func TestStatReporter(t *testing.T) { {name: "mutli_start", testFn: testMultiStart}, {name: "multi_stop", testFn: testMultiStop}, } { - t.Run(scenario.name, func(t *testing.T) { d := newtestDbStater(t, scenario.name) d.Mock.On("Stats").Return(sql.DBStats{}) diff --git a/core/services/pipeline/common_eth.go b/core/services/pipeline/common_eth.go index d0b11d9006d..b17ca385e50 100644 --- a/core/services/pipeline/common_eth.go +++ b/core/services/pipeline/common_eth.go @@ -195,7 +195,6 @@ func convertToETHABIType(val interface{}, abiType abi.Type) (interface{}, error) case abi.TupleTy: return convertToETHABITuple(abiType, srcVal) - } return nil, errors.Wrapf(ErrBadInput, "cannot convert %v to %v", srcVal.Type(), abiType) } diff --git a/core/services/pipeline/common_http.go b/core/services/pipeline/common_http.go index d787025dc28..492cc8f8f76 100644 --- a/core/services/pipeline/common_http.go +++ b/core/services/pipeline/common_http.go @@ -24,7 +24,6 @@ func makeHTTPRequest( client *http.Client, httpLimit int64, ) ([]byte, int, http.Header, time.Duration, error) { - var bodyReader io.Reader if requestData != nil { bodyBytes, err := json.Marshal(requestData) diff --git a/core/services/pipeline/common_test.go b/core/services/pipeline/common_test.go index f94167d723c..ce545ec14a0 100644 --- a/core/services/pipeline/common_test.go +++ b/core/services/pipeline/common_test.go @@ -319,5 +319,4 @@ func TestGetNextTaskOf(t *testing.T) { nextTask = trrs.GetNextTaskOf(*nextTask) assert.Empty(t, nextTask) - } diff --git a/core/services/pipeline/graph_test.go b/core/services/pipeline/graph_test.go index 2dea29b8b3e..b3960bb1f46 100644 --- a/core/services/pipeline/graph_test.go +++ b/core/services/pipeline/graph_test.go @@ -265,5 +265,4 @@ func TestParse(t *testing.T) { assert.Error(t, err) }) } - } diff --git a/core/services/pipeline/orm.go b/core/services/pipeline/orm.go index 0a96a7e08d5..06774e06e99 100644 --- a/core/services/pipeline/orm.go +++ b/core/services/pipeline/orm.go @@ -516,7 +516,6 @@ 
func (o *orm) insertFinishedRun(ctx context.Context, run *Run, saveSuccessfulTas VALUES (:pipeline_run_id, :id, :type, :index, :output, :error, :dot_id, :created_at, :finished_at);` _, err = o.ds.NamedExecContext(ctx, sql, run.PipelineTaskRuns) return errors.Wrap(err, "failed to insert pipeline_task_runs") - } // DeleteRunsOlderThan deletes all pipeline_runs that have been finished for a certain threshold to free DB space diff --git a/core/services/pipeline/orm_test.go b/core/services/pipeline/orm_test.go index 6ff32e15cc7..8c99635c8d1 100644 --- a/core/services/pipeline/orm_test.go +++ b/core/services/pipeline/orm_test.go @@ -238,7 +238,6 @@ func TestInsertFinishedRuns(t *testing.T) { err = orm.InsertFinishedRuns(ctx, runs, true) require.NoError(t, err) - } func Test_PipelineORM_InsertFinishedRunWithSpec(t *testing.T) { @@ -471,7 +470,6 @@ func Test_PipelineORM_StoreRun_DetectsRestarts(t *testing.T) { ds1 := run.ByDotID("ds1") require.Equal(t, ds1.Output.Val, int64(2)) require.True(t, ds1.FinishedAt.Valid) - } func Test_PipelineORM_StoreRun_UpdateTaskRunResult(t *testing.T) { diff --git a/core/services/pipeline/runner.go b/core/services/pipeline/runner.go index 2de27b3d008..020ac37f28e 100644 --- a/core/services/pipeline/runner.go +++ b/core/services/pipeline/runner.go @@ -575,7 +575,6 @@ func (r *runner) ExecuteAndInsertFinishedRun(ctx context.Context, spec Spec, var return 0, trrs, pkgerrors.Wrapf(err, "error inserting finished results for spec ID %v", run.PipelineSpecID) } return run.ID, trrs, nil - } func (r *runner) Run(ctx context.Context, run *Run, l logger.Logger, saveSuccessfulTaskRuns bool, fn func(tx sqlutil.DataSource) error) (incomplete bool, err error) { diff --git a/core/services/pipeline/runner_test.go b/core/services/pipeline/runner_test.go index 44d7acadd27..dddc84e7368 100644 --- a/core/services/pipeline/runner_test.go +++ b/core/services/pipeline/runner_test.go @@ -607,7 +607,6 @@ func Test_PipelineRunner_AsyncJob_Basic(t *testing.T) { w.Header().Set("X-Chainlink-Pending", "true") response := map[string]interface{}{} require.NoError(t, json.NewEncoder(w).Encode(response)) - }) // 1. Setup bridge @@ -739,7 +738,6 @@ func Test_PipelineRunner_AsyncJob_InstantRestart(t *testing.T) { w.Header().Set("X-Chainlink-Pending", "true") response := map[string]interface{}{} require.NoError(t, json.NewEncoder(w).Encode(response)) - }) // 1. 
Setup bridge diff --git a/core/services/pipeline/scheduler.go b/core/services/pipeline/scheduler.go index 7663ed948ff..b589c9a7449 100644 --- a/core/services/pipeline/scheduler.go +++ b/core/services/pipeline/scheduler.go @@ -269,7 +269,6 @@ func (s *scheduler) Run() { s.waiting++ } } - } close(s.taskCh) diff --git a/core/services/pipeline/scheduler_test.go b/core/services/pipeline/scheduler_test.go index 1d7da59da9d..eaedfb453a2 100644 --- a/core/services/pipeline/scheduler_test.go +++ b/core/services/pipeline/scheduler_test.go @@ -165,6 +165,5 @@ func TestScheduler(t *testing.T) { } test.assertion(t, *p, s.results) - } } diff --git a/core/services/pipeline/task.bridge_test.go b/core/services/pipeline/task.bridge_test.go index e95aef4984c..626820a682d 100644 --- a/core/services/pipeline/task.bridge_test.go +++ b/core/services/pipeline/task.bridge_test.go @@ -464,7 +464,6 @@ func TestBridgeTask_AsyncJobPendingState(t *testing.T) { // w.Header().Set("X-Chainlink-Pending", "true") response := map[string]interface{}{"pending": true} require.NoError(t, json.NewEncoder(w).Encode(response)) - }) server := httptest.NewServer(handler) @@ -672,7 +671,6 @@ func TestBridgeTask_Variables(t *testing.T) { if test.expectedErrorContains != "" { require.Contains(t, result.Error.Error(), test.expectedErrorContains) } - } else { require.NoError(t, result.Error) require.NotNil(t, result.Value) @@ -942,7 +940,6 @@ func TestAdapterResponse_UnmarshalJSON_Happy(t *testing.T) { } func TestBridgeTask_Headers(t *testing.T) { - db := pgtest.NewSqlxDB(t) cfg := configtest.NewTestGeneralConfig(t) @@ -983,7 +980,6 @@ func TestBridgeTask_Headers(t *testing.T) { standardHeaders := []string{"Content-Length", "38", "Content-Type", "application/json", "User-Agent", "Go-http-client/1.1"} t.Run("sends headers", func(t *testing.T) { - task := pipeline.BridgeTask{ BaseTask: pipeline.NewBaseTask(0, "bridge", nil, nil, 0), Name: bridge.Name.String(), @@ -1027,7 +1023,6 @@ func TestBridgeTask_Headers(t *testing.T) { }) t.Run("allows to override content-type", func(t *testing.T) { - task := pipeline.BridgeTask{ BaseTask: pipeline.NewBaseTask(0, "bridge", nil, nil, 0), Name: bridge.Name.String(), diff --git a/core/services/pipeline/task.eth_tx_test.go b/core/services/pipeline/task.eth_tx_test.go index c38c338df20..5b9beeb43e1 100644 --- a/core/services/pipeline/task.eth_tx_test.go +++ b/core/services/pipeline/task.eth_tx_test.go @@ -72,7 +72,6 @@ func TestETHTxTask(t *testing.T) { pipeline.NewVarsFrom(nil), nil, func(keyStore *keystoremocks.Eth, txManager *txmmocks.MockEvmTxManager) { - data := []byte("foobar") gasLimit := uint64(12345) jobID := int32(321) @@ -394,7 +393,6 @@ func TestETHTxTask(t *testing.T) { }), nil, func(keyStore *keystoremocks.Eth, txManager *txmmocks.MockEvmTxManager) { - keyStore.On("GetRoundRobinAddress", mock.Anything, testutils.FixtureChainID).Return(nil, errors.New("uh oh")) }, nil, pipeline.ErrTaskRunFailed, "while querying keystore", pipeline.RunInfo{IsRetryable: true}, diff --git a/core/services/pipeline/task.http_test.go b/core/services/pipeline/task.http_test.go index 4098ce50d2a..8d1571e6333 100644 --- a/core/services/pipeline/task.http_test.go +++ b/core/services/pipeline/task.http_test.go @@ -192,7 +192,6 @@ func TestHTTPTask_Variables(t *testing.T) { if test.expectedErrorContains != "" { require.Contains(t, result.Error.Error(), test.expectedErrorContains) } - } else { require.NoError(t, result.Error) require.NotNil(t, result.Value) diff --git a/core/services/pipeline/task_object_params.go 
b/core/services/pipeline/task_object_params.go index 9bcc0d62dc9..8760ede780a 100644 --- a/core/services/pipeline/task_object_params.go +++ b/core/services/pipeline/task_object_params.go @@ -124,7 +124,6 @@ func (o *ObjectParam) UnmarshalPipelineParam(val interface{}) error { o.StringValue = v.StringValue o.DecimalValue = v.DecimalValue return nil - } return fmt.Errorf("bad input for task: %T", val) diff --git a/core/services/pipeline/task_params.go b/core/services/pipeline/task_params.go index 61d3b8650ad..12582f6ef49 100644 --- a/core/services/pipeline/task_params.go +++ b/core/services/pipeline/task_params.go @@ -510,7 +510,6 @@ func (m *MapParam) UnmarshalPipelineParam(val interface{}) error { *m = v.MapValue return nil } - } return errors.Wrapf(ErrBadInput, "expected map, got %T", val) diff --git a/core/services/pipeline/task_params_test.go b/core/services/pipeline/task_params_test.go index 299736cbbc8..88ec0455481 100644 --- a/core/services/pipeline/task_params_test.go +++ b/core/services/pipeline/task_params_test.go @@ -82,7 +82,6 @@ func TestStringSliceParam_UnmarshalPipelineParam(t *testing.T) { } }) } - } func TestBytesParam_UnmarshalPipelineParam(t *testing.T) { diff --git a/core/services/relay/evm/config_poller_test.go b/core/services/relay/evm/config_poller_test.go index 4778c983c9c..8c02c4e2e7e 100644 --- a/core/services/relay/evm/config_poller_test.go +++ b/core/services/relay/evm/config_poller_test.go @@ -387,7 +387,6 @@ func setConfig(t *testing.T, pluginConfig median.OffchainConfig, ocrContract *oc } func addConfig(t *testing.T, user *bind.TransactOpts, configStoreContract *ocrconfigurationstoreevmsimple.OCRConfigurationStoreEVMSimple, config ocrconfigurationstoreevmsimple.OCRConfigurationStoreEVMSimpleConfigurationEVMSimple) { - _, err := configStoreContract.AddConfig(user, config) require.NoError(t, err) } diff --git a/core/services/relay/evm/evm.go b/core/services/relay/evm/evm.go index 585d20df3ab..81816c34368 100644 --- a/core/services/relay/evm/evm.go +++ b/core/services/relay/evm/evm.go @@ -162,7 +162,6 @@ func (r *Relayer) HealthReport() (report map[string]error) { } func (r *Relayer) NewPluginProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.PluginProvider, error) { - // TODO https://smartcontract-it.atlassian.net/browse/BCF-2887 ctx := context.Background() @@ -256,7 +255,6 @@ func (r *Relayer) NewMercuryProvider(rargs commontypes.RelayArgs, pargs commonty } func (r *Relayer) NewLLOProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.LLOProvider, error) { - // TODO https://smartcontract-it.atlassian.net/browse/BCF-2887 ctx := context.Background() @@ -317,7 +315,6 @@ func (r *Relayer) NewLLOProvider(rargs commontypes.RelayArgs, pargs commontypes. 
} func (r *Relayer) NewFunctionsProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.FunctionsProvider, error) { - // TODO https://smartcontract-it.atlassian.net/browse/BCF-2887 ctx := context.Background() @@ -422,7 +419,6 @@ func newConfigWatcher(lggr logger.Logger, replayCtx: replayCtx, replayCancel: replayCancel, } - } func (c *configWatcher) Name() string { diff --git a/core/services/relay/evm/functions/contract_transmitter.go b/core/services/relay/evm/functions/contract_transmitter.go index 4a8ba25fd9d..23143ed3ef1 100644 --- a/core/services/relay/evm/functions/contract_transmitter.go +++ b/core/services/relay/evm/functions/contract_transmitter.go @@ -120,7 +120,6 @@ func NewFunctionsContractTransmitter( } func (oc *contractTransmitter) createEthTransaction(ctx context.Context, toAddress common.Address, payload []byte) error { - roundRobinFromAddress, err := oc.keystore.GetRoundRobinAddress(ctx, oc.chainID, oc.fromAddresses...) if err != nil { return errors.Wrap(err, "skipped OCR transmission, error getting round-robin address") diff --git a/core/services/relay/evm/mercury/orm_test.go b/core/services/relay/evm/mercury/orm_test.go index 2b2e15ffd53..f928acdb538 100644 --- a/core/services/relay/evm/mercury/orm_test.go +++ b/core/services/relay/evm/mercury/orm_test.go @@ -148,7 +148,6 @@ func TestORM(t *testing.T) { transmissions, err = orm.GetTransmitRequests(ctx, sURL2, jobID) require.NoError(t, err) require.Len(t, transmissions, 1) - } func TestORM_PruneTransmitRequests(t *testing.T) { diff --git a/core/services/relay/evm/mercury/wsrpc/pool.go b/core/services/relay/evm/mercury/wsrpc/pool.go index 94c48736f5d..0bd49ddb5ea 100644 --- a/core/services/relay/evm/mercury/wsrpc/pool.go +++ b/core/services/relay/evm/mercury/wsrpc/pool.go @@ -181,7 +181,6 @@ func (p *pool) remove(serverURL string, clientPubKey credentials.StaticSizedPubl if len(p.connections[serverURL]) == 0 { delete(p.connections, serverURL) } - } func (p *pool) newConnection(lggr logger.Logger, clientPrivKey csakey.KeyV2, serverPubKey []byte, serverURL string) *connection { diff --git a/core/services/relay/evm/ocr2keeper.go b/core/services/relay/evm/ocr2keeper.go index a839ce8430e..d272ce5aa86 100644 --- a/core/services/relay/evm/ocr2keeper.go +++ b/core/services/relay/evm/ocr2keeper.go @@ -81,7 +81,6 @@ func NewOCR2KeeperRelayer(ds sqlutil.DataSource, chain legacyevm.Chain, lggr log } func (r *ocr2keeperRelayer) NewOCR2KeeperProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (OCR2KeeperProvider, error) { - // TODO https://smartcontract-it.atlassian.net/browse/BCF-2887 ctx := context.Background() diff --git a/core/services/relay/evm/ocr2vrf.go b/core/services/relay/evm/ocr2vrf.go index b83ce0fd81e..3f9fb11bfc9 100644 --- a/core/services/relay/evm/ocr2vrf.go +++ b/core/services/relay/evm/ocr2vrf.go @@ -56,7 +56,6 @@ func NewOCR2VRFRelayer(chain legacyevm.Chain, lggr logger.Logger, ethKeystore ke } func (r *ocr2vrfRelayer) NewDKGProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (DKGProvider, error) { - // TODO https://smartcontract-it.atlassian.net/browse/BCF-2887 ctx := context.Background() @@ -83,7 +82,6 @@ func (r *ocr2vrfRelayer) NewDKGProvider(rargs commontypes.RelayArgs, pargs commo } func (r *ocr2vrfRelayer) NewOCR2VRFProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (OCR2VRFProvider, error) { - // TODO https://smartcontract-it.atlassian.net/browse/BCF-2887 ctx := context.Background() diff --git a/core/services/relay/evm/relayer_extender.go 
b/core/services/relay/evm/relayer_extender.go index 5f49a0b16c9..f262948c9c5 100644 --- a/core/services/relay/evm/relayer_extender.go +++ b/core/services/relay/evm/relayer_extender.go @@ -139,7 +139,6 @@ func NewChainRelayerExtenders(ctx context.Context, opts legacyevm.ChainRelayExte var result []*ChainRelayerExt var err error for i := range enabled { - cid := enabled[i].ChainID.String() privOpts := legacyevm.ChainRelayExtenderConfig{ Logger: opts.Logger.Named(cid), diff --git a/core/services/relay/evm/relayer_extender_test.go b/core/services/relay/evm/relayer_extender_test.go index b9a6433c3a7..f2bc3a33a93 100644 --- a/core/services/relay/evm/relayer_extender_test.go +++ b/core/services/relay/evm/relayer_extender_test.go @@ -66,5 +66,4 @@ func TestChainRelayExtenders(t *testing.T) { s, err := relayExt.GetChainStatus(testutils.Context(t)) assert.NotEmpty(t, s) assert.NoError(t, err) - } diff --git a/core/services/relay/evm/types/size_helper_test.go b/core/services/relay/evm/types/size_helper_test.go index 202269a4536..bdacd4271f5 100644 --- a/core/services/relay/evm/types/size_helper_test.go +++ b/core/services/relay/evm/types/size_helper_test.go @@ -159,7 +159,6 @@ func TestGetMaxSize(t *testing.T) { {big.NewInt(3), true}, } runSizeTest(t, anyNumElements, args, arg1) - }) t.Run("Bytes pack themselves", func(t *testing.T) { @@ -243,7 +242,6 @@ func TestGetMaxSize(t *testing.T) { } func runSizeTest(t *testing.T, n int, args abi.Arguments, params ...any) { - actual, err := types.GetMaxSize(n, args) require.NoError(t, err) diff --git a/core/services/signatures/ethdss/ethdss_test.go b/core/services/signatures/ethdss/ethdss_test.go index e1991147181..014f0f94186 100644 --- a/core/services/signatures/ethdss/ethdss_test.go +++ b/core/services/signatures/ethdss/ethdss_test.go @@ -266,7 +266,6 @@ func _genDistSecret() []*dkg.DistKeyShare { dkss[i] = dks } return dkss - } func genDistSecret(checkValidPublicKey bool) []*dkg.DistKeyShare { diff --git a/core/services/signatures/secp256k1/field_test.go b/core/services/signatures/secp256k1/field_test.go index 463e363e7e4..0914c6390df 100644 --- a/core/services/signatures/secp256k1/field_test.go +++ b/core/services/signatures/secp256k1/field_test.go @@ -109,7 +109,6 @@ func TestField_Clone(t *testing.T) { assert.Equal(t, f, g, "clone output does not equal original") g.Add(f, f) assert.Equal(t, f, h, "clone does not make a copy") - } func TestField_SetBytesAndBytes(t *testing.T) { diff --git a/core/services/synchronization/telemetry_ingress_client.go b/core/services/synchronization/telemetry_ingress_client.go index 75aa3106a8c..dc4ced31d09 100644 --- a/core/services/synchronization/telemetry_ingress_client.go +++ b/core/services/synchronization/telemetry_ingress_client.go @@ -127,7 +127,6 @@ func (tc *telemetryIngressClient) connect(clientPrivKey []byte) { // Wait for close <-tc.chDone - }() } diff --git a/core/services/synchronization/telemetry_ingress_client_test.go b/core/services/synchronization/telemetry_ingress_client_test.go index e7e14eda748..407051ff19b 100644 --- a/core/services/synchronization/telemetry_ingress_client_test.go +++ b/core/services/synchronization/telemetry_ingress_client_test.go @@ -22,7 +22,6 @@ import ( ) func TestTelemetryIngressClient_Send_HappyPath(t *testing.T) { - // Create mocks telemClient := mocks.NewTelemClient(t) csaKeystore := new(ksmocks.CSA) diff --git a/core/services/telemetry/manager.go b/core/services/telemetry/manager.go index 228a997eeca..a65759a5c62 100644 --- a/core/services/telemetry/manager.go +++ 
b/core/services/telemetry/manager.go @@ -97,7 +97,6 @@ func (m *Manager) HealthReport() map[string]error { // GenMonitoringEndpoint creates a new monitoring endpoints based on the existing available endpoints defined in the core config TOML, if no endpoint for the network and chainID exists, a NOOP agent will be used and the telemetry will not be sent func (m *Manager) GenMonitoringEndpoint(network string, chainID string, contractID string, telemType synchronization.TelemetryType) commontypes.MonitoringEndpoint { - e, found := m.getEndpoint(network, chainID) if !found { @@ -110,7 +109,6 @@ func (m *Manager) GenMonitoringEndpoint(network string, chainID string, contract } return NewIngressAgent(e.client, network, chainID, contractID, telemType) - } func (m *Manager) addEndpoint(e config.TelemetryIngressEndpoint) error { diff --git a/core/services/telemetry/manager_test.go b/core/services/telemetry/manager_test.go index 9b83ef08234..4e55cb75752 100644 --- a/core/services/telemetry/manager_test.go +++ b/core/services/telemetry/manager_test.go @@ -64,7 +64,6 @@ func TestManagerAgents(t *testing.T) { } func TestNewManager(t *testing.T) { - type endpointTest struct { network string chainID string @@ -181,7 +180,6 @@ func TestNewManager(t *testing.T) { } require.Equal(t, true, found, "cannot find log: %s", e.expectedError) } - } require.Equal(t, "TelemetryManager", m.Name()) @@ -246,7 +244,6 @@ func TestCorrectEndpointRouting(t *testing.T) { Network: e.network, client: clientMock, } - } //Unknown networks or chainID noopEndpoint := tm.GenMonitoringEndpoint("unknown-network", "unknown-chainID", "some-contractID", "some-type") @@ -282,5 +279,4 @@ func TestCorrectEndpointRouting(t *testing.T) { require.Equal(t, telemType, string(clientSent[i].TelemType)) require.Equal(t, []byte(e.chainID), clientSent[i].Telemetry) } - } diff --git a/core/services/vrf/v2/coordinator_v2x_interface.go b/core/services/vrf/v2/coordinator_v2x_interface.go index 9389f12b9f8..31621562588 100644 --- a/core/services/vrf/v2/coordinator_v2x_interface.go +++ b/core/services/vrf/v2/coordinator_v2x_interface.go @@ -157,7 +157,6 @@ func (c *coordinatorV2) Version() vrfcommon.Version { func (c *coordinatorV2) RegisterProvingKey(opts *bind.TransactOpts, oracle *common.Address, publicProvingKey [2]*big.Int, maxGasPrice *uint64) (*types.Transaction, error) { if maxGasPrice != nil { return nil, fmt.Errorf("max gas price not supported for registering proving key in v2") - } return c.coordinator.RegisterProvingKey(opts, *oracle, publicProvingKey) } diff --git a/core/services/vrf/v2/reverted_txns.go b/core/services/vrf/v2/reverted_txns.go index cfd9954a208..846ba347bc7 100644 --- a/core/services/vrf/v2/reverted_txns.go +++ b/core/services/vrf/v2/reverted_txns.go @@ -111,7 +111,6 @@ func (lsn *listenerV2) fetchRecentSingleTxns(ctx context.Context, ds sqlutil.DataSource, chainID uint64, pollPeriod time.Duration) ([]TxnReceiptDB, error) { - // (state = 'confirmed' OR state = 'unconfirmed') sqlQuery := fmt.Sprintf(` WITH already_ff as ( @@ -234,7 +233,6 @@ func (lsn *listenerV2) fetchRevertedForceFulfilmentTxns(ctx context.Context, ds sqlutil.DataSource, chainID uint64, pollPeriod time.Duration) ([]TxnReceiptDB, error) { - sqlQuery := fmt.Sprintf(` WITH txes AS ( SELECT * @@ -416,7 +414,6 @@ func (lsn *listenerV2) postSqlLog(ctx context.Context, begin time.Time, pollPeri func (lsn *listenerV2) filterRevertedTxns(ctx context.Context, recentReceipts []TxnReceiptDB) []RevertedVRFTxn { - revertedVRFTxns := make([]RevertedVRFTxn, 0) for _, 
txnReceipt := range recentReceipts { switch txnReceipt.ToAddress.Hex() { @@ -471,7 +468,6 @@ func (lsn *listenerV2) filterRevertedTxns(ctx context.Context, func (lsn *listenerV2) filterSingleRevertedTxn(ctx context.Context, txnReceiptDB TxnReceiptDB) ( *RevertedVRFTxn, error) { - requestID := common.HexToHash(txnReceiptDB.RequestID).Big() commitment, err := lsn.coordinator.GetCommitment(&bind.CallOpts{Context: ctx}, requestID) if err != nil { diff --git a/core/services/workflows/engine_test.go b/core/services/workflows/engine_test.go index 212ad37367e..1ad7a3c2ae2 100644 --- a/core/services/workflows/engine_test.go +++ b/core/services/workflows/engine_test.go @@ -474,7 +474,6 @@ func mockAction() (*mockCapability, values.Value) { nil, ), func(req capabilities.CapabilityRequest) (capabilities.CapabilityResponse, error) { - return capabilities.CapabilityResponse{ Value: outputs, }, nil diff --git a/core/services/workflows/models_yaml_test.go b/core/services/workflows/models_yaml_test.go index efcdaf6f332..5fa326dda5d 100644 --- a/core/services/workflows/models_yaml_test.go +++ b/core/services/workflows/models_yaml_test.go @@ -240,7 +240,6 @@ func TestParsesIntsCorrectly(t *testing.T) { require.NoError(t, err) assert.Equal(t, int64(3600), n.Config["aggregation_config"].(map[string]any)["0x1111111111111111111100000000000000000000000000000000000000000000"].(map[string]any)["heartbeat"]) - } func TestMappingCustomType(t *testing.T) { diff --git a/core/store/migrate/migrate_test.go b/core/store/migrate/migrate_test.go index 8169368eb1f..8a7d1628a4c 100644 --- a/core/store/migrate/migrate_test.go +++ b/core/store/migrate/migrate_test.go @@ -336,7 +336,6 @@ ON jobs.offchainreporting2_oracle_spec_id = ocr2.id` require.Equal(t, jobIdAndContractId{ID: 30, ContractID: "evm_187246hr3781h9fd198fh391g8f924"}, jobsAndContracts[1]) require.Equal(t, jobIdAndContractId{ID: 10, ContractID: "terra_187246hr3781h9fd198fh391g8f924"}, jobsAndContracts[2]) require.Equal(t, jobIdAndContractId{ID: 20, ContractID: "sol_187246hr3781h9fd198fh391g8f924"}, jobsAndContracts[3]) - } func TestMigrate_101_GenericOCR2(t *testing.T) { @@ -518,7 +517,6 @@ func TestNoTriggers(t *testing.T) { _, db := heavyweight.FullTestDBEmptyV2(t, nil) assert_num_triggers := func(expected int) { - row := db.DB.QueryRow("select count(*) from information_schema.triggers") var count int err := row.Scan(&count) @@ -536,7 +534,6 @@ func TestNoTriggers(t *testing.T) { err := goose.UpTo(db.DB, migrationDir, int64(v)) require.NoError(t, err) assert_num_triggers(1) - } func BenchmarkBackfillingRecordsWithMigration202(b *testing.B) { diff --git a/core/store/models/errors.go b/core/store/models/errors.go index 6feddd96c03..d11caa466d5 100644 --- a/core/store/models/errors.go +++ b/core/store/models/errors.go @@ -56,7 +56,6 @@ func (jae *JSONAPIErrors) Merge(e error) { return } jae.Add(e.Error()) - } // CoerceEmptyToNil will return nil if JSONAPIErrors has no errors. 
diff --git a/core/utils/collection_test.go b/core/utils/collection_test.go index 20bfb2c6261..f956192d3a0 100644 --- a/core/utils/collection_test.go +++ b/core/utils/collection_test.go @@ -53,8 +53,6 @@ func TestBatchSplit(t *testing.T) { } // assert order has not changed when list is reconstructed assert.Equal(t, r.input, temp) - }) } - } diff --git a/core/utils/deferable_write_closer.go b/core/utils/deferable_write_closer.go index e27b6d91d6e..eebea80f822 100644 --- a/core/utils/deferable_write_closer.go +++ b/core/utils/deferable_write_closer.go @@ -49,7 +49,6 @@ func NewDeferableWriteCloser(wc io.WriteCloser) *DeferableWriteCloser { // Should be called explicitly AND defered // Thread safe func (wc *DeferableWriteCloser) Close() error { - wc.mu.Lock() defer wc.mu.Unlock() if !wc.closed { @@ -57,5 +56,4 @@ func (wc *DeferableWriteCloser) Close() error { wc.closed = true } return wc.closeErr - } diff --git a/core/utils/deferable_write_closer_test.go b/core/utils/deferable_write_closer_test.go index d12ff1c40cb..ef03acf9d66 100644 --- a/core/utils/deferable_write_closer_test.go +++ b/core/utils/deferable_write_closer_test.go @@ -10,7 +10,6 @@ import ( ) func TestDeferableWriteCloser_Close(t *testing.T) { - d := t.TempDir() f, err := os.Create(filepath.Join(d, "test-file")) require.NoError(t, err) diff --git a/core/utils/utils.go b/core/utils/utils.go index 78151517c62..d076284112f 100644 --- a/core/utils/utils.go +++ b/core/utils/utils.go @@ -479,7 +479,6 @@ func NewRedialBackoff() backoff.Backoff { Max: 15 * time.Second, Jitter: true, } - } // KeyedMutex allows to lock based on particular values diff --git a/core/utils/utils_test.go b/core/utils/utils_test.go index c08983ff4b8..587bd46efb9 100644 --- a/core/utils/utils_test.go +++ b/core/utils/utils_test.go @@ -549,5 +549,4 @@ func TestErrorBuffer(t *testing.T) { combined := buff.Flush() require.Nil(t, combined) }) - } diff --git a/core/web/common.go b/core/web/common.go index 66159d8b60a..36c169bc748 100644 --- a/core/web/common.go +++ b/core/web/common.go @@ -16,7 +16,6 @@ var ( ) func getChain(legacyChains legacyevm.LegacyChainContainer, chainIDstr string) (chain legacyevm.Chain, err error) { - if chainIDstr != "" && chainIDstr != "" { // evm keys are expected to be parsable as a big int _, ok := big.NewInt(0).SetString(chainIDstr, 10) diff --git a/core/web/cors_test.go b/core/web/cors_test.go index fcd5d9b3874..c08e11bc125 100644 --- a/core/web/cors_test.go +++ b/core/web/cors_test.go @@ -55,7 +55,6 @@ func TestCors_OverrideOrigins(t *testing.T) { for _, test := range tests { t.Run(test.origin, func(t *testing.T) { - config := configtest.NewGeneralConfig(t, func(c *chainlink.Config, s *chainlink.Secrets) { c.WebServer.AllowOrigins = ptr(test.allow) }) diff --git a/core/web/cosmos_chains_controller_test.go b/core/web/cosmos_chains_controller_test.go index 9aaa0dd9eeb..2d5eb42515a 100644 --- a/core/web/cosmos_chains_controller_test.go +++ b/core/web/cosmos_chains_controller_test.go @@ -165,7 +165,6 @@ func Test_CosmosChainsController_Index(t *testing.T) { tomlB, err := chainB.TOMLString() require.NoError(t, err) assert.Equal(t, tomlB, chains[0].Config) - } type TestCosmosChainsController struct { diff --git a/core/web/dkgencrypt_keys_controller_test.go b/core/web/dkgencrypt_keys_controller_test.go index fde00eb6420..0f4344fdc23 100644 --- a/core/web/dkgencrypt_keys_controller_test.go +++ b/core/web/dkgencrypt_keys_controller_test.go @@ -93,7 +93,6 @@ func TestDKGEncryptKeysController_Delete_HappyPath(t *testing.T) { afterKeys, err := 
keyStore.DKGEncrypt().GetAll() assert.NoError(t, err) assert.Equal(t, initialLength-1, len(afterKeys)) - } func setupDKGEncryptKeysControllerTests(t *testing.T) (cltest.HTTPClientCleaner, keystore.Master) { diff --git a/core/web/dkgsign_keys_controller_test.go b/core/web/dkgsign_keys_controller_test.go index b253a36ccc3..01b8489c0b8 100644 --- a/core/web/dkgsign_keys_controller_test.go +++ b/core/web/dkgsign_keys_controller_test.go @@ -93,7 +93,6 @@ func TestDKGSignKeysController_Delete_HappyPath(t *testing.T) { afterKeys, err := keyStore.DKGSign().GetAll() assert.NoError(t, err) assert.Equal(t, initialLength-1, len(afterKeys)) - } func setupDKGSignKeysControllerTests(t *testing.T) (cltest.HTTPClientCleaner, keystore.Master) { diff --git a/core/web/eth_keys_controller.go b/core/web/eth_keys_controller.go index e53f30a925a..043362ff441 100644 --- a/core/web/eth_keys_controller.go +++ b/core/web/eth_keys_controller.go @@ -115,7 +115,6 @@ func (ekc *ETHKeysController) Index(c *gin.Context) { }) jsonAPIResponseWithStatus(c, resources, "keys", http.StatusOK) - } // Create adds a new account @@ -362,7 +361,6 @@ func (ekc *ETHKeysController) getEthBalance(ctx context.Context, state ethkey.St } return bal - } func (ekc *ETHKeysController) setLinkBalance(bal *commonassets.Link) presenters.NewETHKeyOption { diff --git a/core/web/eth_keys_controller_test.go b/core/web/eth_keys_controller_test.go index 34cde6f6a64..9cb6a27b434 100644 --- a/core/web/eth_keys_controller_test.go +++ b/core/web/eth_keys_controller_test.go @@ -73,11 +73,9 @@ func TestETHKeysController_Index_Success(t *testing.T) { if balance.Address == expectedKeys[0].Address.Hex() { assert.Equal(t, "0.000000000000000256", balance.EthBalance.String()) assert.Equal(t, "256", balance.LinkBalance.String()) - } else { assert.Equal(t, "0.000000000000000001", balance.EthBalance.String()) assert.Equal(t, "1", balance.LinkBalance.String()) - } } } diff --git a/core/web/evm_chains_controller_test.go b/core/web/evm_chains_controller_test.go index 157978bdd46..ab8bf35e6cb 100644 --- a/core/web/evm_chains_controller_test.go +++ b/core/web/evm_chains_controller_test.go @@ -106,7 +106,6 @@ func Test_EVMChainsController_Index(t *testing.T) { // sort test chain ids to make expected comparison easy chainIDs := []*big.Int{testutils.NewRandomEVMChainID(), testutils.NewRandomEVMChainID(), testutils.NewRandomEVMChainID()} sort.Slice(chainIDs, func(i, j int) bool { - return chainIDs[i].String() < chainIDs[j].String() }) diff --git a/core/web/evm_forwarders_controller_test.go b/core/web/evm_forwarders_controller_test.go index 38e8c2f91f0..cacab870717 100644 --- a/core/web/evm_forwarders_controller_test.go +++ b/core/web/evm_forwarders_controller_test.go @@ -100,7 +100,6 @@ func Test_EVMForwardersController_Index(t *testing.T) { }, } for _, fwdr := range fwdrs { - body, err := json.Marshal(web.TrackEVMForwarderRequest{ EVMChainID: chainId, Address: fwdr.Address, diff --git a/core/web/jobs_controller_test.go b/core/web/jobs_controller_test.go index 0146038d91b..8aaae0d5ba3 100644 --- a/core/web/jobs_controller_test.go +++ b/core/web/jobs_controller_test.go @@ -323,7 +323,6 @@ func TestJobController_Create_HappyPath(t *testing.T) { return fmt.Sprintf(testspecs.FluxMonitorSpecTemplate, nameAndExternalJobID, nameAndExternalJobID) }, assertion: func(t *testing.T, nameAndExternalJobID string, r *http.Response) { - require.Equal(t, http.StatusInternalServerError, r.StatusCode) errs := cltest.ParseJSONAPIErrors(t, r.Body) diff --git a/core/web/lca_controller.go 
b/core/web/lca_controller.go index bb4866c3d08..0c3a065f2fd 100644 --- a/core/web/lca_controller.go +++ b/core/web/lca_controller.go @@ -48,7 +48,6 @@ func (bdc *LCAController) FindLCA(c *gin.Context) { EVMChainID: big.New(chainID), } jsonAPIResponse(c, &response, "response") - } type LCAResponse struct { diff --git a/core/web/log_controller_test.go b/core/web/log_controller_test.go index 28c54b72450..61e75e8bb48 100644 --- a/core/web/log_controller_test.go +++ b/core/web/log_controller_test.go @@ -53,7 +53,6 @@ func TestLogController_GetLogConfig(t *testing.T) { require.Equal(t, "warn", svcLogConfig.DefaultLogLevel) for i, svcName := range svcLogConfig.ServiceName { - if svcName == "Global" { assert.Equal(t, zapcore.WarnLevel.String(), svcLogConfig.LogLevel[i]) } @@ -130,7 +129,6 @@ func TestLogController_PatchLogConfig(t *testing.T) { require.NoError(t, cltest.ParseJSONAPIResponse(t, resp, &svcLogConfig)) for i, svcName := range svcLogConfig.ServiceName { - if svcName == "Global" { assert.Equal(t, tc.expectedLogLevel.String(), svcLogConfig.LogLevel[i]) } diff --git a/core/web/loop_registry.go b/core/web/loop_registry.go index b94778675e0..ffa80146449 100644 --- a/core/web/loop_registry.go +++ b/core/web/loop_registry.go @@ -70,7 +70,6 @@ func (l *LoopRegistryServer) discoveryHandler(w http.ResponseWriter, req *http.R w.WriteHeader(http.StatusInternalServerError) l.logger.Error(err) } - } func metricTarget(hostName string, port int, path string) *targetgroup.Group { diff --git a/core/web/resolver/chain_test.go b/core/web/resolver/chain_test.go index a0f2ca22b07..5e51356d928 100644 --- a/core/web/resolver/chain_test.go +++ b/core/web/resolver/chain_test.go @@ -77,7 +77,6 @@ ResendAfterThreshold = '1h0m0s' name: "success", authenticated: true, before: func(f *gqlTestFramework) { - chainConf := evmtoml.EVMConfig{ ChainID: &chainID, Enabled: chain.Enabled, @@ -94,7 +93,6 @@ ResendAfterThreshold = '1h0m0s' Config: chainConfToml, }}, }}) - }, query: query, result: fmt.Sprintf(` @@ -117,7 +115,6 @@ ResendAfterThreshold = '1h0m0s' authenticated: true, before: func(f *gqlTestFramework) { f.App.On("GetRelayers").Return(&chainlinkmocks.FakeRelayerChainInteroperators{Relayers: []loop.Relayer{}}) - }, query: query, result: ` diff --git a/core/web/resolver/helpers.go b/core/web/resolver/helpers.go index 8c9ac09c943..b2ec3ab0709 100644 --- a/core/web/resolver/helpers.go +++ b/core/web/resolver/helpers.go @@ -83,7 +83,6 @@ func ValidateBridgeType(bt *bridges.BridgeTypeRequest) error { } if bt.MinimumContractPayment != nil && bt.MinimumContractPayment.Cmp(assets.NewLinkFromJuels(0)) < 0 { - return errors.New("MinimumContractPayment must be positive") } diff --git a/core/web/resolver/job_proposal_spec_test.go b/core/web/resolver/job_proposal_spec_test.go index c65702c5622..5875a5acb69 100644 --- a/core/web/resolver/job_proposal_spec_test.go +++ b/core/web/resolver/job_proposal_spec_test.go @@ -160,7 +160,6 @@ func TestResolver_CancelJobProposalSpec(t *testing.T) { f.Mocks.feedsSvc.On("GetSpec", mock.Anything, specID).Return(&feeds.JobProposalSpec{ ID: specID, }, nil) - }, query: mutation, variables: variables, @@ -349,7 +348,6 @@ func TestResolver_UpdateJobProposalSpecDefinition(t *testing.T) { before: func(f *gqlTestFramework) { f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) f.Mocks.feedsSvc.On("UpdateSpecDefinition", mock.Anything, specID, "").Return(sql.ErrNoRows) - }, query: mutation, variables: variables, diff --git a/core/web/resolver/node_test.go b/core/web/resolver/node_test.go index 
62e964a6820..e103a470097 100644 --- a/core/web/resolver/node_test.go +++ b/core/web/resolver/node_test.go @@ -57,7 +57,6 @@ func TestResolver_Nodes(t *testing.T) { }}, }, }) - }, query: query, result: ` diff --git a/core/web/router.go b/core/web/router.go index 158ea4b411f..9c5cb4b661d 100644 --- a/core/web/router.go +++ b/core/web/router.go @@ -228,7 +228,6 @@ func loopRoutes(app chainlink.Application, r *gin.RouterGroup) { loopRegistry := NewLoopRegistryServer(app) r.GET("/discovery", ginHandlerFromHTTP(loopRegistry.discoveryHandler)) r.GET("/plugins/:name/metrics", loopRegistry.pluginMetricHandler) - } func v2Routes(app chainlink.Application, r *gin.RouterGroup) { diff --git a/tools/flakeytests/runner.go b/tools/flakeytests/runner.go index a37b123d5cf..d4c6451a945 100644 --- a/tools/flakeytests/runner.go +++ b/tools/flakeytests/runner.go @@ -232,7 +232,6 @@ func (r *Runner) runTests(rep *Report) (*Report, error) { report.SetTest(pkg, t, 1) } } - } } @@ -292,7 +291,6 @@ func dedupeEntries(report *Report) (*Report, error) { out.SetTest(pkg, tn, report.tests[pkg][tn]) } - } return out, nil From 700a82719451611381ab5dbb94fe00547660440b Mon Sep 17 00:00:00 2001 From: Christopher Dimitri Sastropranoto Date: Tue, 7 May 2024 17:56:34 +0700 Subject: [PATCH 9/9] implement remove nodes (#13102) --- .changeset/tiny-rocks-shake.md | 5 + contracts/.changeset/stupid-horses-promise.md | 5 + .../src/v0.8/keystone/CapabilityRegistry.sol | 24 +++ .../CapabilityRegistry_RemoveNodesTest.t.sol | 98 ++++++++++++ .../keystone_capability_registry.go | 147 +++++++++++++++++- ...rapper-dependency-versions-do-not-edit.txt | 2 +- 6 files changed, 278 insertions(+), 3 deletions(-) create mode 100644 .changeset/tiny-rocks-shake.md create mode 100644 contracts/.changeset/stupid-horses-promise.md create mode 100644 contracts/src/v0.8/keystone/test/CapabilityRegistry_RemoveNodesTest.t.sol diff --git a/.changeset/tiny-rocks-shake.md b/.changeset/tiny-rocks-shake.md new file mode 100644 index 00000000000..d6311632f08 --- /dev/null +++ b/.changeset/tiny-rocks-shake.md @@ -0,0 +1,5 @@ +--- +"chainlink": patch +--- + +#internal generate geth wrappers for capability registry remove nodes diff --git a/contracts/.changeset/stupid-horses-promise.md b/contracts/.changeset/stupid-horses-promise.md new file mode 100644 index 00000000000..b7de55939d8 --- /dev/null +++ b/contracts/.changeset/stupid-horses-promise.md @@ -0,0 +1,5 @@ +--- +"@chainlink/contracts": patch +--- + +implement remove nodes on capability registry diff --git a/contracts/src/v0.8/keystone/CapabilityRegistry.sol b/contracts/src/v0.8/keystone/CapabilityRegistry.sol index 6ac4caedf50..60989fce61a 100644 --- a/contracts/src/v0.8/keystone/CapabilityRegistry.sol +++ b/contracts/src/v0.8/keystone/CapabilityRegistry.sol @@ -106,6 +106,10 @@ contract CapabilityRegistry is OwnerIsCreator, TypeAndVersionInterface { /// @param nodeOperatorId The ID of the node operator that manages this node event NodeAdded(bytes32 p2pId, uint256 nodeOperatorId); + /// @notice This event is emitted when a node is removed + /// @param p2pId The P2P ID of the node that was removed + event NodeRemoved(bytes32 p2pId); + /// @notice This event is emitted when a node is updated /// @param p2pId The P2P ID of the node /// @param nodeOperatorId The ID of the node operator that manages this node @@ -269,6 +273,26 @@ contract CapabilityRegistry is OwnerIsCreator, TypeAndVersionInterface { } } + /// @notice Removes nodes. 
The node operator admin or contract owner + /// can remove nodes + /// @param removedNodeP2PIds The P2P Ids of the nodes to remove + function removeNodes(bytes32[] calldata removedNodeP2PIds) external { + bool isOwner = msg.sender == owner(); + for (uint256 i; i < removedNodeP2PIds.length; ++i) { + bytes32 p2pId = removedNodeP2PIds[i]; + Node memory node = s_nodes[p2pId]; + + bool nodeExists = s_nodes[p2pId].supportedHashedCapabilityIds.length > 0; + if (!nodeExists) revert InvalidNodeP2PId(p2pId); + + NodeOperator memory nodeOperator = s_nodeOperators[node.nodeOperatorId]; + + if (!isOwner && msg.sender != nodeOperator.admin) revert AccessForbidden(); + delete s_nodes[p2pId]; + emit NodeRemoved(p2pId); + } + } + /// @notice Updates nodes. The node admin can update the node's signer address /// and reconfigure its supported capabilities /// @param nodes The nodes to update diff --git a/contracts/src/v0.8/keystone/test/CapabilityRegistry_RemoveNodesTest.t.sol b/contracts/src/v0.8/keystone/test/CapabilityRegistry_RemoveNodesTest.t.sol new file mode 100644 index 00000000000..d432a8aed04 --- /dev/null +++ b/contracts/src/v0.8/keystone/test/CapabilityRegistry_RemoveNodesTest.t.sol @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {BaseTest} from "./BaseTest.t.sol"; +import {CapabilityRegistry} from "../CapabilityRegistry.sol"; + +contract CapabilityRegistry_RemoveNodesTest is BaseTest { + event NodeRemoved(bytes32 p2pId); + + uint256 private constant TEST_NODE_OPERATOR_ONE_ID = 0; + uint256 private constant TEST_NODE_OPERATOR_TWO_ID = 1; + bytes32 private constant INVALID_P2P_ID = bytes32("fake-p2p"); + + function setUp() public override { + BaseTest.setUp(); + changePrank(ADMIN); + s_capabilityRegistry.addNodeOperators(_getNodeOperators()); + s_capabilityRegistry.addCapability(s_basicCapability); + s_capabilityRegistry.addCapability(s_capabilityWithConfigurationContract); + + CapabilityRegistry.Node[] memory nodes = new CapabilityRegistry.Node[](1); + bytes32[] memory hashedCapabilityIds = new bytes32[](2); + hashedCapabilityIds[0] = s_basicHashedCapabilityId; + hashedCapabilityIds[1] = s_capabilityWithConfigurationContractId; + + nodes[0] = CapabilityRegistry.Node({ + nodeOperatorId: TEST_NODE_OPERATOR_ONE_ID, + p2pId: P2P_ID, + signer: NODE_OPERATOR_ONE_SIGNER_ADDRESS, + supportedHashedCapabilityIds: hashedCapabilityIds + }); + + changePrank(NODE_OPERATOR_ONE_ADMIN); + + s_capabilityRegistry.addNodes(nodes); + } + + function test_RevertWhen_CalledByNonNodeOperatorAdminAndNonOwner() public { + changePrank(STRANGER); + bytes32[] memory nodes = new bytes32[](1); + nodes[0] = P2P_ID; + + vm.expectRevert(CapabilityRegistry.AccessForbidden.selector); + s_capabilityRegistry.removeNodes(nodes); + } + + function test_RevertWhen_NodeDoesNotExist() public { + changePrank(NODE_OPERATOR_ONE_ADMIN); + bytes32[] memory nodes = new bytes32[](1); + nodes[0] = INVALID_P2P_ID; + + vm.expectRevert(abi.encodeWithSelector(CapabilityRegistry.InvalidNodeP2PId.selector, INVALID_P2P_ID)); + s_capabilityRegistry.removeNodes(nodes); + } + + function test_RevertWhen_P2PIDEmpty() public { + changePrank(NODE_OPERATOR_ONE_ADMIN); + bytes32[] memory nodes = new bytes32[](1); + nodes[0] = bytes32(""); + + vm.expectRevert(abi.encodeWithSelector(CapabilityRegistry.InvalidNodeP2PId.selector, bytes32(""))); + s_capabilityRegistry.removeNodes(nodes); + } + + function test_RemovesNode() public { + changePrank(NODE_OPERATOR_ONE_ADMIN); + + bytes32[] memory nodes = new bytes32[](1); + nodes[0] = 
P2P_ID; + + vm.expectEmit(address(s_capabilityRegistry)); + emit NodeRemoved(P2P_ID); + s_capabilityRegistry.removeNodes(nodes); + + CapabilityRegistry.Node memory node = s_capabilityRegistry.getNode(P2P_ID); + assertEq(node.nodeOperatorId, 0); + assertEq(node.p2pId, bytes32("")); + assertEq(node.signer, address(0)); + assertEq(node.supportedHashedCapabilityIds.length, 0); + } + + function test_OwnerCanRemoveNodes() public { + changePrank(ADMIN); + + bytes32[] memory nodes = new bytes32[](1); + nodes[0] = P2P_ID; + + vm.expectEmit(address(s_capabilityRegistry)); + emit NodeRemoved(P2P_ID); + s_capabilityRegistry.removeNodes(nodes); + + CapabilityRegistry.Node memory node = s_capabilityRegistry.getNode(P2P_ID); + assertEq(node.nodeOperatorId, 0); + assertEq(node.p2pId, bytes32("")); + assertEq(node.signer, address(0)); + assertEq(node.supportedHashedCapabilityIds.length, 0); + } +} diff --git a/core/gethwrappers/keystone/generated/keystone_capability_registry/keystone_capability_registry.go b/core/gethwrappers/keystone/generated/keystone_capability_registry/keystone_capability_registry.go index 9080fbd7807..afa4dd13812 100644 --- a/core/gethwrappers/keystone/generated/keystone_capability_registry/keystone_capability_registry.go +++ b/core/gethwrappers/keystone/generated/keystone_capability_registry/keystone_capability_registry.go @@ -50,8 +50,8 @@ type CapabilityRegistryNodeOperator struct { } var CapabilityRegistryMetaData = &bind.MetaData{ - ABI: "[{\"inputs\":[],\"name\":\"AccessForbidden\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"hashedCapabilityId\",\"type\":\"bytes32\"}],\"name\":\"CapabilityAlreadyDeprecated\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CapabilityAlreadyExists\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"hashedCapabilityId\",\"type\":\"bytes32\"}],\"name\":\"CapabilityDoesNotExist\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"proposedConfigurationContract\",\"type\":\"address\"}],\"name\":\"InvalidCapabilityConfigurationContractInterface\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32[]\",\"name\":\"hashedCapabilityIds\",\"type\":\"bytes32[]\"}],\"name\":\"InvalidNodeCapabilities\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidNodeOperatorAdmin\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"p2pId\",\"type\":\"bytes32\"}],\"name\":\"InvalidNodeP2PId\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidNodeSigner\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"lengthOne\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"lengthTwo\",\"type\":\"uint256\"}],\"name\":\"LengthMismatch\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"hashedCapabilityId\",\"type\":\"bytes32\"}],\"name\":\"CapabilityAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"hashedCapabilityId\",\"type\":\"bytes32\"}],\"name\":\"CapabilityDeprecated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"p2pId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"}],\"name\":\"NodeAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"},{\"indexed\":true,\"internal
Type\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"}],\"name\":\"NodeOperatorAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"}],\"name\":\"NodeOperatorRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"}],\"name\":\"NodeOperatorUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"p2pId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"signer\",\"type\":\"address\"}],\"name\":\"NodeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"labelledName\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"version\",\"type\":\"bytes32\"},{\"internalType\":\"enumCapabilityRegistry.CapabilityResponseType\",\"name\":\"responseType\",\"type\":\"uint8\"},{\"internalType\":\"address\",\"name\":\"configurationContract\",\"type\":\"address\"}],\"internalType\":\"structCapabilityRegistry.Capability\",\"name\":\"capability\",\"type\":\"tuple\"}],\"name\":\"addCapability\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"}],\"internalType\":\"structCapabilityRegistry.NodeOperator[]\",\"name\":\"nodeOperators\",\"type\":\"tuple[]\"}],\"name\":\"addNodeOperators\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"p2pId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"signer\",\"type\":\"address\"},{\"internalType\":\"bytes32[]\",\"name\":\"supportedHashedCapabilityIds\",\"type\":\"bytes32[]\"}],\"internalType\":\"structCapabilityRegistry.Node[]\",\"name\":\"nodes\",\"type\":\"tuple[]\"}],\"name\":\"addNodes\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"hashedCapabilityId\",\"type\":\"bytes32\"}],\"name\":\"deprecateCapability\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCapabilities\",\"outputs\":[{\"components\":[{\"internalType\":\"bytes
32\",\"name\":\"labelledName\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"version\",\"type\":\"bytes32\"},{\"internalType\":\"enumCapabilityRegistry.CapabilityResponseType\",\"name\":\"responseType\",\"type\":\"uint8\"},{\"internalType\":\"address\",\"name\":\"configurationContract\",\"type\":\"address\"}],\"internalType\":\"structCapabilityRegistry.Capability[]\",\"name\":\"\",\"type\":\"tuple[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"hashedId\",\"type\":\"bytes32\"}],\"name\":\"getCapability\",\"outputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"labelledName\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"version\",\"type\":\"bytes32\"},{\"internalType\":\"enumCapabilityRegistry.CapabilityResponseType\",\"name\":\"responseType\",\"type\":\"uint8\"},{\"internalType\":\"address\",\"name\":\"configurationContract\",\"type\":\"address\"}],\"internalType\":\"structCapabilityRegistry.Capability\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"labelledName\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"version\",\"type\":\"bytes32\"}],\"name\":\"getHashedCapabilityId\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"p2pId\",\"type\":\"bytes32\"}],\"name\":\"getNode\",\"outputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"p2pId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"signer\",\"type\":\"address\"},{\"internalType\":\"bytes32[]\",\"name\":\"supportedHashedCapabilityIds\",\"type\":\"bytes32[]\"}],\"internalType\":\"structCapabilityRegistry.Node\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"}],\"name\":\"getNodeOperator\",\"outputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"}],\"internalType\":\"structCapabilityRegistry.NodeOperator\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"hashedCapabilityId\",\"type\":\"bytes32\"}],\"name\":\"isCapabilityDeprecated\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"nodeOperatorIds\",\"type\":\"uint256[]\"}],\"name\":\"removeNodeOperators\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"nodeOperatorIds\",\"type
\":\"uint256[]\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"}],\"internalType\":\"structCapabilityRegistry.NodeOperator[]\",\"name\":\"nodeOperators\",\"type\":\"tuple[]\"}],\"name\":\"updateNodeOperators\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"p2pId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"signer\",\"type\":\"address\"},{\"internalType\":\"bytes32[]\",\"name\":\"supportedHashedCapabilityIds\",\"type\":\"bytes32[]\"}],\"internalType\":\"structCapabilityRegistry.Node[]\",\"name\":\"nodes\",\"type\":\"tuple[]\"}],\"name\":\"updateNodes\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", - Bin: "0x60806040523480156200001157600080fd5b503380600081620000695760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b03848116919091179091558116156200009c576200009c81620000a5565b50505062000150565b336001600160a01b03821603620000ff5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000060565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6125e680620001606000396000f3fe608060405234801561001057600080fd5b506004361061011b5760003560e01c806365c14dc7116100b2578063ae3c241c11610081578063c2d483a111610066578063c2d483a1146102d3578063ddbe4f82146102e6578063f2fde38b146102fb57600080fd5b8063ae3c241c146102ad578063b38e51f6146102c057600080fd5b806365c14dc71461023d57806379ba50971461025d5780638da5cb5b146102655780639cb7c5f41461028d57600080fd5b80631cdf6343116100ee5780631cdf6343146101af57806336b402fb146101c2578063398f37731461020a57806350c946fe1461021d57600080fd5b80630c5801e314610120578063117392ce146101355780631257001114610148578063181f5a7714610170575b600080fd5b61013361012e366004611b8d565b61030e565b005b610133610143366004611bf9565b61061f565b61015b610156366004611c11565b61086a565b60405190151581526020015b60405180910390f35b604080518082018252601881527f4361706162696c697479526567697374727920312e302e300000000000000000602082015290516101679190611c8e565b6101336101bd366004611ca1565b61087d565b6101fc6101d0366004611ce3565b604080516020808201949094528082019290925280518083038201815260609092019052805191012090565b604051908152602001610167565b610133610218366004611ca1565b610940565b61023061022b366004611c11565b610ad9565b6040516101679190611d05565b61025061024b366004611c11565b610ba1565b6040516101679190611d8b565b610133610c7e565b60005460405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610167565b6102a061029b366004611c11565b610d7b565b6040516101679190611e6d565b6101336102bb366004611c11565b610e25565b6101336102ce366004611ca1565b610ef0565b6101336102e1366004611ca1565b61130c565b6102ee6116ed565b6040516101679190611e7b565b610133610309366004611eeb565b611832565b828114610356576040517fab8b67c600000000000000000000000000000000000000000000000000000000815260048101849052602481018290526044015b60405180910390fd5b6000805473ffffffffffffffffffffffffffffffffffffffff16905b8481101561061757600086868381811061038e5761038e611f08565b90506020020135905060008585848181106103ab576103ab611f08565b90506020028101906103b
d9190611f37565b6103c69061203f565b805190915073ffffffffffffffffffffffffffffffffffffffff16610417576040517feeacd93900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b805173ffffffffffffffffffffffffffffffffffffffff16331480159061045457503373ffffffffffffffffffffffffffffffffffffffff851614155b1561048b576040517fef67f5d800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b805160008381526007602052604090205473ffffffffffffffffffffffffffffffffffffffff908116911614158061053d57506020808201516040516104d19201611c8e565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181528282528051602091820120600086815260078352929092209192610524926001019101612158565b6040516020818303038152906040528051906020012014155b15610604578051600083815260076020908152604090912080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff9093169290921782558201516001909101906105aa9082612247565b50806000015173ffffffffffffffffffffffffffffffffffffffff167f14c8f513e8a6d86d2d16b0cb64976de4e72386c4f8068eca3b7354373f8fe97a8383602001516040516105fb929190612361565b60405180910390a25b505080610610906123a9565b9050610372565b505050505050565b610627611846565b60408051823560208281019190915280840135828401528251808303840181526060909201909252805191012061065f6003826118c9565b15610696576040517fe288638f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60006106a86080840160608501611eeb565b73ffffffffffffffffffffffffffffffffffffffff1614610813576106d36080830160608401611eeb565b73ffffffffffffffffffffffffffffffffffffffff163b15806107b357506107016080830160608401611eeb565b6040517f01ffc9a70000000000000000000000000000000000000000000000000000000081527f884efe6100000000000000000000000000000000000000000000000000000000600482015273ffffffffffffffffffffffffffffffffffffffff91909116906301ffc9a790602401602060405180830381865afa15801561078d573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906107b191906123e1565b155b15610813576107c86080830160608401611eeb565b6040517fabb5e3fd00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff909116600482015260240161034d565b61081e6003826118e4565b50600081815260026020526040902082906108398282612403565b505060405181907f65610e5677eedff94555572640e442f89848a109ef8593fa927ac30b2565ff0690600090a25050565b60006108776005836118c9565b92915050565b610885611846565b60005b8181101561093b5760008383838181106108a4576108a4611f08565b60209081029290920135600081815260079093526040832080547fffffffffffffffffffffffff00000000000000000000000000000000000000001681559093509190506108f56001830182611aa7565b50506040518181527f1e5877d7b3001d1569bf733b76c7eceda58bd6c031e5b8d0b7042308ba2e9d4f9060200160405180910390a150610934816123a9565b9050610888565b505050565b610948611846565b60005b8181101561093b57600083838381811061096757610967611f08565b90506020028101906109799190611f37565b6109829061203f565b805190915073ffffffffffffffffffffffffffffffffffffffff166109d3576040517feeacd93900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600954604080518082018252835173ffffffffffffffffffffffffffffffffffffffff908116825260208086015181840190815260008681526007909252939020825181547fffffffffffffffffffffffff000000000000000000000000000000000000000016921691909117815591519091906001820190610a569082612247565b50905050600960008154610a69906123a9565b909155508151602083015160405173ffffffffffffffffffffffffffffffffffffffff909216917fda6697b182650034bd205cdc2dbfabb06bdb3a0a83a2
b45bfefa3c4881284e0b91610abe91859190612361565b60405180910390a2505080610ad2906123a9565b905061094b565b6040805160808101825260008082526020820181905291810191909152606080820152600082815260086020908152604091829020825160808101845281548152600182015481840152600282015473ffffffffffffffffffffffffffffffffffffffff16818501526003820180548551818602810186019096528086529194929360608601939290830182828015610b9157602002820191906000526020600020905b815481526020019060010190808311610b7d575b5050505050815250509050919050565b6040805180820190915260008152606060208201526000828152600760209081526040918290208251808401909352805473ffffffffffffffffffffffffffffffffffffffff1683526001810180549192840191610bfe9061210b565b80601f0160208091040260200160405190810160405280929190818152602001828054610c2a9061210b565b8015610b915780601f10610c4c57610100808354040283529160200191610b91565b820191906000526020600020905b815481529060010190602001808311610c5a57505050919092525091949350505050565b60015473ffffffffffffffffffffffffffffffffffffffff163314610cff576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e657200000000000000000000604482015260640161034d565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b604080516080808201835260008083526020808401829052838501829052606084018290528582526002808252918590208551938401865280548452600180820154928501929092529182015493949293919284019160ff1690811115610de457610de4611dce565b6001811115610df557610df5611dce565b815260029190910154610100900473ffffffffffffffffffffffffffffffffffffffff1660209091015292915050565b610e2d611846565b610e386003826118c9565b610e71576040517fe181733f0000000000000000000000000000000000000000000000000000000081526004810182905260240161034d565b610e7c6005826118c9565b15610eb6576040517f16950d1d0000000000000000000000000000000000000000000000000000000081526004810182905260240161034d565b610ec16005826118e4565b5060405181907fdcea1b78b6ddc31592a94607d537543fcaafda6cc52d6d5cc7bbfca1422baf2190600090a250565b60005b8181101561093b576000838383818110610f0f57610f0f611f08565b9050602002810190610f219190612485565b610f2a906124b9565b90506000610f4d60005473ffffffffffffffffffffffffffffffffffffffff1690565b825160009081526007602090815260408083208151808301909252805473ffffffffffffffffffffffffffffffffffffffff90811683526001820180549690911633149650939491939092840191610fa49061210b565b80601f0160208091040260200160405190810160405280929190818152602001828054610fd09061210b565b801561101d5780601f10610ff25761010080835404028352916020019161101d565b820191906000526020600020905b81548152906001019060200180831161100057829003601f168201915b50505050508152505090508115801561104d5750805173ffffffffffffffffffffffffffffffffffffffff163314155b15611084576040517fef67f5d800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6020808401516000908152600890915260409020600301541515806110dd5783602001516040517f64e2ee9200000000000000000000000000000000000000000000000000000000815260040161034d91815260200190565b604084015173ffffffffffffffffffffffffffffffffffffffff1661112e576040517f8377314600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8360600151516000036111735783606001516040517f3748d4c600000000000000000000000000000000000000000000000000000000815260040161034d919061258e565b60005b846060015151811015611200576111b48560600151828151811061119c5761119c611
f08565b602002602001015160036118c990919063ffffffff16565b6111f05784606001516040517f3748d4c600000000000000000000000000000000000000000000000000000000815260040161034d919061258e565b6111f9816123a9565b9050611176565b506020848101805160009081526008835260409081902087518155915160018301558601516002820180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909216919091179055606086015180518793611285926003850192910190611ae1565b509050507f6bbba867c646be512c2f3241e65fdffdefd5528d7e7939649e06e10ee5addc3e8460200151856000015186604001516040516112ef93929190928352602083019190915273ffffffffffffffffffffffffffffffffffffffff16604082015260600190565b60405180910390a15050505080611305906123a9565b9050610ef3565b60005b8181101561093b57600083838381811061132b5761132b611f08565b905060200281019061133d9190612485565b611346906124b9565b9050600061136960005473ffffffffffffffffffffffffffffffffffffffff1690565b825160009081526007602090815260408083208151808301909252805473ffffffffffffffffffffffffffffffffffffffff908116835260018201805496909116331496509394919390928401916113c09061210b565b80601f01602080910402602001604051908101604052809291908181526020018280546113ec9061210b565b80156114395780601f1061140e57610100808354040283529160200191611439565b820191906000526020600020905b81548152906001019060200180831161141c57829003601f168201915b5050505050815250509050811580156114695750805173ffffffffffffffffffffffffffffffffffffffff163314155b156114a0576040517fef67f5d800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b602080840151600090815260089091526040902060030154151580806114c857506020840151155b156115075783602001516040517f64e2ee9200000000000000000000000000000000000000000000000000000000815260040161034d91815260200190565b604084015173ffffffffffffffffffffffffffffffffffffffff16611558576040517f8377314600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b83606001515160000361159d5783606001516040517f3748d4c600000000000000000000000000000000000000000000000000000000815260040161034d919061258e565b60005b846060015151811015611612576115c68560600151828151811061119c5761119c611f08565b6116025784606001516040517f3748d4c600000000000000000000000000000000000000000000000000000000815260040161034d919061258e565b61160b816123a9565b90506115a0565b506020848101805160009081526008835260409081902087518155915160018301558601516002820180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909216919091179055606086015180518793611697926003850192910190611ae1565b505050602084810151855160408051928352928201527f5bfe8a52ad26ac6ee7b0cd46d2fd92be04735a31c45ef8aa3d4b7ea1b61bbc1f910160405180910390a150505050806116e6906123a9565b905061130f565b606060006116fb60036118f0565b9050600061170960056118fd565b825161171591906125c6565b67ffffffffffffffff81111561172d5761172d611f75565b60405190808252806020026020018201604052801561179d57816020015b6040805160808101825260008082526020808301829052928201819052606082015282527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90920191018161174b5790505b5090506000805b83518110156118295760008482815181106117c1576117c1611f08565b602002602001015190506117df8160056118c990919063ffffffff16565b611818576117ec81610d7b565b8484815181106117fe576117fe611f08565b60200260200101819052508280611814906123a9565b9350505b50611822816123a9565b90506117a4565b50909392505050565b61183a611846565b61184381611907565b50565b60005473ffffffffffffffffffffffffffffffffffffffff1633146118c7576040517f08c379a00000000000000000000000000000000000000000000000000000000081
5260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640161034d565b565b600081815260018301602052604081205415155b9392505050565b60006118dd83836119fc565b606060006118dd83611a4b565b6000610877825490565b3373ffffffffffffffffffffffffffffffffffffffff821603611986576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161034d565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000818152600183016020526040812054611a4357508154600181810184556000848152602080822090930184905584548482528286019093526040902091909155610877565b506000610877565b606081600001805480602002602001604051908101604052809291908181526020018280548015611a9b57602002820191906000526020600020905b815481526020019060010190808311611a87575b50505050509050919050565b508054611ab39061210b565b6000825580601f10611ac3575050565b601f0160209004906000526020600020908101906118439190611b2c565b828054828255906000526020600020908101928215611b1c579160200282015b82811115611b1c578251825591602001919060010190611b01565b50611b28929150611b2c565b5090565b5b80821115611b285760008155600101611b2d565b60008083601f840112611b5357600080fd5b50813567ffffffffffffffff811115611b6b57600080fd5b6020830191508360208260051b8501011115611b8657600080fd5b9250929050565b60008060008060408587031215611ba357600080fd5b843567ffffffffffffffff80821115611bbb57600080fd5b611bc788838901611b41565b90965094506020870135915080821115611be057600080fd5b50611bed87828801611b41565b95989497509550505050565b600060808284031215611c0b57600080fd5b50919050565b600060208284031215611c2357600080fd5b5035919050565b6000815180845260005b81811015611c5057602081850181015186830182015201611c34565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b6020815260006118dd6020830184611c2a565b60008060208385031215611cb457600080fd5b823567ffffffffffffffff811115611ccb57600080fd5b611cd785828601611b41565b90969095509350505050565b60008060408385031215611cf657600080fd5b50508035926020909101359150565b6000602080835260a0830184518285015281850151604085015273ffffffffffffffffffffffffffffffffffffffff6040860151166060850152606085015160808086015281815180845260c0870191508483019350600092505b80831015611d805783518252928401926001929092019190840190611d60565b509695505050505050565b6020815273ffffffffffffffffffffffffffffffffffffffff825116602082015260006020830151604080840152611dc66060840182611c2a565b949350505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b8051825260208101516020830152604081015160028110611e47577f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b604083015260609081015173ffffffffffffffffffffffffffffffffffffffff16910152565b608081016108778284611dfd565b6020808252825182820181905260009190848201906040850190845b81811015611ebd57611eaa838551611dfd565b9284019260809290920191600101611e97565b50909695505050505050565b73ffffffffffffffffffffffffffffffffffffffff8116811461184357600080fd5b600060208284031215611efd57600080fd5b81356118dd81611ec9565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600082357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc1833603018112611f6b57600080fd5b9190910192915050565b7f4e487b71000
00000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6040805190810167ffffffffffffffff81118282101715611fc757611fc7611f75565b60405290565b6040516080810167ffffffffffffffff81118282101715611fc757611fc7611f75565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff8111828210171561203757612037611f75565b604052919050565b60006040823603121561205157600080fd5b612059611fa4565b823561206481611ec9565b815260208381013567ffffffffffffffff8082111561208257600080fd5b9085019036601f83011261209557600080fd5b8135818111156120a7576120a7611f75565b6120d7847fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f84011601611ff0565b915080825236848285010111156120ed57600080fd5b80848401858401376000908201840152918301919091525092915050565b600181811c9082168061211f57607f821691505b602082108103611c0b577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b600060208083526000845461216c8161210b565b8084870152604060018084166000811461218d57600181146121c5576121f3565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff008516838a01528284151560051b8a010195506121f3565b896000528660002060005b858110156121eb5781548b82018601529083019088016121d0565b8a0184019650505b509398975050505050505050565b601f82111561093b57600081815260208120601f850160051c810160208610156122285750805b601f850160051c820191505b8181101561061757828155600101612234565b815167ffffffffffffffff81111561226157612261611f75565b6122758161226f845461210b565b84612201565b602080601f8311600181146122c857600084156122925750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b178555610617565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b82811015612315578886015182559484019460019091019084016122f6565b508582101561235157878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b01905550565b828152604060208201526000611dc66040830184611c2a565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036123da576123da61237a565b5060010190565b6000602082840312156123f357600080fd5b815180151581146118dd57600080fd5b81358155602082013560018201556002810160408301356002811061242757600080fd5b8154606085013561243781611ec9565b74ffffffffffffffffffffffffffffffffffffffff008160081b1660ff84167fffffffffffffffffffffff000000000000000000000000000000000000000000841617178455505050505050565b600082357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff81833603018112611f6b57600080fd5b6000608082360312156124cb57600080fd5b6124d3611fcd565b823581526020808401358183015260408401356124ef81611ec9565b6040830152606084013567ffffffffffffffff8082111561250f57600080fd5b9085019036601f83011261252257600080fd5b81358181111561253457612534611f75565b8060051b9150612545848301611ff0565b818152918301840191848101903684111561255f57600080fd5b938501935b8385101561257d57843582529385019390850190612564565b606087015250939695505050505050565b6020808252825182820181905260009190848201906040850190845b81811015611ebd578351835292840192918401916001016125aa565b818103818111156108775761087761237a56fea164736f6c6343000813000a", + ABI: 
"[{\"inputs\":[],\"name\":\"AccessForbidden\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"hashedCapabilityId\",\"type\":\"bytes32\"}],\"name\":\"CapabilityAlreadyDeprecated\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CapabilityAlreadyExists\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"hashedCapabilityId\",\"type\":\"bytes32\"}],\"name\":\"CapabilityDoesNotExist\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"proposedConfigurationContract\",\"type\":\"address\"}],\"name\":\"InvalidCapabilityConfigurationContractInterface\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32[]\",\"name\":\"hashedCapabilityIds\",\"type\":\"bytes32[]\"}],\"name\":\"InvalidNodeCapabilities\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidNodeOperatorAdmin\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"p2pId\",\"type\":\"bytes32\"}],\"name\":\"InvalidNodeP2PId\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidNodeSigner\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"lengthOne\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"lengthTwo\",\"type\":\"uint256\"}],\"name\":\"LengthMismatch\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"hashedCapabilityId\",\"type\":\"bytes32\"}],\"name\":\"CapabilityAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"hashedCapabilityId\",\"type\":\"bytes32\"}],\"name\":\"CapabilityDeprecated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"p2pId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"}],\"name\":\"NodeAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"}],\"name\":\"NodeOperatorAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"}],\"name\":\"NodeOperatorRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"}],\"name\":\"NodeOperatorUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"p2pId\",\"type\":\"bytes32\"}],\"name\":\"NodeRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"p2pId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"signer\",\"type\":\"address\"}],\"name\":\"NodeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],
\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"labelledName\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"version\",\"type\":\"bytes32\"},{\"internalType\":\"enumCapabilityRegistry.CapabilityResponseType\",\"name\":\"responseType\",\"type\":\"uint8\"},{\"internalType\":\"address\",\"name\":\"configurationContract\",\"type\":\"address\"}],\"internalType\":\"structCapabilityRegistry.Capability\",\"name\":\"capability\",\"type\":\"tuple\"}],\"name\":\"addCapability\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"}],\"internalType\":\"structCapabilityRegistry.NodeOperator[]\",\"name\":\"nodeOperators\",\"type\":\"tuple[]\"}],\"name\":\"addNodeOperators\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"p2pId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"signer\",\"type\":\"address\"},{\"internalType\":\"bytes32[]\",\"name\":\"supportedHashedCapabilityIds\",\"type\":\"bytes32[]\"}],\"internalType\":\"structCapabilityRegistry.Node[]\",\"name\":\"nodes\",\"type\":\"tuple[]\"}],\"name\":\"addNodes\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"hashedCapabilityId\",\"type\":\"bytes32\"}],\"name\":\"deprecateCapability\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCapabilities\",\"outputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"labelledName\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"version\",\"type\":\"bytes32\"},{\"internalType\":\"enumCapabilityRegistry.CapabilityResponseType\",\"name\":\"responseType\",\"type\":\"uint8\"},{\"internalType\":\"address\",\"name\":\"configurationContract\",\"type\":\"address\"}],\"internalType\":\"structCapabilityRegistry.Capability[]\",\"name\":\"\",\"type\":\"tuple[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"hashedId\",\"type\":\"bytes32\"}],\"name\":\"getCapability\",\"outputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"labelledName\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"version\",\"type\":\"bytes32\"},{\"internalType\":\"enumCapabilityRegistry.CapabilityResponseType\",\"name\":\"responseType\",\"type\":\"uint8\"},{\"internalType\":\"address\",\"name\":\"configurationContract\",\"type\":\"address\"}],\"internalType\":\"structCapabilityRegistry.Capability\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"labelledName\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"version\",\"type\":\"bytes32\"}],\"name\":\"getHashedC
apabilityId\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"p2pId\",\"type\":\"bytes32\"}],\"name\":\"getNode\",\"outputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"p2pId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"signer\",\"type\":\"address\"},{\"internalType\":\"bytes32[]\",\"name\":\"supportedHashedCapabilityIds\",\"type\":\"bytes32[]\"}],\"internalType\":\"structCapabilityRegistry.Node\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"}],\"name\":\"getNodeOperator\",\"outputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"}],\"internalType\":\"structCapabilityRegistry.NodeOperator\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"hashedCapabilityId\",\"type\":\"bytes32\"}],\"name\":\"isCapabilityDeprecated\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"nodeOperatorIds\",\"type\":\"uint256[]\"}],\"name\":\"removeNodeOperators\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[]\",\"name\":\"removedNodeP2PIds\",\"type\":\"bytes32[]\"}],\"name\":\"removeNodes\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"nodeOperatorIds\",\"type\":\"uint256[]\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"}],\"internalType\":\"structCapabilityRegistry.NodeOperator[]\",\"name\":\"nodeOperators\",\"type\":\"tuple[]\"}],\"name\":\"updateNodeOperators\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"nodeOperatorId\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"p2pId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"signer\",\"type\":\"address\"},{\"internalType\":\"bytes32[]\",\"name\":\"supportedHashedCapabilityIds\",\"type\":\"bytes32[]\"}],\"internalType\":\"structCapabilityRegistry.Node[]\",\"name\":\"nodes\",\"type\":\"tuple[]\"}],\"name\":\"updateNodes\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60806040523480156200001157600080fd5b503380600081620000695760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b03848116919091179091558116156200009c576200009c81620000a5565b50505062000150565b336001600160a01b03821603620000ff5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000060565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b61293380620001606000396000f3fe608060405234801561001057600080fd5b50600436106101365760003560e01c806365c14dc7116100b2578063ae3c241c11610081578063c2d483a111610066578063c2d483a114610301578063ddbe4f8214610314578063f2fde38b1461032957600080fd5b8063ae3c241c146102db578063b38e51f6146102ee57600080fd5b806365c14dc71461026b57806379ba50971461028b5780638da5cb5b146102935780639cb7c5f4146102bb57600080fd5b80631cdf63431161010957806336b402fb116100ee57806336b402fb146101f0578063398f37731461023857806350c946fe1461024b57600080fd5b80631cdf6343146101ca5780632c01a1e8146101dd57600080fd5b80630c5801e31461013b578063117392ce146101505780631257001114610163578063181f5a771461018b575b600080fd5b61014e610149366004611eda565b61033c565b005b61014e61015e366004611f46565b61064d565b610176610171366004611f5e565b610898565b60405190151581526020015b60405180910390f35b604080518082018252601881527f4361706162696c697479526567697374727920312e302e300000000000000000602082015290516101829190611fdb565b61014e6101d8366004611fee565b6108ab565b61014e6101eb366004611fee565b61096e565b61022a6101fe366004612030565b604080516020808201949094528082019290925280518083038201815260609092019052805191012090565b604051908152602001610182565b61014e610246366004611fee565b610c6f565b61025e610259366004611f5e565b610e08565b6040516101829190612052565b61027e610279366004611f5e565b610ed0565b60405161018291906120d8565b61014e610fad565b60005460405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610182565b6102ce6102c9366004611f5e565b6110aa565b60405161018291906121ba565b61014e6102e9366004611f5e565b611154565b61014e6102fc366004611fee565b61121f565b61014e61030f366004611fee565b61163b565b61031c611a1c565b60405161018291906121c8565b61014e610337366004612238565b611b61565b828114610384576040517fab8b67c600000000000000000000000000000000000000000000000000000000815260048101849052602481018290526044015b60405180910390fd5b6000805473ffffffffffffffffffffffffffffffffffffffff16905b848110156106455760008686838181106103bc576103bc612255565b90506020020135905060008585848181106103d9576103d9612255565b90506020028101906103eb9190612284565b6103f49061238c565b805190915073ffffffffffffffffffffffffffffffffffffffff16610445576040517feeacd93900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b805173ffffffffffffffffffffffffffffffffffffffff16331480159061048257503373ffffffffffffffffffffffffffffffffffffffff851614155b156104b9576040517fef67f5d800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b805160008381526007602052604090205473ffffffffffffffffffffffffffffffffffffffff908116911614158061056b57506020808201516040516104ff9201611fdb565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815282825280516020918201206000868152600783529290922091926105529260010191016124a5565b6040516020818303038152906040528051906020012014155b15610632578051600083815260076020908152604090912080547fffffffffffffffffff
ffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff9093169290921782558201516001909101906105d89082612594565b50806000015173ffffffffffffffffffffffffffffffffffffffff167f14c8f513e8a6d86d2d16b0cb64976de4e72386c4f8068eca3b7354373f8fe97a8383602001516040516106299291906126ae565b60405180910390a25b50508061063e906126f6565b90506103a0565b505050505050565b610655611b75565b60408051823560208281019190915280840135828401528251808303840181526060909201909252805191012061068d600382611bf8565b156106c4576040517fe288638f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60006106d66080840160608501612238565b73ffffffffffffffffffffffffffffffffffffffff1614610841576107016080830160608401612238565b73ffffffffffffffffffffffffffffffffffffffff163b15806107e1575061072f6080830160608401612238565b6040517f01ffc9a70000000000000000000000000000000000000000000000000000000081527f884efe6100000000000000000000000000000000000000000000000000000000600482015273ffffffffffffffffffffffffffffffffffffffff91909116906301ffc9a790602401602060405180830381865afa1580156107bb573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906107df919061272e565b155b15610841576107f66080830160608401612238565b6040517fabb5e3fd00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff909116600482015260240161037b565b61084c600382611c13565b50600081815260026020526040902082906108678282612750565b505060405181907f65610e5677eedff94555572640e442f89848a109ef8593fa927ac30b2565ff0690600090a25050565b60006108a5600583611bf8565b92915050565b6108b3611b75565b60005b818110156109695760008383838181106108d2576108d2612255565b60209081029290920135600081815260079093526040832080547fffffffffffffffffffffffff00000000000000000000000000000000000000001681559093509190506109236001830182611dd6565b50506040518181527f1e5877d7b3001d1569bf733b76c7eceda58bd6c031e5b8d0b7042308ba2e9d4f9060200160405180910390a150610962816126f6565b90506108b6565b505050565b6000805473ffffffffffffffffffffffffffffffffffffffff163314905b82811015610c695760008484838181106109a8576109a8612255565b602090810292909201356000818152600884526040808220815160808101835281548152600182015481880152600282015473ffffffffffffffffffffffffffffffffffffffff16818401526003820180548451818a0281018a0190955280855295985093969095509093606086019391830182828015610a4857602002820191906000526020600020905b815481526020019060010190808311610a34575b50505091909252505050600083815260086020526040902060030154909150151580610aa3576040517f64e2ee920000000000000000000000000000000000000000000000000000000081526004810184905260240161037b565b815160009081526007602090815260408083208151808301909252805473ffffffffffffffffffffffffffffffffffffffff1682526001810180549293919291840191610aef90612458565b80601f0160208091040260200160405190810160405280929190818152602001828054610b1b90612458565b8015610b685780601f10610b3d57610100808354040283529160200191610b68565b820191906000526020600020905b815481529060010190602001808311610b4b57829003601f168201915b505050505081525050905085158015610b985750805173ffffffffffffffffffffffffffffffffffffffff163314155b15610bcf576040517fef67f5d800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000848152600860205260408120818155600181018290556002810180547fffffffffffffffffffffffff000000000000000000000000000000000000000016905590610c1f6003830182611e10565b50506040518481527f5254e609a97bab37b7cc79fe128f85c097bd6015c6e1624ae0ba392eb97532059060200160405180910390a15050505080610c62906126f6565b905061098c565b50505050565b610c77611b75565b60005b8181101561096
9576000838383818110610c9657610c96612255565b9050602002810190610ca89190612284565b610cb19061238c565b805190915073ffffffffffffffffffffffffffffffffffffffff16610d02576040517feeacd93900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600954604080518082018252835173ffffffffffffffffffffffffffffffffffffffff908116825260208086015181840190815260008681526007909252939020825181547fffffffffffffffffffffffff000000000000000000000000000000000000000016921691909117815591519091906001820190610d859082612594565b50905050600960008154610d98906126f6565b909155508151602083015160405173ffffffffffffffffffffffffffffffffffffffff909216917fda6697b182650034bd205cdc2dbfabb06bdb3a0a83a2b45bfefa3c4881284e0b91610ded918591906126ae565b60405180910390a2505080610e01906126f6565b9050610c7a565b6040805160808101825260008082526020820181905291810191909152606080820152600082815260086020908152604091829020825160808101845281548152600182015481840152600282015473ffffffffffffffffffffffffffffffffffffffff16818501526003820180548551818602810186019096528086529194929360608601939290830182828015610ec057602002820191906000526020600020905b815481526020019060010190808311610eac575b5050505050815250509050919050565b6040805180820190915260008152606060208201526000828152600760209081526040918290208251808401909352805473ffffffffffffffffffffffffffffffffffffffff1683526001810180549192840191610f2d90612458565b80601f0160208091040260200160405190810160405280929190818152602001828054610f5990612458565b8015610ec05780601f10610f7b57610100808354040283529160200191610ec0565b820191906000526020600020905b815481529060010190602001808311610f8957505050919092525091949350505050565b60015473ffffffffffffffffffffffffffffffffffffffff16331461102e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e657200000000000000000000604482015260640161037b565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b604080516080808201835260008083526020808401829052838501829052606084018290528582526002808252918590208551938401865280548452600180820154928501929092529182015493949293919284019160ff16908111156111135761111361211b565b60018111156111245761112461211b565b815260029190910154610100900473ffffffffffffffffffffffffffffffffffffffff1660209091015292915050565b61115c611b75565b611167600382611bf8565b6111a0576040517fe181733f0000000000000000000000000000000000000000000000000000000081526004810182905260240161037b565b6111ab600582611bf8565b156111e5576040517f16950d1d0000000000000000000000000000000000000000000000000000000081526004810182905260240161037b565b6111f0600582611c13565b5060405181907fdcea1b78b6ddc31592a94607d537543fcaafda6cc52d6d5cc7bbfca1422baf2190600090a250565b60005b8181101561096957600083838381811061123e5761123e612255565b905060200281019061125091906127d2565b61125990612806565b9050600061127c60005473ffffffffffffffffffffffffffffffffffffffff1690565b825160009081526007602090815260408083208151808301909252805473ffffffffffffffffffffffffffffffffffffffff908116835260018201805496909116331496509394919390928401916112d390612458565b80601f01602080910402602001604051908101604052809291908181526020018280546112ff90612458565b801561134c5780601f106113215761010080835404028352916020019161134c565b820191906000526020600020905b81548152906001019060200180831161132f57829003601f168201915b50505050508152505090508115801561137c5750805173ffffffffffffffffffffffffffffff
ffffffffff163314155b156113b3576040517fef67f5d800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60208084015160009081526008909152604090206003015415158061140c5783602001516040517f64e2ee9200000000000000000000000000000000000000000000000000000000815260040161037b91815260200190565b604084015173ffffffffffffffffffffffffffffffffffffffff1661145d576040517f8377314600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8360600151516000036114a25783606001516040517f3748d4c600000000000000000000000000000000000000000000000000000000815260040161037b91906128db565b60005b84606001515181101561152f576114e3856060015182815181106114cb576114cb612255565b60200260200101516003611bf890919063ffffffff16565b61151f5784606001516040517f3748d4c600000000000000000000000000000000000000000000000000000000815260040161037b91906128db565b611528816126f6565b90506114a5565b506020848101805160009081526008835260409081902087518155915160018301558601516002820180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff9092169190911790556060860151805187936115b4926003850192910190611e2e565b509050507f6bbba867c646be512c2f3241e65fdffdefd5528d7e7939649e06e10ee5addc3e84602001518560000151866040015160405161161e93929190928352602083019190915273ffffffffffffffffffffffffffffffffffffffff16604082015260600190565b60405180910390a15050505080611634906126f6565b9050611222565b60005b8181101561096957600083838381811061165a5761165a612255565b905060200281019061166c91906127d2565b61167590612806565b9050600061169860005473ffffffffffffffffffffffffffffffffffffffff1690565b825160009081526007602090815260408083208151808301909252805473ffffffffffffffffffffffffffffffffffffffff908116835260018201805496909116331496509394919390928401916116ef90612458565b80601f016020809104026020016040519081016040528092919081815260200182805461171b90612458565b80156117685780601f1061173d57610100808354040283529160200191611768565b820191906000526020600020905b81548152906001019060200180831161174b57829003601f168201915b5050505050815250509050811580156117985750805173ffffffffffffffffffffffffffffffffffffffff163314155b156117cf576040517fef67f5d800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b602080840151600090815260089091526040902060030154151580806117f757506020840151155b156118365783602001516040517f64e2ee9200000000000000000000000000000000000000000000000000000000815260040161037b91815260200190565b604084015173ffffffffffffffffffffffffffffffffffffffff16611887576040517f8377314600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8360600151516000036118cc5783606001516040517f3748d4c600000000000000000000000000000000000000000000000000000000815260040161037b91906128db565b60005b846060015151811015611941576118f5856060015182815181106114cb576114cb612255565b6119315784606001516040517f3748d4c600000000000000000000000000000000000000000000000000000000815260040161037b91906128db565b61193a816126f6565b90506118cf565b506020848101805160009081526008835260409081902087518155915160018301558601516002820180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff9092169190911790556060860151805187936119c6926003850192910190611e2e565b505050602084810151855160408051928352928201527f5bfe8a52ad26ac6ee7b0cd46d2fd92be04735a31c45ef8aa3d4b7ea1b61bbc1f910160405180910390a15050505080611a15906126f6565b905061163e565b60606000611a2a6003611c1f565b90506000611a386005611c2c565b8251611a449190612913565b67ffffffffffffffff811115611a5c57611a5c6122c2565b604051908082528
060200260200182016040528015611acc57816020015b6040805160808101825260008082526020808301829052928201819052606082015282527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff909201910181611a7a5790505b5090506000805b8351811015611b58576000848281518110611af057611af0612255565b60200260200101519050611b0e816005611bf890919063ffffffff16565b611b4757611b1b816110aa565b848481518110611b2d57611b2d612255565b60200260200101819052508280611b43906126f6565b9350505b50611b51816126f6565b9050611ad3565b50909392505050565b611b69611b75565b611b7281611c36565b50565b60005473ffffffffffffffffffffffffffffffffffffffff163314611bf6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640161037b565b565b600081815260018301602052604081205415155b9392505050565b6000611c0c8383611d2b565b60606000611c0c83611d7a565b60006108a5825490565b3373ffffffffffffffffffffffffffffffffffffffff821603611cb5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161037b565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000818152600183016020526040812054611d72575081546001818101845560008481526020808220909301849055845484825282860190935260409020919091556108a5565b5060006108a5565b606081600001805480602002602001604051908101604052809291908181526020018280548015611dca57602002820191906000526020600020905b815481526020019060010190808311611db6575b50505050509050919050565b508054611de290612458565b6000825580601f10611df2575050565b601f016020900490600052602060002090810190611b729190611e79565b5080546000825590600052602060002090810190611b729190611e79565b828054828255906000526020600020908101928215611e69579160200282015b82811115611e69578251825591602001919060010190611e4e565b50611e75929150611e79565b5090565b5b80821115611e755760008155600101611e7a565b60008083601f840112611ea057600080fd5b50813567ffffffffffffffff811115611eb857600080fd5b6020830191508360208260051b8501011115611ed357600080fd5b9250929050565b60008060008060408587031215611ef057600080fd5b843567ffffffffffffffff80821115611f0857600080fd5b611f1488838901611e8e565b90965094506020870135915080821115611f2d57600080fd5b50611f3a87828801611e8e565b95989497509550505050565b600060808284031215611f5857600080fd5b50919050565b600060208284031215611f7057600080fd5b5035919050565b6000815180845260005b81811015611f9d57602081850181015186830182015201611f81565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b602081526000611c0c6020830184611f77565b6000806020838503121561200157600080fd5b823567ffffffffffffffff81111561201857600080fd5b61202485828601611e8e565b90969095509350505050565b6000806040838503121561204357600080fd5b50508035926020909101359150565b6000602080835260a0830184518285015281850151604085015273ffffffffffffffffffffffffffffffffffffffff6040860151166060850152606085015160808086015281815180845260c0870191508483019350600092505b808310156120cd57835182529284019260019290920191908401906120ad565b509695505050505050565b6020815273ffffffffffffffffffffffffffffffffffffffff8251166020820152600060208301516040808401526121136060840182611f77565b949350505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b8051825260208101516020
830152604081015160028110612194577f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b604083015260609081015173ffffffffffffffffffffffffffffffffffffffff16910152565b608081016108a5828461214a565b6020808252825182820181905260009190848201906040850190845b8181101561220a576121f783855161214a565b92840192608092909201916001016121e4565b50909695505050505050565b73ffffffffffffffffffffffffffffffffffffffff81168114611b7257600080fd5b60006020828403121561224a57600080fd5b8135611c0c81612216565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600082357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc18336030181126122b857600080fd5b9190910192915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6040805190810167ffffffffffffffff81118282101715612314576123146122c2565b60405290565b6040516080810167ffffffffffffffff81118282101715612314576123146122c2565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715612384576123846122c2565b604052919050565b60006040823603121561239e57600080fd5b6123a66122f1565b82356123b181612216565b815260208381013567ffffffffffffffff808211156123cf57600080fd5b9085019036601f8301126123e257600080fd5b8135818111156123f4576123f46122c2565b612424847fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8401160161233d565b9150808252368482850101111561243a57600080fd5b80848401858401376000908201840152918301919091525092915050565b600181811c9082168061246c57607f821691505b602082108103611f58577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b60006020808352600084546124b981612458565b808487015260406001808416600081146124da576001811461251257612540565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff008516838a01528284151560051b8a01019550612540565b896000528660002060005b858110156125385781548b820186015290830190880161251d565b8a0184019650505b509398975050505050505050565b601f82111561096957600081815260208120601f850160051c810160208610156125755750805b601f850160051c820191505b8181101561064557828155600101612581565b815167ffffffffffffffff8111156125ae576125ae6122c2565b6125c2816125bc8454612458565b8461254e565b602080601f83116001811461261557600084156125df5750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b178555610645565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b8281101561266257888601518255948401946001909101908401612643565b508582101561269e57878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b01905550565b8281526040602082015260006121136040830184611f77565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203612727576127276126c7565b5060010190565b60006020828403121561274057600080fd5b81518015158114611c0c57600080fd5b81358155602082013560018201556002810160408301356002811061277457600080fd5b8154606085013561278481612216565b74ffffffffffffffffffffffffffffffffffffffff008160081b1660ff84167fffffffffffffffffffffff000000000000000000000000000000000000000000841617178455505050505050565b600082357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff818336030181126122b857600080fd5b60006080823603121561281857600080fd5b61282061231a565b8235815260208084013581830152604084013561283c81612216565b604083015260608401356
7ffffffffffffffff8082111561285c57600080fd5b9085019036601f83011261286f57600080fd5b813581811115612881576128816122c2565b8060051b915061289284830161233d565b81815291830184019184810190368411156128ac57600080fd5b938501935b838510156128ca578435825293850193908501906128b1565b606087015250939695505050505050565b6020808252825182820181905260009190848201906040850190845b8181101561220a578351835292840192918401916001016128f7565b818103818111156108a5576108a56126c756fea164736f6c6343000813000a", } var CapabilityRegistryABI = CapabilityRegistryMetaData.ABI @@ -438,6 +438,18 @@ func (_CapabilityRegistry *CapabilityRegistryTransactorSession) RemoveNodeOperat return _CapabilityRegistry.Contract.RemoveNodeOperators(&_CapabilityRegistry.TransactOpts, nodeOperatorIds) } +func (_CapabilityRegistry *CapabilityRegistryTransactor) RemoveNodes(opts *bind.TransactOpts, removedNodeP2PIds [][32]byte) (*types.Transaction, error) { + return _CapabilityRegistry.contract.Transact(opts, "removeNodes", removedNodeP2PIds) +} + +func (_CapabilityRegistry *CapabilityRegistrySession) RemoveNodes(removedNodeP2PIds [][32]byte) (*types.Transaction, error) { + return _CapabilityRegistry.Contract.RemoveNodes(&_CapabilityRegistry.TransactOpts, removedNodeP2PIds) +} + +func (_CapabilityRegistry *CapabilityRegistryTransactorSession) RemoveNodes(removedNodeP2PIds [][32]byte) (*types.Transaction, error) { + return _CapabilityRegistry.Contract.RemoveNodes(&_CapabilityRegistry.TransactOpts, removedNodeP2PIds) +} + func (_CapabilityRegistry *CapabilityRegistryTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { return _CapabilityRegistry.contract.Transact(opts, "transferOwnership", to) } @@ -1221,6 +1233,123 @@ func (_CapabilityRegistry *CapabilityRegistryFilterer) ParseNodeOperatorUpdated( return event, nil } +type CapabilityRegistryNodeRemovedIterator struct { + Event *CapabilityRegistryNodeRemoved + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *CapabilityRegistryNodeRemovedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(CapabilityRegistryNodeRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CapabilityRegistryNodeRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *CapabilityRegistryNodeRemovedIterator) Error() error { + return it.fail +} + +func (it *CapabilityRegistryNodeRemovedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type CapabilityRegistryNodeRemoved struct { + P2pId [32]byte + Raw types.Log +} + +func (_CapabilityRegistry *CapabilityRegistryFilterer) FilterNodeRemoved(opts *bind.FilterOpts) (*CapabilityRegistryNodeRemovedIterator, error) { + + logs, sub, err := _CapabilityRegistry.contract.FilterLogs(opts, "NodeRemoved") + if err != nil { + return nil, err + } + return &CapabilityRegistryNodeRemovedIterator{contract: _CapabilityRegistry.contract, event: "NodeRemoved", logs: logs, sub: sub}, nil +} + +func (_CapabilityRegistry *CapabilityRegistryFilterer) WatchNodeRemoved(opts *bind.WatchOpts, sink 
chan<- *CapabilityRegistryNodeRemoved) (event.Subscription, error) { + + logs, sub, err := _CapabilityRegistry.contract.WatchLogs(opts, "NodeRemoved") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(CapabilityRegistryNodeRemoved) + if err := _CapabilityRegistry.contract.UnpackLog(event, "NodeRemoved", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_CapabilityRegistry *CapabilityRegistryFilterer) ParseNodeRemoved(log types.Log) (*CapabilityRegistryNodeRemoved, error) { + event := new(CapabilityRegistryNodeRemoved) + if err := _CapabilityRegistry.contract.UnpackLog(event, "NodeRemoved", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + type CapabilityRegistryNodeUpdatedIterator struct { Event *CapabilityRegistryNodeUpdated @@ -1626,6 +1755,8 @@ func (_CapabilityRegistry *CapabilityRegistry) ParseLog(log types.Log) (generate return _CapabilityRegistry.ParseNodeOperatorRemoved(log) case _CapabilityRegistry.abi.Events["NodeOperatorUpdated"].ID: return _CapabilityRegistry.ParseNodeOperatorUpdated(log) + case _CapabilityRegistry.abi.Events["NodeRemoved"].ID: + return _CapabilityRegistry.ParseNodeRemoved(log) case _CapabilityRegistry.abi.Events["NodeUpdated"].ID: return _CapabilityRegistry.ParseNodeUpdated(log) case _CapabilityRegistry.abi.Events["OwnershipTransferRequested"].ID: @@ -1662,6 +1793,10 @@ func (CapabilityRegistryNodeOperatorUpdated) Topic() common.Hash { return common.HexToHash("0x14c8f513e8a6d86d2d16b0cb64976de4e72386c4f8068eca3b7354373f8fe97a") } +func (CapabilityRegistryNodeRemoved) Topic() common.Hash { + return common.HexToHash("0x5254e609a97bab37b7cc79fe128f85c097bd6015c6e1624ae0ba392eb9753205") +} + func (CapabilityRegistryNodeUpdated) Topic() common.Hash { return common.HexToHash("0x6bbba867c646be512c2f3241e65fdffdefd5528d7e7939649e06e10ee5addc3e") } @@ -1707,6 +1842,8 @@ type CapabilityRegistryInterface interface { RemoveNodeOperators(opts *bind.TransactOpts, nodeOperatorIds []*big.Int) (*types.Transaction, error) + RemoveNodes(opts *bind.TransactOpts, removedNodeP2PIds [][32]byte) (*types.Transaction, error) + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) UpdateNodeOperators(opts *bind.TransactOpts, nodeOperatorIds []*big.Int, nodeOperators []CapabilityRegistryNodeOperator) (*types.Transaction, error) @@ -1749,6 +1886,12 @@ type CapabilityRegistryInterface interface { ParseNodeOperatorUpdated(log types.Log) (*CapabilityRegistryNodeOperatorUpdated, error) + FilterNodeRemoved(opts *bind.FilterOpts) (*CapabilityRegistryNodeRemovedIterator, error) + + WatchNodeRemoved(opts *bind.WatchOpts, sink chan<- *CapabilityRegistryNodeRemoved) (event.Subscription, error) + + ParseNodeRemoved(log types.Log) (*CapabilityRegistryNodeRemoved, error) + FilterNodeUpdated(opts *bind.FilterOpts) (*CapabilityRegistryNodeUpdatedIterator, error) WatchNodeUpdated(opts *bind.WatchOpts, sink chan<- *CapabilityRegistryNodeUpdated) (event.Subscription, error) diff --git a/core/gethwrappers/keystone/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/gethwrappers/keystone/generation/generated-wrapper-dependency-versions-do-not-edit.txt index 
e2bb9865809..f5b47e58913 100644 --- a/core/gethwrappers/keystone/generation/generated-wrapper-dependency-versions-do-not-edit.txt +++ b/core/gethwrappers/keystone/generation/generated-wrapper-dependency-versions-do-not-edit.txt @@ -1,4 +1,4 @@ GETH_VERSION: 1.13.8 forwarder: ../../../contracts/solc/v0.8.19/KeystoneForwarder/KeystoneForwarder.abi ../../../contracts/solc/v0.8.19/KeystoneForwarder/KeystoneForwarder.bin b4c900aae9e022f01abbac7993d41f93912247613ac6270b0c4da4ef6f2016e3 -keystone_capability_registry: ../../../contracts/solc/v0.8.19/CapabilityRegistry/CapabilityRegistry.abi ../../../contracts/solc/v0.8.19/CapabilityRegistry/CapabilityRegistry.bin 98d53a1997053a3037827ffd170c12f49d2005a5c266a1ea9eb69bb51e862f37 +keystone_capability_registry: ../../../contracts/solc/v0.8.19/CapabilityRegistry/CapabilityRegistry.abi ../../../contracts/solc/v0.8.19/CapabilityRegistry/CapabilityRegistry.bin 878d2b539e07962af90e8c283fa5a90a15b5b59ddbc0854a137a3be621f0afcd ocr3_capability: ../../../contracts/solc/v0.8.19/OCR3Capability/OCR3Capability.abi ../../../contracts/solc/v0.8.19/OCR3Capability/OCR3Capability.bin 9dcbdf55bd5729ba266148da3f17733eb592c871c2108ccca546618628fd9ad2