diff --git a/.github/actions/build-test-image/action.yml b/.github/actions/build-test-image/action.yml index 0cd1364cbeb..b3c0a6bb8d3 100644 --- a/.github/actions/build-test-image/action.yml +++ b/.github/actions/build-test-image/action.yml @@ -34,7 +34,7 @@ runs: # Base Test Image Logic - name: Get CTF Version id: version - uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/mod-version@5dd916d08c03cb5f9a97304f4f174820421bb946 # v2.3.11 + uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/mod-version@b6e5189ee90d0246c915da35c020c5822f233d42 # v2.3.13 with: go-project-path: ./integration-tests module-name: github.com/smartcontractkit/chainlink-testing-framework diff --git a/.github/workflows/automation-benchmark-tests.yml b/.github/workflows/automation-benchmark-tests.yml index 1faf8cea969..4078322b232 100644 --- a/.github/workflows/automation-benchmark-tests.yml +++ b/.github/workflows/automation-benchmark-tests.yml @@ -48,7 +48,7 @@ jobs: - name: Parse base64 config uses: ./.github/actions/setup-parse-base64-config with: - base64Config: ${{ env.BASE64_CONFIG_OVERRIDE }} + base64Config: ${{ env.BASE64_CONFIG_OVERRIDE }} - name: Send details to Step Summary shell: bash run: | @@ -57,7 +57,7 @@ jobs: echo "### chainlink-tests image tag for this test run :ship:" >>$GITHUB_STEP_SUMMARY echo "\`${GITHUB_SHA}\`" >>$GITHUB_STEP_SUMMARY echo "### Networks on which test was run" >>$GITHUB_STEP_SUMMARY - echo "\`${{ env.NETWORKS }}\`" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.NETWORKS }}\`" >>$GITHUB_STEP_SUMMARY - name: Build Test Image uses: ./.github/actions/build-test-image with: @@ -75,6 +75,8 @@ jobs: INTERNAL_DOCKER_REPO: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com TEST_TYPE: ${{ github.event.inputs.testType }} TEST_TEST_TYPE: ${{ github.event.inputs.testType }} + RR_MEM: 4Gi + TEST_LOG_LEVEL: info with: test_command_to_run: cd integration-tests && go test -timeout 30m -v -run 
^TestAutomationBenchmark$ ./benchmark -count=1 test_download_vendor_packages_command: make gomod diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 1f121b635fb..992a7ad1f50 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -71,7 +71,7 @@ jobs: echo "should-enforce=$SHOULD_ENFORCE" >> $GITHUB_OUTPUT - name: Enforce CTF Version if: steps.condition-check.outputs.should-enforce == 'true' - uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/mod-version@5dd916d08c03cb5f9a97304f4f174820421bb946 # v2.3.11 + uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/mod-version@e29366cdecfe6befff9ab8c3cfe4825218505d58 # v2.3.16 with: go-project-path: ./integration-tests module-name: github.com/smartcontractkit/chainlink-testing-framework @@ -468,7 +468,7 @@ jobs: with: name: cl-node-coverage-data-${{ matrix.product.name }} path: .covdata - retention-days: 1 + retention-days: 1 eth-smoke-tests-matrix: if: ${{ !contains(join(github.event.pull_request.labels.*.name, ' '), 'skip-smoke-tests') }} @@ -796,7 +796,7 @@ jobs: # Run the setup if the matrix finishes but this time save the cache if we have a cache hit miss # this will also only run if both of the matrix jobs pass eth-smoke-go-mod-cache: - + environment: integration needs: [eth-smoke-tests] runs-on: ubuntu-latest @@ -917,7 +917,7 @@ jobs: with: name: cl-node-coverage-data-migration-tests path: .covdata - retention-days: 1 + retention-days: 1 - name: Notify Slack if: failure() && github.event_name != 'workflow_dispatch' uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 @@ -1210,12 +1210,12 @@ jobs: QA_KUBECONFIG: "" run_setup: false go_coverage_src_dir: /var/tmp/go-coverage - go_coverage_dest_dir: ${{ github.workspace }}/.covdata + go_coverage_dest_dir: ${{ github.workspace }}/.covdata - name: Upload Coverage Data uses: 
actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 with: name: cl-node-coverage-data-solana-tests path: .covdata - retention-days: 1 + retention-days: 1 diff --git a/.gitignore b/.gitignore index ca388c85c2b..bcf51bea35b 100644 --- a/.gitignore +++ b/.gitignore @@ -67,6 +67,11 @@ tmp-manifest-*.yaml ztarrepo.tar.gz **/test-ledger/* __debug_bin* +.test_summary/ +.run.id +integration-tests/**/traces/ +benchmark_report.csv +benchmark_summary.json # goreleaser builds cosign.* @@ -87,10 +92,6 @@ go.work* # This sometimes shows up for some reason tools/flakeytests/coverage.txt -# Integration tests create these files -.test_summary/ -.run.id - # Fuzz tests can create these files **/testdata/fuzz/* diff --git a/integration-tests/actions/automation_ocr_helpers.go b/integration-tests/actions/automation_ocr_helpers.go index 53fb92499eb..9ee14d4a6a1 100644 --- a/integration-tests/actions/automation_ocr_helpers.go +++ b/integration-tests/actions/automation_ocr_helpers.go @@ -193,8 +193,10 @@ func CreateOCRKeeperJobs( contractVersion = "v2.1+" } else if registryVersion == ethereum.RegistryVersion_2_1 { contractVersion = "v2.1" + } else if registryVersion == ethereum.RegistryVersion_2_0 { + contractVersion = "v2.0" } else { - require.FailNow(t, "v2.0, v2.1, and v2.2 are the only supported versions") + require.FailNow(t, fmt.Sprintf("v2.0, v2.1, and v2.2 are the only supported versions, but got something else: %v (iota)", registryVersion)) } bootstrapSpec := &client.OCR2TaskJobSpec{ diff --git a/integration-tests/actions/automationv2/actions.go b/integration-tests/actions/automationv2/actions.go index 4ce56d9b870..25e6a4a7a06 100644 --- a/integration-tests/actions/automationv2/actions.go +++ b/integration-tests/actions/automationv2/actions.go @@ -1,6 +1,7 @@ package automationv2 import ( + "context" "crypto/ed25519" "encoding/hex" "encoding/json" @@ -14,10 +15,12 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi" 
"github.com/ethereum/go-ethereum/common" "github.com/lib/pq" + "github.com/rs/zerolog" "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper" ocr2 "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper" ocr3 "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3confighelper" "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + "github.com/smartcontractkit/seth" "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" "gopkg.in/guregu/null.v4" @@ -25,9 +28,7 @@ import ( ocr2keepers20config "github.com/smartcontractkit/chainlink-automation/pkg/v2/config" ocr2keepers30config "github.com/smartcontractkit/chainlink-automation/pkg/v3/config" - "github.com/smartcontractkit/chainlink-testing-framework/blockchain" - "github.com/smartcontractkit/chainlink-testing-framework/logging" - + actions_seth "github.com/smartcontractkit/chainlink/integration-tests/actions/seth" "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/automation_registrar_wrapper2_1" @@ -40,7 +41,9 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype" "github.com/smartcontractkit/chainlink/v2/core/store/models" + ctf_concurrency "github.com/smartcontractkit/chainlink-testing-framework/concurrency" ctfTestEnv "github.com/smartcontractkit/chainlink-testing-framework/docker/test_env" + "github.com/smartcontractkit/chainlink-testing-framework/logging" ) type NodeDetails struct { @@ -53,8 +56,7 @@ type NodeDetails struct { } type AutomationTest struct { - ChainClient blockchain.EVMClient - Deployer contracts.ContractDeployer + ChainClient *seth.Client LinkToken contracts.LinkToken Transcoder contracts.UpkeepTranscoder @@ -82,6 +84,7 @@ type AutomationTest struct { mercuryCredentialName string TransmitterKeyIndex int + Logger zerolog.Logger useLogBufferV1 bool } @@ -99,36 +102,36 @@ type UpkeepConfig struct { } func 
NewAutomationTestK8s( - chainClient blockchain.EVMClient, - deployer contracts.ContractDeployer, + l zerolog.Logger, + chainClient *seth.Client, chainlinkNodes []*client.ChainlinkK8sClient, ) *AutomationTest { return &AutomationTest{ ChainClient: chainClient, - Deployer: deployer, ChainlinkNodesk8s: chainlinkNodes, IsOnk8s: true, TransmitterKeyIndex: 0, - UpkeepPrivilegeManager: common.HexToAddress(chainClient.GetDefaultWallet().Address()), + UpkeepPrivilegeManager: chainClient.MustGetRootKeyAddress(), mercuryCredentialName: "", useLogBufferV1: false, + Logger: l, } } func NewAutomationTestDocker( - chainClient blockchain.EVMClient, - deployer contracts.ContractDeployer, + l zerolog.Logger, + chainClient *seth.Client, chainlinkNodes []*client.ChainlinkClient, ) *AutomationTest { return &AutomationTest{ ChainClient: chainClient, - Deployer: deployer, ChainlinkNodes: chainlinkNodes, IsOnk8s: false, TransmitterKeyIndex: 0, - UpkeepPrivilegeManager: common.HexToAddress(chainClient.GetDefaultWallet().Address()), + UpkeepPrivilegeManager: chainClient.MustGetRootKeyAddress(), mercuryCredentialName: "", useLogBufferV1: false, + Logger: l, } } @@ -157,20 +160,16 @@ func (a *AutomationTest) SetDockerEnv(env *test_env.CLClusterTestEnv) { } func (a *AutomationTest) DeployLINK() error { - linkToken, err := a.Deployer.DeployLinkTokenContract() + linkToken, err := contracts.DeployLinkTokenContract(a.Logger, a.ChainClient) if err != nil { return err } a.LinkToken = linkToken - err = a.ChainClient.WaitForEvents() - if err != nil { - return errors.Join(err, fmt.Errorf("failed waiting for link token contract to deploy")) - } return nil } func (a *AutomationTest) LoadLINK(address string) error { - linkToken, err := a.Deployer.LoadLinkToken(common.HexToAddress(address)) + linkToken, err := contracts.LoadLinkTokenContract(a.Logger, a.ChainClient, common.HexToAddress(address)) if err != nil { return err } @@ -179,20 +178,16 @@ func (a *AutomationTest) LoadLINK(address string) error { } 
func (a *AutomationTest) DeployTranscoder() error { - transcoder, err := a.Deployer.DeployUpkeepTranscoder() + transcoder, err := contracts.DeployUpkeepTranscoder(a.ChainClient) if err != nil { return err } a.Transcoder = transcoder - err = a.ChainClient.WaitForEvents() - if err != nil { - return errors.Join(err, fmt.Errorf("failed waiting for transcoder contract to deploy")) - } return nil } func (a *AutomationTest) LoadTranscoder(address string) error { - transcoder, err := a.Deployer.LoadUpkeepTranscoder(common.HexToAddress(address)) + transcoder, err := contracts.LoadUpkeepTranscoder(a.ChainClient, common.HexToAddress(address)) if err != nil { return err } @@ -201,20 +196,16 @@ func (a *AutomationTest) LoadTranscoder(address string) error { } func (a *AutomationTest) DeployEthLinkFeed() error { - ethLinkFeed, err := a.Deployer.DeployMockETHLINKFeed(a.RegistrySettings.FallbackLinkPrice) + ethLinkFeed, err := contracts.DeployMockETHLINKFeed(a.ChainClient, a.RegistrySettings.FallbackLinkPrice) if err != nil { return err } a.EthLinkFeed = ethLinkFeed - err = a.ChainClient.WaitForEvents() - if err != nil { - return errors.Join(err, fmt.Errorf("failed waiting for Mock ETH LINK feed to deploy")) - } return nil } func (a *AutomationTest) LoadEthLinkFeed(address string) error { - ethLinkFeed, err := a.Deployer.LoadETHLINKFeed(common.HexToAddress(address)) + ethLinkFeed, err := contracts.LoadMockETHLINKFeed(a.ChainClient, common.HexToAddress(address)) if err != nil { return err } @@ -223,20 +214,16 @@ func (a *AutomationTest) LoadEthLinkFeed(address string) error { } func (a *AutomationTest) DeployGasFeed() error { - gasFeed, err := a.Deployer.DeployMockGasFeed(a.RegistrySettings.FallbackGasPrice) + gasFeed, err := contracts.DeployMockGASFeed(a.ChainClient, a.RegistrySettings.FallbackGasPrice) if err != nil { return err } a.GasFeed = gasFeed - err = a.ChainClient.WaitForEvents() - if err != nil { - return errors.Join(err, fmt.Errorf("failed waiting for mock gas feed to 
deploy")) - } return nil } func (a *AutomationTest) LoadEthGasFeed(address string) error { - gasFeed, err := a.Deployer.LoadGasFeed(common.HexToAddress(address)) + gasFeed, err := contracts.LoadMockGASFeed(a.ChainClient, common.HexToAddress(address)) if err != nil { return err } @@ -254,20 +241,16 @@ func (a *AutomationTest) DeployRegistry() error { RegistrarAddr: utils.ZeroAddress.Hex(), Settings: a.RegistrySettings, } - registry, err := a.Deployer.DeployKeeperRegistry(registryOpts) + registry, err := contracts.DeployKeeperRegistry(a.ChainClient, registryOpts) if err != nil { return err } a.Registry = registry - err = a.ChainClient.WaitForEvents() - if err != nil { - return errors.Join(err, fmt.Errorf("failed waiting for registry contract to deploy")) - } return nil } func (a *AutomationTest) LoadRegistry(address string) error { - registry, err := a.Deployer.LoadKeeperRegistry(common.HexToAddress(address), a.RegistrySettings.RegistryVersion) + registry, err := contracts.LoadKeeperRegistry(a.Logger, a.ChainClient, common.HexToAddress(address), a.RegistrySettings.RegistryVersion) if err != nil { return err } @@ -280,15 +263,11 @@ func (a *AutomationTest) DeployRegistrar() error { return fmt.Errorf("registry must be deployed or loaded before registrar") } a.RegistrarSettings.RegistryAddr = a.Registry.Address() - registrar, err := a.Deployer.DeployKeeperRegistrar(a.RegistrySettings.RegistryVersion, a.LinkToken.Address(), a.RegistrarSettings) + registrar, err := contracts.DeployKeeperRegistrar(a.ChainClient, a.RegistrySettings.RegistryVersion, a.LinkToken.Address(), a.RegistrarSettings) if err != nil { return err } a.Registrar = registrar - err = a.ChainClient.WaitForEvents() - if err != nil { - return errors.Join(err, fmt.Errorf("failed waiting for registrar contract to deploy")) - } return nil } @@ -297,7 +276,7 @@ func (a *AutomationTest) LoadRegistrar(address string) error { return fmt.Errorf("registry must be deployed or loaded before registrar") } 
a.RegistrarSettings.RegistryAddr = a.Registry.Address() - registrar, err := a.Deployer.LoadKeeperRegistrar(common.HexToAddress(address), a.RegistrySettings.RegistryVersion) + registrar, err := contracts.LoadKeeperRegistrar(a.ChainClient, common.HexToAddress(address), a.RegistrySettings.RegistryVersion) if err != nil { return err } @@ -342,7 +321,7 @@ func (a *AutomationTest) CollectNodeDetails() error { } } - TransmitterKeys, err := node.EthAddressesForChain(a.ChainClient.GetChainID().String()) + TransmitterKeys, err := node.EthAddressesForChain(fmt.Sprint(a.ChainClient.ChainID)) nodeDetail.TransmitterAddresses = make([]string, 0) if err != nil { return errors.Join(err, fmt.Errorf("failed to read Transmitter keys from node %d", i)) @@ -368,7 +347,7 @@ func (a *AutomationTest) AddBootstrapJob() error { ContractID: a.Registry.Address(), Relay: "evm", RelayConfig: map[string]interface{}{ - "chainID": int(a.ChainClient.GetChainID().Int64()), + "chainID": int(a.ChainClient.ChainID), }, ContractConfigTrackerPollInterval: *models.NewInterval(time.Second * 15), }, @@ -411,7 +390,7 @@ func (a *AutomationTest) AddAutomationJobs() error { ContractID: a.Registry.Address(), Relay: "evm", RelayConfig: map[string]interface{}{ - "chainID": int(a.ChainClient.GetChainID().Int64()), + "chainID": int(a.ChainClient.ChainID), }, PluginConfig: pluginCfg, ContractConfigTrackerPollInterval: *models.NewInterval(time.Second * 15), @@ -602,61 +581,134 @@ func calculateOCR3ConfigArgs(a *AutomationTest, S []int, oracleIdentities []conf ) } -func (a *AutomationTest) RegisterUpkeeps(upkeepConfigs []UpkeepConfig) ([]common.Hash, error) { - var registrarABI *abi.ABI - var err error - var registrationRequest []byte - registrationTxHashes := make([]common.Hash, 0) +type registrationResult struct { + txHash common.Hash +} + +func (r registrationResult) GetResult() common.Hash { + return r.txHash +} + +func (a *AutomationTest) RegisterUpkeeps(upkeepConfigs []UpkeepConfig, maxConcurrency int) 
([]common.Hash, error) { + concurrency, err := actions_seth.GetAndAssertCorrectConcurrency(a.ChainClient, 1) + if err != nil { + return nil, err + } + + if concurrency > maxConcurrency { + concurrency = maxConcurrency + a.Logger.Debug(). + Msgf("Concurrency is higher than max concurrency, setting concurrency to %d", concurrency) + } - for _, upkeepConfig := range upkeepConfigs { + var registerUpkeep = func(resultCh chan registrationResult, errorCh chan error, executorNum int, upkeepConfig UpkeepConfig) { + keyNum := executorNum + 1 // key 0 is the root key + var registrationRequest []byte + var registrarABI *abi.ABI + var err error switch a.RegistrySettings.RegistryVersion { case ethereum.RegistryVersion_2_0: registrarABI, err = keeper_registrar_wrapper2_0.KeeperRegistrarMetaData.GetAbi() if err != nil { - return nil, errors.Join(err, fmt.Errorf("failed to get registrar abi")) + errorCh <- errors.Join(err, fmt.Errorf("failed to get registrar abi")) + return } registrationRequest, err = registrarABI.Pack( - "register", upkeepConfig.UpkeepName, upkeepConfig.EncryptedEmail, - upkeepConfig.UpkeepContract, upkeepConfig.GasLimit, upkeepConfig.AdminAddress, + "register", + upkeepConfig.UpkeepName, + upkeepConfig.EncryptedEmail, + upkeepConfig.UpkeepContract, + upkeepConfig.GasLimit, + upkeepConfig.AdminAddress, upkeepConfig.CheckData, - upkeepConfig.OffchainConfig, upkeepConfig.FundingAmount, - common.HexToAddress(a.ChainClient.GetDefaultWallet().Address())) + upkeepConfig.OffchainConfig, + upkeepConfig.FundingAmount, + a.ChainClient.Addresses[keyNum]) if err != nil { - return nil, errors.Join(err, fmt.Errorf("failed to pack registrar request")) + errorCh <- errors.Join(err, fmt.Errorf("failed to pack registrar request")) + return } case ethereum.RegistryVersion_2_1, ethereum.RegistryVersion_2_2: // 2.1 and 2.2 use the same registrar registrarABI, err = automation_registrar_wrapper2_1.AutomationRegistrarMetaData.GetAbi() if err != nil { - return nil, errors.Join(err, 
fmt.Errorf("failed to get registrar abi")) + errorCh <- errors.Join(err, fmt.Errorf("failed to get registrar abi")) + return } registrationRequest, err = registrarABI.Pack( - "register", upkeepConfig.UpkeepName, upkeepConfig.EncryptedEmail, - upkeepConfig.UpkeepContract, upkeepConfig.GasLimit, upkeepConfig.AdminAddress, - upkeepConfig.TriggerType, upkeepConfig.CheckData, upkeepConfig.TriggerConfig, - upkeepConfig.OffchainConfig, upkeepConfig.FundingAmount, - common.HexToAddress(a.ChainClient.GetDefaultWallet().Address())) + "register", + upkeepConfig.UpkeepName, + upkeepConfig.EncryptedEmail, + upkeepConfig.UpkeepContract, + upkeepConfig.GasLimit, + upkeepConfig.AdminAddress, + upkeepConfig.TriggerType, + upkeepConfig.CheckData, + upkeepConfig.TriggerConfig, + upkeepConfig.OffchainConfig, + upkeepConfig.FundingAmount, + a.ChainClient.Addresses[keyNum]) if err != nil { - return nil, errors.Join(err, fmt.Errorf("failed to pack registrar request")) + errorCh <- errors.Join(err, fmt.Errorf("failed to pack registrar request")) + return } default: - return nil, fmt.Errorf("v2.0, v2.1, and v2.2 are the only supported versions") + errorCh <- fmt.Errorf("v2.0, v2.1, and v2.2 are the only supported versions") + return } - tx, err := a.LinkToken.TransferAndCall(a.Registrar.Address(), upkeepConfig.FundingAmount, registrationRequest) + + tx, err := a.LinkToken.TransferAndCallFromKey(a.Registrar.Address(), upkeepConfig.FundingAmount, registrationRequest, keyNum) if err != nil { - return nil, errors.Join(err, fmt.Errorf("failed to register upkeep")) + errorCh <- errors.Join(err, fmt.Errorf("client number %d failed to register upkeep %s", keyNum, upkeepConfig.UpkeepContract.Hex())) + return } - registrationTxHashes = append(registrationTxHashes, tx.Hash()) + + resultCh <- registrationResult{txHash: tx.Hash()} + } + + executor := ctf_concurrency.NewConcurrentExecutor[common.Hash, registrationResult, UpkeepConfig](a.Logger) + results, err := executor.Execute(concurrency, 
upkeepConfigs, registerUpkeep) + if err != nil { + return nil, err + } + + if len(results) != len(upkeepConfigs) { + return nil, fmt.Errorf("failed to register all upkeeps. Expected %d, got %d", len(upkeepConfigs), len(results)) } - return registrationTxHashes, nil + + a.Logger.Info().Msg("Successfully registered all upkeeps") + + return results, nil +} + +type UpkeepId = *big.Int + +type confirmationResult struct { + upkeepID UpkeepId +} + +func (c confirmationResult) GetResult() UpkeepId { + return c.upkeepID } -func (a *AutomationTest) ConfirmUpkeepsRegistered(registrationTxHashes []common.Hash) ([]*big.Int, error) { - upkeepIds := make([]*big.Int, 0) - for _, txHash := range registrationTxHashes { - receipt, err := a.ChainClient.GetTxReceipt(txHash) +func (a *AutomationTest) ConfirmUpkeepsRegistered(registrationTxHashes []common.Hash, maxConcurrency int) ([]*big.Int, error) { + concurrency, err := actions_seth.GetAndAssertCorrectConcurrency(a.ChainClient, 1) + if err != nil { + return nil, err + } + + if concurrency > maxConcurrency { + concurrency = maxConcurrency + a.Logger.Debug(). + Msgf("Concurrency is higher than max concurrency, setting concurrency to %d", concurrency) + } + + var confirmUpkeep = func(resultCh chan confirmationResult, errorCh chan error, _ int, txHash common.Hash) { + receipt, err := a.ChainClient.Client.TransactionReceipt(context.Background(), txHash) if err != nil { - return nil, errors.Join(err, fmt.Errorf("failed to confirm upkeep registration")) + errorCh <- errors.Join(err, fmt.Errorf("failed to confirm upkeep registration")) + return } + var upkeepId *big.Int for _, rawLog := range receipt.Logs { parsedUpkeepId, err := a.Registry.ParseUpkeepIdFromRegisteredLog(rawLog) @@ -666,12 +718,35 @@ func (a *AutomationTest) ConfirmUpkeepsRegistered(registrationTxHashes []common. 
} } if upkeepId == nil { - return nil, fmt.Errorf("failed to parse upkeep id from registration receipt") + errorCh <- fmt.Errorf("failed to parse upkeep id from registration receipt") + return + } + resultCh <- confirmationResult{upkeepID: upkeepId} + } + + executor := ctf_concurrency.NewConcurrentExecutor[UpkeepId, confirmationResult, common.Hash](a.Logger) + results, err := executor.Execute(concurrency, registrationTxHashes, confirmUpkeep) + + if err != nil { + return nil, fmt.Errorf("failed confirmations: %d | successful confirmations: %d", len(executor.GetErrors()), len(results)) + } + + if len(registrationTxHashes) != len(results) { + return nil, fmt.Errorf("failed to confirm all upkeeps. Expected %d, got %d", len(registrationTxHashes), len(results)) + } + + seen := make(map[string]bool) + for _, upkeepId := range results { + if seen[upkeepId.String()] { + return nil, fmt.Errorf("duplicate upkeep id: %s. Something went wrong during upkeep confirmation. Please check the test code", upkeepId.String()) } - upkeepIds = append(upkeepIds, upkeepId) + seen[upkeepId.String()] = true } - a.UpkeepIDs = upkeepIds - return upkeepIds, nil + + a.Logger.Info().Msg("Successfully confirmed all upkeeps") + a.UpkeepIDs = results + + return results, nil } func (a *AutomationTest) AddJobsAndSetConfig(t *testing.T) { @@ -754,5 +829,4 @@ func (a *AutomationTest) LoadAutomationDeployment(t *testing.T, linkTokenAddress require.NoError(t, err, "Error loading registrar contract") a.AddJobsAndSetConfig(t) - } diff --git a/integration-tests/actions/seth/actions.go b/integration-tests/actions/seth/actions.go index 7b128620158..c43ae820f7e 100644 --- a/integration-tests/actions/seth/actions.go +++ b/integration-tests/actions/seth/actions.go @@ -4,26 +4,40 @@ import ( "context" "crypto/ecdsa" "fmt" + "math" "math/big" + "strings" "testing" "time" + geth "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" 
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + gethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/rpc" "github.com/pkg/errors" "github.com/rs/zerolog" "github.com/smartcontractkit/seth" "github.com/test-go/testify/require" + "go.uber.org/zap/zapcore" + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + ctf_config "github.com/smartcontractkit/chainlink-testing-framework/config" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/testreporters" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/link_token_interface" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/operator_factory" "github.com/smartcontractkit/chainlink-testing-framework/utils/conversions" "github.com/smartcontractkit/chainlink-testing-framework/utils/testcontext" "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" + "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) var ContractDeploymentInterval = 200 @@ -51,9 +65,61 @@ func FundChainlinkNodes( nodes []contracts.ChainlinkNodeWithKeysAndAddress, privateKey *ecdsa.PrivateKey, amount *big.Float, +) error { + keyAddressFn := func(cl contracts.ChainlinkNodeWithKeysAndAddress) (string, error) { + return cl.PrimaryEthAddress() + } + return fundChainlinkNodesAtAnyKey(logger, client, nodes, privateKey, amount, keyAddressFn) +} + +// FundChainlinkNodesAtKeyIndexFromRootAddress sends native token amount (expressed in human-scale) to each Chainlink Node +// from root private key.It returns an error if any of the transactions failed. 
It sends the funds to +// node address at keyIndex (as each node can have multiple addresses). +func FundChainlinkNodesAtKeyIndexFromRootAddress( + logger zerolog.Logger, + client *seth.Client, + nodes []contracts.ChainlinkNodeWithKeysAndAddress, + amount *big.Float, + keyIndex int, +) error { + if len(client.PrivateKeys) == 0 { + return errors.Wrap(errors.New(seth.ErrNoKeyLoaded), fmt.Sprintf("requested key: %d", 0)) + } + + return FundChainlinkNodesAtKeyIndex(logger, client, nodes, client.PrivateKeys[0], amount, keyIndex) +} + +// FundChainlinkNodesAtKeyIndex sends native token amount (expressed in human-scale) to each Chainlink Node +// from private key's address. It returns an error if any of the transactions failed. It sends the funds to +// node address at keyIndex (as each node can have multiple addresses). +func FundChainlinkNodesAtKeyIndex( + logger zerolog.Logger, + client *seth.Client, + nodes []contracts.ChainlinkNodeWithKeysAndAddress, + privateKey *ecdsa.PrivateKey, + amount *big.Float, + keyIndex int, +) error { + keyAddressFn := func(cl contracts.ChainlinkNodeWithKeysAndAddress) (string, error) { + toAddress, err := cl.EthAddresses() + if err != nil { + return "", err + } + return toAddress[keyIndex], nil + } + return fundChainlinkNodesAtAnyKey(logger, client, nodes, privateKey, amount, keyAddressFn) +} + +func fundChainlinkNodesAtAnyKey( + logger zerolog.Logger, + client *seth.Client, + nodes []contracts.ChainlinkNodeWithKeysAndAddress, + privateKey *ecdsa.PrivateKey, + amount *big.Float, + keyAddressFn func(contracts.ChainlinkNodeWithKeysAndAddress) (string, error), ) error { for _, cl := range nodes { - toAddress, err := cl.PrimaryEthAddress() + toAddress, err := keyAddressFn(cl) if err != nil { return err } @@ -121,20 +187,27 @@ func SendFunds(logger zerolog.Logger, client *seth.Client, payload FundsToSendPa return nil, err } - gasLimit := uint64(client.Cfg.Network.TransferGasFee) + var gasLimit int64 + gasLimitRaw, err := 
client.EstimateGasLimitForFundTransfer(fromAddress, payload.ToAddress, payload.Amount) + if err != nil { + gasLimit = client.Cfg.Network.TransferGasFee + } else { + gasLimit = int64(gasLimitRaw) + } + gasPrice := big.NewInt(0) gasFeeCap := big.NewInt(0) gasTipCap := big.NewInt(0) if payload.GasLimit != nil { - gasLimit = uint64(*payload.GasLimit) + gasLimit = *payload.GasLimit } if client.Cfg.Network.EIP1559DynamicFees { // if any of the dynamic fees are not set, we need to either estimate them or read them from config if payload.GasFeeCap == nil || payload.GasTipCap == nil { - // estimatior or config reading happens here - txOptions := client.NewTXOpts(seth.WithGasLimit(gasLimit)) + // estimation or config reading happens here + txOptions := client.NewTXOpts(seth.WithGasLimit(uint64(gasLimit))) gasFeeCap = txOptions.GasFeeCap gasTipCap = txOptions.GasTipCap } @@ -149,7 +222,7 @@ func SendFunds(logger zerolog.Logger, client *seth.Client, payload FundsToSendPa } } else { if payload.GasPrice == nil { - txOptions := client.NewTXOpts((seth.WithGasLimit(gasLimit))) + txOptions := client.NewTXOpts(seth.WithGasLimit(uint64(gasLimit))) gasPrice = txOptions.GasPrice } else { gasPrice = payload.GasPrice @@ -163,7 +236,7 @@ func SendFunds(logger zerolog.Logger, client *seth.Client, payload FundsToSendPa Nonce: nonce, To: &payload.ToAddress, Value: payload.Amount, - Gas: gasLimit, + Gas: uint64(gasLimit), GasFeeCap: gasFeeCap, GasTipCap: gasTipCap, } @@ -172,7 +245,7 @@ func SendFunds(logger zerolog.Logger, client *seth.Client, payload FundsToSendPa Nonce: nonce, To: &payload.ToAddress, Value: payload.Amount, - Gas: gasLimit, + Gas: uint64(gasLimit), GasPrice: gasPrice, } } @@ -188,6 +261,18 @@ func SendFunds(logger zerolog.Logger, client *seth.Client, payload FundsToSendPa txTimeout = *payload.TxTimeout } + logger.Debug(). + Str("From", fromAddress.Hex()). + Str("To", payload.ToAddress.Hex()). 
+ Str("Amount (wei/ether)", fmt.Sprintf("%s/%s", payload.Amount, conversions.WeiToEther(payload.Amount).Text('f', -1))). + Uint64("Nonce", nonce). + Int64("Gas Limit", gasLimit). + Str("Gas Price", gasPrice.String()). + Str("Gas Fee Cap", gasFeeCap.String()). + Str("Gas Tip Cap", gasTipCap.String()). + Bool("Dynamic fees", client.Cfg.Network.EIP1559DynamicFees). + Msg("About to send funds") + ctx, cancel = context.WithTimeout(ctx, txTimeout) defer cancel() err = client.Client.SendTransaction(ctx, signedTx) @@ -199,9 +284,9 @@ func SendFunds(logger zerolog.Logger, client *seth.Client, payload FundsToSendPa Str("From", fromAddress.Hex()). Str("To", payload.ToAddress.Hex()). Str("TxHash", signedTx.Hash().String()). - Str("Amount", conversions.WeiToEther(payload.Amount).String()). + Str("Amount (wei/ether)", fmt.Sprintf("%s/%s", payload.Amount, conversions.WeiToEther(payload.Amount).Text('f', -1))). Uint64("Nonce", nonce). - Uint64("Gas Limit", gasLimit). + Int64("Gas Limit", gasLimit). Str("Gas Price", gasPrice.String()). Str("Gas Fee Cap", gasFeeCap.String()). Str("Gas Tip Cap", gasTipCap.String()). @@ -406,6 +491,43 @@ func ConfigureOCRv2AggregatorContracts( return nil } +// TeardownSuite tears down networks/clients and environment and creates a logs folder for failed tests in the +// specified path. 
Can also accept a testreporter (if one was used) to log further results +func TeardownSuite( + t *testing.T, + chainClient *seth.Client, + env *environment.Environment, + chainlinkNodes []*client.ChainlinkK8sClient, + optionalTestReporter testreporters.TestReporter, // Optionally pass in a test reporter to log further metrics + failingLogLevel zapcore.Level, // Examines logs after the test, and fails the test if any Chainlink logs are found at or above provided level + grafnaUrlProvider testreporters.GrafanaURLProvider, +) error { + l := logging.GetTestLogger(t) + if err := testreporters.WriteTeardownLogs(t, env, optionalTestReporter, failingLogLevel, grafnaUrlProvider); err != nil { + return fmt.Errorf("Error dumping environment logs, leaving environment running for manual retrieval, err: %w", err) + } + // Delete all jobs to stop depleting the funds + err := DeleteAllJobs(chainlinkNodes) + if err != nil { + l.Warn().Msgf("Error deleting jobs %+v", err) + } + + if chainlinkNodes != nil { + if err := ReturnFundsFromNodes(l, chainClient, contracts.ChainlinkK8sClientToChainlinkNodeWithKeysAndAddress(chainlinkNodes)); err != nil { + // This printed line is required for tests that use real funds to propagate the failure + // out to the system running the test. Do not remove + fmt.Println(environment.FAILED_FUND_RETURN) + l.Error().Err(err).Str("Namespace", env.Cfg.Namespace). + Msg("Error attempting to return funds from chainlink nodes to network's default wallet. 
" + + "Environment is left running so you can try manually!") + } + } else { + l.Info().Msg("Successfully returned funds from chainlink nodes to default network wallets") + } + + return env.Shutdown() +} + // TeardownRemoteSuite sends a report and returns funds from chainlink nodes to network's default wallet func TeardownRemoteSuite( t *testing.T, @@ -425,11 +547,12 @@ func TeardownRemoteSuite( l.Warn().Msgf("Error deleting jobs %+v", err) } - if err = ReturnFunds(l, client, contracts.ChainlinkK8sClientToChainlinkNodeWithKeysAndAddress(chainlinkNodes)); err != nil { + if err = ReturnFundsFromNodes(l, client, contracts.ChainlinkK8sClientToChainlinkNodeWithKeysAndAddress(chainlinkNodes)); err != nil { l.Error().Err(err).Str("Namespace", namespace). Msg("Error attempting to return funds from chainlink nodes to network's default wallet. " + "Environment is left running so you can try manually!") } + return err } @@ -647,3 +770,330 @@ func WatchNewFluxRound( } } } + +// EstimateCostForChainlinkOperations estimates the cost of running a number of operations on the Chainlink node based on estimated gas costs. It supports +// both legacy and EIP-1559 transactions. 
+func EstimateCostForChainlinkOperations(l zerolog.Logger, client *seth.Client, network blockchain.EVMNetwork, amountOfOperations int) (*big.Float, error) { + bigAmountOfOperations := big.NewInt(int64(amountOfOperations)) + estimations := client.CalculateGasEstimations(client.NewDefaultGasEstimationRequest()) + + gasLimit := network.GasEstimationBuffer + network.ChainlinkTransactionLimit + + var gasPriceInWei *big.Int + if client.Cfg.Network.EIP1559DynamicFees { + gasPriceInWei = estimations.GasFeeCap + } else { + gasPriceInWei = estimations.GasPrice + } + + gasCostPerOperationWei := big.NewInt(1).Mul(big.NewInt(1).SetUint64(gasLimit), gasPriceInWei) + gasCostPerOperationETH := conversions.WeiToEther(gasCostPerOperationWei) + // total Wei needed for all TXs = total value for TX * number of TXs + totalWeiForAllOperations := big.NewInt(1).Mul(gasCostPerOperationWei, bigAmountOfOperations) + totalEthForAllOperations := conversions.WeiToEther(totalWeiForAllOperations) + + l.Debug(). + Int("Number of Operations", amountOfOperations). + Uint64("Gas Limit per Operation", gasLimit). + Str("Value per Operation (ETH)", gasCostPerOperationETH.String()). + Str("Total (ETH)", totalEthForAllOperations.String()). 
+ Msg("Calculated ETH for Chainlink Operations") + + return totalEthForAllOperations, nil +} + +// GetLatestFinalizedBlockHeader returns latest finalised block header for given network (taking into account finality tag/depth) +func GetLatestFinalizedBlockHeader(ctx context.Context, client *seth.Client, network blockchain.EVMNetwork) (*types.Header, error) { + if network.FinalityTag { + return client.Client.HeaderByNumber(ctx, big.NewInt(rpc.FinalizedBlockNumber.Int64())) + } + if network.FinalityDepth == 0 { + return nil, fmt.Errorf("finality depth is 0 and finality tag is not enabled") + } + header, err := client.Client.HeaderByNumber(ctx, nil) + if err != nil { + return nil, err + } + latestBlockNumber := header.Number.Uint64() + finalizedBlockNumber := latestBlockNumber - network.FinalityDepth + return client.Client.HeaderByNumber(ctx, big.NewInt(int64(finalizedBlockNumber))) +} + +// SendLinkFundsToDeploymentAddresses sends LINK token to all addresses, but the root one, from the root address. It uses +// Multicall contract to batch all transfers in a single transaction. It also checks if the funds were transferred correctly. +// It's primary use case is to fund addresses that will be used for Upkeep registration (as that requires LINK balance) during +// Automation/Keeper test setup. 
+func SendLinkFundsToDeploymentAddresses( + chainClient *seth.Client, + concurrency, + totalUpkeeps, + operationsPerAddress int, + multicallAddress common.Address, + linkAmountPerUpkeep *big.Int, + linkToken contracts.LinkToken, +) error { + var generateCallData = func(receiver common.Address, amount *big.Int) ([]byte, error) { + abi, err := link_token_interface.LinkTokenMetaData.GetAbi() + if err != nil { + return nil, err + } + data, err := abi.Pack("transfer", receiver, amount) + if err != nil { + return nil, err + } + return data, nil + } + + toTransferToMultiCallContract := big.NewInt(0).Mul(linkAmountPerUpkeep, big.NewInt(int64(totalUpkeeps+concurrency))) + toTransferPerClient := big.NewInt(0).Mul(linkAmountPerUpkeep, big.NewInt(int64(operationsPerAddress+1))) + err := linkToken.Transfer(multicallAddress.Hex(), toTransferToMultiCallContract) + if err != nil { + return errors.Wrapf(err, "Error transferring LINK to multicall contract") + } + + balance, err := linkToken.BalanceOf(context.Background(), multicallAddress.Hex()) + if err != nil { + return errors.Wrapf(err, "Error getting LINK balance of multicall contract") + } + + if balance.Cmp(toTransferToMultiCallContract) < 0 { + return fmt.Errorf("Incorrect LINK balance of multicall contract. Expected at least: %s. 
Got: %s", toTransferToMultiCallContract.String(), balance.String()) + } + + // Transfer LINK to ephemeral keys + multiCallData := make([][]byte, 0) + for i := 1; i <= concurrency; i++ { + data, err := generateCallData(chainClient.Addresses[i], toTransferPerClient) + if err != nil { + return errors.Wrapf(err, "Error generating call data for LINK transfer") + } + multiCallData = append(multiCallData, data) + } + + var call []contracts.Call + for _, d := range multiCallData { + data := contracts.Call{Target: common.HexToAddress(linkToken.Address()), AllowFailure: false, CallData: d} + call = append(call, data) + } + + multiCallABI, err := abi.JSON(strings.NewReader(contracts.MultiCallABI)) + if err != nil { + return errors.Wrapf(err, "Error getting Multicall contract ABI") + } + boundContract := bind.NewBoundContract(multicallAddress, multiCallABI, chainClient.Client, chainClient.Client, chainClient.Client) + // call aggregate3 to group all msg call data and send them in a single transaction + _, err = chainClient.Decode(boundContract.Transact(chainClient.NewTXOpts(), "aggregate3", call)) + if err != nil { + return errors.Wrapf(err, "Error calling Multicall contract") + } + + for i := 1; i <= concurrency; i++ { + balance, err := linkToken.BalanceOf(context.Background(), chainClient.Addresses[i].Hex()) + if err != nil { + return errors.Wrapf(err, "Error getting LINK balance of ephemeral key %d", i) + } + if balance.Cmp(toTransferPerClient) < 0 { + return fmt.Errorf("Incorrect LINK balance after transfer. Ephemeral key %d. Expected: %s. Got: %s", i, toTransferPerClient.String(), balance.String()) + } + } + + return nil +} + +var noOpSethConfigFn = func(cfg *seth.Config) error { return nil } + +type SethConfigFunction = func(*seth.Config) error + +// OneEphemeralKeysLiveTestnetCheckFn checks whether there's at least one ephemeral key on a simulated network or at least one static key on a live network, +// and that there are no epehemeral keys on a live network. 
Root key is excluded from the check. +var OneEphemeralKeysLiveTestnetCheckFn = func(sethCfg *seth.Config) error { + concurrency := sethCfg.GetMaxConcurrency() + + if sethCfg.IsSimulatedNetwork() { + if concurrency < 1 { + return fmt.Errorf(INSUFFICIENT_EPHEMERAL_KEYS, 0) + } + + return nil + } + + if sethCfg.EphemeralAddrs != nil && int(*sethCfg.EphemeralAddrs) > 0 { + ephMsg := ` + Error: Ephemeral Addresses Detected on Live Network + + Ephemeral addresses are currently set for use on a live network, which is not permitted. The number of ephemeral addresses set is %d. Please make the following update to your TOML configuration file to correct this: + '[Seth] ephemeral_addresses_number = 0' + + Additionally, ensure the following requirements are met to run this test on a live network: + 1. Use more than one private key in your network configuration. + ` + + return errors.New(ephMsg) + } + + if concurrency < 1 { + return fmt.Errorf(INSUFFICIENT_STATIC_KEYS, len(sethCfg.Network.PrivateKeys)) + } + + return nil +} + +// OneEphemeralKeysLiveTestnetAutoFixFn checks whether there's at least one ephemeral key on a simulated network or at least one static key on a live network, +// and that there are no ephemeral keys on a live network (if ephemeral keys count is different from zero, it will disable them). Root key is excluded from the check. 
+var OneEphemeralKeysLiveTestnetAutoFixFn = func(sethCfg *seth.Config) error { + concurrency := sethCfg.GetMaxConcurrency() + + if sethCfg.IsSimulatedNetwork() { + if concurrency < 1 { + return fmt.Errorf(INSUFFICIENT_EPHEMERAL_KEYS, 0) + } + + return nil + } + + if sethCfg.EphemeralAddrs != nil && int(*sethCfg.EphemeralAddrs) > 0 { + var zero int64 = 0 + sethCfg.EphemeralAddrs = &zero + } + + if concurrency < 1 { + return fmt.Errorf(INSUFFICIENT_STATIC_KEYS, len(sethCfg.Network.PrivateKeys)) + } + + return nil +} + +// GetChainClient returns a seth client for the given network after validating the config +func GetChainClient(config ctf_config.SethConfig, network blockchain.EVMNetwork) (*seth.Client, error) { + return GetChainClientWithConfigFunction(config, network, noOpSethConfigFn) +} + +// GetChainClientWithConfigFunction returns a seth client for the given network after validating the config and applying the config function +func GetChainClientWithConfigFunction(config ctf_config.SethConfig, network blockchain.EVMNetwork, configFn SethConfigFunction) (*seth.Client, error) { + readSethCfg := config.GetSethConfig() + if readSethCfg == nil { + return nil, fmt.Errorf("Seth config not found") + } + + sethCfg, err := utils.MergeSethAndEvmNetworkConfigs(network, *readSethCfg) + if err != nil { + return nil, errors.Wrapf(err, "Error merging seth and evm network configs") + } + + err = configFn(&sethCfg) + if err != nil { + return nil, errors.Wrapf(err, "Error applying seth config function") + } + + err = utils.ValidateSethNetworkConfig(sethCfg.Network) + if err != nil { + return nil, errors.Wrapf(err, "Error validating seth network config") + } + + chainClient, err := seth.NewClientWithConfig(&sethCfg) + if err != nil { + return nil, errors.Wrapf(err, "Error creating seth client") + } + + return chainClient, nil +} + +// GenerateUpkeepReport generates a report of performed, successful, reverted and stale upkeeps for a given registry contract based on transaction logs. 
In case of test failure it can help us +// to triage the issue by providing more context. +func GenerateUpkeepReport(t *testing.T, chainClient *seth.Client, startBlock, endBlock *big.Int, instance contracts.KeeperRegistry, registryVersion ethereum.KeeperRegistryVersion) (performedUpkeeps, successfulUpkeeps, revertedUpkeeps, staleUpkeeps int, err error) { + registryLogs := []gethtypes.Log{} + l := logging.GetTestLogger(t) + + var ( + blockBatchSize int64 = 100 + logs []gethtypes.Log + timeout = 5 * time.Second + addr = common.HexToAddress(instance.Address()) + queryStartBlock = startBlock + ) + + // Gather logs from the registry in 100 block chunks to avoid read limits + for queryStartBlock.Cmp(endBlock) < 0 { + filterQuery := geth.FilterQuery{ + Addresses: []common.Address{addr}, + FromBlock: queryStartBlock, + ToBlock: big.NewInt(0).Add(queryStartBlock, big.NewInt(blockBatchSize)), + } + + // This RPC call can possibly time out or otherwise die. Failure is not an option, keep retrying to get our stats. + err = fmt.Errorf("initial error") // to ensure our for loop runs at least once + for err != nil { + ctx, cancel := context.WithTimeout(testcontext.Get(t), timeout) + logs, err = chainClient.Client.FilterLogs(ctx, filterQuery) + cancel() + if err != nil { + l.Error(). + Err(err). + Interface("Filter Query", filterQuery). + Str("Timeout", timeout.String()). + Msg("Error getting logs from chain, trying again") + timeout = time.Duration(math.Min(float64(timeout)*2, float64(2*time.Minute))) + continue + } + l.Info(). + Uint64("From Block", queryStartBlock.Uint64()). + Uint64("To Block", filterQuery.ToBlock.Uint64()). + Int("Log Count", len(logs)). + Str("Registry Address", addr.Hex()). + Msg("Collected logs") + queryStartBlock.Add(queryStartBlock, big.NewInt(blockBatchSize)) + registryLogs = append(registryLogs, logs...) 
+ } + } + + var contractABI *abi.ABI + contractABI, err = contracts.GetRegistryContractABI(registryVersion) + if err != nil { + return + } + + for _, allLogs := range registryLogs { + log := allLogs + var eventDetails *abi.Event + eventDetails, err = contractABI.EventByID(log.Topics[0]) + if err != nil { + l.Error().Err(err).Str("Log Hash", log.TxHash.Hex()).Msg("Error getting event details for log, report data inaccurate") + break + } + if eventDetails.Name == "UpkeepPerformed" { + performedUpkeeps++ + var parsedLog *contracts.UpkeepPerformedLog + parsedLog, err = instance.ParseUpkeepPerformedLog(&log) + if err != nil { + l.Error().Err(err).Str("Log Hash", log.TxHash.Hex()).Msg("Error parsing upkeep performed log, report data inaccurate") + break + } + if !parsedLog.Success { + revertedUpkeeps++ + } else { + successfulUpkeeps++ + } + } else if eventDetails.Name == "StaleUpkeepReport" { + staleUpkeeps++ + } + } + + return +} + +func GetStalenessReportCleanupFn(t *testing.T, logger zerolog.Logger, chainClient *seth.Client, startBlock uint64, registry contracts.KeeperRegistry, registryVersion ethereum.KeeperRegistryVersion) func() { + return func() { + if t.Failed() { + endBlock, err := chainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get end block") + + total, ok, reverted, stale, err := GenerateUpkeepReport(t, chainClient, big.NewInt(int64(startBlock)), big.NewInt(int64(endBlock)), registry, registryVersion) + require.NoError(t, err, "Failed to get staleness data") + if stale > 0 || reverted > 0 { + logger.Warn().Int("Total upkeeps", total).Int("Successful upkeeps", ok).Int("Reverted Upkeeps", reverted).Int("Stale Upkeeps", stale).Msg("Staleness data") + } else { + logger.Info().Int("Total upkeeps", total).Int("Successful upkeeps", ok).Int("Reverted Upkeeps", reverted).Int("Stale Upkeeps", stale).Msg("Staleness data") + } + } + } +} diff --git a/integration-tests/actions/seth/automation_ocr_helpers.go 
b/integration-tests/actions/seth/automation_ocr_helpers.go new file mode 100644 index 00000000000..160937869ab --- /dev/null +++ b/integration-tests/actions/seth/automation_ocr_helpers.go @@ -0,0 +1,176 @@ +package actions_seth + +import ( + "math" + "math/big" + "testing" + + "github.com/pkg/errors" + "github.com/smartcontractkit/seth" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/integration-tests/contracts" + "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" +) + +// DeployAutoOCRRegistryAndRegistrar registry and registrar +func DeployAutoOCRRegistryAndRegistrar( + t *testing.T, + client *seth.Client, + registryVersion ethereum.KeeperRegistryVersion, + registrySettings contracts.KeeperRegistrySettings, + linkToken contracts.LinkToken, +) (contracts.KeeperRegistry, contracts.KeeperRegistrar) { + registry := deployRegistry(t, client, registryVersion, registrySettings, linkToken) + registrar := deployRegistrar(t, client, registryVersion, registry, linkToken) + + return registry, registrar +} + +// DeployConsumers deploys and registers keeper consumers. If ephemeral addresses are enabled, it will deploy and register the consumers from ephemeral addresses, but each upkpeep will be registered with root key address as the admin. Which means +// that functions like setting upkeep configuration, pausing, unpausing, etc. will be done by the root key address. It deploys multicall contract and sends link funds to each deployment address. 
+func DeployConsumers(t *testing.T, chainClient *seth.Client, registry contracts.KeeperRegistry, registrar contracts.KeeperRegistrar, linkToken contracts.LinkToken, numberOfUpkeeps int, linkFundsForEachUpkeep *big.Int, upkeepGasLimit uint32, isLogTrigger bool, isMercury bool) ([]contracts.KeeperConsumer, []*big.Int) { + err := DeployMultiCallAndFundDeploymentAddresses(chainClient, linkToken, numberOfUpkeeps, linkFundsForEachUpkeep) + require.NoError(t, err, "Sending link funds to deployment addresses shouldn't fail") + + upkeeps := DeployKeeperConsumers(t, chainClient, numberOfUpkeeps, isLogTrigger, isMercury) + require.Equal(t, numberOfUpkeeps, len(upkeeps), "Number of upkeeps should match") + var upkeepsAddresses []string + for _, upkeep := range upkeeps { + upkeepsAddresses = append(upkeepsAddresses, upkeep.Address()) + } + upkeepIds := RegisterUpkeepContracts( + t, chainClient, linkToken, linkFundsForEachUpkeep, upkeepGasLimit, registry, registrar, numberOfUpkeeps, upkeepsAddresses, isLogTrigger, isMercury, + ) + require.Equal(t, numberOfUpkeeps, len(upkeepIds), "Number of upkeepIds should match") + return upkeeps, upkeepIds +} + +// DeployPerformanceConsumers deploys and registers keeper performance consumers. If ephemeral addresses are enabled, it will deploy and register the consumers from ephemeral addresses, but each upkeep will be registered with root key address as the admin. +// that functions like setting upkeep configuration, pausing, unpausing, etc. will be done by the root key address. It deploys multicall contract and sends link funds to each deployment address. 
+func DeployPerformanceConsumers( + t *testing.T, + chainClient *seth.Client, + registry contracts.KeeperRegistry, + registrar contracts.KeeperRegistrar, + linkToken contracts.LinkToken, + numberOfUpkeeps int, + linkFundsForEachUpkeep *big.Int, + upkeepGasLimit uint32, + blockRange, // How many blocks to run the test for + blockInterval, // Interval of blocks that upkeeps are expected to be performed + checkGasToBurn, // How much gas should be burned on checkUpkeep() calls + performGasToBurn int64, // How much gas should be burned on performUpkeep() calls +) ([]contracts.KeeperConsumerPerformance, []*big.Int) { + upkeeps := DeployKeeperConsumersPerformance( + t, chainClient, numberOfUpkeeps, blockRange, blockInterval, checkGasToBurn, performGasToBurn, + ) + + err := DeployMultiCallAndFundDeploymentAddresses(chainClient, linkToken, numberOfUpkeeps, linkFundsForEachUpkeep) + require.NoError(t, err, "Sending link funds to deployment addresses shouldn't fail") + + var upkeepsAddresses []string + for _, upkeep := range upkeeps { + upkeepsAddresses = append(upkeepsAddresses, upkeep.Address()) + } + upkeepIds := RegisterUpkeepContracts(t, chainClient, linkToken, linkFundsForEachUpkeep, upkeepGasLimit, registry, registrar, numberOfUpkeeps, upkeepsAddresses, false, false) + return upkeeps, upkeepIds +} + +// DeployPerformDataCheckerConsumers deploys and registers keeper performance data checkers consumers. If ephemeral addresses are enabled, it will deploy and register the consumers from ephemeral addresses, but each upkeep will be registered with root key address as the admin. Which means +// that functions like setting upkeep configuration, pausing, unpausing, etc. will be done by the root key address. It deploys multicall contract and sends link funds to each deployment address. 
+func DeployPerformDataCheckerConsumers( + t *testing.T, + chainClient *seth.Client, + registry contracts.KeeperRegistry, + registrar contracts.KeeperRegistrar, + linkToken contracts.LinkToken, + numberOfUpkeeps int, + linkFundsForEachUpkeep *big.Int, + upkeepGasLimit uint32, + expectedData []byte, +) ([]contracts.KeeperPerformDataChecker, []*big.Int) { + upkeeps := DeployPerformDataChecker(t, chainClient, numberOfUpkeeps, expectedData) + + err := DeployMultiCallAndFundDeploymentAddresses(chainClient, linkToken, numberOfUpkeeps, linkFundsForEachUpkeep) + require.NoError(t, err, "Sending link funds to deployment addresses shouldn't fail") + + var upkeepsAddresses []string + for _, upkeep := range upkeeps { + upkeepsAddresses = append(upkeepsAddresses, upkeep.Address()) + } + upkeepIds := RegisterUpkeepContracts(t, chainClient, linkToken, linkFundsForEachUpkeep, upkeepGasLimit, registry, registrar, numberOfUpkeeps, upkeepsAddresses, false, false) + return upkeeps, upkeepIds +} + +// DeployMultiCallAndFundDeploymentAddresses deploys multicall contract and sends link funds to each deployment address +func DeployMultiCallAndFundDeploymentAddresses( + chainClient *seth.Client, + linkToken contracts.LinkToken, + numberOfUpkeeps int, + linkFundsForEachUpkeep *big.Int, +) error { + concurrency, err := GetAndAssertCorrectConcurrency(chainClient, 1) + if err != nil { + return err + } + + operationsPerAddress := numberOfUpkeeps / concurrency + + multicallAddress, err := contracts.DeployMultiCallContract(chainClient) + if err != nil { + return errors.Wrap(err, "Error deploying multicall contract") + } + + return SendLinkFundsToDeploymentAddresses(chainClient, concurrency, numberOfUpkeeps, operationsPerAddress, multicallAddress, linkFundsForEachUpkeep, linkToken) +} + +func deployRegistrar( + t *testing.T, + client *seth.Client, + registryVersion ethereum.KeeperRegistryVersion, + registry contracts.KeeperRegistry, + linkToken contracts.LinkToken, +) contracts.KeeperRegistrar { + 
registrarSettings := contracts.KeeperRegistrarSettings{ + AutoApproveConfigType: 2, + AutoApproveMaxAllowed: math.MaxUint16, + RegistryAddr: registry.Address(), + MinLinkJuels: big.NewInt(0), + } + registrar, err := contracts.DeployKeeperRegistrar(client, registryVersion, linkToken.Address(), registrarSettings) + require.NoError(t, err, "Deploying KeeperRegistrar contract shouldn't fail") + return registrar +} + +func deployRegistry( + t *testing.T, + client *seth.Client, + registryVersion ethereum.KeeperRegistryVersion, + registrySettings contracts.KeeperRegistrySettings, + linkToken contracts.LinkToken, +) contracts.KeeperRegistry { + ef, err := contracts.DeployMockETHLINKFeed(client, big.NewInt(2e18)) + require.NoError(t, err, "Deploying mock ETH-Link feed shouldn't fail") + gf, err := contracts.DeployMockGASFeed(client, big.NewInt(2e11)) + require.NoError(t, err, "Deploying mock gas feed shouldn't fail") + + // Deploy the transcoder here, and then set it to the registry + transcoder, err := contracts.DeployUpkeepTranscoder(client) + require.NoError(t, err, "Deploying upkeep transcoder shouldn't fail") + + registry, err := contracts.DeployKeeperRegistry( + client, + &contracts.KeeperRegistryOpts{ + RegistryVersion: registryVersion, + LinkAddr: linkToken.Address(), + ETHFeedAddr: ef.Address(), + GasFeedAddr: gf.Address(), + TranscoderAddr: transcoder.Address(), + RegistrarAddr: ZeroAddress.Hex(), + Settings: registrySettings, + }, + ) + require.NoError(t, err, "Deploying KeeperRegistry contract shouldn't fail") + return registry +} diff --git a/integration-tests/actions/seth/keeper_helpers.go b/integration-tests/actions/seth/keeper_helpers.go new file mode 100644 index 00000000000..5e91d639d42 --- /dev/null +++ b/integration-tests/actions/seth/keeper_helpers.go @@ -0,0 +1,580 @@ +package actions_seth + +import ( + "context" + "fmt" + "math" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/pkg/errors" + 
"github.com/smartcontractkit/seth" + "github.com/stretchr/testify/require" + + ctf_concurrency "github.com/smartcontractkit/chainlink-testing-framework/concurrency" + "github.com/smartcontractkit/chainlink-testing-framework/logging" + + "github.com/smartcontractkit/chainlink/integration-tests/contracts" + "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" +) + +var ZeroAddress = common.Address{} + +// DeployKeeperContracts deploys keeper registry and a number of basic upkeep contracts with an update interval of 5. +// It returns the freshly deployed registry, registrar, consumers and the IDs of the upkeeps. +func DeployKeeperContracts( + t *testing.T, + registryVersion ethereum.KeeperRegistryVersion, + registrySettings contracts.KeeperRegistrySettings, + numberOfUpkeeps int, + upkeepGasLimit uint32, + linkToken contracts.LinkToken, + client *seth.Client, + linkFundsForEachUpkeep *big.Int, +) (contracts.KeeperRegistry, contracts.KeeperRegistrar, []contracts.KeeperConsumer, []*big.Int) { + ef, err := contracts.DeployMockETHLINKFeed(client, big.NewInt(2e18)) + require.NoError(t, err, "Deploying mock ETH-Link feed shouldn't fail") + gf, err := contracts.DeployMockGASFeed(client, big.NewInt(2e11)) + require.NoError(t, err, "Deploying mock gas feed shouldn't fail") + + // Deploy the transcoder here, and then set it to the registry + transcoder, err := contracts.DeployUpkeepTranscoder(client) + require.NoError(t, err, "Deploying UpkeepTranscoder contract shouldn't fail") + + registry, err := contracts.DeployKeeperRegistry( + client, + &contracts.KeeperRegistryOpts{ + RegistryVersion: registryVersion, + LinkAddr: linkToken.Address(), + ETHFeedAddr: ef.Address(), + GasFeedAddr: gf.Address(), + TranscoderAddr: transcoder.Address(), + RegistrarAddr: ZeroAddress.Hex(), + Settings: registrySettings, + }, + ) + require.NoError(t, err, "Deploying KeeperRegistry shouldn't fail") + + // Fund the registry with 1 LINK * amount of KeeperConsumerPerformance 
contracts + err = linkToken.Transfer(registry.Address(), big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(int64(numberOfUpkeeps)))) + require.NoError(t, err, "Funding keeper registry contract shouldn't fail") + + registrarSettings := contracts.KeeperRegistrarSettings{ + AutoApproveConfigType: 2, + AutoApproveMaxAllowed: math.MaxUint16, + RegistryAddr: registry.Address(), + MinLinkJuels: big.NewInt(0), + } + + registrar := DeployKeeperRegistrar(t, client, registryVersion, linkToken, registrarSettings, registry) + upkeeps, upkeepIds := DeployConsumers(t, client, registry, registrar, linkToken, numberOfUpkeeps, linkFundsForEachUpkeep, upkeepGasLimit, false, false) + + return registry, registrar, upkeeps, upkeepIds +} + +// DeployPerformanceKeeperContracts deploys a set amount of keeper performance contracts registered to a single registry +func DeployPerformanceKeeperContracts( + t *testing.T, + chainClient *seth.Client, + registryVersion ethereum.KeeperRegistryVersion, + numberOfContracts int, + upkeepGasLimit uint32, + linkToken contracts.LinkToken, + registrySettings *contracts.KeeperRegistrySettings, + linkFundsForEachUpkeep *big.Int, + blockRange, // How many blocks to run the test for + blockInterval, // Interval of blocks that upkeeps are expected to be performed + checkGasToBurn, // How much gas should be burned on checkUpkeep() calls + performGasToBurn int64, // How much gas should be burned on performUpkeep() calls +) (contracts.KeeperRegistry, contracts.KeeperRegistrar, []contracts.KeeperConsumerPerformance, []*big.Int) { + ef, err := contracts.DeployMockETHLINKFeed(chainClient, big.NewInt(2e18)) + require.NoError(t, err, "Deploying mock ETH-Link feed shouldn't fail") + gf, err := contracts.DeployMockGASFeed(chainClient, big.NewInt(2e11)) + require.NoError(t, err, "Deploying mock gas feed shouldn't fail") + + registry, err := contracts.DeployKeeperRegistry( + chainClient, + &contracts.KeeperRegistryOpts{ + RegistryVersion: registryVersion, + LinkAddr: 
linkToken.Address(), + ETHFeedAddr: ef.Address(), + GasFeedAddr: gf.Address(), + TranscoderAddr: ZeroAddress.Hex(), + RegistrarAddr: ZeroAddress.Hex(), + Settings: *registrySettings, + }, + ) + require.NoError(t, err, "Deploying KeeperRegistry shouldn't fail") + + // Fund the registry with 1 LINK * amount of KeeperConsumerPerformance contracts + err = linkToken.Transfer(registry.Address(), big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(int64(numberOfContracts)))) + require.NoError(t, err, "Funding keeper registry contract shouldn't fail") + + registrarSettings := contracts.KeeperRegistrarSettings{ + AutoApproveConfigType: 2, + AutoApproveMaxAllowed: math.MaxUint16, + RegistryAddr: registry.Address(), + MinLinkJuels: big.NewInt(0), + } + registrar := DeployKeeperRegistrar(t, chainClient, registryVersion, linkToken, registrarSettings, registry) + + err = DeployMultiCallAndFundDeploymentAddresses(chainClient, linkToken, numberOfContracts, linkFundsForEachUpkeep) + require.NoError(t, err, "Sending link funds to deployment addresses shouldn't fail") + + upkeeps := DeployKeeperConsumersPerformance( + t, chainClient, numberOfContracts, blockRange, blockInterval, checkGasToBurn, performGasToBurn, + ) + + var upkeepsAddresses []string + for _, upkeep := range upkeeps { + upkeepsAddresses = append(upkeepsAddresses, upkeep.Address()) + } + + upkeepIds := RegisterUpkeepContracts(t, chainClient, linkToken, linkFundsForEachUpkeep, upkeepGasLimit, registry, registrar, numberOfContracts, upkeepsAddresses, false, false) + + return registry, registrar, upkeeps, upkeepIds +} + +// DeployPerformDataCheckerContracts deploys a set amount of keeper perform data checker contracts registered to a single registry +func DeployPerformDataCheckerContracts( + t *testing.T, + chainClient *seth.Client, + registryVersion ethereum.KeeperRegistryVersion, + numberOfContracts int, + upkeepGasLimit uint32, + linkToken contracts.LinkToken, + registrySettings *contracts.KeeperRegistrySettings, + 
linkFundsForEachUpkeep *big.Int, + expectedData []byte, +) (contracts.KeeperRegistry, contracts.KeeperRegistrar, []contracts.KeeperPerformDataChecker, []*big.Int) { + ef, err := contracts.DeployMockETHLINKFeed(chainClient, big.NewInt(2e18)) + require.NoError(t, err, "Deploying mock ETH-Link feed shouldn't fail") + gf, err := contracts.DeployMockGASFeed(chainClient, big.NewInt(2e11)) + require.NoError(t, err, "Deploying mock gas feed shouldn't fail") + + registry, err := contracts.DeployKeeperRegistry( + chainClient, + &contracts.KeeperRegistryOpts{ + RegistryVersion: registryVersion, + LinkAddr: linkToken.Address(), + ETHFeedAddr: ef.Address(), + GasFeedAddr: gf.Address(), + TranscoderAddr: ZeroAddress.Hex(), + RegistrarAddr: ZeroAddress.Hex(), + Settings: *registrySettings, + }, + ) + require.NoError(t, err, "Deploying KeeperRegistry shouldn't fail") + + // Fund the registry with 1 LINK * amount of KeeperConsumerPerformance contracts + err = linkToken.Transfer(registry.Address(), big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(int64(numberOfContracts)))) + require.NoError(t, err, "Funding keeper registry contract shouldn't fail") + + registrarSettings := contracts.KeeperRegistrarSettings{ + AutoApproveConfigType: 2, + AutoApproveMaxAllowed: math.MaxUint16, + RegistryAddr: registry.Address(), + MinLinkJuels: big.NewInt(0), + } + + registrar := DeployKeeperRegistrar(t, chainClient, registryVersion, linkToken, registrarSettings, registry) + upkeeps := DeployPerformDataChecker(t, chainClient, numberOfContracts, expectedData) + + err = DeployMultiCallAndFundDeploymentAddresses(chainClient, linkToken, numberOfContracts, linkFundsForEachUpkeep) + require.NoError(t, err, "Sending link funds to deployment addresses shouldn't fail") + + var upkeepsAddresses []string + for _, upkeep := range upkeeps { + upkeepsAddresses = append(upkeepsAddresses, upkeep.Address()) + } + + upkeepIds := RegisterUpkeepContracts(t, chainClient, linkToken, linkFundsForEachUpkeep, upkeepGasLimit, 
registry, registrar, numberOfContracts, upkeepsAddresses, false, false) + + return registry, registrar, upkeeps, upkeepIds +} + +func DeployKeeperRegistrar( + t *testing.T, + client *seth.Client, + registryVersion ethereum.KeeperRegistryVersion, + linkToken contracts.LinkToken, + registrarSettings contracts.KeeperRegistrarSettings, + registry contracts.KeeperRegistry, +) contracts.KeeperRegistrar { + registrar, err := contracts.DeployKeeperRegistrar(client, registryVersion, linkToken.Address(), registrarSettings) + require.NoError(t, err, "Failed waiting for registrar to deploy") + if registryVersion != ethereum.RegistryVersion_2_0 { + err = registry.SetRegistrar(registrar.Address()) + require.NoError(t, err, "Registering the registrar address on the registry shouldn't fail") + } + + return registrar +} + +func RegisterUpkeepContracts(t *testing.T, client *seth.Client, linkToken contracts.LinkToken, linkFunds *big.Int, upkeepGasLimit uint32, registry contracts.KeeperRegistry, registrar contracts.KeeperRegistrar, numberOfContracts int, upkeepAddresses []string, isLogTrigger bool, isMercury bool) []*big.Int { + checkData := make([][]byte, 0) + for i := 0; i < numberOfContracts; i++ { + checkData = append(checkData, []byte("0")) + } + return RegisterUpkeepContractsWithCheckData( + t, client, linkToken, linkFunds, upkeepGasLimit, registry, registrar, + numberOfContracts, upkeepAddresses, checkData, isLogTrigger, isMercury) +} + +type upkeepRegistrationResult struct { + upkeepID UpkeepId +} + +func (r upkeepRegistrationResult) GetResult() *big.Int { + return r.upkeepID +} + +type upkeepConfig struct { + address string + data []byte +} + +type UpkeepId = *big.Int + +func RegisterUpkeepContractsWithCheckData(t *testing.T, client *seth.Client, linkToken contracts.LinkToken, linkFunds *big.Int, upkeepGasLimit uint32, registry contracts.KeeperRegistry, registrar contracts.KeeperRegistrar, numberOfContracts int, upkeepAddresses []string, checkData [][]byte, isLogTrigger bool, 
isMercury bool) []*big.Int { + l := logging.GetTestLogger(t) + + concurrency, err := GetAndAssertCorrectConcurrency(client, 1) + require.NoError(t, err, "Insufficient concurrency to execute action") + + executor := ctf_concurrency.NewConcurrentExecutor[UpkeepId, upkeepRegistrationResult, upkeepConfig](l) + + configs := make([]upkeepConfig, 0) + for i := 0; i < len(upkeepAddresses); i++ { + configs = append(configs, upkeepConfig{address: upkeepAddresses[i], data: checkData[i]}) + } + + var registerUpkeepFn = func(resultCh chan upkeepRegistrationResult, errorCh chan error, executorNum int, config upkeepConfig) { + uuid := uuid.New().String() + keyNum := executorNum + 1 // key 0 is the root key + + req, err := registrar.EncodeRegisterRequest( + fmt.Sprintf("upkeep_%s", uuid), + []byte("test@mail.com"), + config.address, + upkeepGasLimit, + client.MustGetRootKeyAddress().Hex(), // upkeep Admin + config.data, + linkFunds, + 0, + client.Addresses[keyNum].Hex(), + isLogTrigger, + isMercury, + ) + + if err != nil { + errorCh <- errors.Wrapf(err, "[uuid: %s] Failed to encode register request for upkeep at %s", uuid, config.address) + return + } + + balance, err := linkToken.BalanceOf(context.Background(), client.Addresses[keyNum].Hex()) + if err != nil { + errorCh <- errors.Wrapf(err, "[uuid: %s]Failed to get LINK balance of %s", uuid, client.Addresses[keyNum].Hex()) + return + } + + // not stricly necessary, but helps us to avoid an errorless revert if there is not enough LINK + if balance.Cmp(linkFunds) < 0 { + errorCh <- fmt.Errorf("[uuid: %s] Not enough LINK balance for %s. Has: %s. 
Needs: %s", uuid, client.Addresses[keyNum].Hex(), balance.String(), linkFunds.String()) + return + } + + tx, err := linkToken.TransferAndCallFromKey(registrar.Address(), linkFunds, req, keyNum) + if err != nil { + errorCh <- errors.Wrapf(err, "[uuid: %s] Failed to register upkeep at %s", uuid, config.address) + return + } + + receipt, err := client.Client.TransactionReceipt(context.Background(), tx.Hash()) + if err != nil { + errorCh <- errors.Wrapf(err, "[uuid: %s] Failed to get receipt for upkeep at %s and tx hash %s", uuid, config.address, tx.Hash()) + return + } + + var upkeepId *big.Int + for _, rawLog := range receipt.Logs { + parsedUpkeepId, err := registry.ParseUpkeepIdFromRegisteredLog(rawLog) + if err == nil { + upkeepId = parsedUpkeepId + break + } + } + + if upkeepId == nil { + errorCh <- errors.Wrapf(err, "[uuid: %s] Failed find upkeep ID for upkeep at %s in logs of tx with hash %s", uuid, config.address, tx.Hash()) + return + } + + l.Debug(). + Str("TxHash", tx.Hash().String()). + Str("Upkeep ID", upkeepId.String()). + Msg("Found upkeepId in tx hash") + + resultCh <- upkeepRegistrationResult{upkeepID: upkeepId} + } + + upkeepIds, err := executor.Execute(concurrency, configs, registerUpkeepFn) + require.NoError(t, err, "Failed to register upkeeps using executor") + + require.Equal(t, numberOfContracts, len(upkeepIds), "Incorrect number of Keeper Consumer Contracts registered") + l.Info().Msg("Successfully registered all Keeper Consumer Contracts") + + return upkeepIds +} + +type keeperConsumerResult struct { + contract contracts.KeeperConsumer +} + +func (k keeperConsumerResult) GetResult() contracts.KeeperConsumer { + return k.contract +} + +// DeployKeeperConsumers concurrently deploys keeper consumer contracts. It requires at least 1 ephemeral key to be present in Seth config. 
+func DeployKeeperConsumers(t *testing.T, client *seth.Client, numberOfContracts int, isLogTrigger bool, isMercury bool) []contracts.KeeperConsumer { + l := logging.GetTestLogger(t) + + concurrency, err := GetAndAssertCorrectConcurrency(client, 1) + require.NoError(t, err, "Insufficient concurrency to execute action") + + executor := ctf_concurrency.NewConcurrentExecutor[contracts.KeeperConsumer, keeperConsumerResult, ctf_concurrency.NoTaskType](l) + + var deployContractFn = func(channel chan keeperConsumerResult, errorCh chan error, executorNum int) { + keyNum := executorNum + 1 // key 0 is the root key + var keeperConsumerInstance contracts.KeeperConsumer + var err error + + if isMercury && isLogTrigger { + // v2.1 only: Log triggered based contract with Mercury enabled + keeperConsumerInstance, err = contracts.DeployAutomationLogTriggeredStreamsLookupUpkeepConsumerFromKey(client, keyNum) + } else if isMercury { + // v2.1 only: Conditional based contract with Mercury enabled + keeperConsumerInstance, err = contracts.DeployAutomationStreamsLookupUpkeepConsumerFromKey(client, keyNum, big.NewInt(1000), big.NewInt(5), false, true, false) // 1000 block test range + } else if isLogTrigger { + // v2.1 only: Log triggered based contract without Mercury + keeperConsumerInstance, err = contracts.DeployAutomationLogTriggerConsumerFromKey(client, keyNum, big.NewInt(1000)) // 1000 block test range + } else { + // v2.0 and v2.1: Conditional based contract without Mercury + keeperConsumerInstance, err = contracts.DeployUpkeepCounterFromKey(client, keyNum, big.NewInt(999999), big.NewInt(5)) + } + + require.NoError(t, err, "Deploying Consumer shouldn't fail") + + channel <- keeperConsumerResult{contract: keeperConsumerInstance} + } + + results, err := executor.ExecuteSimple(concurrency, numberOfContracts, deployContractFn) + require.NoError(t, err, "Failed to deploy keeper consumers") + + // require.Equal(t, 0, len(deplymentErrors), "Error deploying consumer contracts") + 
require.Equal(t, numberOfContracts, len(results), "Incorrect number of Keeper Consumer Contracts deployed") + l.Info().Msg("Successfully deployed all Keeper Consumer Contracts") + + return results +} + +// DeployKeeperConsumersPerformance sequentially deploys keeper performance consumer contracts. +func DeployKeeperConsumersPerformance( + t *testing.T, + client *seth.Client, + numberOfContracts int, + blockRange, // How many blocks to run the test for + blockInterval, // Interval of blocks that upkeeps are expected to be performed + checkGasToBurn, // How much gas should be burned on checkUpkeep() calls + performGasToBurn int64, // How much gas should be burned on performUpkeep() calls +) []contracts.KeeperConsumerPerformance { + l := logging.GetTestLogger(t) + upkeeps := make([]contracts.KeeperConsumerPerformance, 0) + + for contractCount := 0; contractCount < numberOfContracts; contractCount++ { + // Deploy consumer + keeperConsumerInstance, err := contracts.DeployKeeperConsumerPerformance( + client, + big.NewInt(blockRange), + big.NewInt(blockInterval), + big.NewInt(checkGasToBurn), + big.NewInt(performGasToBurn), + ) + require.NoError(t, err, "Deploying KeeperConsumerPerformance instance %d shouldn't fail", contractCount+1) + upkeeps = append(upkeeps, keeperConsumerInstance) + l.Debug(). + Str("Contract Address", keeperConsumerInstance.Address()). + Int("Number", contractCount+1). + Int("Out Of", numberOfContracts). + Msg("Deployed Keeper Performance Contract") + } + + require.Equal(t, numberOfContracts, len(upkeeps), "Incorrect number of consumers contracts deployed") + l.Info().Msg("Successfully deployed all Keeper Consumer Contracts") + + return upkeeps +} + +// DeployPerformDataChecker sequentially deploys keeper perform data checker contracts. 
+func DeployPerformDataChecker( + t *testing.T, + client *seth.Client, + numberOfContracts int, + expectedData []byte, +) []contracts.KeeperPerformDataChecker { + l := logging.GetTestLogger(t) + upkeeps := make([]contracts.KeeperPerformDataChecker, 0) + + for contractCount := 0; contractCount < numberOfContracts; contractCount++ { + performDataCheckerInstance, err := contracts.DeployKeeperPerformDataChecker(client, expectedData) + require.NoError(t, err, "Deploying KeeperPerformDataChecker instance %d shouldn't fail", contractCount+1) + upkeeps = append(upkeeps, performDataCheckerInstance) + l.Debug(). + Str("Contract Address", performDataCheckerInstance.Address()). + Int("Number", contractCount+1). + Int("Out Of", numberOfContracts). + Msg("Deployed PerformDataChecker Contract") + } + require.Equal(t, numberOfContracts, len(upkeeps), "Incorrect number of PerformDataChecker contracts deployed") + l.Info().Msg("Successfully deployed all PerformDataChecker Contracts") + + return upkeeps +} + +// DeployUpkeepCounters sequentially deploys a set amount of upkeep counter contracts. +func DeployUpkeepCounters( + t *testing.T, + client *seth.Client, + numberOfContracts int, + testRange *big.Int, + interval *big.Int, +) []contracts.UpkeepCounter { + l := logging.GetTestLogger(t) + upkeepCounters := make([]contracts.UpkeepCounter, 0) + + for contractCount := 0; contractCount < numberOfContracts; contractCount++ { + // Deploy consumer + upkeepCounter, err := contracts.DeployUpkeepCounter(client, testRange, interval) + require.NoError(t, err, "Deploying KeeperConsumer instance %d shouldn't fail", contractCount+1) + upkeepCounters = append(upkeepCounters, upkeepCounter) + l.Debug(). + Str("Contract Address", upkeepCounter.Address()). + Int("Number", contractCount+1). + Int("Out Of", numberOfContracts). 
+ Msg("Deployed Keeper Consumer Contract") + } + require.Equal(t, numberOfContracts, len(upkeepCounters), "Incorrect number of Keeper Consumer contracts deployed") + l.Info().Msg("Successfully deployed all Keeper Consumer Contracts") + + return upkeepCounters +} + +// DeployUpkeepPerformCounter sequentially deploys a set amount of upkeep perform counter restrictive contracts. +func DeployUpkeepPerformCounterRestrictive( + t *testing.T, + client *seth.Client, + numberOfContracts int, + testRange *big.Int, + averageEligibilityCadence *big.Int, +) []contracts.UpkeepPerformCounterRestrictive { + l := logging.GetTestLogger(t) + upkeepCounters := make([]contracts.UpkeepPerformCounterRestrictive, 0) + + for contractCount := 0; contractCount < numberOfContracts; contractCount++ { + // Deploy consumer + upkeepCounter, err := contracts.DeployUpkeepPerformCounterRestrictive(client, testRange, averageEligibilityCadence) + require.NoError(t, err, "Deploying KeeperConsumer instance %d shouldn't fail", contractCount+1) + upkeepCounters = append(upkeepCounters, upkeepCounter) + l.Debug(). + Str("Contract Address", upkeepCounter.Address()). + Int("Number", contractCount+1). + Int("Out Of", numberOfContracts). + Msg("Deployed Keeper Consumer Contract") + } + require.Equal(t, numberOfContracts, len(upkeepCounters), "Incorrect number of Keeper Consumer contracts deployed") + l.Info().Msg("Successfully deployed all Keeper Consumer Contracts") + + return upkeepCounters +} + +// RegisterNewUpkeeps concurrently registers the given amount of new upkeeps, using the registry and registrar, +// which are passed as parameters. It returns the newly deployed contracts (consumers), as well as their upkeep IDs. 
+func RegisterNewUpkeeps( + t *testing.T, + chainClient *seth.Client, + linkToken contracts.LinkToken, + registry contracts.KeeperRegistry, + registrar contracts.KeeperRegistrar, + upkeepGasLimit uint32, + numberOfNewUpkeeps int, +) ([]contracts.KeeperConsumer, []*big.Int) { + newlyDeployedUpkeeps := DeployKeeperConsumers(t, chainClient, numberOfNewUpkeeps, false, false) + + var addressesOfNewUpkeeps []string + for _, upkeep := range newlyDeployedUpkeeps { + addressesOfNewUpkeeps = append(addressesOfNewUpkeeps, upkeep.Address()) + } + + concurrency, err := GetAndAssertCorrectConcurrency(chainClient, 1) + require.NoError(t, err, "Insufficient concurrency to execute action") + + operationsPerAddress := numberOfNewUpkeeps / concurrency + + multicallAddress, err := contracts.DeployMultiCallContract(chainClient) + require.NoError(t, err, "Error deploying multicall contract") + + linkFundsForEachUpkeep := big.NewInt(9e18) + + err = SendLinkFundsToDeploymentAddresses(chainClient, concurrency, numberOfNewUpkeeps, operationsPerAddress, multicallAddress, linkFundsForEachUpkeep, linkToken) + require.NoError(t, err, "Sending link funds to deployment addresses shouldn't fail") + + newUpkeepIDs := RegisterUpkeepContracts(t, chainClient, linkToken, linkFundsForEachUpkeep, upkeepGasLimit, registry, registrar, numberOfNewUpkeeps, addressesOfNewUpkeeps, false, false) + + return newlyDeployedUpkeeps, newUpkeepIDs +} + +var INSUFFICIENT_EPHEMERAL_KEYS = ` +Error: Insufficient Ephemeral Addresses for Simulated Network + +To operate on a simulated network, you must configure at least one ephemeral address. Currently, %d ephemeral address(es) are set. Please update your TOML configuration file as follows to meet this requirement: +[Seth] ephemeral_addresses_number = 1 + +This adjustment ensures that your setup is minimaly viable. Although it is highly recommended to use at least 20 ephemeral addresses. 
+` + +var INSUFFICIENT_STATIC_KEYS = ` +Error: Insufficient Private Keys for Live Network + +To run this test on a live network, you must either: +1. Set at least two private keys in the '[Network.WalletKeys]' section of your TOML configuration file. Example format: + [Network.WalletKeys] + NETWORK_NAME=["PRIVATE_KEY_1", "PRIVATE_KEY_2"] +2. Set at least two private keys in the '[Network.EVMNetworks.NETWORK_NAME] section of your TOML configuration file. Example format: + evm_keys=["PRIVATE_KEY_1", "PRIVATE_KEY_2"] + +Currently, only %d private key/s is/are set. + +Recommended Action: +Distribute your funds across multiple private keys and update your configuration accordingly. Even though 1 private key is sufficient for testing, it is highly recommended to use at least 10 private keys. +` + +// GetAndAssertCorrectConcurrency checks Seth configuration for the number of ephemeral keys or static keys (depending on Seth configuration) and makes sure that +// the number is at least minConcurrency. If the number is less than minConcurrency, it returns an error. The root key is always excluded from the count. +func GetAndAssertCorrectConcurrency(client *seth.Client, minConcurrency int) (int, error) { + concurrency := client.Cfg.GetMaxConcurrency() + + var msg string + if client.Cfg.IsSimulatedNetwork() { + msg = fmt.Sprintf(INSUFFICIENT_EPHEMERAL_KEYS, concurrency) + } else { + msg = fmt.Sprintf(INSUFFICIENT_STATIC_KEYS, concurrency) + } + + if concurrency < minConcurrency { + return 0, fmt.Errorf(msg) + } + + return concurrency, nil +} diff --git a/integration-tests/actions/seth/refund.go b/integration-tests/actions/seth/refund.go index cca4659cb6d..bd15fe7a0c9 100644 --- a/integration-tests/actions/seth/refund.go +++ b/integration-tests/actions/seth/refund.go @@ -229,16 +229,16 @@ func (r *OvershotTransferRetrier) Retry(ctx context.Context, logger zerolog.Logg return txErr } -// ReturnFunds returns funds from the given chainlink nodes to the default network wallet. 
It will use a variety +// ReturnFundsFromNodes returns funds from the given chainlink nodes to the default network wallet. It will use a variety // of strategies to attempt to return funds, including retrying with less funds if the transaction fails due to // insufficient funds, and retrying with a higher gas limit if the transaction fails due to gas too low. -func ReturnFunds(log zerolog.Logger, sethClient *seth.Client, chainlinkNodes []contracts.ChainlinkNodeWithKeysAndAddress) error { - if sethClient == nil { +func ReturnFundsFromNodes(log zerolog.Logger, client *seth.Client, chainlinkNodes []contracts.ChainlinkNodeWithKeysAndAddress) error { + if client == nil { return fmt.Errorf("Seth client is nil, unable to return funds from chainlink nodes") } log.Info().Msg("Attempting to return Chainlink node funds to default network wallets") - if sethClient.Cfg.IsSimulatedNetwork() { - log.Info().Str("Network Name", sethClient.Cfg.Network.Name). + if client.Cfg.IsSimulatedNetwork() { + log.Info().Str("Network Name", client.Cfg.Network.Name). Msg("Network is a simulated network. 
Skipping fund return.") return nil } @@ -246,7 +246,7 @@ func ReturnFunds(log zerolog.Logger, sethClient *seth.Client, chainlinkNodes []c failedReturns := []common.Address{} for _, chainlinkNode := range chainlinkNodes { - fundedKeys, err := chainlinkNode.ExportEVMKeysForChain(fmt.Sprint(sethClient.ChainID)) + fundedKeys, err := chainlinkNode.ExportEVMKeysForChain(fmt.Sprint(client.ChainID)) if err != nil { return err } @@ -262,94 +262,117 @@ func ReturnFunds(log zerolog.Logger, sethClient *seth.Client, chainlinkNodes []c return err } - publicKey := decryptedKey.PrivateKey.Public() - publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey) - if !ok { - return errors.New("error casting public key to ECDSA") - } - fromAddress := crypto.PubkeyToAddress(*publicKeyECDSA) - - balance, err := sethClient.Client.BalanceAt(context.Background(), fromAddress, nil) + err = returnAllFundsIfPossible(log, client, decryptedKey.PrivateKey) if err != nil { - return err + log.Error().Err(err).Msg("Failed to return funds from Chainlink node to default network wallet") + publicKey := decryptedKey.PrivateKey.Public() + publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey) + if !ok { + return errors.New("error casting public key to ECDSA") + } + failedReturns = append(failedReturns, crypto.PubkeyToAddress(*publicKeyECDSA)) } + } + } - if balance.Cmp(big.NewInt(0)) == 0 { - log.Info(). - Str("Address", fromAddress.String()). - Msg("No balance to return. 
Skipping return.") - } + if len(failedReturns) > 0 { + return fmt.Errorf("failed to return funds from Chainlink nodes to default network wallet for addresses: %v", failedReturns) + } - // if not set, it will be just set to empty string, which is okay as long as gas estimation is disabled - txPriority := sethClient.Cfg.Network.GasPriceEstimationTxPriority - txTimeout := sethClient.Cfg.Network.TxnTimeout.Duration() + log.Info().Msg("Successfully returned funds from all Chainlink nodes to default network wallets") - if sethClient.Cfg.IsExperimentEnabled(seth.Experiment_SlowFundsReturn) { - txPriority = "slow" - thirtyMinutes := time.Duration(30 * time.Minute) - txTimeout = thirtyMinutes - } + return nil +} - estimations := sethClient.CalculateGasEstimations(seth.GasEstimationRequest{ - GasEstimationEnabled: sethClient.Cfg.Network.GasPriceEstimationEnabled, - FallbackGasPrice: sethClient.Cfg.Network.GasPrice, - FallbackGasFeeCap: sethClient.Cfg.Network.GasFeeCap, - FallbackGasTipCap: sethClient.Cfg.Network.GasTipCap, - Priority: txPriority, - }) - - var maxTotalGasCost *big.Int - if sethClient.Cfg.Network.EIP1559DynamicFees { - maxTotalGasCost = new(big.Int).Mul(big.NewInt(0).SetInt64(sethClient.Cfg.Network.TransferGasFee), estimations.GasFeeCap) - } else { - maxTotalGasCost = new(big.Int).Mul(big.NewInt(0).SetInt64(sethClient.Cfg.Network.TransferGasFee), estimations.GasPrice) - } +func returnAllFundsIfPossible(log zerolog.Logger, sethClient *seth.Client, fromPrivateKey *ecdsa.PrivateKey) error { + publicKey := fromPrivateKey.Public() + publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey) + if !ok { + return errors.New("error casting public key to ECDSA") + } - toSend := new(big.Int).Sub(balance, maxTotalGasCost) + fromAddress := crypto.PubkeyToAddress(*publicKeyECDSA) + balance, err := sethClient.Client.BalanceAt(context.Background(), fromAddress, nil) + if err != nil { + return err + } - if toSend.Cmp(big.NewInt(0)) <= 0 { - log.Warn(). 
- Str("Address", fromAddress.String()). - Str("Estimated maximum total gas cost", maxTotalGasCost.String()). - Str("Balance", balance.String()). - Str("To send", toSend.String()). - Msg("Not enough balance to cover gas cost. Skipping return.") + if balance.Cmp(big.NewInt(0)) == 0 { + log.Info(). + Str("Address", fromAddress.String()). + Msg("No balance to return. Skipping return.") + } - failedReturns = append(failedReturns, fromAddress) - continue - } + // if not set, it will be just set to empty string, which is okay as long as gas estimation is disabled + txPriority := sethClient.Cfg.Network.GasPriceEstimationTxPriority + txTimeout := sethClient.Cfg.Network.TxnTimeout.Duration() - payload := FundsToSendPayload{ - ToAddress: sethClient.Addresses[0], - Amount: toSend, - PrivateKey: decryptedKey.PrivateKey, - GasLimit: &sethClient.Cfg.Network.TransferGasFee, - GasPrice: estimations.GasPrice, - GasFeeCap: estimations.GasFeeCap, - GasTipCap: estimations.GasTipCap, - TxTimeout: &txTimeout, - } + if sethClient.Cfg.IsExperimentEnabled(seth.Experiment_SlowFundsReturn) { + txPriority = "slow" + thirtyMinutes := time.Duration(30 * time.Minute) + txTimeout = thirtyMinutes + } - _, err = SendFunds(log, sethClient, payload) - if err != nil { - handler := OvershotTransferRetrier{maxRetries: 10, nextRetrier: &InsufficientFundTransferRetrier{maxRetries: 10, nextRetrier: &GasTooLowTransferRetrier{maxGasLimit: sethClient.Cfg.Network.TransferGasFee * 10}}} - err = handler.Retry(context.Background(), log, sethClient, err, payload, 0) - if err != nil { - log.Error(). - Err(err). - Str("Address", fromAddress.String()). 
- Msg("Failed to return funds from Chainlink node to default network wallet") - failedReturns = append(failedReturns, fromAddress) - } - } - } + estimations := sethClient.CalculateGasEstimations(seth.GasEstimationRequest{ + GasEstimationEnabled: sethClient.Cfg.Network.GasPriceEstimationEnabled, + FallbackGasPrice: sethClient.Cfg.Network.GasPrice, + FallbackGasFeeCap: sethClient.Cfg.Network.GasFeeCap, + FallbackGasTipCap: sethClient.Cfg.Network.GasTipCap, + Priority: txPriority, + }) + + var gasLimit int64 + gasLimitRaw, err := sethClient.EstimateGasLimitForFundTransfer(fromAddress, sethClient.MustGetRootKeyAddress(), balance) + if err != nil { + gasLimit = sethClient.Cfg.Network.TransferGasFee + } else { + gasLimit = int64(gasLimitRaw) } - if len(failedReturns) > 0 { - return fmt.Errorf("failed to return funds from Chainlink nodes to default network wallet for addresses: %v", failedReturns) + var maxTotalGasCost *big.Int + if sethClient.Cfg.Network.EIP1559DynamicFees { + maxTotalGasCost = new(big.Int).Mul(big.NewInt(0).SetInt64(gasLimit), estimations.GasFeeCap) + } else { + maxTotalGasCost = new(big.Int).Mul(big.NewInt(0).SetInt64(gasLimit), estimations.GasPrice) } - log.Info().Msg("Successfully returned funds from all Chainlink nodes to default network wallets") + toSend := new(big.Int).Sub(balance, maxTotalGasCost) + + if toSend.Cmp(big.NewInt(0)) <= 0 { + log.Warn(). + Str("Address", fromAddress.String()). + Str("Estimated maximum total gas cost", maxTotalGasCost.String()). + Str("Balance", balance.String()). + Str("To send", toSend.String()). + Msg("Not enough balance to cover gas cost. 
Skipping return.") + + return nil + } + + payload := FundsToSendPayload{ + ToAddress: sethClient.MustGetRootKeyAddress(), + Amount: toSend, + PrivateKey: fromPrivateKey, + GasLimit: &gasLimit, + GasPrice: estimations.GasPrice, + GasFeeCap: estimations.GasFeeCap, + GasTipCap: estimations.GasTipCap, + TxTimeout: &txTimeout, + } + + _, err = SendFunds(log, sethClient, payload) + if err != nil { + handler := OvershotTransferRetrier{maxRetries: 10, nextRetrier: &InsufficientFundTransferRetrier{maxRetries: 10, nextRetrier: &GasTooLowTransferRetrier{maxGasLimit: gasLimit * 10}}} + err = handler.Retry(context.Background(), log, sethClient, err, payload, 0) + if err != nil { + log.Error(). + Err(err). + Str("Address", fromAddress.String()). + Msg("Failed to return funds from Chainlink node to default network wallet") + + return err + } + } return nil } diff --git a/integration-tests/benchmark/keeper_test.go b/integration-tests/benchmark/keeper_test.go index dad3d19610a..a3d50ae9874 100644 --- a/integration-tests/benchmark/keeper_test.go +++ b/integration-tests/benchmark/keeper_test.go @@ -13,20 +13,19 @@ import ( ctf_config "github.com/smartcontractkit/chainlink-testing-framework/config" env_client "github.com/smartcontractkit/chainlink-testing-framework/k8s/client" "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" - "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/cdk8s/blockscout" "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink" "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/ethereum" "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/reorg" "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/networks" - "github.com/smartcontractkit/chainlink/integration-tests/actions" + actions_seth "github.com/smartcontractkit/chainlink/integration-tests/actions/seth" 
"github.com/smartcontractkit/chainlink/integration-tests/contracts" eth_contracts "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" + tc "github.com/smartcontractkit/chainlink/integration-tests/testconfig" "github.com/smartcontractkit/chainlink/integration-tests/testsetups" "github.com/smartcontractkit/chainlink/integration-tests/types" - - tc "github.com/smartcontractkit/chainlink/integration-tests/testconfig" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ) var ( @@ -144,9 +143,11 @@ func TestAutomationBenchmark(t *testing.T) { benchmarkTestNetwork := getNetworkConfig(&config) l.Info().Str("Namespace", testEnvironment.Cfg.Namespace).Msg("Connected to Keepers Benchmark Environment") + testNetwork := utils.MustReplaceSimulatedNetworkUrlWithK8(l, benchmarkNetwork, *testEnvironment) + + chainClient, err := actions_seth.GetChainClientWithConfigFunction(&config, testNetwork, actions_seth.OneEphemeralKeysLiveTestnetAutoFixFn) + require.NoError(t, err, "Error getting Seth client") - chainClient, err := blockchain.NewEVMClient(benchmarkNetwork, testEnvironment, l) - require.NoError(t, err, "Error connecting to blockchain") registryVersions := addRegistry(&config) keeperBenchmarkTest := testsetups.NewKeeperBenchmarkTest(t, testsetups.KeeperBenchmarkTestInputs{ @@ -192,7 +193,7 @@ func TestAutomationBenchmark(t *testing.T) { }, ) t.Cleanup(func() { - if err = actions.TeardownRemoteSuite(keeperBenchmarkTest.TearDownVals(t)); err != nil { + if err = actions_seth.TeardownRemoteSuite(keeperBenchmarkTest.TearDownVals(t)); err != nil { l.Error().Err(err).Msg("Error when tearing down remote suite") } }) @@ -329,6 +330,10 @@ func SetupAutomationBenchmarkEnv(t *testing.T, keeperTestConfig types.KeeperBenc numberOfNodes++ } + networkName := strings.ReplaceAll(testNetwork.Name, " ", "-") + networkName = strings.ReplaceAll(networkName, "_", "-") + testNetwork.Name = networkName + testEnvironment := environment.New(&environment.Config{ TTL: 
time.Hour * 720, // 30 days, NamespacePrefix: fmt.Sprintf( @@ -383,20 +388,24 @@ func SetupAutomationBenchmarkEnv(t *testing.T, keeperTestConfig types.KeeperBenc }, }, "geth": map[string]interface{}{ - "blocktime": blockTime, + "blocktime": blockTime, + "capacity": "20Gi", + "startGaslimit": "20000000", + "targetGasLimit": "30000000", }, }, })) } + // TODO we need to update the image in CTF, the old one is not available anymore // deploy blockscout if running on simulated - if testNetwork.Simulated { - testEnvironment. - AddChart(blockscout.New(&blockscout.Props{ - Name: "geth-blockscout", - WsURL: testNetwork.URLs[0], - HttpURL: testNetwork.HTTPURLs[0]})) - } + // if testNetwork.Simulated { + // testEnvironment. + // AddChart(blockscout.New(&blockscout.Props{ + // Name: "geth-blockscout", + // WsURL: testNetwork.URLs[0], + // HttpURL: testNetwork.HTTPURLs[0]})) + // } err := testEnvironment.Run() require.NoError(t, err, "Error launching test environment") diff --git a/integration-tests/chaos/automation_chaos_test.go b/integration-tests/chaos/automation_chaos_test.go index 56b0d1e32b6..7989150fbf0 100644 --- a/integration-tests/chaos/automation_chaos_test.go +++ b/integration-tests/chaos/automation_chaos_test.go @@ -10,11 +10,9 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/zap/zapcore" - "github.com/smartcontractkit/chainlink-testing-framework/blockchain" ctf_config "github.com/smartcontractkit/chainlink-testing-framework/config" "github.com/smartcontractkit/chainlink-testing-framework/k8s/chaos" "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" - "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/cdk8s/blockscout" "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink" "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/ethereum" "github.com/smartcontractkit/chainlink-testing-framework/logging" @@ -23,9 +21,11 @@ import ( 
"github.com/smartcontractkit/chainlink-testing-framework/utils/testcontext" "github.com/smartcontractkit/chainlink/integration-tests/actions" + actions_seth "github.com/smartcontractkit/chainlink/integration-tests/actions/seth" "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" eth_contracts "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" + "github.com/smartcontractkit/chainlink/integration-tests/utils" tc "github.com/smartcontractkit/chainlink/integration-tests/testconfig" ) @@ -218,12 +218,15 @@ func TestAutomationChaos(t *testing.T) { Test: t, }). AddHelm(testCase.networkChart). - AddHelm(testCase.clChart). - AddChart(blockscout.New(&blockscout.Props{ - Name: "geth-blockscout", - WsURL: network.URL, - HttpURL: network.HTTPURLs[0], - })) + AddHelm(testCase.clChart) + // TODO we need to update the image in CTF, the old one is not available anymore + // deploy blockscout if running on simulated + // AddHelm(testCase.clChart). 
+ // AddChart(blockscout.New(&blockscout.Props{ + // Name: "geth-blockscout", + // WsURL: network.URL, + // HttpURL: network.HTTPURLs[0], + // }) err = testEnvironment.Run() require.NoError(t, err, "Error setting up test environment") if testEnvironment.WillUseRemoteRunner() { @@ -237,39 +240,34 @@ func TestAutomationChaos(t *testing.T) { err = testEnvironment.Client.LabelChaosGroup(testEnvironment.Cfg.Namespace, "instance=node-", 2, 5, ChaosGroupMajorityPlus) require.NoError(t, err) - chainClient, err := blockchain.NewEVMClient(network, testEnvironment, l) - require.NoError(t, err, "Error connecting to blockchain") - contractDeployer, err := contracts.NewContractDeployer(chainClient, l) - require.NoError(t, err, "Error building contract deployer") - chainlinkNodes, err := client.ConnectChainlinkNodes(testEnvironment) require.NoError(t, err, "Error connecting to Chainlink nodes") - chainClient.ParallelTransactions(true) + + network = utils.MustReplaceSimulatedNetworkUrlWithK8(l, network, *testEnvironment) + + chainClient, err := actions_seth.GetChainClientWithConfigFunction(&config, network, actions_seth.OneEphemeralKeysLiveTestnetAutoFixFn) + require.NoError(t, err, "Error creating seth client") // Register cleanup for any test t.Cleanup(func() { - if chainClient != nil { - chainClient.GasStats().PrintStats() - } - err := actions.TeardownSuite(t, testEnvironment, chainlinkNodes, nil, zapcore.PanicLevel, &config, chainClient) + err := actions_seth.TeardownSuite(t, chainClient, testEnvironment, chainlinkNodes, nil, zapcore.PanicLevel, &config) require.NoError(t, err, "Error tearing down environment") }) - txCost, err := chainClient.EstimateCostForChainlinkOperations(1000) + txCost, err := actions_seth.EstimateCostForChainlinkOperations(l, chainClient, network, 1000) require.NoError(t, err, "Error estimating cost for Chainlink Operations") - err = actions.FundChainlinkNodes(chainlinkNodes, chainClient, txCost) + err = actions_seth.FundChainlinkNodesFromRootAddress(l, 
chainClient, contracts.ChainlinkK8sClientToChainlinkNodeWithKeysAndAddress(chainlinkNodes), txCost) require.NoError(t, err, "Error funding Chainlink nodes") - linkToken, err := contractDeployer.DeployLinkTokenContract() + linkToken, err := contracts.DeployLinkTokenContract(l, chainClient) require.NoError(t, err, "Error deploying LINK token") - registry, registrar := actions.DeployAutoOCRRegistryAndRegistrar( + registry, registrar := actions_seth.DeployAutoOCRRegistryAndRegistrar( t, + chainClient, rv, defaultOCRRegistryConfig, linkToken, - contractDeployer, - chainClient, ) // Fund the registry with LINK @@ -287,11 +285,10 @@ func TestAutomationChaos(t *testing.T) { } else { err = registry.SetConfigTypeSafe(ocrConfig) } - require.NoError(t, err, "Registry config should be be set successfully") - require.NoError(t, chainClient.WaitForEvents(), "Waiting for config to be set") + require.NoError(t, err, "Error setting OCR config") - consumersConditional, upkeepidsConditional := actions.DeployConsumers(t, registry, registrar, linkToken, contractDeployer, chainClient, numberOfUpkeeps, big.NewInt(defaultLinkFunds), defaultUpkeepGasLimit, false, false) - consumersLogtrigger, upkeepidsLogtrigger := actions.DeployConsumers(t, registry, registrar, linkToken, contractDeployer, chainClient, numberOfUpkeeps, big.NewInt(defaultLinkFunds), defaultUpkeepGasLimit, true, false) + consumersConditional, upkeepidsConditional := actions_seth.DeployConsumers(t, chainClient, registry, registrar, linkToken, numberOfUpkeeps, big.NewInt(defaultLinkFunds), defaultUpkeepGasLimit, false, false) + consumersLogtrigger, upkeepidsLogtrigger := actions_seth.DeployConsumers(t, chainClient, registry, registrar, linkToken, numberOfUpkeeps, big.NewInt(defaultLinkFunds), defaultUpkeepGasLimit, true, false) consumers := append(consumersConditional, consumersLogtrigger...) upkeepIDs := append(upkeepidsConditional, upkeepidsLogtrigger...) 
diff --git a/integration-tests/chaos/ocr_chaos_test.go b/integration-tests/chaos/ocr_chaos_test.go index d2e7c540db5..da1356689ad 100644 --- a/integration-tests/chaos/ocr_chaos_test.go +++ b/integration-tests/chaos/ocr_chaos_test.go @@ -7,7 +7,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/onsi/gomega" - "github.com/smartcontractkit/seth" "github.com/stretchr/testify/require" ctfClient "github.com/smartcontractkit/chainlink-testing-framework/client" @@ -165,17 +164,11 @@ func TestOCRChaos(t *testing.T) { require.NoError(t, err) cfg := config.MustCopy().(tc.TestConfig) - readSethCfg := cfg.GetSethConfig() - require.NotNil(t, readSethCfg, "Seth config shouldn't be nil") network := networks.MustGetSelectedNetworkConfig(cfg.GetNetworkConfig())[0] network = utils.MustReplaceSimulatedNetworkUrlWithK8(l, network, *testEnvironment) - sethCfg, err := utils.MergeSethAndEvmNetworkConfigs(network, *readSethCfg) - require.NoError(t, err, "Error merging seth and evm network configs") - err = utils.ValidateSethNetworkConfig(sethCfg.Network) - require.NoError(t, err, "Error validating seth network config") - seth, err := seth.NewClientWithConfig(&sethCfg) + seth, err := actions_seth.GetChainClient(&cfg, network) require.NoError(t, err, "Error creating seth client") chainlinkNodes, err := client.ConnectChainlinkNodes(testEnvironment) diff --git a/integration-tests/contracts/contract_deployer.go b/integration-tests/contracts/contract_deployer.go index 9e9c429d3ed..5b40128d6aa 100644 --- a/integration-tests/contracts/contract_deployer.go +++ b/integration-tests/contracts/contract_deployer.go @@ -716,7 +716,7 @@ func (e *EthereumContractDeployer) DeployMockETHLINKFeed(answer *big.Int) (MockE if err != nil { return nil, err } - return &EthereumMockETHLINKFeed{ + return &LegacyEthereumMockETHLINKFeed{ client: e.client, feed: instance.(*mock_ethlink_aggregator_wrapper.MockETHLINKAggregator), address: address, @@ -751,7 +751,7 @@ func (e *EthereumContractDeployer) 
LoadETHLINKFeed(address common.Address) (Mock if err != nil { return nil, err } - return &EthereumMockETHLINKFeed{ + return &LegacyEthereumMockETHLINKFeed{ address: &address, client: e.client, feed: instance.(*mock_ethlink_aggregator_wrapper.MockETHLINKAggregator), @@ -768,7 +768,7 @@ func (e *EthereumContractDeployer) DeployMockGasFeed(answer *big.Int) (MockGasFe if err != nil { return nil, err } - return &EthereumMockGASFeed{ + return &LegacyEthereumMockGASFeed{ client: e.client, feed: instance.(*mock_gas_aggregator_wrapper.MockGASAggregator), address: address, @@ -786,7 +786,7 @@ func (e *EthereumContractDeployer) LoadGasFeed(address common.Address) (MockGasF if err != nil { return nil, err } - return &EthereumMockGASFeed{ + return &LegacyEthereumMockGASFeed{ address: &address, client: e.client, feed: instance.(*mock_gas_aggregator_wrapper.MockGASAggregator), @@ -805,7 +805,7 @@ func (e *EthereumContractDeployer) DeployUpkeepTranscoder() (UpkeepTranscoder, e return nil, err } - return &EthereumUpkeepTranscoder{ + return &LegacyEthereumUpkeepTranscoder{ client: e.client, transcoder: instance.(*upkeep_transcoder.UpkeepTranscoder), address: address, @@ -824,7 +824,7 @@ func (e *EthereumContractDeployer) LoadUpkeepTranscoder(address common.Address) return nil, err } - return &EthereumUpkeepTranscoder{ + return &LegacyEthereumUpkeepTranscoder{ client: e.client, transcoder: instance.(*upkeep_transcoder.UpkeepTranscoder), address: &address, @@ -848,7 +848,7 @@ func (e *EthereumContractDeployer) DeployKeeperRegistrar(registryVersion eth_con return nil, err } - return &EthereumKeeperRegistrar{ + return &LegacyEthereumKeeperRegistrar{ client: e.client, registrar20: instance.(*keeper_registrar_wrapper2_0.KeeperRegistrar), address: address, @@ -880,7 +880,7 @@ func (e *EthereumContractDeployer) DeployKeeperRegistrar(registryVersion eth_con return nil, err } - return &EthereumKeeperRegistrar{ + return &LegacyEthereumKeeperRegistrar{ client: e.client, registrar21: 
instance.(*registrar21.AutomationRegistrar), address: address, @@ -899,7 +899,7 @@ func (e *EthereumContractDeployer) DeployKeeperRegistrar(registryVersion eth_con return nil, err } - return &EthereumKeeperRegistrar{ + return &LegacyEthereumKeeperRegistrar{ client: e.client, registrar: instance.(*keeper_registrar_wrapper1_2.KeeperRegistrar), address: address, @@ -919,7 +919,7 @@ func (e *EthereumContractDeployer) LoadKeeperRegistrar(address common.Address, r if err != nil { return nil, err } - return &EthereumKeeperRegistrar{ + return &LegacyEthereumKeeperRegistrar{ address: &address, client: e.client, registrar: instance.(*keeper_registrar_wrapper1_2.KeeperRegistrar), @@ -934,7 +934,7 @@ func (e *EthereumContractDeployer) LoadKeeperRegistrar(address common.Address, r if err != nil { return nil, err } - return &EthereumKeeperRegistrar{ + return &LegacyEthereumKeeperRegistrar{ address: &address, client: e.client, registrar20: instance.(*keeper_registrar_wrapper2_0.KeeperRegistrar), @@ -949,7 +949,7 @@ func (e *EthereumContractDeployer) LoadKeeperRegistrar(address common.Address, r if err != nil { return nil, err } - return &EthereumKeeperRegistrar{ + return &LegacyEthereumKeeperRegistrar{ address: &address, client: e.client, registrar21: instance.(*registrar21.AutomationRegistrar), @@ -999,7 +999,7 @@ func (e *EthereumContractDeployer) DeployKeeperRegistry( if err != nil { return nil, err } - return &EthereumKeeperRegistry{ + return &LegacyEthereumKeeperRegistry{ client: e.client, version: eth_contracts.RegistryVersion_1_1, registry1_1: instance.(*keeper_registry_wrapper1_1.KeeperRegistry), @@ -1037,7 +1037,7 @@ func (e *EthereumContractDeployer) DeployKeeperRegistry( if err != nil { return nil, err } - return &EthereumKeeperRegistry{ + return &LegacyEthereumKeeperRegistry{ client: e.client, version: eth_contracts.RegistryVersion_1_2, registry1_1: nil, @@ -1095,7 +1095,7 @@ func (e *EthereumContractDeployer) DeployKeeperRegistry( if err != nil { return nil, err } - 
return &EthereumKeeperRegistry{ + return &LegacyEthereumKeeperRegistry{ client: e.client, version: eth_contracts.RegistryVersion_1_3, registry1_1: nil, @@ -1139,7 +1139,7 @@ func (e *EthereumContractDeployer) DeployKeeperRegistry( if err != nil { return nil, err } - return &EthereumKeeperRegistry{ + return &LegacyEthereumKeeperRegistry{ client: e.client, version: eth_contracts.RegistryVersion_2_0, registry2_0: instance.(*keeper_registry_wrapper2_0.KeeperRegistry), @@ -1217,7 +1217,7 @@ func (e *EthereumContractDeployer) DeployKeeperRegistry( if err != nil { return nil, err } - return &EthereumKeeperRegistry{ + return &LegacyEthereumKeeperRegistry{ client: e.client, version: eth_contracts.RegistryVersion_2_1, registry2_1: registryMaster, @@ -1349,7 +1349,7 @@ func (e *EthereumContractDeployer) DeployKeeperRegistry( return nil, err } - return &EthereumKeeperRegistry{ + return &LegacyEthereumKeeperRegistry{ client: e.client, version: eth_contracts.RegistryVersion_2_2, registry2_2: registryMaster, @@ -1392,7 +1392,7 @@ func (e *EthereumContractDeployer) LoadKeeperRegistry(address common.Address, re if err != nil { return nil, err } - return &EthereumKeeperRegistry{ + return &LegacyEthereumKeeperRegistry{ address: &address, client: e.client, registry1_1: instance.(*keeper_registry_wrapper1_1.KeeperRegistry), @@ -1408,7 +1408,7 @@ func (e *EthereumContractDeployer) LoadKeeperRegistry(address common.Address, re if err != nil { return nil, err } - return &EthereumKeeperRegistry{ + return &LegacyEthereumKeeperRegistry{ address: &address, client: e.client, registry1_2: instance.(*keeper_registry_wrapper1_2.KeeperRegistry), @@ -1424,7 +1424,7 @@ func (e *EthereumContractDeployer) LoadKeeperRegistry(address common.Address, re if err != nil { return nil, err } - return &EthereumKeeperRegistry{ + return &LegacyEthereumKeeperRegistry{ address: &address, client: e.client, registry1_3: instance.(*keeper_registry_wrapper1_3.KeeperRegistry), @@ -1440,7 +1440,7 @@ func (e 
*EthereumContractDeployer) LoadKeeperRegistry(address common.Address, re if err != nil { return nil, err } - return &EthereumKeeperRegistry{ + return &LegacyEthereumKeeperRegistry{ address: &address, client: e.client, registry2_0: instance.(*keeper_registry_wrapper2_0.KeeperRegistry), @@ -1456,7 +1456,7 @@ func (e *EthereumContractDeployer) LoadKeeperRegistry(address common.Address, re if err != nil { return nil, err } - return &EthereumKeeperRegistry{ + return &LegacyEthereumKeeperRegistry{ address: &address, client: e.client, registry2_1: instance.(*iregistry21.IKeeperRegistryMaster), @@ -1472,7 +1472,7 @@ func (e *EthereumContractDeployer) LoadKeeperRegistry(address common.Address, re if err != nil { return nil, err } - return &EthereumKeeperRegistry{ + return &LegacyEthereumKeeperRegistry{ address: &address, client: e.client, registry2_2: instance.(*iregistry22.IAutomationRegistryMaster), @@ -1512,7 +1512,7 @@ func (e *EthereumContractDeployer) DeployAutomationLogTriggerConsumer(testInterv if err != nil { return nil, err } - return &EthereumAutomationLogCounterConsumer{ + return &LegacyEthereumAutomationLogCounterConsumer{ client: e.client, consumer: instance.(*log_upkeep_counter_wrapper.LogUpkeepCounter), address: address, @@ -1531,7 +1531,7 @@ func (e *EthereumContractDeployer) DeployAutomationSimpleLogTriggerConsumer(isSt if err != nil { return nil, err } - return &EthereumAutomationSimpleLogCounterConsumer{ + return &LegacyEthereumAutomationSimpleLogCounterConsumer{ client: e.client, consumer: instance.(*simple_log_upkeep_counter_wrapper.SimpleLogUpkeepCounter), address: address, @@ -1550,7 +1550,7 @@ func (e *EthereumContractDeployer) DeployAutomationStreamsLookupUpkeepConsumer(t if err != nil { return nil, err } - return &EthereumAutomationStreamsLookupUpkeepConsumer{ + return &LegacyEthereumAutomationStreamsLookupUpkeepConsumer{ client: e.client, consumer: instance.(*streams_lookup_upkeep_wrapper.StreamsLookupUpkeep), address: address, @@ -1569,7 +1569,7 
@@ func (e *EthereumContractDeployer) DeployAutomationLogTriggeredStreamsLookupUpke if err != nil { return nil, err } - return &EthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer{ + return &LegacyEthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer{ client: e.client, consumer: instance.(*log_triggered_streams_lookup_wrapper.LogTriggeredStreamsLookup), address: address, @@ -1586,7 +1586,7 @@ func (e *EthereumContractDeployer) DeployUpkeepCounter(testRange *big.Int, inter if err != nil { return nil, err } - return &EthereumUpkeepCounter{ + return &LegacyEthereumUpkeepCounter{ client: e.client, consumer: instance.(*upkeep_counter_wrapper.UpkeepCounter), address: address, @@ -1603,7 +1603,7 @@ func (e *EthereumContractDeployer) DeployUpkeepPerformCounterRestrictive(testRan if err != nil { return nil, err } - return &EthereumUpkeepPerformCounterRestrictive{ + return &LegacyEthereumUpkeepPerformCounterRestrictive{ client: e.client, consumer: instance.(*upkeep_perform_counter_restrictive_wrapper.UpkeepPerformCounterRestrictive), address: address, @@ -1632,7 +1632,7 @@ func (e *EthereumContractDeployer) DeployKeeperConsumerPerformance( if err != nil { return nil, err } - return &EthereumKeeperConsumerPerformance{ + return &LegacyEthereumKeeperConsumerPerformance{ client: e.client, consumer: instance.(*keeper_consumer_performance_wrapper.KeeperConsumerPerformance), address: address, @@ -1652,7 +1652,7 @@ func (e *EthereumContractDeployer) DeployKeeperConsumerBenchmark() (AutomationCo if err != nil { return nil, err } - return &EthereumAutomationConsumerBenchmark{ + return &LegacyEthereumAutomationConsumerBenchmark{ client: e.client, consumer: instance.(*automation_consumer_benchmark.AutomationConsumerBenchmark), address: address, @@ -1670,7 +1670,7 @@ func (e *EthereumContractDeployer) LoadKeeperConsumerBenchmark(address common.Ad if err != nil { return nil, err } - return &EthereumAutomationConsumerBenchmark{ + return &LegacyEthereumAutomationConsumerBenchmark{ 
address: &address, client: e.client, consumer: instance.(*automation_consumer_benchmark.AutomationConsumerBenchmark), @@ -1691,7 +1691,7 @@ func (e *EthereumContractDeployer) DeployKeeperPerformDataChecker(expectedData [ if err != nil { return nil, err } - return &EthereumKeeperPerformDataCheckerConsumer{ + return &LegacyEthereumKeeperPerformDataCheckerConsumer{ client: e.client, performDataChecker: instance.(*perform_data_checker_wrapper.PerformDataChecker), address: address, @@ -1903,7 +1903,7 @@ func (e *EthereumContractDeployer) DeployLogEmitterContract() (LogEmitter, error if err != nil { return nil, err } - return &LogEmitterContract{ + return &LegacyLogEmitterContract{ client: e.client, instance: instance.(*le.LogEmitter), address: *address, diff --git a/integration-tests/contracts/contract_models.go b/integration-tests/contracts/contract_models.go index c61356130ee..b548ec1427a 100644 --- a/integration-tests/contracts/contract_models.go +++ b/integration-tests/contracts/contract_models.go @@ -79,6 +79,7 @@ type LinkToken interface { Transfer(to string, amount *big.Int) error BalanceOf(ctx context.Context, addr string) (*big.Int, error) TransferAndCall(to string, amount *big.Int, data []byte) (*types.Transaction, error) + TransferAndCallFromKey(to string, amount *big.Int, data []byte, keyNum int) (*types.Transaction, error) Name(context.Context) (string, error) } @@ -140,6 +141,7 @@ type ChainlinkNodeWithKeysAndAddress interface { MustReadP2PKeys() (*client.P2PKeys, error) ExportEVMKeysForChain(string) ([]*client.ExportedEVMKey, error) PrimaryEthAddress() (string, error) + EthAddresses() ([]string, error) } type ChainlinkNodeWithForwarder interface { @@ -423,6 +425,10 @@ type LogEmitter interface { EmitLogIntsIndexed(ints []int) (*types.Transaction, error) EmitLogIntMultiIndexed(ints int, ints2 int, count int) (*types.Transaction, error) EmitLogStrings(strings []string) (*types.Transaction, error) + EmitLogIntsFromKey(ints []int, keyNum int) 
(*types.Transaction, error) + EmitLogIntsIndexedFromKey(ints []int, keyNum int) (*types.Transaction, error) + EmitLogIntMultiIndexedFromKey(ints int, ints2 int, count int, keyNum int) (*types.Transaction, error) + EmitLogStringsFromKey(strings []string, keyNum int) (*types.Transaction, error) EmitLogInt(payload int) (*types.Transaction, error) EmitLogIntIndexed(payload int) (*types.Transaction, error) EmitLogString(strings string) (*types.Transaction, error) diff --git a/integration-tests/contracts/ethereum_contracts.go b/integration-tests/contracts/ethereum_contracts.go index adf4dcffe80..4ee9f842873 100644 --- a/integration-tests/contracts/ethereum_contracts.go +++ b/integration-tests/contracts/ethereum_contracts.go @@ -21,6 +21,7 @@ import ( ocrTypes "github.com/smartcontractkit/libocr/offchainreporting/types" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + contractsethereum "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/functions/generated/functions_coordinator" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/functions/generated/functions_load_test_client" @@ -34,9 +35,15 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/gas_wrapper" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/gas_wrapper_mock" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/i_automation_registry_master_wrapper_2_2" + iregistry22 "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/i_automation_registry_master_wrapper_2_2" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" + iregistry21 "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registrar_wrapper1_2_mock" + 
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_wrapper1_1" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_wrapper1_1_mock" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_wrapper1_2" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_wrapper1_3" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_wrapper2_0" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/link_token_interface" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/mock_aggregator_proxy" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/mock_ethlink_aggregator_wrapper" @@ -1291,6 +1298,10 @@ func (l *LegacyEthereumLinkToken) TransferAndCall(to string, amount *big.Int, da return tx, l.client.ProcessTransaction(tx) } +func (l *LegacyEthereumLinkToken) TransferAndCallFromKey(_ string, _ *big.Int, _ []byte, _ int) (*types.Transaction, error) { + panic("supported only with Seth") +} + // LegacyEthereumOffchainAggregator represents the offchain aggregation contract // Deprecated: we are moving away from blockchain.EVMClient, use EthereumOffchainAggregator instead type LegacyEthereumOffchainAggregator struct { @@ -1714,18 +1725,18 @@ func (o *OffchainAggregatorV2RoundConfirmer) Complete() bool { return o.complete } -// EthereumMockETHLINKFeed represents mocked ETH/LINK feed contract -type EthereumMockETHLINKFeed struct { +// LegacyEthereumMockETHLINKFeed represents mocked ETH/LINK feed contract +type LegacyEthereumMockETHLINKFeed struct { client blockchain.EVMClient feed *mock_ethlink_aggregator_wrapper.MockETHLINKAggregator address *common.Address } -func (v *EthereumMockETHLINKFeed) Address() string { +func (v *LegacyEthereumMockETHLINKFeed) Address() string { return v.address.Hex() } -func (v *EthereumMockETHLINKFeed) LatestRoundData() (*big.Int, error) { 
+func (v *LegacyEthereumMockETHLINKFeed) LatestRoundData() (*big.Int, error) { data, err := v.feed.LatestRoundData(&bind.CallOpts{ From: common.HexToAddress(v.client.GetDefaultWallet().Address()), Context: context.Background(), @@ -1736,7 +1747,7 @@ func (v *EthereumMockETHLINKFeed) LatestRoundData() (*big.Int, error) { return data.Ans, nil } -func (v *EthereumMockETHLINKFeed) LatestRoundDataUpdatedAt() (*big.Int, error) { +func (v *LegacyEthereumMockETHLINKFeed) LatestRoundDataUpdatedAt() (*big.Int, error) { data, err := v.feed.LatestRoundData(&bind.CallOpts{ From: common.HexToAddress(v.client.GetDefaultWallet().Address()), Context: context.Background(), @@ -1747,14 +1758,14 @@ func (v *EthereumMockETHLINKFeed) LatestRoundDataUpdatedAt() (*big.Int, error) { return data.UpdatedAt, nil } -// EthereumMockGASFeed represents mocked Gas feed contract -type EthereumMockGASFeed struct { +// LegacyEthereumMockGASFeed represents mocked Gas feed contract +type LegacyEthereumMockGASFeed struct { client blockchain.EVMClient feed *mock_gas_aggregator_wrapper.MockGASAggregator address *common.Address } -func (v *EthereumMockGASFeed) Address() string { +func (v *LegacyEthereumMockGASFeed) Address() string { return v.address.Hex() } @@ -2513,3 +2524,28 @@ func V1OffChainAgrregatorToOffChainAggregatorWithRounds(contracts []OffchainAggr return contractsAsInterface } + +func GetRegistryContractABI(version contractsethereum.KeeperRegistryVersion) (*abi.ABI, error) { + var ( + contractABI *abi.ABI + err error + ) + switch version { + case contractsethereum.RegistryVersion_1_0, contractsethereum.RegistryVersion_1_1: + contractABI, err = keeper_registry_wrapper1_1.KeeperRegistryMetaData.GetAbi() + case contractsethereum.RegistryVersion_1_2: + contractABI, err = keeper_registry_wrapper1_2.KeeperRegistryMetaData.GetAbi() + case contractsethereum.RegistryVersion_1_3: + contractABI, err = keeper_registry_wrapper1_3.KeeperRegistryMetaData.GetAbi() + case contractsethereum.RegistryVersion_2_0: 
+ contractABI, err = keeper_registry_wrapper2_0.KeeperRegistryMetaData.GetAbi() + case contractsethereum.RegistryVersion_2_1: + contractABI, err = iregistry21.IKeeperRegistryMasterMetaData.GetAbi() + case contractsethereum.RegistryVersion_2_2: + contractABI, err = iregistry22.IAutomationRegistryMasterMetaData.GetAbi() + default: + contractABI, err = keeper_registry_wrapper2_0.KeeperRegistryMetaData.GetAbi() + } + + return contractABI, err +} diff --git a/integration-tests/contracts/ethereum_contracts_automation_seth.go b/integration-tests/contracts/ethereum_contracts_automation_seth.go new file mode 100644 index 00000000000..5f5bc0eaf06 --- /dev/null +++ b/integration-tests/contracts/ethereum_contracts_automation_seth.go @@ -0,0 +1,2514 @@ +package contracts + +import ( + "context" + "errors" + "fmt" + "math/big" + "strconv" + "strings" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/rs/zerolog" + "github.com/smartcontractkit/seth" + + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + "github.com/smartcontractkit/chainlink-testing-framework/networks" + "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" + eth_contracts "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" + "github.com/smartcontractkit/chainlink/integration-tests/testreporters" + "github.com/smartcontractkit/chainlink/integration-tests/wrappers" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/arbitrum_module" + acutils "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/automation_compatible_utils" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/automation_consumer_benchmark" + automationForwarderLogic 
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/automation_forwarder_logic" + registrar21 "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/automation_registrar_wrapper2_1" + registrylogica22 "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/automation_registry_logic_a_wrapper_2_2" + registrylogicb22 "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/automation_registry_logic_b_wrapper_2_2" + registry22 "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/automation_registry_wrapper_2_2" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/chain_module_base" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/i_automation_registry_master_wrapper_2_2" + iregistry22 "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/i_automation_registry_master_wrapper_2_2" + ac "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/i_automation_v21_plus_common" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/i_chain_module" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" + iregistry21 "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_consumer_performance_wrapper" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registrar_wrapper1_2" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registrar_wrapper2_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_logic1_3" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_logic2_0" + registrylogica21 "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_logic_a_wrapper_2_1" + registrylogicb21 
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_logic_b_wrapper_2_1" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_wrapper1_1" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_wrapper1_2" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_wrapper1_3" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_wrapper2_0" + registry21 "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_wrapper_2_1" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_triggered_streams_lookup_wrapper" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_upkeep_counter_wrapper" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/optimism_module" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/perform_data_checker_wrapper" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/scroll_module" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/simple_log_upkeep_counter_wrapper" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/streams_lookup_upkeep_wrapper" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/upkeep_counter_wrapper" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/upkeep_perform_counter_restrictive_wrapper" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/upkeep_transcoder" +) + +// EthereumUpkeepTranscoder represents the transcoder which is used to perform migrations +// of upkeeps from one registry to another. 
+type EthereumUpkeepTranscoder struct { + client *seth.Client + transcoder *upkeep_transcoder.UpkeepTranscoder + address *common.Address +} + +func (v *EthereumUpkeepTranscoder) Address() string { + return v.address.Hex() +} + +func DeployUpkeepTranscoder(client *seth.Client) (*EthereumUpkeepTranscoder, error) { + abi, err := upkeep_transcoder.UpkeepTranscoderMetaData.GetAbi() + if err != nil { + return &EthereumUpkeepTranscoder{}, fmt.Errorf("failed to get UpkeepTranscoder ABI: %w", err) + } + transcoderDeploymentData, err := client.DeployContract(client.NewTXOpts(), "UpkeepTranscoder", *abi, common.FromHex(upkeep_transcoder.UpkeepTranscoderMetaData.Bin)) + if err != nil { + return &EthereumUpkeepTranscoder{}, fmt.Errorf("UpkeepTranscoder instance deployment have failed: %w", err) + } + + transcoder, err := upkeep_transcoder.NewUpkeepTranscoder(transcoderDeploymentData.Address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumUpkeepTranscoder{}, fmt.Errorf("failed to instantiate UpkeepTranscoder instance: %w", err) + } + + return &EthereumUpkeepTranscoder{ + client: client, + transcoder: transcoder, + address: &transcoderDeploymentData.Address, + }, nil +} + +func LoadUpkeepTranscoder(client *seth.Client, address common.Address) (*EthereumUpkeepTranscoder, error) { + abi, err := upkeep_transcoder.UpkeepTranscoderMetaData.GetAbi() + if err != nil { + return &EthereumUpkeepTranscoder{}, fmt.Errorf("failed to get UpkeepTranscoder ABI: %w", err) + } + + client.ContractStore.AddABI("UpkeepTranscoder", *abi) + client.ContractStore.AddBIN("UpkeepTranscoder", common.FromHex(upkeep_transcoder.UpkeepTranscoderMetaData.Bin)) + + transcoder, err := upkeep_transcoder.NewUpkeepTranscoder(address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumUpkeepTranscoder{}, fmt.Errorf("failed to instantiate UpkeepTranscoder instance: %w", err) + } + + return &EthereumUpkeepTranscoder{ + client: client, + 
transcoder: transcoder, + address: &address, + }, nil +} + +// EthereumKeeperRegistry represents keeper registry contract +type EthereumKeeperRegistry struct { + client *seth.Client + version ethereum.KeeperRegistryVersion + registry1_1 *keeper_registry_wrapper1_1.KeeperRegistry + registry1_2 *keeper_registry_wrapper1_2.KeeperRegistry + registry1_3 *keeper_registry_wrapper1_3.KeeperRegistry + registry2_0 *keeper_registry_wrapper2_0.KeeperRegistry + registry2_1 *i_keeper_registry_master_wrapper_2_1.IKeeperRegistryMaster + registry2_2 *i_automation_registry_master_wrapper_2_2.IAutomationRegistryMaster + chainModule *i_chain_module.IChainModule + address *common.Address + l zerolog.Logger +} + +func (v *EthereumKeeperRegistry) ReorgProtectionEnabled() bool { + chainId := v.client.ChainID + // reorg protection is disabled in polygon zkEVM and Scroll bc currently there is no way to get the block hash onchain + return v.version != ethereum.RegistryVersion_2_2 || (chainId != 1101 && chainId != 1442 && chainId != 2442 && chainId != 534352 && chainId != 534351) +} + +func (v *EthereumKeeperRegistry) ChainModuleAddress() common.Address { + if v.version == ethereum.RegistryVersion_2_2 { + return v.chainModule.Address() + } + return common.Address{} +} + +func (v *EthereumKeeperRegistry) Address() string { + return v.address.Hex() +} + +func (v *EthereumKeeperRegistry) Fund(_ *big.Float) error { + panic("do not use this function, use actions_seth.SendFunds instead") +} + +func (v *EthereumKeeperRegistry) RegistryOwnerAddress() common.Address { + callOpts := &bind.CallOpts{ + Pending: false, + } + + switch v.version { + case ethereum.RegistryVersion_2_2: + ownerAddress, _ := v.registry2_2.Owner(callOpts) + return ownerAddress + case ethereum.RegistryVersion_2_1: + ownerAddress, _ := v.registry2_1.Owner(callOpts) + return ownerAddress + case ethereum.RegistryVersion_2_0: + ownerAddress, _ := v.registry2_0.Owner(callOpts) + return ownerAddress + case ethereum.RegistryVersion_1_0, 
ethereum.RegistryVersion_1_1, ethereum.RegistryVersion_1_2, ethereum.RegistryVersion_1_3:
		return v.client.MustGetRootKeyAddress()
	default:
		return v.client.MustGetRootKeyAddress()
	}
}

// SetConfigTypeSafe pushes an OCR config to a v2.1/v2.2 registry through the
// typed setConfigTypeSafe call; any other registry version is rejected.
func (v *EthereumKeeperRegistry) SetConfigTypeSafe(ocrConfig OCRv2Config) error {
	opts := v.client.NewTXOpts()

	switch v.version {
	case ethereum.RegistryVersion_2_1:
		_, err := v.client.Decode(v.registry2_1.SetConfigTypeSafe(opts,
			ocrConfig.Signers,
			ocrConfig.Transmitters,
			ocrConfig.F,
			ocrConfig.TypedOnchainConfig21,
			ocrConfig.OffchainConfigVersion,
			ocrConfig.OffchainConfig,
		))
		return err
	case ethereum.RegistryVersion_2_2:
		_, err := v.client.Decode(v.registry2_2.SetConfigTypeSafe(opts,
			ocrConfig.Signers,
			ocrConfig.Transmitters,
			ocrConfig.F,
			ocrConfig.TypedOnchainConfig22,
			ocrConfig.OffchainConfigVersion,
			ocrConfig.OffchainConfig,
		))
		return err
	default:
		return fmt.Errorf("SetConfigTypeSafe is not supported in keeper registry version %d", v.version)
	}
}

// SetConfig applies the given registry settings (and, for v2.0, the OCR
// config) on chain. For v1.2/v1.3 the current on-chain transcoder/registrar
// are read first and carried over unchanged, since they have separate setters.
// v2.1 and v2.2 must use SetConfigTypeSafe instead.
func (v *EthereumKeeperRegistry) SetConfig(config KeeperRegistrySettings, ocrConfig OCRv2Config) error {
	txOpts := v.client.NewTXOpts()
	callOpts := bind.CallOpts{
		From:    v.client.MustGetRootKeyAddress(),
		Context: nil,
	}

	switch v.version {
	case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1:
		_, err := v.client.Decode(v.registry1_1.SetConfig(
			txOpts,
			config.PaymentPremiumPPB,
			config.FlatFeeMicroLINK,
			config.BlockCountPerTurn,
			config.CheckGasLimit,
			config.StalenessSeconds,
			config.GasCeilingMultiplier,
			config.FallbackGasPrice,
			config.FallbackLinkPrice,
		))
		return err
	case ethereum.RegistryVersion_1_2:
		state, err := v.registry1_2.GetState(&callOpts)
		if err != nil {
			return err
		}
		_, err = v.client.Decode(v.registry1_2.SetConfig(txOpts, keeper_registry_wrapper1_2.Config{
			PaymentPremiumPPB:    config.PaymentPremiumPPB,
			FlatFeeMicroLink:     config.FlatFeeMicroLINK,
			BlockCountPerTurn:    config.BlockCountPerTurn,
			CheckGasLimit:        config.CheckGasLimit,
			StalenessSeconds:     config.StalenessSeconds,
			GasCeilingMultiplier: config.GasCeilingMultiplier,
			MinUpkeepSpend:       config.MinUpkeepSpend,
			MaxPerformGas:        config.MaxPerformGas,
			FallbackGasPrice:     config.FallbackGasPrice,
			FallbackLinkPrice:    config.FallbackLinkPrice,
			// Keep the transcoder and registrar same. They have separate setters
			Transcoder: state.Config.Transcoder,
			Registrar:  state.Config.Registrar,
		}))
		return err
	case ethereum.RegistryVersion_1_3:
		state, err := v.registry1_3.GetState(&callOpts)
		if err != nil {
			return err
		}
		_, err = v.client.Decode(v.registry1_3.SetConfig(txOpts, keeper_registry_wrapper1_3.Config{
			PaymentPremiumPPB:    config.PaymentPremiumPPB,
			FlatFeeMicroLink:     config.FlatFeeMicroLINK,
			BlockCountPerTurn:    config.BlockCountPerTurn,
			CheckGasLimit:        config.CheckGasLimit,
			StalenessSeconds:     config.StalenessSeconds,
			GasCeilingMultiplier: config.GasCeilingMultiplier,
			MinUpkeepSpend:       config.MinUpkeepSpend,
			MaxPerformGas:        config.MaxPerformGas,
			FallbackGasPrice:     config.FallbackGasPrice,
			FallbackLinkPrice:    config.FallbackLinkPrice,
			// Keep the transcoder and registrar same. They have separate setters
			Transcoder: state.Config.Transcoder,
			Registrar:  state.Config.Registrar,
		}))
		return err
	case ethereum.RegistryVersion_2_0:
		_, err := v.client.Decode(v.registry2_0.SetConfig(txOpts,
			ocrConfig.Signers,
			ocrConfig.Transmitters,
			ocrConfig.F,
			ocrConfig.OnchainConfig,
			ocrConfig.OffchainConfigVersion,
			ocrConfig.OffchainConfig,
		))
		return err
	case ethereum.RegistryVersion_2_1, ethereum.RegistryVersion_2_2:
		return fmt.Errorf("registry version 2.1 and 2.2 must use setConfigTypeSafe function")
	default:
		return fmt.Errorf("keeper registry version %d is not supported", v.version)
	}
}

// SetUpkeepOffchainConfig sets the per-upkeep offchain config; only registry
// v2.0 and newer expose this call.
func (v *EthereumKeeperRegistry) SetUpkeepOffchainConfig(id *big.Int, offchainConfig []byte) error {
	switch v.version {
	case ethereum.RegistryVersion_2_0:
		_, err := v.client.Decode(v.registry2_0.SetUpkeepOffchainConfig(v.client.NewTXOpts(), id, offchainConfig))
		return err
	case ethereum.RegistryVersion_2_1:
		_, err := v.client.Decode(v.registry2_1.SetUpkeepOffchainConfig(v.client.NewTXOpts(), id, offchainConfig))
		return err
	case ethereum.RegistryVersion_2_2:
		_, err := v.client.Decode(v.registry2_2.SetUpkeepOffchainConfig(v.client.NewTXOpts(), id, offchainConfig))
		return err
	default:
		return fmt.Errorf("SetUpkeepOffchainConfig is not supported by keeper registry version %d", v.version)
	}
}

// Pause pauses the registry.
+func (v *EthereumKeeperRegistry) Pause() error { + txOpts := v.client.NewTXOpts() + var err error + + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + _, err = v.client.Decode(v.registry1_1.Pause(txOpts)) + case ethereum.RegistryVersion_1_2: + _, err = v.client.Decode(v.registry1_2.Pause(txOpts)) + case ethereum.RegistryVersion_1_3: + _, err = v.client.Decode(v.registry1_3.Pause(txOpts)) + case ethereum.RegistryVersion_2_0: + _, err = v.client.Decode(v.registry2_0.Pause(txOpts)) + case ethereum.RegistryVersion_2_1: + _, err = v.client.Decode(v.registry2_1.Pause(txOpts)) + case ethereum.RegistryVersion_2_2: + _, err = v.client.Decode(v.registry2_2.Pause(txOpts)) + default: + return fmt.Errorf("keeper registry version %d is not supported", v.version) + } + + return err +} + +// Migrate performs a migration of the given upkeep ids to the specific destination passed as parameter. +func (v *EthereumKeeperRegistry) Migrate(upkeepIDs []*big.Int, destinationAddress common.Address) error { + if v.version != ethereum.RegistryVersion_1_2 { + return fmt.Errorf("migration of upkeeps is only available for version 1.2 of the registries") + } + + _, err := v.client.Decode(v.registry1_2.MigrateUpkeeps(v.client.NewTXOpts(), upkeepIDs, destinationAddress)) + return err +} + +// SetMigrationPermissions sets the permissions of another registry to allow migrations between the two. 
+func (v *EthereumKeeperRegistry) SetMigrationPermissions(peerAddress common.Address, permission uint8) error { + if v.version != ethereum.RegistryVersion_1_2 { + return fmt.Errorf("migration of upkeeps is only available for version 1.2 of the registries") + } + + _, err := v.client.Decode(v.registry1_2.SetPeerRegistryMigrationPermission(v.client.NewTXOpts(), peerAddress, permission)) + return err +} + +func (v *EthereumKeeperRegistry) SetRegistrar(registrarAddr string) error { + if v.version == ethereum.RegistryVersion_2_0 { + // we short circuit and exit, so we don't create a new txs messing up the nonce before exiting + return fmt.Errorf("please use set config") + } + + txOpts := v.client.NewTXOpts() + callOpts := bind.CallOpts{ + From: v.client.MustGetRootKeyAddress(), + Context: nil, + } + + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + _, err := v.client.Decode(v.registry1_1.SetRegistrar(txOpts, common.HexToAddress(registrarAddr))) + return err + case ethereum.RegistryVersion_1_2: + state, err := v.registry1_2.GetState(&callOpts) + if err != nil { + return err + } + newConfig := state.Config + newConfig.Registrar = common.HexToAddress(registrarAddr) + _, err = v.client.Decode(v.registry1_2.SetConfig(txOpts, newConfig)) + return err + case ethereum.RegistryVersion_1_3: + state, err := v.registry1_3.GetState(&callOpts) + if err != nil { + return err + } + newConfig := state.Config + newConfig.Registrar = common.HexToAddress(registrarAddr) + _, err = v.client.Decode(v.registry1_3.SetConfig(txOpts, newConfig)) + return err + default: + return fmt.Errorf("keeper registry version %d is not supported", v.version) + } +} + +// AddUpkeepFunds adds link for particular upkeep id +func (v *EthereumKeeperRegistry) AddUpkeepFundsFromKey(id *big.Int, amount *big.Int, keyNum int) error { + opts := v.client.NewTXKeyOpts(keyNum) + var err error + + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + 
_, err = v.client.Decode(v.registry1_1.AddFunds(opts, id, amount)) + case ethereum.RegistryVersion_1_2: + _, err = v.client.Decode(v.registry1_2.AddFunds(opts, id, amount)) + case ethereum.RegistryVersion_1_3: + _, err = v.client.Decode(v.registry1_3.AddFunds(opts, id, amount)) + case ethereum.RegistryVersion_2_0: + _, err = v.client.Decode(v.registry2_0.AddFunds(opts, id, amount)) + case ethereum.RegistryVersion_2_1: + _, err = v.client.Decode(v.registry2_1.AddFunds(opts, id, amount)) + case ethereum.RegistryVersion_2_2: + _, err = v.client.Decode(v.registry2_2.AddFunds(opts, id, amount)) + } + + return err +} + +// AddUpkeepFunds adds link for particular upkeep id +func (v *EthereumKeeperRegistry) AddUpkeepFunds(id *big.Int, amount *big.Int) error { + return v.AddUpkeepFundsFromKey(id, amount, 0) +} + +// GetUpkeepInfo gets upkeep info +func (v *EthereumKeeperRegistry) GetUpkeepInfo(ctx context.Context, id *big.Int) (*UpkeepInfo, error) { + opts := &bind.CallOpts{ + From: v.client.MustGetRootKeyAddress(), + Context: ctx, + } + + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + uk, err := v.registry1_1.GetUpkeep(opts, id) + if err != nil { + return nil, err + } + return &UpkeepInfo{ + Target: uk.Target.Hex(), + ExecuteGas: uk.ExecuteGas, + CheckData: uk.CheckData, + Balance: uk.Balance, + LastKeeper: uk.LastKeeper.Hex(), + Admin: uk.Admin.Hex(), + MaxValidBlocknumber: uk.MaxValidBlocknumber, + }, nil + case ethereum.RegistryVersion_1_2: + uk, err := v.registry1_2.GetUpkeep(opts, id) + if err != nil { + return nil, err + } + return &UpkeepInfo{ + Target: uk.Target.Hex(), + ExecuteGas: uk.ExecuteGas, + CheckData: uk.CheckData, + Balance: uk.Balance, + LastKeeper: uk.LastKeeper.Hex(), + Admin: uk.Admin.Hex(), + MaxValidBlocknumber: uk.MaxValidBlocknumber, + }, nil + case ethereum.RegistryVersion_1_3: + uk, err := v.registry1_3.GetUpkeep(opts, id) + if err != nil { + return nil, err + } + return &UpkeepInfo{ + Target: 
uk.Target.Hex(), + ExecuteGas: uk.ExecuteGas, + CheckData: uk.CheckData, + Balance: uk.Balance, + LastKeeper: uk.LastKeeper.Hex(), + Admin: uk.Admin.Hex(), + MaxValidBlocknumber: uk.MaxValidBlocknumber, + }, nil + case ethereum.RegistryVersion_2_0: + uk, err := v.registry2_0.GetUpkeep(opts, id) + if err != nil { + return nil, err + } + return &UpkeepInfo{ + Target: uk.Target.Hex(), + ExecuteGas: uk.ExecuteGas, + CheckData: uk.CheckData, + Balance: uk.Balance, + Admin: uk.Admin.Hex(), + MaxValidBlocknumber: uk.MaxValidBlocknumber, + LastPerformBlockNumber: uk.LastPerformBlockNumber, + AmountSpent: uk.AmountSpent, + Paused: uk.Paused, + OffchainConfig: uk.OffchainConfig, + }, nil + case ethereum.RegistryVersion_2_1: + uk, err := v.registry2_1.GetUpkeep(opts, id) + if err != nil { + return nil, err + } + return &UpkeepInfo{ + Target: uk.Target.Hex(), + ExecuteGas: uk.PerformGas, + CheckData: uk.CheckData, + Balance: uk.Balance, + Admin: uk.Admin.Hex(), + MaxValidBlocknumber: uk.MaxValidBlocknumber, + LastPerformBlockNumber: uk.LastPerformedBlockNumber, + AmountSpent: uk.AmountSpent, + Paused: uk.Paused, + OffchainConfig: uk.OffchainConfig, + }, nil + case ethereum.RegistryVersion_2_2: + return v.getUpkeepInfo22(opts, id) + } + + return nil, fmt.Errorf("keeper registry version %d is not supported", v.version) +} + +func (v *EthereumKeeperRegistry) getUpkeepInfo22(opts *bind.CallOpts, id *big.Int) (*UpkeepInfo, error) { + uk, err := v.registry2_2.GetUpkeep(opts, id) + if err != nil { + return nil, err + } + return &UpkeepInfo{ + Target: uk.Target.Hex(), + ExecuteGas: uk.PerformGas, + CheckData: uk.CheckData, + Balance: uk.Balance, + Admin: uk.Admin.Hex(), + MaxValidBlocknumber: uk.MaxValidBlocknumber, + LastPerformBlockNumber: uk.LastPerformedBlockNumber, + AmountSpent: uk.AmountSpent, + Paused: uk.Paused, + OffchainConfig: uk.OffchainConfig, + }, nil +} + +func (v *EthereumKeeperRegistry) GetKeeperInfo(ctx context.Context, keeperAddr string) (*KeeperInfo, error) { + 
opts := &bind.CallOpts{ + From: v.client.MustGetRootKeyAddress(), + Context: ctx, + } + var info struct { + Payee common.Address + Active bool + Balance *big.Int + } + var err error + + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + info, err = v.registry1_1.GetKeeperInfo(opts, common.HexToAddress(keeperAddr)) + case ethereum.RegistryVersion_1_2: + info, err = v.registry1_2.GetKeeperInfo(opts, common.HexToAddress(keeperAddr)) + case ethereum.RegistryVersion_1_3: + info, err = v.registry1_3.GetKeeperInfo(opts, common.HexToAddress(keeperAddr)) + case ethereum.RegistryVersion_2_0, ethereum.RegistryVersion_2_1, ethereum.RegistryVersion_2_2: + // this is not used anywhere + return nil, fmt.Errorf("not supported") + } + + if err != nil { + return nil, err + } + return &KeeperInfo{ + Payee: info.Payee.Hex(), + Active: info.Active, + Balance: info.Balance, + }, nil +} + +func (v *EthereumKeeperRegistry) SetKeepers(keepers []string, payees []string, ocrConfig OCRv2Config) error { + opts := v.client.NewTXOpts() + var err error + + keepersAddresses := make([]common.Address, 0) + for _, k := range keepers { + keepersAddresses = append(keepersAddresses, common.HexToAddress(k)) + } + payeesAddresses := make([]common.Address, 0) + for _, p := range payees { + payeesAddresses = append(payeesAddresses, common.HexToAddress(p)) + } + + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + _, err = v.client.Decode(v.registry1_1.SetKeepers(opts, keepersAddresses, payeesAddresses)) + case ethereum.RegistryVersion_1_2: + _, err = v.client.Decode(v.registry1_2.SetKeepers(opts, keepersAddresses, payeesAddresses)) + case ethereum.RegistryVersion_1_3: + _, err = v.client.Decode(v.registry1_3.SetKeepers(opts, keepersAddresses, payeesAddresses)) + case ethereum.RegistryVersion_2_0: + _, err = v.client.Decode(v.registry2_0.SetConfig(opts, + ocrConfig.Signers, + ocrConfig.Transmitters, + ocrConfig.F, + 
ocrConfig.OnchainConfig, + ocrConfig.OffchainConfigVersion, + ocrConfig.OffchainConfig, + )) + case ethereum.RegistryVersion_2_1, ethereum.RegistryVersion_2_2: + return fmt.Errorf("not supported") + } + + return err +} + +// RegisterUpkeep registers contract to perform upkeep +func (v *EthereumKeeperRegistry) RegisterUpkeep(target string, gasLimit uint32, admin string, checkData []byte) error { + opts := v.client.NewTXOpts() + var err error + + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + _, err = v.client.Decode(v.registry1_1.RegisterUpkeep( + opts, + common.HexToAddress(target), + gasLimit, + common.HexToAddress(admin), + checkData, + )) + case ethereum.RegistryVersion_1_2: + _, err = v.client.Decode(v.registry1_2.RegisterUpkeep( + opts, + common.HexToAddress(target), + gasLimit, + common.HexToAddress(admin), + checkData, + )) + case ethereum.RegistryVersion_1_3: + _, err = v.client.Decode(v.registry1_3.RegisterUpkeep( + opts, + common.HexToAddress(target), + gasLimit, + common.HexToAddress(admin), + checkData, + )) + case ethereum.RegistryVersion_2_0: + _, err = v.client.Decode(v.registry2_0.RegisterUpkeep( + opts, + common.HexToAddress(target), + gasLimit, + common.HexToAddress(admin), + checkData, + nil, //offchain config + )) + case ethereum.RegistryVersion_2_1, ethereum.RegistryVersion_2_2: + return fmt.Errorf("not supported") + } + + return err +} + +// CancelUpkeep cancels the given upkeep ID +func (v *EthereumKeeperRegistry) CancelUpkeep(id *big.Int) error { + opts := v.client.NewTXOpts() + var err error + var tx *seth.DecodedTransaction + + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + tx, err = v.client.Decode(v.registry1_1.CancelUpkeep(opts, id)) + case ethereum.RegistryVersion_1_2: + tx, err = v.client.Decode(v.registry1_2.CancelUpkeep(opts, id)) + case ethereum.RegistryVersion_1_3: + tx, err = v.client.Decode(v.registry1_3.CancelUpkeep(opts, id)) + case 
ethereum.RegistryVersion_2_0: + tx, err = v.client.Decode(v.registry2_0.CancelUpkeep(opts, id)) + case ethereum.RegistryVersion_2_1: + tx, err = v.client.Decode(v.registry2_1.CancelUpkeep(opts, id)) + case ethereum.RegistryVersion_2_2: + tx, err = v.client.Decode(v.registry2_2.CancelUpkeep(opts, id)) + } + + txHash := "none" + if err == nil && tx != nil { + txHash = tx.Hash + } + + v.l.Info(). + Str("Upkeep ID", strconv.FormatInt(id.Int64(), 10)). + Str("From", v.client.MustGetRootKeyAddress().Hex()). + Str("TX Hash", txHash). + Msg("Cancel Upkeep tx") + + return err +} + +// SetUpkeepGasLimit sets the perform gas limit for a given upkeep ID +func (v *EthereumKeeperRegistry) SetUpkeepGasLimit(id *big.Int, gas uint32) error { + opts := v.client.NewTXOpts() + var err error + + switch v.version { + case ethereum.RegistryVersion_1_2: + _, err = v.client.Decode(v.registry1_2.SetUpkeepGasLimit(opts, id, gas)) + case ethereum.RegistryVersion_1_3: + _, err = v.client.Decode(v.registry1_3.SetUpkeepGasLimit(opts, id, gas)) + case ethereum.RegistryVersion_2_0: + _, err = v.client.Decode(v.registry2_0.SetUpkeepGasLimit(opts, id, gas)) + case ethereum.RegistryVersion_2_1: + _, err = v.client.Decode(v.registry2_1.SetUpkeepGasLimit(opts, id, gas)) + case ethereum.RegistryVersion_2_2: + _, err = v.client.Decode(v.registry2_2.SetUpkeepGasLimit(opts, id, gas)) + default: + return fmt.Errorf("keeper registry version %d is not supported for SetUpkeepGasLimit", v.version) + } + + return err +} + +// GetKeeperList get list of all registered keeper addresses +func (v *EthereumKeeperRegistry) GetKeeperList(ctx context.Context) ([]string, error) { + opts := &bind.CallOpts{ + From: v.client.MustGetRootKeyAddress(), + Context: ctx, + } + var list []common.Address + var err error + + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + list, err = v.registry1_1.GetKeeperList(opts) + case ethereum.RegistryVersion_1_2: + state, err := 
v.registry1_2.GetState(opts) + if err != nil { + return []string{}, err + } + list = state.Keepers + case ethereum.RegistryVersion_1_3: + state, err := v.registry1_3.GetState(opts) + if err != nil { + return []string{}, err + } + list = state.Keepers + case ethereum.RegistryVersion_2_0: + state, err := v.registry2_0.GetState(opts) + if err != nil { + return []string{}, err + } + list = state.Transmitters + case ethereum.RegistryVersion_2_1, ethereum.RegistryVersion_2_2: + return nil, fmt.Errorf("not supported") + } + + if err != nil { + return []string{}, err + } + addrs := make([]string, 0) + for _, ca := range list { + addrs = append(addrs, ca.Hex()) + } + return addrs, nil +} + +// UpdateCheckData updates the check data of an upkeep +func (v *EthereumKeeperRegistry) UpdateCheckData(id *big.Int, newCheckData []byte) error { + opts := v.client.NewTXOpts() + var err error + + switch v.version { + case ethereum.RegistryVersion_1_3: + _, err = v.client.Decode(v.registry1_3.UpdateCheckData(opts, id, newCheckData)) + case ethereum.RegistryVersion_2_0: + _, err = v.client.Decode(v.registry2_0.UpdateCheckData(opts, id, newCheckData)) + case ethereum.RegistryVersion_2_1: + _, err = v.client.Decode(v.registry2_1.SetUpkeepCheckData(opts, id, newCheckData)) + case ethereum.RegistryVersion_2_2: + _, err = v.client.Decode(v.registry2_2.SetUpkeepCheckData(opts, id, newCheckData)) + default: + return fmt.Errorf("UpdateCheckData is not supported by keeper registry version %d", v.version) + } + + return err +} + +// SetUpkeepTriggerConfig updates the trigger config of an upkeep (only for version 2.1) +func (v *EthereumKeeperRegistry) SetUpkeepTriggerConfig(id *big.Int, triggerConfig []byte) error { + opts := v.client.NewTXOpts() + var err error + + switch v.version { + case ethereum.RegistryVersion_2_1: + _, err = v.client.Decode(v.registry2_1.SetUpkeepTriggerConfig(opts, id, triggerConfig)) + case ethereum.RegistryVersion_2_2: + _, err = 
v.client.Decode(v.registry2_2.SetUpkeepTriggerConfig(opts, id, triggerConfig)) + default: + return fmt.Errorf("SetUpkeepTriggerConfig is not supported by keeper registry version %d", v.version) + } + + return err +} + +// SetUpkeepPrivilegeConfig sets the privilege config of an upkeep (only for version 2.1) +func (v *EthereumKeeperRegistry) SetUpkeepPrivilegeConfig(id *big.Int, privilegeConfig []byte) error { + opts := v.client.NewTXOpts() + var err error + + switch v.version { + case ethereum.RegistryVersion_2_1: + _, err = v.client.Decode(v.registry2_1.SetUpkeepPrivilegeConfig(opts, id, privilegeConfig)) + case ethereum.RegistryVersion_2_2: + _, err = v.client.Decode(v.registry2_2.SetUpkeepPrivilegeConfig(opts, id, privilegeConfig)) + default: + return fmt.Errorf("SetUpkeepPrivilegeConfig is not supported by keeper registry version %d", v.version) + } + + return err +} + +// PauseUpkeep stops an upkeep from an upkeep +func (v *EthereumKeeperRegistry) PauseUpkeep(id *big.Int) error { + opts := v.client.NewTXOpts() + var err error + + switch v.version { + case ethereum.RegistryVersion_1_3: + _, err = v.client.Decode(v.registry1_3.PauseUpkeep(opts, id)) + case ethereum.RegistryVersion_2_0: + _, err = v.client.Decode(v.registry2_0.PauseUpkeep(opts, id)) + case ethereum.RegistryVersion_2_1: + _, err = v.client.Decode(v.registry2_1.PauseUpkeep(opts, id)) + case ethereum.RegistryVersion_2_2: + _, err = v.client.Decode(v.registry2_2.PauseUpkeep(opts, id)) + default: + return fmt.Errorf("PauseUpkeep is not supported by keeper registry version %d", v.version) + } + + return err +} + +// UnpauseUpkeep get list of all registered keeper addresses +func (v *EthereumKeeperRegistry) UnpauseUpkeep(id *big.Int) error { + opts := v.client.NewTXOpts() + var err error + + switch v.version { + case ethereum.RegistryVersion_1_3: + _, err = v.client.Decode(v.registry1_3.UnpauseUpkeep(opts, id)) + case ethereum.RegistryVersion_2_0: + _, err = 
v.client.Decode(v.registry2_0.UnpauseUpkeep(opts, id)) + case ethereum.RegistryVersion_2_1: + _, err = v.client.Decode(v.registry2_1.UnpauseUpkeep(opts, id)) + case ethereum.RegistryVersion_2_2: + _, err = v.client.Decode(v.registry2_2.UnpauseUpkeep(opts, id)) + default: + return fmt.Errorf("UnpauseUpkeep is not supported by keeper registry version %d", v.version) + } + + return err +} + +// Parses upkeep performed log +func (v *EthereumKeeperRegistry) ParseUpkeepPerformedLog(log *types.Log) (*UpkeepPerformedLog, error) { + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + parsedLog, err := v.registry1_1.ParseUpkeepPerformed(*log) + if err != nil { + return nil, err + } + return &UpkeepPerformedLog{ + Id: parsedLog.Id, + Success: parsedLog.Success, + From: parsedLog.From, + }, nil + case ethereum.RegistryVersion_1_2: + parsedLog, err := v.registry1_2.ParseUpkeepPerformed(*log) + if err != nil { + return nil, err + } + return &UpkeepPerformedLog{ + Id: parsedLog.Id, + Success: parsedLog.Success, + From: parsedLog.From, + }, nil + case ethereum.RegistryVersion_1_3: + parsedLog, err := v.registry1_3.ParseUpkeepPerformed(*log) + if err != nil { + return nil, err + } + return &UpkeepPerformedLog{ + Id: parsedLog.Id, + Success: parsedLog.Success, + From: parsedLog.From, + }, nil + case ethereum.RegistryVersion_2_0: + parsedLog, err := v.registry2_0.ParseUpkeepPerformed(*log) + if err != nil { + return nil, err + } + return &UpkeepPerformedLog{ + Id: parsedLog.Id, + Success: parsedLog.Success, + From: utils.ZeroAddress, + }, nil + case ethereum.RegistryVersion_2_1: + parsedLog, err := v.registry2_1.ParseUpkeepPerformed(*log) + if err != nil { + return nil, err + } + return &UpkeepPerformedLog{ + Id: parsedLog.Id, + Success: parsedLog.Success, + From: utils.ZeroAddress, + }, nil + case ethereum.RegistryVersion_2_2: + parsedLog, err := v.registry2_2.ParseUpkeepPerformed(*log) + if err != nil { + return nil, err + } + return 
&UpkeepPerformedLog{ + Id: parsedLog.Id, + Success: parsedLog.Success, + From: utils.ZeroAddress, + }, nil + } + return nil, fmt.Errorf("keeper registry version %d is not supported", v.version) +} + +// ParseStaleUpkeepReportLog Parses Stale upkeep report log +func (v *EthereumKeeperRegistry) ParseStaleUpkeepReportLog(log *types.Log) (*StaleUpkeepReportLog, error) { + //nolint:exhaustive + switch v.version { + case ethereum.RegistryVersion_2_0: + parsedLog, err := v.registry2_0.ParseStaleUpkeepReport(*log) + if err != nil { + return nil, err + } + return &StaleUpkeepReportLog{ + Id: parsedLog.Id, + }, nil + case ethereum.RegistryVersion_2_1: + parsedLog, err := v.registry2_1.ParseStaleUpkeepReport(*log) + if err != nil { + return nil, err + } + return &StaleUpkeepReportLog{ + Id: parsedLog.Id, + }, nil + case ethereum.RegistryVersion_2_2: + parsedLog, err := v.registry2_2.ParseStaleUpkeepReport(*log) + if err != nil { + return nil, err + } + return &StaleUpkeepReportLog{ + Id: parsedLog.Id, + }, nil + } + return nil, fmt.Errorf("keeper registry version %d is not supported", v.version) +} + +// Parses the upkeep ID from an 'UpkeepRegistered' log, returns error on any other log +func (v *EthereumKeeperRegistry) ParseUpkeepIdFromRegisteredLog(log *types.Log) (*big.Int, error) { + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + parsedLog, err := v.registry1_1.ParseUpkeepRegistered(*log) + if err != nil { + return nil, err + } + return parsedLog.Id, nil + case ethereum.RegistryVersion_1_2: + parsedLog, err := v.registry1_2.ParseUpkeepRegistered(*log) + if err != nil { + return nil, err + } + return parsedLog.Id, nil + case ethereum.RegistryVersion_1_3: + parsedLog, err := v.registry1_3.ParseUpkeepRegistered(*log) + if err != nil { + return nil, err + } + return parsedLog.Id, nil + case ethereum.RegistryVersion_2_0: + parsedLog, err := v.registry2_0.ParseUpkeepRegistered(*log) + if err != nil { + return nil, err + } + return 
parsedLog.Id, nil + case ethereum.RegistryVersion_2_1: + parsedLog, err := v.registry2_1.ParseUpkeepRegistered(*log) + if err != nil { + return nil, err + } + return parsedLog.Id, nil + case ethereum.RegistryVersion_2_2: + parsedLog, err := v.registry2_2.ParseUpkeepRegistered(*log) + if err != nil { + return nil, err + } + return parsedLog.Id, nil + } + + return nil, fmt.Errorf("keeper registry version %d is not supported", v.version) +} + +func DeployKeeperRegistry( + client *seth.Client, + opts *KeeperRegistryOpts, +) (KeeperRegistry, error) { + var mode uint8 + switch client.ChainID { + //Arbitrum payment model + case networks.ArbitrumMainnet.ChainID, networks.ArbitrumSepolia.ChainID: + mode = uint8(1) + //Optimism payment model + case networks.OptimismMainnet.ChainID, networks.OptimismSepolia.ChainID: + mode = uint8(2) + //Base + case networks.BaseMainnet.ChainID, networks.BaseSepolia.ChainID: + mode = uint8(2) + default: + mode = uint8(0) + } + registryGasOverhead := big.NewInt(80000) + switch opts.RegistryVersion { + case eth_contracts.RegistryVersion_1_0, eth_contracts.RegistryVersion_1_1: + return deployRegistry10_11(client, opts) + case eth_contracts.RegistryVersion_1_2: + return deployRegistry12(client, opts) + case eth_contracts.RegistryVersion_1_3: + return deployRegistry13(client, opts, mode, registryGasOverhead) + case eth_contracts.RegistryVersion_2_0: + return deployRegistry20(client, opts, mode) + case eth_contracts.RegistryVersion_2_1: + return deployRegistry21(client, opts, mode) + case eth_contracts.RegistryVersion_2_2: + return deployRegistry22(client, opts) + default: + return nil, fmt.Errorf("keeper registry version %d is not supported", opts.RegistryVersion) + } +} + +func deployRegistry10_11(client *seth.Client, opts *KeeperRegistryOpts) (KeeperRegistry, error) { + abi, err := keeper_registry_wrapper1_1.KeeperRegistryMetaData.GetAbi() + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to get KeeperRegistry1_1 ABI: %w", 
err)
	}
	deployment, err := client.DeployContract(client.NewTXOpts(), "KeeperRegistry1_1", *abi, common.FromHex(keeper_registry_wrapper1_1.KeeperRegistryMetaData.Bin),
		common.HexToAddress(opts.LinkAddr),
		common.HexToAddress(opts.ETHFeedAddr),
		common.HexToAddress(opts.GasFeedAddr),
		opts.Settings.PaymentPremiumPPB,
		opts.Settings.FlatFeeMicroLINK,
		opts.Settings.BlockCountPerTurn,
		opts.Settings.CheckGasLimit,
		opts.Settings.StalenessSeconds,
		opts.Settings.GasCeilingMultiplier,
		opts.Settings.FallbackGasPrice,
		opts.Settings.FallbackLinkPrice,
	)
	if err != nil {
		return &EthereumKeeperRegistry{}, fmt.Errorf("KeeperRegistry1_1 instance deployment have failed: %w", err)
	}

	registry, err := keeper_registry_wrapper1_1.NewKeeperRegistry(deployment.Address, wrappers.MustNewWrappedContractBackend(nil, client))
	if err != nil {
		return &EthereumKeeperRegistry{}, fmt.Errorf("failed to instantiate KeeperRegistry1_1 instance: %w", err)
	}

	return &EthereumKeeperRegistry{
		client:      client,
		version:     eth_contracts.RegistryVersion_1_1,
		registry1_1: registry,
		registry1_2: nil,
		registry1_3: nil,
		address:     &deployment.Address,
	}, err
}

// deployRegistry12 deploys a v1.2 KeeperRegistry with the full config struct.
func deployRegistry12(client *seth.Client, opts *KeeperRegistryOpts) (KeeperRegistry, error) {
	abi, err := keeper_registry_wrapper1_2.KeeperRegistryMetaData.GetAbi()
	if err != nil {
		return &EthereumKeeperRegistry{}, fmt.Errorf("failed to get KeeperRegistry1_2 ABI: %w", err)
	}
	deployment, err := client.DeployContract(client.NewTXOpts(), "KeeperRegistry1_2", *abi, common.FromHex(keeper_registry_wrapper1_2.KeeperRegistryMetaData.Bin),
		common.HexToAddress(opts.LinkAddr),
		common.HexToAddress(opts.ETHFeedAddr),
		common.HexToAddress(opts.GasFeedAddr),
		keeper_registry_wrapper1_2.Config{
			PaymentPremiumPPB:    opts.Settings.PaymentPremiumPPB,
			FlatFeeMicroLink:     opts.Settings.FlatFeeMicroLINK,
			BlockCountPerTurn:    opts.Settings.BlockCountPerTurn,
			CheckGasLimit:        opts.Settings.CheckGasLimit,
			StalenessSeconds:     opts.Settings.StalenessSeconds,
			GasCeilingMultiplier: opts.Settings.GasCeilingMultiplier,
			MinUpkeepSpend:       opts.Settings.MinUpkeepSpend,
			MaxPerformGas:        opts.Settings.MaxPerformGas,
			FallbackGasPrice:     opts.Settings.FallbackGasPrice,
			FallbackLinkPrice:    opts.Settings.FallbackLinkPrice,
			Transcoder:           common.HexToAddress(opts.TranscoderAddr),
			Registrar:            common.HexToAddress(opts.RegistrarAddr),
		},
	)
	if err != nil {
		return &EthereumKeeperRegistry{}, fmt.Errorf("KeeperRegistry1_2 instance deployment have failed: %w", err)
	}

	registry, err := keeper_registry_wrapper1_2.NewKeeperRegistry(deployment.Address, wrappers.MustNewWrappedContractBackend(nil, client))
	if err != nil {
		return &EthereumKeeperRegistry{}, fmt.Errorf("failed to instantiate KeeperRegistry1_2 instance: %w", err)
	}
	return &EthereumKeeperRegistry{
		client:      client,
		version:     eth_contracts.RegistryVersion_1_2,
		registry1_1: nil,
		registry1_2: registry,
		registry1_3: nil,
		address:     &deployment.Address,
	}, err
}

// deployRegistry13 deploys the v1.3 logic contract first, then the registry
// proxy that points at it.
func deployRegistry13(client *seth.Client, opts *KeeperRegistryOpts, mode uint8, registryGasOverhead *big.Int) (KeeperRegistry, error) {
	logicAbi, err := keeper_registry_logic1_3.KeeperRegistryLogicMetaData.GetAbi()
	if err != nil {
		return &EthereumKeeperRegistry{}, fmt.Errorf("failed to get KeeperRegistryLogic1_3 ABI: %w", err)
	}
	logicData, err := client.DeployContract(client.NewTXOpts(), "KeeperRegistryLogic1_3", *logicAbi, common.FromHex(keeper_registry_logic1_3.KeeperRegistryLogicMetaData.Bin),
		mode,                // Default payment model
		registryGasOverhead, // Registry gas overhead
		common.HexToAddress(opts.LinkAddr),
		common.HexToAddress(opts.ETHFeedAddr),
		common.HexToAddress(opts.GasFeedAddr),
	)
	if err != nil {
		return &EthereumKeeperRegistry{}, fmt.Errorf("KeeperRegistryLogic1_3 instance deployment have failed: %w", err)
	}

	abi, err := keeper_registry_wrapper1_3.KeeperRegistryMetaData.GetAbi()
	if err != nil {
		return &EthereumKeeperRegistry{}, fmt.Errorf("failed to get KeeperRegistry1_3 ABI: %w", err)
	}
	deployment, err := client.DeployContract(client.NewTXOpts(), "KeeperRegistry1_3", *abi, common.FromHex(keeper_registry_wrapper1_3.KeeperRegistryMetaData.Bin),
		logicData.Address,
		keeper_registry_wrapper1_3.Config{
			PaymentPremiumPPB:    opts.Settings.PaymentPremiumPPB,
			FlatFeeMicroLink:     opts.Settings.FlatFeeMicroLINK,
			BlockCountPerTurn:    opts.Settings.BlockCountPerTurn,
			CheckGasLimit:        opts.Settings.CheckGasLimit,
			StalenessSeconds:     opts.Settings.StalenessSeconds,
			GasCeilingMultiplier: opts.Settings.GasCeilingMultiplier,
			MinUpkeepSpend:       opts.Settings.MinUpkeepSpend,
			MaxPerformGas:        opts.Settings.MaxPerformGas,
			FallbackGasPrice:     opts.Settings.FallbackGasPrice,
			FallbackLinkPrice:    opts.Settings.FallbackLinkPrice,
			Transcoder:           common.HexToAddress(opts.TranscoderAddr),
			Registrar:            common.HexToAddress(opts.RegistrarAddr),
		},
	)
	if err != nil {
		return &EthereumKeeperRegistry{}, fmt.Errorf("KeeperRegistry1_3 instance deployment have failed: %w", err)
	}

	registry, err := keeper_registry_wrapper1_3.NewKeeperRegistry(deployment.Address, wrappers.MustNewWrappedContractBackend(nil, client))
	if err != nil {
		return &EthereumKeeperRegistry{}, fmt.Errorf("failed to instantiate KeeperRegistry1_3 instance: %w", err)
	}

	return &EthereumKeeperRegistry{
		client:      client,
		version:     eth_contracts.RegistryVersion_1_3,
		registry1_1: nil,
		registry1_2: nil,
		registry1_3: registry,
		address:     &deployment.Address,
	}, err
}

// deployRegistry20 deploys the v2.0 logic contract and the registry on top.
func deployRegistry20(client *seth.Client, opts *KeeperRegistryOpts, mode uint8) (KeeperRegistry, error) {
	logicAbi, err := keeper_registry_logic2_0.KeeperRegistryLogicMetaData.GetAbi()
	if err != nil {
		return &EthereumKeeperRegistry{}, fmt.Errorf("failed to get KeeperRegistryLogic2_0 ABI: %w", err)
	}
	logicData, err := client.DeployContract(client.NewTXOpts(), "KeeperRegistryLogic2_0", *logicAbi, common.FromHex(keeper_registry_logic2_0.KeeperRegistryLogicMetaData.Bin),
mode, // Default payment model + common.HexToAddress(opts.LinkAddr), + common.HexToAddress(opts.ETHFeedAddr), + common.HexToAddress(opts.GasFeedAddr), + ) + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("KeeperRegistryLogic2_0 instance deployment have failed: %w", err) + } + + abi, err := keeper_registry_wrapper2_0.KeeperRegistryMetaData.GetAbi() + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to get KeeperRegistry1_3 ABI: %w", err) + } + data, err := client.DeployContract(client.NewTXOpts(), "KeeperRegistry2_0", *abi, common.FromHex(keeper_registry_wrapper2_0.KeeperRegistryMetaData.Bin), + logicData.Address, + ) + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("KeeperRegistry2_0 instance deployment have failed: %w", err) + } + + instance, err := keeper_registry_wrapper2_0.NewKeeperRegistry(data.Address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to instantiate KeeperRegistry2_0 instance: %w", err) + } + + return &EthereumKeeperRegistry{ + client: client, + version: eth_contracts.RegistryVersion_2_0, + registry2_0: instance, + address: &data.Address, + }, err +} + +func deployRegistry21(client *seth.Client, opts *KeeperRegistryOpts, mode uint8) (KeeperRegistry, error) { + automationForwarderLogicAddr, err := deployAutomationForwarderLogicSeth(client) + if err != nil { + return nil, err + } + + logicBAbi, err := registrylogicb21.KeeperRegistryLogicBMetaData.GetAbi() + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to get KeeperRegistryLogicB2_1 ABI: %w", err) + } + logicBData, err := client.DeployContract(client.NewTXOpts(), "KeeperRegistryLogicB2_1", *logicBAbi, common.FromHex(registrylogicb21.KeeperRegistryLogicBMetaData.Bin), + mode, + common.HexToAddress(opts.LinkAddr), + common.HexToAddress(opts.ETHFeedAddr), + common.HexToAddress(opts.GasFeedAddr), + automationForwarderLogicAddr, + ) + if err != 
nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("KeeperRegistryLogicB2_1 instance deployment have failed: %w", err) + } + + logicAAbi, err := registrylogica21.KeeperRegistryLogicAMetaData.GetAbi() + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to get KeeperRegistryLogicA2_1 ABI: %w", err) + } + logicAData, err := client.DeployContract(client.NewTXOpts(), "KeeperRegistryLogicA2_1", *logicAAbi, common.FromHex(registrylogica21.KeeperRegistryLogicAMetaData.Bin), + logicBData.Address, + ) + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("KeeperRegistryLogicA2_1 instance deployment have failed: %w", err) + } + + abi, err := registry21.KeeperRegistryMetaData.GetAbi() + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to get KeeperRegistry2_1 ABI: %w", err) + } + + data, err := client.DeployContract(client.NewTXOpts(), "KeeperRegistry2_1", *abi, common.FromHex(registry21.KeeperRegistryMetaData.Bin), + logicAData.Address, + ) + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("KeeperRegistry2_1 instance deployment have failed: %w", err) + } + + instance, err := iregistry21.NewIKeeperRegistryMaster(data.Address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to instantiate KeeperRegistry2_1 instance: %w", err) + } + + return &EthereumKeeperRegistry{ + client: client, + version: eth_contracts.RegistryVersion_2_1, + registry2_1: instance, + address: &data.Address, + }, err +} + +func deployRegistry22(client *seth.Client, opts *KeeperRegistryOpts) (KeeperRegistry, error) { + var chainModuleAddr common.Address + var err error + chainId := client.ChainID + + if chainId == networks.ScrollMainnet.ChainID || chainId == networks.ScrollSepolia.ChainID { + chainModuleAddr, err = deployScrollModule(client) + } else if chainId == networks.ArbitrumMainnet.ChainID || chainId == networks.ArbitrumSepolia.ChainID { + chainModuleAddr, 
err = deployArbitrumModule(client) + } else if chainId == networks.OptimismMainnet.ChainID || chainId == networks.OptimismSepolia.ChainID { + chainModuleAddr, err = deployOptimismModule(client) + } else { + chainModuleAddr, err = deployBaseModule(client) + } + if err != nil { + return nil, err + } + + automationForwarderLogicAddr, err := deployAutomationForwarderLogicSeth(client) + if err != nil { + return nil, err + } + + var allowedReadOnlyAddress common.Address + if chainId == networks.PolygonZkEvmMainnet.ChainID || chainId == networks.PolygonZkEvmCardona.ChainID { + allowedReadOnlyAddress = common.HexToAddress("0x1111111111111111111111111111111111111111") + } else { + allowedReadOnlyAddress = common.HexToAddress("0x0000000000000000000000000000000000000000") + } + + logicBAbi, err := registrylogicb22.AutomationRegistryLogicBMetaData.GetAbi() + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to get AutomationRegistryLogicB2_2 ABI: %w", err) + } + + logicBData, err := client.DeployContract(client.NewTXOpts(), "AutomationRegistryLogicB2_2", *logicBAbi, common.FromHex(registrylogicb22.AutomationRegistryLogicBMetaData.Bin), + common.HexToAddress(opts.LinkAddr), + common.HexToAddress(opts.ETHFeedAddr), + common.HexToAddress(opts.GasFeedAddr), + automationForwarderLogicAddr, + allowedReadOnlyAddress, + ) + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("AutomationRegistryLogicB2_2 instance deployment have failed: %w", err) + } + + logicAAbi, err := registrylogica22.AutomationRegistryLogicAMetaData.GetAbi() + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to get AutomationRegistryLogicA2_2 ABI: %w", err) + } + logicAData, err := client.DeployContract(client.NewTXOpts(), "AutomationRegistryLogicA2_2", *logicAAbi, common.FromHex(registrylogica22.AutomationRegistryLogicAMetaData.Bin), + logicBData.Address, + ) + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("AutomationRegistryLogicA2_2 instance 
deployment have failed: %w", err) + } + + abi, err := registry22.AutomationRegistryMetaData.GetAbi() + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to get AutomationRegistry2_2 ABI: %w", err) + } + + data, err := client.DeployContract(client.NewTXOpts(), "AutomationRegistry2_2", *abi, common.FromHex(registry22.AutomationRegistryMetaData.Bin), + logicAData.Address, + ) + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("AutomationRegistry2_2 instance deployment have failed: %w", err) + } + + instance, err := iregistry22.NewIAutomationRegistryMaster(data.Address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to instantiate AutomationRegistry2_2 instance: %w", err) + } + + chainModule, err := i_chain_module.NewIChainModule( + chainModuleAddr, + wrappers.MustNewWrappedContractBackend(nil, client), + ) + + return &EthereumKeeperRegistry{ + client: client, + version: eth_contracts.RegistryVersion_2_2, + registry2_2: instance, + chainModule: chainModule, + address: &data.Address, + }, err +} + +// LoadKeeperRegistry returns deployed on given address EthereumKeeperRegistry +func LoadKeeperRegistry(l zerolog.Logger, client *seth.Client, address common.Address, registryVersion eth_contracts.KeeperRegistryVersion) (KeeperRegistry, error) { + var keeper *EthereumKeeperRegistry + var err error + switch registryVersion { + case eth_contracts.RegistryVersion_1_1: + keeper, err = loadRegistry1_1(client, address) + case eth_contracts.RegistryVersion_1_2: + keeper, err = loadRegistry1_2(client, address) + case eth_contracts.RegistryVersion_1_3: + keeper, err = loadRegistry1_3(client, address) + case eth_contracts.RegistryVersion_2_0: + keeper, err = loadRegistry2_0(client, address) + case eth_contracts.RegistryVersion_2_1: + keeper, err = loadRegistry2_1(client, address) + case eth_contracts.RegistryVersion_2_2: // why the contract name is not the same as the 
actual contract name? + keeper, err = loadRegistry2_2(client, address) + default: + return nil, fmt.Errorf("keeper registry version %d is not supported", registryVersion) + } + + if keeper != nil { + keeper.version = registryVersion + keeper.l = l + } + return keeper, err +} + +func loadRegistry1_1(client *seth.Client, address common.Address) (*EthereumKeeperRegistry, error) { + abi, err := keeper_registry_wrapper1_1.KeeperRegistryMetaData.GetAbi() + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to get KeeperRegistry1_1 ABI: %w", err) + } + + client.ContractStore.AddABI("KeeperRegistry1_1", *abi) + client.ContractStore.AddBIN("KeeperRegistry1_1", common.FromHex(keeper_registry_wrapper1_1.KeeperRegistryMetaData.Bin)) + + instance, err := keeper_registry_wrapper1_1.NewKeeperRegistry(address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to instantiate KeeperRegistry1_1 instance: %w", err) + } + + return &EthereumKeeperRegistry{ + address: &address, + client: client, + registry1_1: instance, + }, nil +} + +func loadRegistry1_2(client *seth.Client, address common.Address) (*EthereumKeeperRegistry, error) { + abi, err := keeper_registry_wrapper1_2.KeeperRegistryMetaData.GetAbi() + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to get KeeperRegistry1_2 ABI: %w", err) + } + + client.ContractStore.AddABI("KeeperRegistry1_2", *abi) + client.ContractStore.AddBIN("KeeperRegistry1_2", common.FromHex(keeper_registry_wrapper1_2.KeeperRegistryMetaData.Bin)) + + instance, err := keeper_registry_wrapper1_2.NewKeeperRegistry(address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to instantiate KeeperRegistry1_2 instance: %w", err) + } + + return &EthereumKeeperRegistry{ + address: &address, + client: client, + registry1_2: instance, + }, nil +} + +func loadRegistry1_3(client 
*seth.Client, address common.Address) (*EthereumKeeperRegistry, error) { + abi, err := keeper_registry_wrapper1_3.KeeperRegistryMetaData.GetAbi() + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to get KeeperRegistry1_3 ABI: %w", err) + } + + client.ContractStore.AddABI("KeeperRegistry1_3", *abi) + client.ContractStore.AddBIN("KeeperRegistry1_3", common.FromHex(keeper_registry_wrapper1_3.KeeperRegistryMetaData.Bin)) + + instance, err := keeper_registry_wrapper1_3.NewKeeperRegistry(address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to instantiate KeeperRegistry1_3 instance: %w", err) + } + + return &EthereumKeeperRegistry{ + address: &address, + client: client, + registry1_3: instance, + }, nil +} + +func loadRegistry2_0(client *seth.Client, address common.Address) (*EthereumKeeperRegistry, error) { + abi, err := keeper_registry_wrapper2_0.KeeperRegistryMetaData.GetAbi() + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to get KeeperRegistry2_0 ABI: %w", err) + } + + client.ContractStore.AddABI("KeeperRegistry2_0", *abi) + client.ContractStore.AddBIN("KeeperRegistry2_0", common.FromHex(keeper_registry_wrapper2_0.KeeperRegistryMetaData.Bin)) + + instance, err := keeper_registry_wrapper2_0.NewKeeperRegistry(address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to instantiate KeeperRegistry2_0 instance: %w", err) + } + + return &EthereumKeeperRegistry{ + address: &address, + client: client, + registry2_0: instance, + }, nil +} + +func loadRegistry2_1(client *seth.Client, address common.Address) (*EthereumKeeperRegistry, error) { + abi, err := ac.IAutomationV21PlusCommonMetaData.GetAbi() + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to get KeeperRegistry2_1 ABI: %w", err) + } + + client.ContractStore.AddABI("KeeperRegistry2_1", *abi) + 
client.ContractStore.AddBIN("KeeperRegistry2_1", common.FromHex(ac.IAutomationV21PlusCommonMetaData.Bin)) + + var instance interface{} + + instance, err = ac.NewIAutomationV21PlusCommon(address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to instantiate KeeperRegistry2_1 instance: %w", err) + } + + return &EthereumKeeperRegistry{ + address: &address, + client: client, + registry2_1: instance.(*iregistry21.IKeeperRegistryMaster), + }, nil +} + +func loadRegistry2_2(client *seth.Client, address common.Address) (*EthereumKeeperRegistry, error) { + abi, err := iregistry22.IAutomationRegistryMasterMetaData.GetAbi() + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to get AutomationRegistry2_2 ABI: %w", err) + } + + client.ContractStore.AddABI("AutomationRegistry2_2", *abi) + client.ContractStore.AddBIN("AutomationRegistry2_2", common.FromHex(iregistry22.IAutomationRegistryMasterMetaData.Bin)) + + instance, err := iregistry22.NewIAutomationRegistryMaster(address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumKeeperRegistry{}, fmt.Errorf("failed to instantiate AutomationRegistry2_2 instance: %w", err) + } + + return &EthereumKeeperRegistry{ + address: &address, + client: client, + registry2_2: instance, + }, nil +} + +func deployAutomationForwarderLogicSeth(client *seth.Client) (common.Address, error) { + abi, err := automationForwarderLogic.AutomationForwarderLogicMetaData.GetAbi() + if err != nil { + return common.Address{}, fmt.Errorf("failed to get AutomationForwarderLogic ABI: %w", err) + } + data, err := client.DeployContract(client.NewTXOpts(), "AutomationForwarderLogic", *abi, common.FromHex(automationForwarderLogic.AutomationForwarderLogicMetaData.Bin)) + if err != nil { + return common.Address{}, fmt.Errorf("AutomationForwarderLogic instance deployment have failed: %w", err) + } + + return data.Address, nil +} + 
+func deployScrollModule(client *seth.Client) (common.Address, error) { + abi, err := scroll_module.ScrollModuleMetaData.GetAbi() + if err != nil { + return common.Address{}, fmt.Errorf("failed to get ScrollModule ABI: %w", err) + } + data, err := client.DeployContract(client.NewTXOpts(), "ScrollModule", *abi, common.FromHex(scroll_module.ScrollModuleMetaData.Bin)) + if err != nil { + return common.Address{}, fmt.Errorf("ScrollModule instance deployment have failed: %w", err) + } + + return data.Address, nil +} + +func deployArbitrumModule(client *seth.Client) (common.Address, error) { + abi, err := arbitrum_module.ArbitrumModuleMetaData.GetAbi() + if err != nil { + return common.Address{}, fmt.Errorf("failed to get ArbitrumModule ABI: %w", err) + } + data, err := client.DeployContract(client.NewTXOpts(), "ArbitrumModule", *abi, common.FromHex(arbitrum_module.ArbitrumModuleMetaData.Bin)) + if err != nil { + return common.Address{}, fmt.Errorf("ArbitrumModule instance deployment have failed: %w", err) + } + + return data.Address, nil +} + +func deployOptimismModule(client *seth.Client) (common.Address, error) { + abi, err := optimism_module.OptimismModuleMetaData.GetAbi() + if err != nil { + return common.Address{}, fmt.Errorf("failed to get OptimismModule ABI: %w", err) + } + data, err := client.DeployContract(client.NewTXOpts(), "OptimismModule", *abi, common.FromHex(optimism_module.OptimismModuleMetaData.Bin)) + if err != nil { + return common.Address{}, fmt.Errorf("OptimismModule instance deployment have failed: %w", err) + } + + return data.Address, nil +} + +func deployBaseModule(client *seth.Client) (common.Address, error) { + abi, err := chain_module_base.ChainModuleBaseMetaData.GetAbi() + if err != nil { + return common.Address{}, fmt.Errorf("failed to get BaseModule ABI: %w", err) + } + data, err := client.DeployContract(client.NewTXOpts(), "BaseModule", *abi, common.FromHex(chain_module_base.ChainModuleBaseMetaData.Bin)) + if err != nil { + return 
common.Address{}, fmt.Errorf("BaseModule instance deployment have failed: %w", err) + } + + return data.Address, nil +} + +// EthereumKeeperRegistrar corresponds to the registrar which is used to send requests to the registry when +// registering new upkeeps. +type EthereumKeeperRegistrar struct { + client *seth.Client + registrar *keeper_registrar_wrapper1_2.KeeperRegistrar + registrar20 *keeper_registrar_wrapper2_0.KeeperRegistrar + registrar21 *registrar21.AutomationRegistrar + address *common.Address +} + +func (v *EthereumKeeperRegistrar) Address() string { + return v.address.Hex() +} + +func (v *EthereumKeeperRegistrar) Fund(_ *big.Float) error { + panic("do not use this function, use actions_seth.SendFunds instead") +} + +// EncodeRegisterRequest encodes register request to call it through link token TransferAndCall +func (v *EthereumKeeperRegistrar) EncodeRegisterRequest(name string, email []byte, upkeepAddr string, gasLimit uint32, adminAddr string, checkData []byte, amount *big.Int, source uint8, senderAddr string, isLogTrigger bool, isMercury bool) ([]byte, error) { + if v.registrar20 != nil { + registryABI, err := abi.JSON(strings.NewReader(keeper_registrar_wrapper2_0.KeeperRegistrarMetaData.ABI)) + if err != nil { + return nil, err + } + req, err := registryABI.Pack( + "register", + name, + email, + common.HexToAddress(upkeepAddr), + gasLimit, + common.HexToAddress(adminAddr), + checkData, + []byte{}, //offchainConfig + amount, + common.HexToAddress(senderAddr), + ) + + if err != nil { + return nil, err + } + return req, nil + } else if v.registrar21 != nil { + if isLogTrigger { + var topic0InBytes [32]byte + // bytes representation of 0x0000000000000000000000000000000000000000000000000000000000000000 + bytes0 := [32]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + } + if isMercury { + // bytes representation of 0xd1ffe9e45581c11d7d9f2ed5f75217cd4be9f8b7eee6af0f6d03f46de53956cd + topic0InBytes = 
[32]byte{209, 255, 233, 228, 85, 129, 193, 29, 125, 159, 46, 213, 247, 82, 23, 205, 75, 233, 248, 183, 238, 230, 175, 15, 109, 3, 244, 109, 229, 57, 86, 205} + } else { + // bytes representation of 0x3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d + topic0InBytes = [32]byte{ + 61, 83, 163, 149, 80, 224, 70, 136, + 6, 88, 39, 243, 187, 134, 88, 76, + 176, 7, 171, 158, 188, 167, 235, + 213, 40, 231, 48, 28, 156, 49, 235, 93, + } + } + + logTriggerConfigStruct := acutils.IAutomationV21PlusCommonLogTriggerConfig{ + ContractAddress: common.HexToAddress(upkeepAddr), + FilterSelector: 0, + Topic0: topic0InBytes, + Topic1: bytes0, + Topic2: bytes0, + Topic3: bytes0, + } + encodedLogTriggerConfig, err := compatibleUtils.Methods["_logTriggerConfig"].Inputs.Pack(&logTriggerConfigStruct) + if err != nil { + return nil, err + } + + req, err := registrarABI.Pack( + "register", + name, + email, + common.HexToAddress(upkeepAddr), + gasLimit, + common.HexToAddress(adminAddr), + uint8(1), // trigger type + checkData, + encodedLogTriggerConfig, // triggerConfig + []byte{}, // offchainConfig + amount, + common.HexToAddress(senderAddr), + ) + + return req, err + } + req, err := registrarABI.Pack( + "register", + name, + email, + common.HexToAddress(upkeepAddr), + gasLimit, + common.HexToAddress(adminAddr), + uint8(0), // trigger type + checkData, + []byte{}, // triggerConfig + []byte{}, // offchainConfig + amount, + common.HexToAddress(senderAddr), + ) + return req, err + } + registryABI, err := abi.JSON(strings.NewReader(keeper_registrar_wrapper1_2.KeeperRegistrarMetaData.ABI)) + if err != nil { + return nil, err + } + req, err := registryABI.Pack( + "register", + name, + email, + common.HexToAddress(upkeepAddr), + gasLimit, + common.HexToAddress(adminAddr), + checkData, + amount, + source, + common.HexToAddress(senderAddr), + ) + if err != nil { + return nil, err + } + return req, nil +} + +func DeployKeeperRegistrar(client *seth.Client, registryVersion 
eth_contracts.KeeperRegistryVersion, linkAddr string, registrarSettings KeeperRegistrarSettings) (KeeperRegistrar, error) { + if registryVersion == eth_contracts.RegistryVersion_2_0 { + abi, err := keeper_registrar_wrapper2_0.KeeperRegistrarMetaData.GetAbi() + if err != nil { + return &EthereumKeeperRegistrar{}, fmt.Errorf("failed to get KeeperRegistrar2_0 ABI: %w", err) + } + data, err := client.DeployContract(client.NewTXOpts(), "KeeperRegistrar2_0", *abi, common.FromHex(keeper_registrar_wrapper2_0.KeeperRegistrarMetaData.Bin), + common.HexToAddress(linkAddr), + registrarSettings.AutoApproveConfigType, + registrarSettings.AutoApproveMaxAllowed, + common.HexToAddress(registrarSettings.RegistryAddr), + registrarSettings.MinLinkJuels, + ) + if err != nil { + return &EthereumKeeperRegistrar{}, fmt.Errorf("KeeperRegistrar2_0 instance deployment have failed: %w", err) + } + + instance, err := keeper_registrar_wrapper2_0.NewKeeperRegistrar(data.Address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumKeeperRegistrar{}, fmt.Errorf("failed to instantiate KeeperRegistrar2_0 instance: %w", err) + } + + return &EthereumKeeperRegistrar{ + client: client, + registrar20: instance, + address: &data.Address, + }, nil + } else if registryVersion == eth_contracts.RegistryVersion_2_1 || registryVersion == eth_contracts.RegistryVersion_2_2 { // both 2.1 and 2.2 registry use registrar 2.1 + abi, err := registrar21.AutomationRegistrarMetaData.GetAbi() + if err != nil { + return &EthereumKeeperRegistrar{}, fmt.Errorf("failed to get KeeperRegistrar2_1 ABI: %w", err) + } + // set default TriggerType to 0(conditional), AutoApproveConfigType to 2(auto approve enabled), AutoApproveMaxAllowed to 1000 + triggerConfigs := []registrar21.AutomationRegistrar21InitialTriggerConfig{ + {TriggerType: 0, AutoApproveType: registrarSettings.AutoApproveConfigType, + AutoApproveMaxAllowed: uint32(registrarSettings.AutoApproveMaxAllowed)}, + {TriggerType: 1, 
AutoApproveType: registrarSettings.AutoApproveConfigType, + AutoApproveMaxAllowed: uint32(registrarSettings.AutoApproveMaxAllowed)}, + } + + data, err := client.DeployContract(client.NewTXOpts(), "KeeperRegistrar2_1", *abi, common.FromHex(registrar21.AutomationRegistrarMetaData.Bin), + common.HexToAddress(linkAddr), + common.HexToAddress(registrarSettings.RegistryAddr), + registrarSettings.MinLinkJuels, + triggerConfigs, + ) + if err != nil { + return &EthereumKeeperRegistrar{}, fmt.Errorf("KeeperRegistrar2_1 instance deployment have failed: %w", err) + } + + instance, err := registrar21.NewAutomationRegistrar(data.Address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumKeeperRegistrar{}, fmt.Errorf("failed to instantiate KeeperRegistrar2_1 instance: %w", err) + } + + return &EthereumKeeperRegistrar{ + client: client, + registrar21: instance, + address: &data.Address, + }, nil + } + + // non OCR registrar + abi, err := keeper_registrar_wrapper1_2.KeeperRegistrarMetaData.GetAbi() + if err != nil { + return &EthereumKeeperRegistrar{}, fmt.Errorf("failed to get KeeperRegistrar1_2 ABI: %w", err) + } + + data, err := client.DeployContract(client.NewTXOpts(), "KeeperRegistrar1_2", *abi, common.FromHex(keeper_registrar_wrapper1_2.KeeperRegistrarMetaData.Bin), + common.HexToAddress(linkAddr), + registrarSettings.AutoApproveConfigType, + registrarSettings.AutoApproveMaxAllowed, + common.HexToAddress(registrarSettings.RegistryAddr), + registrarSettings.MinLinkJuels, + ) + if err != nil { + return &EthereumKeeperRegistrar{}, fmt.Errorf("KeeperRegistrar1_2 instance deployment have failed: %w", err) + } + + instance, err := keeper_registrar_wrapper1_2.NewKeeperRegistrar(data.Address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumKeeperRegistrar{}, fmt.Errorf("failed to instantiate KeeperRegistrar1_2 instance: %w", err) + } + + return &EthereumKeeperRegistrar{ + client: client, + registrar: 
instance, + address: &data.Address, + }, nil +} + +// LoadKeeperRegistrar returns deployed on given address EthereumKeeperRegistrar +func LoadKeeperRegistrar(client *seth.Client, address common.Address, registryVersion eth_contracts.KeeperRegistryVersion) (KeeperRegistrar, error) { + if registryVersion == eth_contracts.RegistryVersion_1_1 || registryVersion == eth_contracts.RegistryVersion_1_2 || + registryVersion == eth_contracts.RegistryVersion_1_3 { + + abi, err := keeper_registrar_wrapper1_2.KeeperRegistrarMetaData.GetAbi() + if err != nil { + return &EthereumKeeperRegistrar{}, fmt.Errorf("failed to get KeeperRegistrar1_2 ABI: %w", err) + } + + client.ContractStore.AddABI("KeeperRegistrar1_2", *abi) + client.ContractStore.AddBIN("KeeperRegistrar1_2", common.FromHex(keeper_registrar_wrapper1_2.KeeperRegistrarMetaData.Bin)) + + instance, err := keeper_registrar_wrapper1_2.NewKeeperRegistrar(address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumKeeperRegistrar{}, fmt.Errorf("failed to instantiate KeeperRegistrar1_2 instance: %w", err) + } + + return &EthereumKeeperRegistrar{ + address: &address, + client: client, + registrar: instance, + }, err + } else if registryVersion == eth_contracts.RegistryVersion_2_0 { + abi, err := keeper_registrar_wrapper2_0.KeeperRegistrarMetaData.GetAbi() + if err != nil { + return &EthereumKeeperRegistrar{}, fmt.Errorf("failed to get KeeperRegistrar2_0 ABI: %w", err) + } + + client.ContractStore.AddABI("KeeperRegistrar2_0", *abi) + client.ContractStore.AddBIN("KeeperRegistrar2_0", common.FromHex(keeper_registrar_wrapper2_0.KeeperRegistrarMetaData.Bin)) + + instance, err := keeper_registrar_wrapper2_0.NewKeeperRegistrar(address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumKeeperRegistrar{}, fmt.Errorf("failed to instantiate KeeperRegistrar2_0 instance: %w", err) + } + + return &EthereumKeeperRegistrar{ + address: &address, + client: client, + 
registrar20: instance, + }, nil + } + + abi, err := registrar21.AutomationRegistrarMetaData.GetAbi() + if err != nil { + return &EthereumKeeperRegistrar{}, fmt.Errorf("failed to get KeeperRegistrar2_1 ABI: %w", err) + } + + client.ContractStore.AddABI("KeeperRegistrar2_1", *abi) + client.ContractStore.AddBIN("KeeperRegistrar2_1", common.FromHex(registrar21.AutomationRegistrarMetaData.Bin)) + + instance, err := registrar21.NewAutomationRegistrar(address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumKeeperRegistrar{}, fmt.Errorf("failed to instantiate KeeperRegistrar2_1 instance: %w", err) + } + + return &EthereumKeeperRegistrar{ + address: &address, + client: client, + registrar21: instance, + }, nil +} + +type EthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer struct { + client *seth.Client + consumer *log_triggered_streams_lookup_wrapper.LogTriggeredStreamsLookup + address *common.Address +} + +func (v *EthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer) Address() string { + return v.address.Hex() +} + +// Kick off the log trigger event. 
The contract uses Mercury v0.2 so no need to set ParamKeys +func (v *EthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer) Start() error { + _, err := v.client.Decode(v.consumer.Start(v.client.NewTXOpts())) + return err +} + +func (v *EthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer) Counter(ctx context.Context) (*big.Int, error) { + return v.consumer.Counter(&bind.CallOpts{ + From: v.client.MustGetRootKeyAddress(), + Context: ctx, + }) +} + +func DeployAutomationLogTriggeredStreamsLookupUpkeepConsumerFromKey(client *seth.Client, keyNum int) (KeeperConsumer, error) { + abi, err := log_triggered_streams_lookup_wrapper.LogTriggeredStreamsLookupMetaData.GetAbi() + if err != nil { + return &EthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer{}, fmt.Errorf("failed to get LogTriggeredStreamsLookupUpkeep ABI: %w", err) + } + data, err := client.DeployContract(client.NewTXKeyOpts(keyNum), "LogTriggeredStreamsLookupUpkeep", *abi, common.FromHex(log_triggered_streams_lookup_wrapper.LogTriggeredStreamsLookupMetaData.Bin), false, false, false) + if err != nil { + return &EthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer{}, fmt.Errorf("LogTriggeredStreamsLookupUpkeep instance deployment have failed: %w", err) + } + + instance, err := log_triggered_streams_lookup_wrapper.NewLogTriggeredStreamsLookup(data.Address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer{}, fmt.Errorf("failed to instantiate LogTriggeredStreamsLookupUpkeep instance: %w", err) + } + + return &EthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer{ + client: client, + consumer: instance, + address: &data.Address, + }, nil +} + +func DeployAutomationLogTriggeredStreamsLookupUpkeepConsumer(client *seth.Client) (KeeperConsumer, error) { + return DeployAutomationLogTriggeredStreamsLookupUpkeepConsumerFromKey(client, 0) +} + +type EthereumAutomationStreamsLookupUpkeepConsumer struct { + client 
*seth.Client + consumer *streams_lookup_upkeep_wrapper.StreamsLookupUpkeep + address *common.Address +} + +func (v *EthereumAutomationStreamsLookupUpkeepConsumer) Address() string { + return v.address.Hex() +} + +func (v *EthereumAutomationStreamsLookupUpkeepConsumer) Start() error { + _, err := v.client.Decode(v.consumer.SetParamKeys(v.client.NewTXOpts(), "feedIdHex", "blockNumber")) + return err +} + +func (v *EthereumAutomationStreamsLookupUpkeepConsumer) Counter(ctx context.Context) (*big.Int, error) { + return v.consumer.Counter(&bind.CallOpts{ + From: v.client.MustGetRootKeyAddress(), + Context: ctx, + }) +} + +func DeployAutomationStreamsLookupUpkeepConsumerFromKey(client *seth.Client, keyNum int, testRange *big.Int, interval *big.Int, useArbBlock bool, staging bool, verify bool) (KeeperConsumer, error) { + abi, err := streams_lookup_upkeep_wrapper.StreamsLookupUpkeepMetaData.GetAbi() + if err != nil { + return &EthereumAutomationStreamsLookupUpkeepConsumer{}, fmt.Errorf("failed to get StreamsLookupUpkeep ABI: %w", err) + } + data, err := client.DeployContract(client.NewTXKeyOpts(keyNum), "StreamsLookupUpkeep", *abi, common.FromHex(streams_lookup_upkeep_wrapper.StreamsLookupUpkeepMetaData.Bin), + testRange, + interval, + useArbBlock, + staging, + verify, + ) + if err != nil { + return &EthereumAutomationStreamsLookupUpkeepConsumer{}, fmt.Errorf("StreamsLookupUpkeep instance deployment have failed: %w", err) + } + + instance, err := streams_lookup_upkeep_wrapper.NewStreamsLookupUpkeep(data.Address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumAutomationStreamsLookupUpkeepConsumer{}, fmt.Errorf("failed to instantiate StreamsLookupUpkeep instance: %w", err) + } + + return &EthereumAutomationStreamsLookupUpkeepConsumer{ + client: client, + consumer: instance, + address: &data.Address, + }, nil +} + +func DeployAutomationStreamsLookupUpkeepConsumer(client *seth.Client, testRange *big.Int, interval *big.Int, useArbBlock 
bool, staging bool, verify bool) (KeeperConsumer, error) { + return DeployAutomationStreamsLookupUpkeepConsumerFromKey(client, 0, testRange, interval, useArbBlock, staging, verify) +} + +type EthereumAutomationLogCounterConsumer struct { + client *seth.Client + consumer *log_upkeep_counter_wrapper.LogUpkeepCounter + address *common.Address +} + +func (v *EthereumAutomationLogCounterConsumer) Address() string { + return v.address.Hex() +} + +func (v *EthereumAutomationLogCounterConsumer) Start() error { + _, err := v.client.Decode(v.consumer.Start(v.client.NewTXOpts())) + return err +} + +func (v *EthereumAutomationLogCounterConsumer) Counter(ctx context.Context) (*big.Int, error) { + return v.consumer.Counter(&bind.CallOpts{ + From: v.client.MustGetRootKeyAddress(), + Context: ctx, + }) +} + +func DeployAutomationLogTriggerConsumerFromKey(client *seth.Client, keyNum int, testInterval *big.Int) (KeeperConsumer, error) { + abi, err := log_upkeep_counter_wrapper.LogUpkeepCounterMetaData.GetAbi() + if err != nil { + return &EthereumAutomationLogCounterConsumer{}, fmt.Errorf("failed to get LogUpkeepCounter ABI: %w", err) + } + data, err := client.DeployContract(client.NewTXKeyOpts(keyNum), "LogUpkeepCounter", *abi, common.FromHex(log_upkeep_counter_wrapper.LogUpkeepCounterMetaData.Bin), testInterval) + if err != nil { + return &EthereumAutomationLogCounterConsumer{}, fmt.Errorf("LogUpkeepCounter instance deployment have failed: %w", err) + } + + instance, err := log_upkeep_counter_wrapper.NewLogUpkeepCounter(data.Address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumAutomationLogCounterConsumer{}, fmt.Errorf("failed to instantiate LogUpkeepCounter instance: %w", err) + } + + return &EthereumAutomationLogCounterConsumer{ + client: client, + consumer: instance, + address: &data.Address, + }, nil +} + +func DeployAutomationLogTriggerConsumer(client *seth.Client, testInterval *big.Int) (KeeperConsumer, error) { + return 
DeployAutomationLogTriggerConsumerFromKey(client, 0, testInterval) +} + +// EthereumUpkeepCounter represents keeper consumer (upkeep) counter contract +type EthereumUpkeepCounter struct { + client *seth.Client + consumer *upkeep_counter_wrapper.UpkeepCounter + address *common.Address +} + +func (v *EthereumUpkeepCounter) Address() string { + return v.address.Hex() +} + +func (v *EthereumUpkeepCounter) Fund(_ *big.Float) error { + panic("do not use this function, use actions_seth.SendFunds instead") +} +func (v *EthereumUpkeepCounter) Counter(ctx context.Context) (*big.Int, error) { + return v.consumer.Counter(&bind.CallOpts{ + From: v.client.MustGetRootKeyAddress(), + Context: ctx, + }) +} + +func (v *EthereumUpkeepCounter) SetSpread(testRange *big.Int, interval *big.Int) error { + _, err := v.client.Decode(v.consumer.SetSpread(v.client.NewTXOpts(), testRange, interval)) + return err +} + +// Just pass for non-logtrigger +func (v *EthereumUpkeepCounter) Start() error { + return nil +} + +func DeployUpkeepCounterFromKey(client *seth.Client, keyNum int, testRange *big.Int, interval *big.Int) (UpkeepCounter, error) { + abi, err := upkeep_counter_wrapper.UpkeepCounterMetaData.GetAbi() + if err != nil { + return &EthereumUpkeepCounter{}, fmt.Errorf("failed to get UpkeepCounter ABI: %w", err) + } + data, err := client.DeployContract(client.NewTXKeyOpts(keyNum), "UpkeepCounter", *abi, common.FromHex(upkeep_counter_wrapper.UpkeepCounterMetaData.Bin), testRange, interval) + if err != nil { + return &EthereumUpkeepCounter{}, fmt.Errorf("UpkeepCounter instance deployment have failed: %w", err) + } + + instance, err := upkeep_counter_wrapper.NewUpkeepCounter(data.Address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumUpkeepCounter{}, fmt.Errorf("failed to instantiate UpkeepCounter instance: %w", err) + } + + return &EthereumUpkeepCounter{ + client: client, + consumer: instance, + address: &data.Address, + }, nil +} + +func 
DeployUpkeepCounter(client *seth.Client, testRange *big.Int, interval *big.Int) (UpkeepCounter, error) { + return DeployUpkeepCounterFromKey(client, 0, testRange, interval) +} + +// EthereumUpkeepPerformCounterRestrictive represents keeper consumer (upkeep) counter contract +type EthereumUpkeepPerformCounterRestrictive struct { + client *seth.Client + consumer *upkeep_perform_counter_restrictive_wrapper.UpkeepPerformCounterRestrictive + address *common.Address +} + +func (v *EthereumUpkeepPerformCounterRestrictive) Address() string { + return v.address.Hex() +} + +func (v *EthereumUpkeepPerformCounterRestrictive) Fund(_ *big.Float) error { + panic("do not use this function, use actions_seth.SendFunds instead") +} +func (v *EthereumUpkeepPerformCounterRestrictive) Counter(ctx context.Context) (*big.Int, error) { + return v.consumer.GetCountPerforms(&bind.CallOpts{ + From: v.client.MustGetRootKeyAddress(), + Context: ctx, + }) +} + +func (v *EthereumUpkeepPerformCounterRestrictive) SetSpread(testRange *big.Int, interval *big.Int) error { + _, err := v.client.Decode(v.consumer.SetSpread(v.client.NewTXOpts(), testRange, interval)) + return err +} + +func DeployUpkeepPerformCounterRestrictive(client *seth.Client, testRange *big.Int, averageEligibilityCadence *big.Int) (UpkeepPerformCounterRestrictive, error) { + abi, err := upkeep_perform_counter_restrictive_wrapper.UpkeepPerformCounterRestrictiveMetaData.GetAbi() + if err != nil { + return &EthereumUpkeepCounter{}, fmt.Errorf("failed to get UpkeepPerformCounterRestrictive ABI: %w", err) + } + data, err := client.DeployContract(client.NewTXOpts(), "UpkeepPerformCounterRestrictive", *abi, common.FromHex(upkeep_perform_counter_restrictive_wrapper.UpkeepPerformCounterRestrictiveMetaData.Bin), testRange, averageEligibilityCadence) + if err != nil { + return &EthereumUpkeepCounter{}, fmt.Errorf("UpkeepPerformCounterRestrictive instance deployment have failed: %w", err) + } + + instance, err := 
upkeep_perform_counter_restrictive_wrapper.NewUpkeepPerformCounterRestrictive(data.Address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumUpkeepCounter{}, fmt.Errorf("failed to instantiate UpkeepPerformCounterRestrictive instance: %w", err) + } + + return &EthereumUpkeepPerformCounterRestrictive{ + client: client, + consumer: instance, + address: &data.Address, + }, nil +} + +// EthereumKeeperPerformDataCheckerConsumer represents keeper perform data checker contract +type EthereumKeeperPerformDataCheckerConsumer struct { + client *seth.Client + performDataChecker *perform_data_checker_wrapper.PerformDataChecker + address *common.Address +} + +func (v *EthereumKeeperPerformDataCheckerConsumer) Address() string { + return v.address.Hex() +} + +func (v *EthereumKeeperPerformDataCheckerConsumer) Counter(ctx context.Context) (*big.Int, error) { + return v.performDataChecker.Counter(&bind.CallOpts{ + From: v.client.MustGetRootKeyAddress(), + Context: ctx, + }) +} + +func (v *EthereumKeeperPerformDataCheckerConsumer) SetExpectedData(_ context.Context, expectedData []byte) error { + _, err := v.client.Decode(v.performDataChecker.SetExpectedData(v.client.NewTXOpts(), expectedData)) + return err +} + +func DeployKeeperPerformDataChecker(client *seth.Client, expectedData []byte) (KeeperPerformDataChecker, error) { + abi, err := perform_data_checker_wrapper.PerformDataCheckerMetaData.GetAbi() + if err != nil { + return &EthereumKeeperPerformDataCheckerConsumer{}, fmt.Errorf("failed to get PerformDataChecker ABI: %w", err) + } + data, err := client.DeployContract(client.NewTXOpts(), "PerformDataChecker", *abi, common.FromHex(perform_data_checker_wrapper.PerformDataCheckerMetaData.Bin), expectedData) + if err != nil { + return &EthereumKeeperPerformDataCheckerConsumer{}, fmt.Errorf("PerformDataChecker instance deployment have failed: %w", err) + } + + instance, err := perform_data_checker_wrapper.NewPerformDataChecker(data.Address, 
wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumKeeperPerformDataCheckerConsumer{}, fmt.Errorf("failed to instantiate PerformDataChecker instance: %w", err) + } + + return &EthereumKeeperPerformDataCheckerConsumer{ + client: client, + performDataChecker: instance, + address: &data.Address, + }, nil +} + +// EthereumKeeperConsumerPerformance represents a more complicated keeper consumer contract, one intended only for +// performance tests. +type EthereumKeeperConsumerPerformance struct { + client *seth.Client + consumer *keeper_consumer_performance_wrapper.KeeperConsumerPerformance + address *common.Address +} + +func (v *EthereumKeeperConsumerPerformance) Address() string { + return v.address.Hex() +} + +func (v *EthereumKeeperConsumerPerformance) Fund(_ *big.Float) error { + panic("do not use this function, use actions_seth.SendFunds instead") +} + +func (v *EthereumKeeperConsumerPerformance) CheckEligible(ctx context.Context) (bool, error) { + return v.consumer.CheckEligible(&bind.CallOpts{ + From: v.client.MustGetRootKeyAddress(), + Context: ctx, + }) +} + +func (v *EthereumKeeperConsumerPerformance) GetUpkeepCount(ctx context.Context) (*big.Int, error) { + return v.consumer.GetCountPerforms(&bind.CallOpts{ + From: v.client.MustGetRootKeyAddress(), + Context: ctx, + }) +} + +func (v *EthereumKeeperConsumerPerformance) SetCheckGasToBurn(_ context.Context, gas *big.Int) error { + _, err := v.client.Decode(v.consumer.SetCheckGasToBurn(v.client.NewTXOpts(), gas)) + return err +} + +func (v *EthereumKeeperConsumerPerformance) SetPerformGasToBurn(_ context.Context, gas *big.Int) error { + _, err := v.client.Decode(v.consumer.SetPerformGasToBurn(v.client.NewTXOpts(), gas)) + return err +} + +func DeployKeeperConsumerPerformance( + client *seth.Client, + testBlockRange, + averageCadence, + checkGasToBurn, + performGasToBurn *big.Int, +) (KeeperConsumerPerformance, error) { + abi, err := 
keeper_consumer_performance_wrapper.KeeperConsumerPerformanceMetaData.GetAbi() + if err != nil { + return &EthereumKeeperConsumerPerformance{}, fmt.Errorf("failed to get KeeperConsumerPerformance ABI: %w", err) + } + data, err := client.DeployContract(client.NewTXOpts(), "KeeperConsumerPerformance", *abi, common.FromHex(keeper_consumer_performance_wrapper.KeeperConsumerPerformanceMetaData.Bin), + testBlockRange, + averageCadence, + checkGasToBurn, + performGasToBurn) + if err != nil { + return &EthereumKeeperConsumerPerformance{}, fmt.Errorf("KeeperConsumerPerformance instance deployment have failed: %w", err) + } + + instance, err := keeper_consumer_performance_wrapper.NewKeeperConsumerPerformance(data.Address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumKeeperConsumerPerformance{}, fmt.Errorf("failed to instantiate KeeperConsumerPerformance instance: %w", err) + } + + return &EthereumKeeperConsumerPerformance{ + client: client, + consumer: instance, + address: &data.Address, + }, nil +} + +type EthereumAutomationSimpleLogCounterConsumer struct { + client *seth.Client + consumer *simple_log_upkeep_counter_wrapper.SimpleLogUpkeepCounter + address *common.Address +} + +func (v *EthereumAutomationSimpleLogCounterConsumer) Address() string { + return v.address.Hex() +} + +func (v *EthereumAutomationSimpleLogCounterConsumer) Start() error { + return nil +} + +func (v *EthereumAutomationSimpleLogCounterConsumer) Counter(ctx context.Context) (*big.Int, error) { + return v.consumer.Counter(&bind.CallOpts{ + From: v.client.MustGetRootKeyAddress(), + Context: ctx, + }) +} + +func DeployAutomationSimpleLogTriggerConsumer(client *seth.Client, isStreamsLookup bool) (KeeperConsumer, error) { + return DeployAutomationSimpleLogTriggerConsumerFromKey(client, isStreamsLookup, 0) +} + +func DeployAutomationSimpleLogTriggerConsumerFromKey(client *seth.Client, isStreamsLookup bool, keyNum int) (KeeperConsumer, error) { + abi, err := 
simple_log_upkeep_counter_wrapper.SimpleLogUpkeepCounterMetaData.GetAbi() + if err != nil { + return &EthereumAutomationSimpleLogCounterConsumer{}, fmt.Errorf("failed to get SimpleLogUpkeepCounter ABI: %w", err) + } + data, err := client.DeployContract(client.NewTXKeyOpts(keyNum), "SimpleLogUpkeepCounter", *abi, common.FromHex(simple_log_upkeep_counter_wrapper.SimpleLogUpkeepCounterMetaData.Bin), isStreamsLookup) + if err != nil { + return &EthereumAutomationSimpleLogCounterConsumer{}, fmt.Errorf("SimpleLogUpkeepCounter instance deployment have failed: %w", err) + } + + instance, err := simple_log_upkeep_counter_wrapper.NewSimpleLogUpkeepCounter(data.Address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumAutomationSimpleLogCounterConsumer{}, fmt.Errorf("failed to instantiate SimpleLogUpkeepCounter instance: %w", err) + } + + return &EthereumAutomationSimpleLogCounterConsumer{ + client: client, + consumer: instance, + address: &data.Address, + }, nil +} + +// EthereumAutomationConsumerBenchmark represents a more complicated keeper consumer contract, one intended only for +// Benchmark tests. 
+type EthereumAutomationConsumerBenchmark struct { + client *seth.Client + consumer *automation_consumer_benchmark.AutomationConsumerBenchmark + address *common.Address +} + +func (v *EthereumAutomationConsumerBenchmark) Address() string { + return v.address.Hex() +} + +func (v *EthereumAutomationConsumerBenchmark) Fund(_ *big.Float) error { + panic("do not use this function, use actions_seth.SendFunds instead") +} + +func (v *EthereumAutomationConsumerBenchmark) CheckEligible(ctx context.Context, id *big.Int, _range *big.Int, firstEligibleBuffer *big.Int) (bool, error) { + return v.consumer.CheckEligible(&bind.CallOpts{ + From: v.client.MustGetRootKeyAddress(), + Context: ctx, + }, id, _range, firstEligibleBuffer) +} + +func (v *EthereumAutomationConsumerBenchmark) GetUpkeepCount(ctx context.Context, id *big.Int) (*big.Int, error) { + return v.consumer.GetCountPerforms(&bind.CallOpts{ + From: v.client.MustGetRootKeyAddress(), + Context: ctx, + }, id) +} + +// DeployKeeperConsumerBenchmark deploys a keeper consumer benchmark contract with a standard contract backend +func DeployKeeperConsumerBenchmark(client *seth.Client) (AutomationConsumerBenchmark, error) { + return deployKeeperConsumerBenchmarkWithWrapperFn(client, func(client *seth.Client) *wrappers.WrappedContractBackend { + return wrappers.MustNewWrappedContractBackend(nil, client) + }) +} + +// DeployKeeperConsumerBenchmarkWithRetry deploys a keeper consumer benchmark contract with a read-only operations retrying contract backend +func DeployKeeperConsumerBenchmarkWithRetry(client *seth.Client, logger zerolog.Logger, maxAttempts uint, retryDelay time.Duration) (AutomationConsumerBenchmark, error) { + return deployKeeperConsumerBenchmarkWithWrapperFn(client, func(client *seth.Client) *wrappers.WrappedContractBackend { + return wrappers.MustNewRetryingWrappedContractBackend(client, logger, maxAttempts, retryDelay) + }) +} + +func deployKeeperConsumerBenchmarkWithWrapperFn(client *seth.Client, wrapperConstrFn 
func(client *seth.Client) *wrappers.WrappedContractBackend) (AutomationConsumerBenchmark, error) { + abi, err := automation_consumer_benchmark.AutomationConsumerBenchmarkMetaData.GetAbi() + if err != nil { + return &EthereumAutomationConsumerBenchmark{}, fmt.Errorf("failed to get AutomationConsumerBenchmark ABI: %w", err) + } + data, err := client.DeployContract(client.NewTXOpts(), "AutomationConsumerBenchmark", *abi, common.FromHex(automation_consumer_benchmark.AutomationConsumerBenchmarkMetaData.Bin)) + if err != nil { + return &EthereumAutomationConsumerBenchmark{}, fmt.Errorf("AutomationConsumerBenchmark instance deployment have failed: %w", err) + } + + instance, err := automation_consumer_benchmark.NewAutomationConsumerBenchmark(data.Address, wrapperConstrFn(client)) + if err != nil { + return &EthereumAutomationConsumerBenchmark{}, fmt.Errorf("failed to instantiate AutomationConsumerBenchmark instance: %w", err) + } + + return &EthereumAutomationConsumerBenchmark{ + client: client, + consumer: instance, + address: &data.Address, + }, nil +} + +// KeeperConsumerBenchmarkUpkeepObserver is a header subscription that awaits for a round of upkeeps +type KeeperConsumerBenchmarkUpkeepObserver struct { + instance AutomationConsumerBenchmark + registry KeeperRegistry + upkeepID *big.Int + + firstBlockNum uint64 // Records the number of the first block that came in + lastBlockNum uint64 // Records the number of the last block that came in + blockRange int64 // How many blocks to watch upkeeps for + upkeepSLA int64 // SLA after which an upkeep is counted as 'missed' + metricsReporter *testreporters.KeeperBenchmarkTestReporter // Testreporter to track results + upkeepIndex int64 + firstEligibleBuffer int64 + + // State variables, changes as we get blocks + blocksSinceSubscription int64 // How many blocks have passed since subscribing + blocksSinceEligible int64 // How many blocks have come in since upkeep has been eligible for check + countEligible int64 // Number of 
times the upkeep became eligible + countMissed int64 // Number of times we missed SLA for performing upkeep + upkeepCount int64 // The count of upkeeps done so far + allCheckDelays []int64 // Tracks the amount of blocks missed before an upkeep since it became eligible + complete bool + l zerolog.Logger +} + +// NewKeeperConsumerBenchmarkUpkeepObserver provides a new instance of a NewKeeperConsumerBenchmarkUpkeepObserver +// Used to track and log benchmark test results for keepers +func NewKeeperConsumerBenchmarkUpkeepObserver( + contract AutomationConsumerBenchmark, + registry KeeperRegistry, + upkeepID *big.Int, + blockRange int64, + upkeepSLA int64, + metricsReporter *testreporters.KeeperBenchmarkTestReporter, + upkeepIndex int64, + firstEligibleBuffer int64, + logger zerolog.Logger, +) *KeeperConsumerBenchmarkUpkeepObserver { + return &KeeperConsumerBenchmarkUpkeepObserver{ + instance: contract, + registry: registry, + upkeepID: upkeepID, + blockRange: blockRange, + upkeepSLA: upkeepSLA, + blocksSinceSubscription: 0, + blocksSinceEligible: 0, + upkeepCount: 0, + allCheckDelays: []int64{}, + metricsReporter: metricsReporter, + complete: false, + lastBlockNum: 0, + upkeepIndex: upkeepIndex, + firstBlockNum: 0, + firstEligibleBuffer: firstEligibleBuffer, + l: logger, + } +} + +// ReceiveHeader will query the latest Keeper round and check to see whether upkeep was performed, it returns +// true when observation has finished. 
+func (o *KeeperConsumerBenchmarkUpkeepObserver) ReceiveHeader(receivedHeader *blockchain.SafeEVMHeader) (bool, error) { + if receivedHeader.Number.Uint64() <= o.lastBlockNum { // Uncle / reorg we won't count + return false, nil + } + if o.firstBlockNum == 0 { + o.firstBlockNum = receivedHeader.Number.Uint64() + } + o.lastBlockNum = receivedHeader.Number.Uint64() + // Increment block counters + o.blocksSinceSubscription++ + + upkeepCount, err := o.instance.GetUpkeepCount(context.Background(), big.NewInt(o.upkeepIndex)) + if err != nil { + return false, err + } + + if upkeepCount.Int64() > o.upkeepCount { // A new upkeep was done + if upkeepCount.Int64() != o.upkeepCount+1 { + return false, errors.New("upkeep count increased by more than 1 in a single block") + } + o.l.Info(). + Uint64("Block_Number", receivedHeader.Number.Uint64()). + Str("Upkeep_ID", o.upkeepID.String()). + Str("Contract_Address", o.instance.Address()). + Int64("Upkeep_Count", upkeepCount.Int64()). + Int64("Blocks_since_eligible", o.blocksSinceEligible). + Str("Registry_Address", o.registry.Address()). + Msg("Upkeep Performed") + + if o.blocksSinceEligible > o.upkeepSLA { + o.l.Warn(). + Uint64("Block_Number", receivedHeader.Number.Uint64()). + Str("Upkeep_ID", o.upkeepID.String()). + Str("Contract_Address", o.instance.Address()). + Int64("Blocks_since_eligible", o.blocksSinceEligible). + Str("Registry_Address", o.registry.Address()). + Msg("Upkeep Missed SLA") + o.countMissed++ + } + + o.allCheckDelays = append(o.allCheckDelays, o.blocksSinceEligible) + o.upkeepCount++ + o.blocksSinceEligible = 0 + } + + isEligible, err := o.instance.CheckEligible(context.Background(), big.NewInt(o.upkeepIndex), big.NewInt(o.blockRange), big.NewInt(o.firstEligibleBuffer)) + if err != nil { + return false, err + } + if isEligible { + if o.blocksSinceEligible == 0 { + // First time this upkeep became eligible + o.countEligible++ + o.l.Info(). + Uint64("Block_Number", receivedHeader.Number.Uint64()). 
+ Str("Upkeep_ID", o.upkeepID.String()). + Str("Contract_Address", o.instance.Address()). + Str("Registry_Address", o.registry.Address()). + Msg("Upkeep Now Eligible") + } + o.blocksSinceEligible++ + } + + if o.blocksSinceSubscription >= o.blockRange || int64(o.lastBlockNum-o.firstBlockNum) >= o.blockRange { + if o.blocksSinceEligible > 0 { + if o.blocksSinceEligible > o.upkeepSLA { + o.l.Warn(). + Uint64("Block_Number", receivedHeader.Number.Uint64()). + Str("Upkeep_ID", o.upkeepID.String()). + Str("Contract_Address", o.instance.Address()). + Int64("Blocks_since_eligible", o.blocksSinceEligible). + Str("Registry_Address", o.registry.Address()). + Msg("Upkeep remained eligible at end of test and missed SLA") + o.countMissed++ + } else { + o.l.Info(). + Uint64("Block_Number", receivedHeader.Number.Uint64()). + Str("Upkeep_ID", o.upkeepID.String()). + Str("Contract_Address", o.instance.Address()). + Int64("Upkeep_Count", upkeepCount.Int64()). + Int64("Blocks_since_eligible", o.blocksSinceEligible). + Str("Registry_Address", o.registry.Address()). + Msg("Upkeep remained eligible at end of test and was within SLA") + } + o.allCheckDelays = append(o.allCheckDelays, o.blocksSinceEligible) + } + + o.l.Info(). + Uint64("Block_Number", receivedHeader.Number.Uint64()). + Str("Upkeep_ID", o.upkeepID.String()). + Str("Contract_Address", o.instance.Address()). + Int64("Upkeeps_Performed", upkeepCount.Int64()). + Int64("Total_Blocks_Watched", o.blocksSinceSubscription). + Str("Registry_Address", o.registry.Address()). 
+ Msg("Finished Watching for Upkeeps") + + o.complete = true + return true, nil + } + return false, nil +} + +// Complete returns whether watching for upkeeps has completed +func (o *KeeperConsumerBenchmarkUpkeepObserver) Complete() bool { + return o.complete +} + +// LogDetails logs the results of the benchmark test to testreporter +func (o *KeeperConsumerBenchmarkUpkeepObserver) LogDetails() { + report := testreporters.KeeperBenchmarkTestReport{ + ContractAddress: o.instance.Address(), + TotalEligibleCount: o.countEligible, + TotalSLAMissedUpkeeps: o.countMissed, + TotalPerformedUpkeeps: o.upkeepCount, + AllCheckDelays: o.allCheckDelays, + RegistryAddress: o.registry.Address(), + } + o.metricsReporter.ReportMutex.Lock() + o.metricsReporter.Reports = append(o.metricsReporter.Reports, report) + defer o.metricsReporter.ReportMutex.Unlock() +} diff --git a/integration-tests/contracts/ethereum_contracts_seth.go b/integration-tests/contracts/ethereum_contracts_seth.go index 1d02c77bbe8..d1e6a2c8a80 100644 --- a/integration-tests/contracts/ethereum_contracts_seth.go +++ b/integration-tests/contracts/ethereum_contracts_seth.go @@ -28,6 +28,8 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/authorized_forwarder" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/flux_aggregator_wrapper" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/link_token_interface" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/mock_ethlink_aggregator_wrapper" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/mock_gas_aggregator_wrapper" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/operator_factory" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/operator_wrapper" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/oracle_wrapper" @@ -64,7 +66,7 @@ func LoadOffchainAggregator(l zerolog.Logger, seth *seth.Client, 
contractAddress } func DeployOffchainAggregator(l zerolog.Logger, seth *seth.Client, linkTokenAddress common.Address, offchainOptions OffchainOptions) (EthereumOffchainAggregator, error) { - oAbi, err := offchainaggregator.OffchainAggregatorMetaData.GetAbi() + abi, err := offchainaggregator.OffchainAggregatorMetaData.GetAbi() if err != nil { return EthereumOffchainAggregator{}, fmt.Errorf("failed to get OffChain Aggregator ABI: %w", err) } @@ -72,7 +74,7 @@ func DeployOffchainAggregator(l zerolog.Logger, seth *seth.Client, linkTokenAddr ocrDeploymentData, err := seth.DeployContract( seth.NewTXOpts(), "OffChainAggregator", - *oAbi, + *abi, common.FromHex(offchainaggregator.OffchainAggregatorMetaData.Bin), offchainOptions.MaximumGasPrice, offchainOptions.ReasonableGasPrice, @@ -284,11 +286,11 @@ type EthereumOperatorFactory struct { } func DeployEthereumOperatorFactory(seth *seth.Client, linkTokenAddress common.Address) (EthereumOperatorFactory, error) { - operatorAbi, err := operator_factory.OperatorFactoryMetaData.GetAbi() + abi, err := operator_factory.OperatorFactoryMetaData.GetAbi() if err != nil { return EthereumOperatorFactory{}, fmt.Errorf("failed to get OperatorFactory ABI: %w", err) } - operatorData, err := seth.DeployContract(seth.NewTXOpts(), "OperatorFactory", *operatorAbi, common.FromHex(operator_factory.OperatorFactoryMetaData.Bin), linkTokenAddress) + operatorData, err := seth.DeployContract(seth.NewTXOpts(), "OperatorFactory", *abi, common.FromHex(operator_factory.OperatorFactoryMetaData.Bin), linkTokenAddress) if err != nil { return EthereumOperatorFactory{}, fmt.Errorf("OperatorFactory instance deployment have failed: %w", err) } @@ -608,6 +610,28 @@ func DeployLinkTokenContract(l zerolog.Logger, client *seth.Client) (*EthereumLi }, nil } +func LoadLinkTokenContract(l zerolog.Logger, client *seth.Client, address common.Address) (*EthereumLinkToken, error) { + abi, err := link_token_interface.LinkTokenMetaData.GetAbi() + if err != nil { + return 
&EthereumLinkToken{}, fmt.Errorf("failed to get LinkToken ABI: %w", err) + } + + client.ContractStore.AddABI("LinkToken", *abi) + client.ContractStore.AddBIN("LinkToken", common.FromHex(link_token_interface.LinkTokenMetaData.Bin)) + + linkToken, err := link_token_interface.NewLinkToken(address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumLinkToken{}, fmt.Errorf("failed to instantiate LinkToken instance: %w", err) + } + + return &EthereumLinkToken{ + client: client, + instance: linkToken, + address: address, + l: l, + }, nil +} + // Fund the LINK Token contract with ETH to distribute the token func (l *EthereumLinkToken) Fund(_ *big.Float) error { panic("do not use this function, use actions_seth.SendFunds instead") @@ -666,6 +690,19 @@ func (l *EthereumLinkToken) TransferAndCall(to string, amount *big.Int, data []b return decodedTx.Transaction, nil } +func (l *EthereumLinkToken) TransferAndCallFromKey(to string, amount *big.Int, data []byte, keyNum int) (*types.Transaction, error) { + l.l.Info(). + Str("From", l.client.Addresses[keyNum].Hex()). + Str("To", to). + Str("Amount", amount.String()). 
+ Msg("Transferring and Calling LINK") + decodedTx, err := l.client.Decode(l.instance.TransferAndCall(l.client.NewTXKeyOpts(keyNum), common.HexToAddress(to), amount, data)) + if err != nil { + return nil, err + } + return decodedTx.Transaction, nil +} + // DeployFluxAggregatorContract deploys the Flux Aggregator Contract on an EVM chain func DeployFluxAggregatorContract( seth *seth.Client, @@ -966,6 +1003,148 @@ func (e *EthereumAPIConsumer) CreateRequestTo( return err } +// EthereumMockETHLINKFeed represents mocked ETH/LINK feed contract +type EthereumMockETHLINKFeed struct { + client *seth.Client + feed *mock_ethlink_aggregator_wrapper.MockETHLINKAggregator + address *common.Address +} + +func (v *EthereumMockETHLINKFeed) Address() string { + return v.address.Hex() +} + +func (v *EthereumMockETHLINKFeed) LatestRoundData() (*big.Int, error) { + data, err := v.feed.LatestRoundData(&bind.CallOpts{ + From: v.client.Addresses[0], + Context: context.Background(), + }) + if err != nil { + return nil, err + } + return data.Ans, nil +} + +func (v *EthereumMockETHLINKFeed) LatestRoundDataUpdatedAt() (*big.Int, error) { + data, err := v.feed.LatestRoundData(&bind.CallOpts{ + From: v.client.Addresses[0], + Context: context.Background(), + }) + if err != nil { + return nil, err + } + return data.UpdatedAt, nil +} + +func DeployMockETHLINKFeed(client *seth.Client, answer *big.Int) (MockETHLINKFeed, error) { + abi, err := mock_ethlink_aggregator_wrapper.MockETHLINKAggregatorMetaData.GetAbi() + if err != nil { + return &EthereumMockETHLINKFeed{}, fmt.Errorf("failed to get MockETHLINKFeed ABI: %w", err) + } + data, err := client.DeployContract(client.NewTXOpts(), "MockETHLINKFeed", *abi, common.FromHex(mock_ethlink_aggregator_wrapper.MockETHLINKAggregatorMetaData.Bin), answer) + if err != nil { + return &EthereumMockETHLINKFeed{}, fmt.Errorf("MockETHLINKFeed instance deployment have failed: %w", err) + } + + instance, err := 
mock_ethlink_aggregator_wrapper.NewMockETHLINKAggregator(data.Address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumMockETHLINKFeed{}, fmt.Errorf("failed to instantiate MockETHLINKFeed instance: %w", err) + } + + return &EthereumMockETHLINKFeed{ + address: &data.Address, + client: client, + feed: instance, + }, nil +} + +func LoadMockETHLINKFeed(client *seth.Client, address common.Address) (MockETHLINKFeed, error) { + abi, err := mock_ethlink_aggregator_wrapper.MockETHLINKAggregatorMetaData.GetAbi() + if err != nil { + return &EthereumMockETHLINKFeed{}, fmt.Errorf("failed to get MockETHLINKFeed ABI: %w", err) + } + client.ContractStore.AddABI("MockETHLINKFeed", *abi) + client.ContractStore.AddBIN("MockETHLINKFeed", common.FromHex(mock_ethlink_aggregator_wrapper.MockETHLINKAggregatorMetaData.Bin)) + + instance, err := mock_ethlink_aggregator_wrapper.NewMockETHLINKAggregator(address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumMockETHLINKFeed{}, fmt.Errorf("failed to instantiate MockETHLINKFeed instance: %w", err) + } + + return &EthereumMockETHLINKFeed{ + address: &address, + client: client, + feed: instance, + }, nil +} + +// EthereumMockGASFeed represents mocked Gas feed contract +type EthereumMockGASFeed struct { + client *seth.Client + feed *mock_gas_aggregator_wrapper.MockGASAggregator + address *common.Address +} + +func (v *EthereumMockGASFeed) Address() string { + return v.address.Hex() +} + +func DeployMockGASFeed(client *seth.Client, answer *big.Int) (MockGasFeed, error) { + abi, err := mock_gas_aggregator_wrapper.MockGASAggregatorMetaData.GetAbi() + if err != nil { + return &EthereumMockGASFeed{}, fmt.Errorf("failed to get MockGasFeed ABI: %w", err) + } + data, err := client.DeployContract(client.NewTXOpts(), "MockGasFeed", *abi, common.FromHex(mock_gas_aggregator_wrapper.MockGASAggregatorMetaData.Bin), answer) + if err != nil { + return &EthereumMockGASFeed{}, 
fmt.Errorf("MockGasFeed instance deployment have failed: %w", err) + } + + instance, err := mock_gas_aggregator_wrapper.NewMockGASAggregator(data.Address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumMockGASFeed{}, fmt.Errorf("failed to instantiate MockGasFeed instance: %w", err) + } + + return &EthereumMockGASFeed{ + address: &data.Address, + client: client, + feed: instance, + }, nil +} + +func LoadMockGASFeed(client *seth.Client, address common.Address) (MockGasFeed, error) { + abi, err := mock_gas_aggregator_wrapper.MockGASAggregatorMetaData.GetAbi() + if err != nil { + return &EthereumMockGASFeed{}, fmt.Errorf("failed to get MockGasFeed ABI: %w", err) + } + client.ContractStore.AddABI("MockGasFeed", *abi) + client.ContractStore.AddBIN("MockGasFeed", common.FromHex(mock_gas_aggregator_wrapper.MockGASAggregatorMetaData.Bin)) + + instance, err := mock_gas_aggregator_wrapper.NewMockGASAggregator(address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &EthereumMockGASFeed{}, fmt.Errorf("failed to instantiate MockGasFeed instance: %w", err) + } + + return &EthereumMockGASFeed{ + address: &address, + client: client, + feed: instance, + }, nil +} + +func DeployMultiCallContract(client *seth.Client) (common.Address, error) { + abi, err := abi.JSON(strings.NewReader(MultiCallABI)) + if err != nil { + return common.Address{}, err + } + + data, err := client.DeployContract(client.NewTXOpts(), "MultiCall", abi, common.FromHex(MultiCallBIN)) + if err != nil { + return common.Address{}, fmt.Errorf("MultiCall instance deployment have failed: %w", err) + } + + return data.Address, nil +} + func LoadFunctionsCoordinator(seth *seth.Client, addr string) (FunctionsCoordinator, error) { abi, err := functions_coordinator.FunctionsCoordinatorMetaData.GetAbi() if err != nil { diff --git a/integration-tests/contracts/ethereum_keeper_contracts.go b/integration-tests/contracts/ethereum_keeper_contracts.go index 
8ec6a547b55..31c3cb32dc7 100644 --- a/integration-tests/contracts/ethereum_keeper_contracts.go +++ b/integration-tests/contracts/ethereum_keeper_contracts.go @@ -69,6 +69,7 @@ type KeeperRegistry interface { SetConfigTypeSafe(ocrConfig OCRv2Config) error SetRegistrar(registrarAddr string) error AddUpkeepFunds(id *big.Int, amount *big.Int) error + AddUpkeepFundsFromKey(id *big.Int, amount *big.Int, keyNum int) error GetUpkeepInfo(ctx context.Context, id *big.Int) (*UpkeepInfo, error) GetKeeperInfo(ctx context.Context, keeperAddr string) (*KeeperInfo, error) SetKeepers(keepers []string, payees []string, ocrConfig OCRv2Config) error @@ -209,8 +210,8 @@ type UpkeepInfo struct { OffchainConfig []byte } -// EthereumKeeperRegistry represents keeper registry contract -type EthereumKeeperRegistry struct { +// LegacyEthereumKeeperRegistry represents keeper registry contract +type LegacyEthereumKeeperRegistry struct { client blockchain.EVMClient version ethereum.KeeperRegistryVersion registry1_1 *keeper_registry_wrapper1_1.KeeperRegistry @@ -224,24 +225,24 @@ type EthereumKeeperRegistry struct { l zerolog.Logger } -func (v *EthereumKeeperRegistry) ReorgProtectionEnabled() bool { +func (v *LegacyEthereumKeeperRegistry) ReorgProtectionEnabled() bool { chainId := v.client.GetChainID().Uint64() // reorg protection is disabled in polygon zkEVM and Scroll bc currently there is no way to get the block hash onchain return v.version != ethereum.RegistryVersion_2_2 || (chainId != 1101 && chainId != 1442 && chainId != 2442 && chainId != 534352 && chainId != 534351) } -func (v *EthereumKeeperRegistry) ChainModuleAddress() common.Address { +func (v *LegacyEthereumKeeperRegistry) ChainModuleAddress() common.Address { if v.version == ethereum.RegistryVersion_2_2 { return v.chainModule.Address() } return common.Address{} } -func (v *EthereumKeeperRegistry) Address() string { +func (v *LegacyEthereumKeeperRegistry) Address() string { return v.address.Hex() } -func (v *EthereumKeeperRegistry) 
Fund(ethAmount *big.Float) error { +func (v *LegacyEthereumKeeperRegistry) Fund(ethAmount *big.Float) error { gasEstimates, err := v.client.EstimateGas(geth.CallMsg{}) if err != nil { return err @@ -311,7 +312,7 @@ func (rcs *KeeperRegistrySettings) Encode20OnchainConfig(registrar string) []byt return onchainConfig } -func (v *EthereumKeeperRegistry) RegistryOwnerAddress() common.Address { +func (v *LegacyEthereumKeeperRegistry) RegistryOwnerAddress() common.Address { callOpts := &bind.CallOpts{ Pending: false, } @@ -334,7 +335,7 @@ func (v *EthereumKeeperRegistry) RegistryOwnerAddress() common.Address { return common.HexToAddress(v.client.GetDefaultWallet().Address()) } -func (v *EthereumKeeperRegistry) SetConfigTypeSafe(ocrConfig OCRv2Config) error { +func (v *LegacyEthereumKeeperRegistry) SetConfigTypeSafe(ocrConfig OCRv2Config) error { txOpts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) if err != nil { return err @@ -372,7 +373,7 @@ func (v *EthereumKeeperRegistry) SetConfigTypeSafe(ocrConfig OCRv2Config) error } } -func (v *EthereumKeeperRegistry) SetConfig(config KeeperRegistrySettings, ocrConfig OCRv2Config) error { +func (v *LegacyEthereumKeeperRegistry) SetConfig(config KeeperRegistrySettings, ocrConfig OCRv2Config) error { txOpts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) if err != nil { return err @@ -469,7 +470,7 @@ func (v *EthereumKeeperRegistry) SetConfig(config KeeperRegistrySettings, ocrCon } // Pause pauses the registry. -func (v *EthereumKeeperRegistry) Pause() error { +func (v *LegacyEthereumKeeperRegistry) Pause() error { txOpts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) if err != nil { return err @@ -520,7 +521,7 @@ func (v *EthereumKeeperRegistry) Pause() error { } // Migrate performs a migration of the given upkeep ids to the specific destination passed as parameter. 
-func (v *EthereumKeeperRegistry) Migrate(upkeepIDs []*big.Int, destinationAddress common.Address) error { +func (v *LegacyEthereumKeeperRegistry) Migrate(upkeepIDs []*big.Int, destinationAddress common.Address) error { if v.version != ethereum.RegistryVersion_1_2 { return fmt.Errorf("migration of upkeeps is only available for version 1.2 of the registries") } @@ -539,7 +540,7 @@ func (v *EthereumKeeperRegistry) Migrate(upkeepIDs []*big.Int, destinationAddres } // SetMigrationPermissions sets the permissions of another registry to allow migrations between the two. -func (v *EthereumKeeperRegistry) SetMigrationPermissions(peerAddress common.Address, permission uint8) error { +func (v *LegacyEthereumKeeperRegistry) SetMigrationPermissions(peerAddress common.Address, permission uint8) error { if v.version != ethereum.RegistryVersion_1_2 { return fmt.Errorf("migration of upkeeps is only available for version 1.2 of the registries") } @@ -557,7 +558,7 @@ func (v *EthereumKeeperRegistry) SetMigrationPermissions(peerAddress common.Addr return v.client.ProcessTransaction(tx) } -func (v *EthereumKeeperRegistry) SetRegistrar(registrarAddr string) error { +func (v *LegacyEthereumKeeperRegistry) SetRegistrar(registrarAddr string) error { if v.version == ethereum.RegistryVersion_2_0 { // we short circuit and exit, so we don't create a new txs messing up the nonce before exiting return fmt.Errorf("please use set config") @@ -609,7 +610,12 @@ func (v *EthereumKeeperRegistry) SetRegistrar(registrarAddr string) error { } // AddUpkeepFunds adds link for particular upkeep id -func (v *EthereumKeeperRegistry) AddUpkeepFunds(id *big.Int, amount *big.Int) error { +func (v *LegacyEthereumKeeperRegistry) AddUpkeepFundsFromKey(_ *big.Int, _ *big.Int, _ int) error { + panic("this method is only supported by contracts using Seth client") +} + +// AddUpkeepFunds adds link for particular upkeep id +func (v *LegacyEthereumKeeperRegistry) AddUpkeepFunds(id *big.Int, amount *big.Int) error { 
opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) if err != nil { return err @@ -638,7 +644,7 @@ func (v *EthereumKeeperRegistry) AddUpkeepFunds(id *big.Int, amount *big.Int) er } // GetUpkeepInfo gets upkeep info -func (v *EthereumKeeperRegistry) GetUpkeepInfo(ctx context.Context, id *big.Int) (*UpkeepInfo, error) { +func (v *LegacyEthereumKeeperRegistry) GetUpkeepInfo(ctx context.Context, id *big.Int) (*UpkeepInfo, error) { opts := &bind.CallOpts{ From: common.HexToAddress(v.client.GetDefaultWallet().Address()), Context: ctx, @@ -728,7 +734,7 @@ func (v *EthereumKeeperRegistry) GetUpkeepInfo(ctx context.Context, id *big.Int) return nil, fmt.Errorf("keeper registry version %d is not supported", v.version) } -func (v *EthereumKeeperRegistry) getUpkeepInfo22(opts *bind.CallOpts, id *big.Int) (*UpkeepInfo, error) { +func (v *LegacyEthereumKeeperRegistry) getUpkeepInfo22(opts *bind.CallOpts, id *big.Int) (*UpkeepInfo, error) { uk, err := v.registry2_2.GetUpkeep(opts, id) if err != nil { return nil, err @@ -747,7 +753,7 @@ func (v *EthereumKeeperRegistry) getUpkeepInfo22(opts *bind.CallOpts, id *big.In }, nil } -func (v *EthereumKeeperRegistry) GetKeeperInfo(ctx context.Context, keeperAddr string) (*KeeperInfo, error) { +func (v *LegacyEthereumKeeperRegistry) GetKeeperInfo(ctx context.Context, keeperAddr string) (*KeeperInfo, error) { opts := &bind.CallOpts{ From: common.HexToAddress(v.client.GetDefaultWallet().Address()), Context: ctx, @@ -781,7 +787,7 @@ func (v *EthereumKeeperRegistry) GetKeeperInfo(ctx context.Context, keeperAddr s }, nil } -func (v *EthereumKeeperRegistry) SetKeepers(keepers []string, payees []string, ocrConfig OCRv2Config) error { +func (v *LegacyEthereumKeeperRegistry) SetKeepers(keepers []string, payees []string, ocrConfig OCRv2Config) error { opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) if err != nil { return err @@ -823,7 +829,7 @@ func (v *EthereumKeeperRegistry) SetKeepers(keepers []string, payees 
[]string, o } // RegisterUpkeep registers contract to perform upkeep -func (v *EthereumKeeperRegistry) RegisterUpkeep(target string, gasLimit uint32, admin string, checkData []byte) error { +func (v *LegacyEthereumKeeperRegistry) RegisterUpkeep(target string, gasLimit uint32, admin string, checkData []byte) error { opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) if err != nil { return err @@ -875,7 +881,7 @@ func (v *EthereumKeeperRegistry) RegisterUpkeep(target string, gasLimit uint32, } // CancelUpkeep cancels the given upkeep ID -func (v *EthereumKeeperRegistry) CancelUpkeep(id *big.Int) error { +func (v *LegacyEthereumKeeperRegistry) CancelUpkeep(id *big.Int) error { opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) if err != nil { return err @@ -924,7 +930,7 @@ func (v *EthereumKeeperRegistry) CancelUpkeep(id *big.Int) error { } // SetUpkeepGasLimit sets the perform gas limit for a given upkeep ID -func (v *EthereumKeeperRegistry) SetUpkeepGasLimit(id *big.Int, gas uint32) error { +func (v *LegacyEthereumKeeperRegistry) SetUpkeepGasLimit(id *big.Int, gas uint32) error { opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) if err != nil { return err @@ -964,7 +970,7 @@ func (v *EthereumKeeperRegistry) SetUpkeepGasLimit(id *big.Int, gas uint32) erro } // GetKeeperList get list of all registered keeper addresses -func (v *EthereumKeeperRegistry) GetKeeperList(ctx context.Context) ([]string, error) { +func (v *LegacyEthereumKeeperRegistry) GetKeeperList(ctx context.Context) ([]string, error) { opts := &bind.CallOpts{ From: common.HexToAddress(v.client.GetDefaultWallet().Address()), Context: ctx, @@ -1008,7 +1014,7 @@ func (v *EthereumKeeperRegistry) GetKeeperList(ctx context.Context) ([]string, e } // UpdateCheckData updates the check data of an upkeep -func (v *EthereumKeeperRegistry) UpdateCheckData(id *big.Int, newCheckData []byte) error { +func (v *LegacyEthereumKeeperRegistry) UpdateCheckData(id *big.Int, 
newCheckData []byte) error { switch v.version { case ethereum.RegistryVersion_1_3: @@ -1061,7 +1067,7 @@ func (v *EthereumKeeperRegistry) UpdateCheckData(id *big.Int, newCheckData []byt } // SetUpkeepTriggerConfig updates the trigger config of an upkeep (only for version 2.1) -func (v *EthereumKeeperRegistry) SetUpkeepTriggerConfig(id *big.Int, triggerConfig []byte) error { +func (v *LegacyEthereumKeeperRegistry) SetUpkeepTriggerConfig(id *big.Int, triggerConfig []byte) error { switch v.version { case ethereum.RegistryVersion_2_1: @@ -1092,7 +1098,7 @@ func (v *EthereumKeeperRegistry) SetUpkeepTriggerConfig(id *big.Int, triggerConf } // SetUpkeepPrivilegeConfig sets the privilege config of an upkeep (only for version 2.1) -func (v *EthereumKeeperRegistry) SetUpkeepPrivilegeConfig(id *big.Int, privilegeConfig []byte) error { +func (v *LegacyEthereumKeeperRegistry) SetUpkeepPrivilegeConfig(id *big.Int, privilegeConfig []byte) error { switch v.version { case ethereum.RegistryVersion_2_1: @@ -1123,7 +1129,7 @@ func (v *EthereumKeeperRegistry) SetUpkeepPrivilegeConfig(id *big.Int, privilege } // PauseUpkeep stops an upkeep from an upkeep -func (v *EthereumKeeperRegistry) PauseUpkeep(id *big.Int) error { +func (v *LegacyEthereumKeeperRegistry) PauseUpkeep(id *big.Int) error { switch v.version { case ethereum.RegistryVersion_1_3: opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) @@ -1175,7 +1181,7 @@ func (v *EthereumKeeperRegistry) PauseUpkeep(id *big.Int) error { } // UnpauseUpkeep get list of all registered keeper addresses -func (v *EthereumKeeperRegistry) UnpauseUpkeep(id *big.Int) error { +func (v *LegacyEthereumKeeperRegistry) UnpauseUpkeep(id *big.Int) error { switch v.version { case ethereum.RegistryVersion_1_3: opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) @@ -1226,7 +1232,7 @@ func (v *EthereumKeeperRegistry) UnpauseUpkeep(id *big.Int) error { } } -func (v *EthereumKeeperRegistry) SetUpkeepOffchainConfig(id *big.Int, 
offchainConfig []byte) error { +func (v *LegacyEthereumKeeperRegistry) SetUpkeepOffchainConfig(id *big.Int, offchainConfig []byte) error { switch v.version { case ethereum.RegistryVersion_2_0: opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) @@ -1267,7 +1273,7 @@ func (v *EthereumKeeperRegistry) SetUpkeepOffchainConfig(id *big.Int, offchainCo } // Parses upkeep performed log -func (v *EthereumKeeperRegistry) ParseUpkeepPerformedLog(log *types.Log) (*UpkeepPerformedLog, error) { +func (v *LegacyEthereumKeeperRegistry) ParseUpkeepPerformedLog(log *types.Log) (*UpkeepPerformedLog, error) { switch v.version { case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: parsedLog, err := v.registry1_1.ParseUpkeepPerformed(*log) @@ -1334,7 +1340,7 @@ func (v *EthereumKeeperRegistry) ParseUpkeepPerformedLog(log *types.Log) (*Upkee } // ParseStaleUpkeepReportLog Parses Stale upkeep report log -func (v *EthereumKeeperRegistry) ParseStaleUpkeepReportLog(log *types.Log) (*StaleUpkeepReportLog, error) { +func (v *LegacyEthereumKeeperRegistry) ParseStaleUpkeepReportLog(log *types.Log) (*StaleUpkeepReportLog, error) { //nolint:exhaustive switch v.version { case ethereum.RegistryVersion_2_0: @@ -1366,7 +1372,7 @@ func (v *EthereumKeeperRegistry) ParseStaleUpkeepReportLog(log *types.Log) (*Sta } // Parses the upkeep ID from an 'UpkeepRegistered' log, returns error on any other log -func (v *EthereumKeeperRegistry) ParseUpkeepIdFromRegisteredLog(log *types.Log) (*big.Int, error) { +func (v *LegacyEthereumKeeperRegistry) ParseUpkeepIdFromRegisteredLog(log *types.Log) (*big.Int, error) { switch v.version { case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: parsedLog, err := v.registry1_1.ParseUpkeepRegistered(*log) @@ -1630,8 +1636,8 @@ func (o *KeeperConsumerPerformanceRoundConfirmer) logDetails() { defer o.metricsReporter.ReportMutex.Unlock() } -// KeeperConsumerBenchmarkRoundConfirmer is a header subscription that awaits for a round of upkeeps 
-type KeeperConsumerBenchmarkRoundConfirmer struct { +// LegacyKeeperConsumerBenchmarkRoundConfirmer is a header subscription that awaits for a round of upkeeps +type LegacyKeeperConsumerBenchmarkRoundConfirmer struct { instance AutomationConsumerBenchmark registry KeeperRegistry upkeepID *big.Int @@ -1658,9 +1664,9 @@ type KeeperConsumerBenchmarkRoundConfirmer struct { l zerolog.Logger } -// NewKeeperConsumerBenchmarkRoundConfirmer provides a new instance of a KeeperConsumerBenchmarkRoundConfirmer +// NewLegacyKeeperConsumerBenchmarkRoundConfirmer provides a new instance of a LegacyKeeperConsumerBenchmarkRoundConfirmer // Used to track and log benchmark test results for keepers -func NewKeeperConsumerBenchmarkRoundConfirmer( +func NewLegacyKeeperConsumerBenchmarkRoundConfirmer( contract AutomationConsumerBenchmark, registry KeeperRegistry, upkeepID *big.Int, @@ -1670,9 +1676,9 @@ func NewKeeperConsumerBenchmarkRoundConfirmer( upkeepIndex int64, firstEligibleBuffer int64, logger zerolog.Logger, -) *KeeperConsumerBenchmarkRoundConfirmer { +) *LegacyKeeperConsumerBenchmarkRoundConfirmer { ctx, cancelFunc := context.WithCancel(context.Background()) - return &KeeperConsumerBenchmarkRoundConfirmer{ + return &LegacyKeeperConsumerBenchmarkRoundConfirmer{ instance: contract, registry: registry, upkeepID: upkeepID, @@ -1696,7 +1702,7 @@ func NewKeeperConsumerBenchmarkRoundConfirmer( } // ReceiveHeader will query the latest Keeper round and check to see whether the round has confirmed -func (o *KeeperConsumerBenchmarkRoundConfirmer) ReceiveHeader(receivedHeader blockchain.NodeHeader) error { +func (o *LegacyKeeperConsumerBenchmarkRoundConfirmer) ReceiveHeader(receivedHeader blockchain.NodeHeader) error { if receivedHeader.Number.Uint64() <= o.lastBlockNum { // Uncle / reorg we won't count return nil } @@ -1800,7 +1806,7 @@ func (o *KeeperConsumerBenchmarkRoundConfirmer) ReceiveHeader(receivedHeader blo } // Wait is a blocking function that will wait until the round has 
confirmed, and timeout if the deadline has passed -func (o *KeeperConsumerBenchmarkRoundConfirmer) Wait() error { +func (o *LegacyKeeperConsumerBenchmarkRoundConfirmer) Wait() error { defer func() { o.complete = true }() for { select { @@ -1814,11 +1820,11 @@ func (o *KeeperConsumerBenchmarkRoundConfirmer) Wait() error { } } -func (o *KeeperConsumerBenchmarkRoundConfirmer) Complete() bool { +func (o *LegacyKeeperConsumerBenchmarkRoundConfirmer) Complete() bool { return o.complete } -func (o *KeeperConsumerBenchmarkRoundConfirmer) logDetails() { +func (o *LegacyKeeperConsumerBenchmarkRoundConfirmer) logDetails() { report := testreporters.KeeperBenchmarkTestReport{ ContractAddress: o.instance.Address(), TotalEligibleCount: o.countEligible, @@ -1832,25 +1838,25 @@ func (o *KeeperConsumerBenchmarkRoundConfirmer) logDetails() { defer o.metricsReporter.ReportMutex.Unlock() } -// EthereumUpkeepCounter represents keeper consumer (upkeep) counter contract -type EthereumUpkeepCounter struct { +// LegacyEthereumUpkeepCounter represents keeper consumer (upkeep) counter contract +type LegacyEthereumUpkeepCounter struct { client blockchain.EVMClient consumer *upkeep_counter_wrapper.UpkeepCounter address *common.Address } -func (v *EthereumUpkeepCounter) Address() string { +func (v *LegacyEthereumUpkeepCounter) Address() string { return v.address.Hex() } -func (v *EthereumUpkeepCounter) Fund(ethAmount *big.Float) error { +func (v *LegacyEthereumUpkeepCounter) Fund(ethAmount *big.Float) error { gasEstimates, err := v.client.EstimateGas(geth.CallMsg{}) if err != nil { return err } return v.client.Fund(v.address.Hex(), ethAmount, gasEstimates) } -func (v *EthereumUpkeepCounter) Counter(ctx context.Context) (*big.Int, error) { +func (v *LegacyEthereumUpkeepCounter) Counter(ctx context.Context) (*big.Int, error) { opts := &bind.CallOpts{ From: common.HexToAddress(v.client.GetDefaultWallet().Address()), Context: ctx, @@ -1862,7 +1868,7 @@ func (v *EthereumUpkeepCounter) Counter(ctx 
context.Context) (*big.Int, error) { return cnt, nil } -func (v *EthereumUpkeepCounter) SetSpread(testRange *big.Int, interval *big.Int) error { +func (v *LegacyEthereumUpkeepCounter) SetSpread(testRange *big.Int, interval *big.Int) error { opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) if err != nil { return err @@ -1875,29 +1881,29 @@ func (v *EthereumUpkeepCounter) SetSpread(testRange *big.Int, interval *big.Int) } // Just pass for non-logtrigger -func (v *EthereumUpkeepCounter) Start() error { +func (v *LegacyEthereumUpkeepCounter) Start() error { return nil } -// EthereumUpkeepPerformCounterRestrictive represents keeper consumer (upkeep) counter contract -type EthereumUpkeepPerformCounterRestrictive struct { +// LegacyEthereumUpkeepPerformCounterRestrictive represents keeper consumer (upkeep) counter contract +type LegacyEthereumUpkeepPerformCounterRestrictive struct { client blockchain.EVMClient consumer *upkeep_perform_counter_restrictive_wrapper.UpkeepPerformCounterRestrictive address *common.Address } -func (v *EthereumUpkeepPerformCounterRestrictive) Address() string { +func (v *LegacyEthereumUpkeepPerformCounterRestrictive) Address() string { return v.address.Hex() } -func (v *EthereumUpkeepPerformCounterRestrictive) Fund(ethAmount *big.Float) error { +func (v *LegacyEthereumUpkeepPerformCounterRestrictive) Fund(ethAmount *big.Float) error { gasEstimates, err := v.client.EstimateGas(geth.CallMsg{}) if err != nil { return err } return v.client.Fund(v.address.Hex(), ethAmount, gasEstimates) } -func (v *EthereumUpkeepPerformCounterRestrictive) Counter(ctx context.Context) (*big.Int, error) { +func (v *LegacyEthereumUpkeepPerformCounterRestrictive) Counter(ctx context.Context) (*big.Int, error) { opts := &bind.CallOpts{ From: common.HexToAddress(v.client.GetDefaultWallet().Address()), Context: ctx, @@ -1906,7 +1912,7 @@ func (v *EthereumUpkeepPerformCounterRestrictive) Counter(ctx context.Context) ( return count, err } -func (v 
*EthereumUpkeepPerformCounterRestrictive) SetSpread(testRange *big.Int, interval *big.Int) error { +func (v *LegacyEthereumUpkeepPerformCounterRestrictive) SetSpread(testRange *big.Int, interval *big.Int) error { opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) if err != nil { return err @@ -1946,17 +1952,17 @@ func (v *EthereumKeeperConsumer) Counter(ctx context.Context) (*big.Int, error) return cnt, nil } -type EthereumAutomationStreamsLookupUpkeepConsumer struct { +type LegacyEthereumAutomationStreamsLookupUpkeepConsumer struct { client blockchain.EVMClient consumer *streams_lookup_upkeep_wrapper.StreamsLookupUpkeep address *common.Address } -func (v *EthereumAutomationStreamsLookupUpkeepConsumer) Address() string { +func (v *LegacyEthereumAutomationStreamsLookupUpkeepConsumer) Address() string { return v.address.Hex() } -func (v *EthereumAutomationStreamsLookupUpkeepConsumer) Start() error { +func (v *LegacyEthereumAutomationStreamsLookupUpkeepConsumer) Start() error { // For this consumer upkeep, we use this Start() function to set ParamKeys so as to run mercury v0.2 txOpts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) if err != nil { @@ -1971,7 +1977,7 @@ func (v *EthereumAutomationStreamsLookupUpkeepConsumer) Start() error { return v.client.ProcessTransaction(tx) } -func (v *EthereumAutomationStreamsLookupUpkeepConsumer) Counter(ctx context.Context) (*big.Int, error) { +func (v *LegacyEthereumAutomationStreamsLookupUpkeepConsumer) Counter(ctx context.Context) (*big.Int, error) { opts := &bind.CallOpts{ From: common.HexToAddress(v.client.GetDefaultWallet().Address()), Context: ctx, @@ -1983,18 +1989,18 @@ func (v *EthereumAutomationStreamsLookupUpkeepConsumer) Counter(ctx context.Cont return cnt, nil } -type EthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer struct { +type LegacyEthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer struct { client blockchain.EVMClient consumer 
*log_triggered_streams_lookup_wrapper.LogTriggeredStreamsLookup address *common.Address } -func (v *EthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer) Address() string { +func (v *LegacyEthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer) Address() string { return v.address.Hex() } // Kick off the log trigger event. The contract uses Mercury v0.2 so no need to set ParamKeys -func (v *EthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer) Start() error { +func (v *LegacyEthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer) Start() error { txOpts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) if err != nil { return err @@ -2007,7 +2013,7 @@ func (v *EthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer) Start() erro return v.client.ProcessTransaction(tx) } -func (v *EthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer) Counter(ctx context.Context) (*big.Int, error) { +func (v *LegacyEthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer) Counter(ctx context.Context) (*big.Int, error) { opts := &bind.CallOpts{ From: common.HexToAddress(v.client.GetDefaultWallet().Address()), Context: ctx, @@ -2019,17 +2025,17 @@ func (v *EthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer) Counter(ctx return cnt, nil } -type EthereumAutomationLogCounterConsumer struct { +type LegacyEthereumAutomationLogCounterConsumer struct { client blockchain.EVMClient consumer *log_upkeep_counter_wrapper.LogUpkeepCounter address *common.Address } -func (v *EthereumAutomationLogCounterConsumer) Address() string { +func (v *LegacyEthereumAutomationLogCounterConsumer) Address() string { return v.address.Hex() } -func (v *EthereumAutomationLogCounterConsumer) Start() error { +func (v *LegacyEthereumAutomationLogCounterConsumer) Start() error { txOpts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) if err != nil { return err @@ -2042,7 +2048,7 @@ func (v *EthereumAutomationLogCounterConsumer) Start() error { return 
v.client.ProcessTransaction(tx) } -func (v *EthereumAutomationLogCounterConsumer) Counter(ctx context.Context) (*big.Int, error) { +func (v *LegacyEthereumAutomationLogCounterConsumer) Counter(ctx context.Context) (*big.Int, error) { opts := &bind.CallOpts{ From: common.HexToAddress(v.client.GetDefaultWallet().Address()), Context: ctx, @@ -2054,21 +2060,21 @@ func (v *EthereumAutomationLogCounterConsumer) Counter(ctx context.Context) (*bi return cnt, nil } -type EthereumAutomationSimpleLogCounterConsumer struct { +type LegacyEthereumAutomationSimpleLogCounterConsumer struct { client blockchain.EVMClient consumer *simple_log_upkeep_counter_wrapper.SimpleLogUpkeepCounter address *common.Address } -func (v *EthereumAutomationSimpleLogCounterConsumer) Address() string { +func (v *LegacyEthereumAutomationSimpleLogCounterConsumer) Address() string { return v.address.Hex() } -func (v *EthereumAutomationSimpleLogCounterConsumer) Start() error { +func (v *LegacyEthereumAutomationSimpleLogCounterConsumer) Start() error { return nil } -func (v *EthereumAutomationSimpleLogCounterConsumer) Counter(ctx context.Context) (*big.Int, error) { +func (v *LegacyEthereumAutomationSimpleLogCounterConsumer) Counter(ctx context.Context) (*big.Int, error) { opts := &bind.CallOpts{ From: common.HexToAddress(v.client.GetDefaultWallet().Address()), Context: ctx, @@ -2080,19 +2086,19 @@ func (v *EthereumAutomationSimpleLogCounterConsumer) Counter(ctx context.Context return cnt, nil } -// EthereumKeeperConsumerPerformance represents a more complicated keeper consumer contract, one intended only for +// LegacyEthereumKeeperConsumerPerformance represents a more complicated keeper consumer contract, one intended only for // performance tests. 
-type EthereumKeeperConsumerPerformance struct { +type LegacyEthereumKeeperConsumerPerformance struct { client blockchain.EVMClient consumer *keeper_consumer_performance_wrapper.KeeperConsumerPerformance address *common.Address } -func (v *EthereumKeeperConsumerPerformance) Address() string { +func (v *LegacyEthereumKeeperConsumerPerformance) Address() string { return v.address.Hex() } -func (v *EthereumKeeperConsumerPerformance) Fund(ethAmount *big.Float) error { +func (v *LegacyEthereumKeeperConsumerPerformance) Fund(ethAmount *big.Float) error { gasEstimates, err := v.client.EstimateGas(geth.CallMsg{}) if err != nil { return err @@ -2100,7 +2106,7 @@ func (v *EthereumKeeperConsumerPerformance) Fund(ethAmount *big.Float) error { return v.client.Fund(v.address.Hex(), ethAmount, gasEstimates) } -func (v *EthereumKeeperConsumerPerformance) CheckEligible(ctx context.Context) (bool, error) { +func (v *LegacyEthereumKeeperConsumerPerformance) CheckEligible(ctx context.Context) (bool, error) { opts := &bind.CallOpts{ From: common.HexToAddress(v.client.GetDefaultWallet().Address()), Context: ctx, @@ -2109,7 +2115,7 @@ func (v *EthereumKeeperConsumerPerformance) CheckEligible(ctx context.Context) ( return eligible, err } -func (v *EthereumKeeperConsumerPerformance) GetUpkeepCount(ctx context.Context) (*big.Int, error) { +func (v *LegacyEthereumKeeperConsumerPerformance) GetUpkeepCount(ctx context.Context) (*big.Int, error) { opts := &bind.CallOpts{ From: common.HexToAddress(v.client.GetDefaultWallet().Address()), Context: ctx, @@ -2118,7 +2124,7 @@ func (v *EthereumKeeperConsumerPerformance) GetUpkeepCount(ctx context.Context) return eligible, err } -func (v *EthereumKeeperConsumerPerformance) SetCheckGasToBurn(_ context.Context, gas *big.Int) error { +func (v *LegacyEthereumKeeperConsumerPerformance) SetCheckGasToBurn(_ context.Context, gas *big.Int) error { opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) if err != nil { return err @@ -2130,7 +2136,7 
@@ func (v *EthereumKeeperConsumerPerformance) SetCheckGasToBurn(_ context.Context, return v.client.ProcessTransaction(tx) } -func (v *EthereumKeeperConsumerPerformance) SetPerformGasToBurn(_ context.Context, gas *big.Int) error { +func (v *LegacyEthereumKeeperConsumerPerformance) SetPerformGasToBurn(_ context.Context, gas *big.Int) error { opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) if err != nil { return err @@ -2142,18 +2148,18 @@ func (v *EthereumKeeperConsumerPerformance) SetPerformGasToBurn(_ context.Contex return v.client.ProcessTransaction(tx) } -// EthereumKeeperPerformDataCheckerConsumer represents keeper perform data checker contract -type EthereumKeeperPerformDataCheckerConsumer struct { +// LegacyEthereumKeeperPerformDataCheckerConsumer represents keeper perform data checker contract +type LegacyEthereumKeeperPerformDataCheckerConsumer struct { client blockchain.EVMClient performDataChecker *perform_data_checker_wrapper.PerformDataChecker address *common.Address } -func (v *EthereumKeeperPerformDataCheckerConsumer) Address() string { +func (v *LegacyEthereumKeeperPerformDataCheckerConsumer) Address() string { return v.address.Hex() } -func (v *EthereumKeeperPerformDataCheckerConsumer) Counter(ctx context.Context) (*big.Int, error) { +func (v *LegacyEthereumKeeperPerformDataCheckerConsumer) Counter(ctx context.Context) (*big.Int, error) { opts := &bind.CallOpts{ From: common.HexToAddress(v.client.GetDefaultWallet().Address()), Context: ctx, @@ -2165,7 +2171,7 @@ func (v *EthereumKeeperPerformDataCheckerConsumer) Counter(ctx context.Context) return cnt, nil } -func (v *EthereumKeeperPerformDataCheckerConsumer) SetExpectedData(_ context.Context, expectedData []byte) error { +func (v *LegacyEthereumKeeperPerformDataCheckerConsumer) SetExpectedData(_ context.Context, expectedData []byte) error { opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) if err != nil { return err @@ -2177,19 +2183,19 @@ func (v 
*EthereumKeeperPerformDataCheckerConsumer) SetExpectedData(_ context.Con return v.client.ProcessTransaction(tx) } -// EthereumAutomationConsumerBenchmark represents a more complicated keeper consumer contract, one intended only for +// LegacyEthereumAutomationConsumerBenchmark represents a more complicated keeper consumer contract, one intended only for // Benchmark tests. -type EthereumAutomationConsumerBenchmark struct { +type LegacyEthereumAutomationConsumerBenchmark struct { client blockchain.EVMClient consumer *automation_consumer_benchmark.AutomationConsumerBenchmark address *common.Address } -func (v *EthereumAutomationConsumerBenchmark) Address() string { +func (v *LegacyEthereumAutomationConsumerBenchmark) Address() string { return v.address.Hex() } -func (v *EthereumAutomationConsumerBenchmark) Fund(ethAmount *big.Float) error { +func (v *LegacyEthereumAutomationConsumerBenchmark) Fund(ethAmount *big.Float) error { gasEstimates, err := v.client.EstimateGas(geth.CallMsg{}) if err != nil { return err @@ -2197,7 +2203,7 @@ func (v *EthereumAutomationConsumerBenchmark) Fund(ethAmount *big.Float) error { return v.client.Fund(v.address.Hex(), ethAmount, gasEstimates) } -func (v *EthereumAutomationConsumerBenchmark) CheckEligible(ctx context.Context, id *big.Int, _range *big.Int, firstEligibleBuffer *big.Int) (bool, error) { +func (v *LegacyEthereumAutomationConsumerBenchmark) CheckEligible(ctx context.Context, id *big.Int, _range *big.Int, firstEligibleBuffer *big.Int) (bool, error) { opts := &bind.CallOpts{ From: common.HexToAddress(v.client.GetDefaultWallet().Address()), Context: ctx, @@ -2206,7 +2212,7 @@ func (v *EthereumAutomationConsumerBenchmark) CheckEligible(ctx context.Context, return eligible, err } -func (v *EthereumAutomationConsumerBenchmark) GetUpkeepCount(ctx context.Context, id *big.Int) (*big.Int, error) { +func (v *LegacyEthereumAutomationConsumerBenchmark) GetUpkeepCount(ctx context.Context, id *big.Int) (*big.Int, error) { opts := 
&bind.CallOpts{ From: common.HexToAddress(v.client.GetDefaultWallet().Address()), Context: ctx, @@ -2215,9 +2221,9 @@ func (v *EthereumAutomationConsumerBenchmark) GetUpkeepCount(ctx context.Context return eligible, err } -// EthereumKeeperRegistrar corresponds to the registrar which is used to send requests to the registry when +// LegacyEthereumKeeperRegistrar corresponds to the registrar which is used to send requests to the registry when // registering new upkeeps. -type EthereumKeeperRegistrar struct { +type LegacyEthereumKeeperRegistrar struct { client blockchain.EVMClient registrar *keeper_registrar_wrapper1_2.KeeperRegistrar registrar20 *keeper_registrar_wrapper2_0.KeeperRegistrar @@ -2225,11 +2231,11 @@ type EthereumKeeperRegistrar struct { address *common.Address } -func (v *EthereumKeeperRegistrar) Address() string { +func (v *LegacyEthereumKeeperRegistrar) Address() string { return v.address.Hex() } -func (v *EthereumKeeperRegistrar) Fund(ethAmount *big.Float) error { +func (v *LegacyEthereumKeeperRegistrar) Fund(ethAmount *big.Float) error { gasEstimates, err := v.client.EstimateGas(geth.CallMsg{}) if err != nil { return err @@ -2238,7 +2244,7 @@ func (v *EthereumKeeperRegistrar) Fund(ethAmount *big.Float) error { } // EncodeRegisterRequest encodes register request to call it through link token TransferAndCall -func (v *EthereumKeeperRegistrar) EncodeRegisterRequest(name string, email []byte, upkeepAddr string, gasLimit uint32, adminAddr string, checkData []byte, amount *big.Int, source uint8, senderAddr string, isLogTrigger bool, isMercury bool) ([]byte, error) { +func (v *LegacyEthereumKeeperRegistrar) EncodeRegisterRequest(name string, email []byte, upkeepAddr string, gasLimit uint32, adminAddr string, checkData []byte, amount *big.Int, source uint8, senderAddr string, isLogTrigger bool, isMercury bool) ([]byte, error) { if v.registrar20 != nil { registryABI, err := abi.JSON(strings.NewReader(keeper_registrar_wrapper2_0.KeeperRegistrarMetaData.ABI)) 
if err != nil { @@ -2349,14 +2355,14 @@ func (v *EthereumKeeperRegistrar) EncodeRegisterRequest(name string, email []byt return req, nil } -// EthereumUpkeepTranscoder represents the transcoder which is used to perform migrations +// LegacyEthereumUpkeepTranscoder represents the transcoder which is used to perform migrations // of upkeeps from one registry to another. -type EthereumUpkeepTranscoder struct { +type LegacyEthereumUpkeepTranscoder struct { client blockchain.EVMClient transcoder *upkeep_transcoder.UpkeepTranscoder address *common.Address } -func (v *EthereumUpkeepTranscoder) Address() string { +func (v *LegacyEthereumUpkeepTranscoder) Address() string { return v.address.Hex() } diff --git a/integration-tests/contracts/multicall.go b/integration-tests/contracts/multicall.go index b809c20021d..be1af13cceb 100644 --- a/integration-tests/contracts/multicall.go +++ b/integration-tests/contracts/multicall.go @@ -10,10 +10,12 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/pkg/errors" "github.com/rs/zerolog/log" + "github.com/smartcontractkit/seth" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + + "github.com/smartcontractkit/chainlink/integration-tests/wrappers" ) const ( @@ -53,43 +55,24 @@ func WaitForSuccessfulTxMined(evmClient blockchain.EVMClient, tx *types.Transact } func MultiCallLogTriggerLoadGen( - evmClient blockchain.EVMClient, + client *seth.Client, multiCallAddress string, logTriggerAddress []string, logTriggerData [][]byte, ) (*types.Transaction, error) { - contractAddress := common.HexToAddress(multiCallAddress) multiCallABI, err := abi.JSON(strings.NewReader(MultiCallABI)) if err != nil { return nil, err } - boundContract := bind.NewBoundContract(contractAddress, multiCallABI, evmClient.Backend(), evmClient.Backend(), evmClient.Backend()) + wrapper := wrappers.MustNewWrappedContractBackend(nil, client) + 
boundContract := bind.NewBoundContract(contractAddress, multiCallABI, wrapper, wrapper, wrapper) var call []Call for i, d := range logTriggerData { data := Call{Target: common.HexToAddress(logTriggerAddress[i]), AllowFailure: false, CallData: d} call = append(call, data) } - - opts, err := evmClient.TransactionOpts(evmClient.GetDefaultWallet()) - if err != nil { - return nil, err - } - // call aggregate3 to group all msg call data and send them in a single transaction - tx, err := boundContract.Transact(opts, "aggregate3", call) - if err != nil { - return nil, err - } - err = evmClient.MarkTxAsSentOnL2(tx) - if err != nil { - return nil, err - } - err = WaitForSuccessfulTxMined(evmClient, tx) - if err != nil { - return nil, errors.Wrapf(err, "multicall failed for log trigger load gen; multicall %s", contractAddress.Hex()) - } - return tx, nil - + return boundContract.Transact(client.NewTXKeyOpts(client.AnySyncedKey()), "aggregate3", call) } diff --git a/integration-tests/contracts/test_contracts.go b/integration-tests/contracts/test_contracts.go index 8a6d0b5be02..25b87bcc5ba 100644 --- a/integration-tests/contracts/test_contracts.go +++ b/integration-tests/contracts/test_contracts.go @@ -12,18 +12,18 @@ import ( le "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_emitter" ) -type LogEmitterContract struct { +type LegacyLogEmitterContract struct { address common.Address client blockchain.EVMClient instance *le.LogEmitter l zerolog.Logger } -func (e *LogEmitterContract) Address() common.Address { +func (e *LegacyLogEmitterContract) Address() common.Address { return e.address } -func (e *LogEmitterContract) EmitLogInts(ints []int) (*types.Transaction, error) { +func (e *LegacyLogEmitterContract) EmitLogInts(ints []int) (*types.Transaction, error) { opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) if err != nil { return nil, err @@ -39,7 +39,7 @@ func (e *LogEmitterContract) EmitLogInts(ints []int) (*types.Transaction, error) 
return tx, e.client.ProcessTransaction(tx) } -func (e *LogEmitterContract) EmitLogIntsIndexed(ints []int) (*types.Transaction, error) { +func (e *LegacyLogEmitterContract) EmitLogIntsIndexed(ints []int) (*types.Transaction, error) { opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) if err != nil { return nil, err @@ -55,7 +55,7 @@ func (e *LogEmitterContract) EmitLogIntsIndexed(ints []int) (*types.Transaction, return tx, e.client.ProcessTransaction(tx) } -func (e *LogEmitterContract) EmitLogIntMultiIndexed(ints int, ints2 int, count int) (*types.Transaction, error) { +func (e *LegacyLogEmitterContract) EmitLogIntMultiIndexed(ints int, ints2 int, count int) (*types.Transaction, error) { opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) if err != nil { return nil, err @@ -67,7 +67,7 @@ func (e *LogEmitterContract) EmitLogIntMultiIndexed(ints int, ints2 int, count i return tx, e.client.ProcessTransaction(tx) } -func (e *LogEmitterContract) EmitLogStrings(strings []string) (*types.Transaction, error) { +func (e *LegacyLogEmitterContract) EmitLogStrings(strings []string) (*types.Transaction, error) { opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) if err != nil { return nil, err @@ -79,14 +79,27 @@ func (e *LogEmitterContract) EmitLogStrings(strings []string) (*types.Transactio return tx, e.client.ProcessTransaction(tx) } -func (e *LogEmitterContract) EmitLogInt(payload int) (*types.Transaction, error) { +func (e *LegacyLogEmitterContract) EmitLogInt(payload int) (*types.Transaction, error) { return e.EmitLogInts([]int{payload}) } -func (e *LogEmitterContract) EmitLogIntIndexed(payload int) (*types.Transaction, error) { +func (e *LegacyLogEmitterContract) EmitLogIntIndexed(payload int) (*types.Transaction, error) { return e.EmitLogIntsIndexed([]int{payload}) } -func (e *LogEmitterContract) EmitLogString(strings string) (*types.Transaction, error) { +func (e *LegacyLogEmitterContract) EmitLogString(strings string) 
(*types.Transaction, error) { return e.EmitLogStrings([]string{strings}) } + +func (e *LegacyLogEmitterContract) EmitLogIntsFromKey(_ []int, _ int) (*types.Transaction, error) { + panic("only Seth-based contracts support this method") +} +func (e *LegacyLogEmitterContract) EmitLogIntsIndexedFromKey(_ []int, _ int) (*types.Transaction, error) { + panic("only Seth-based contracts support this method") +} +func (e *LegacyLogEmitterContract) EmitLogIntMultiIndexedFromKey(_ int, _ int, _ int, _ int) (*types.Transaction, error) { + panic("only Seth-based contracts support this method") +} +func (e *LegacyLogEmitterContract) EmitLogStringsFromKey(_ []string, _ int) (*types.Transaction, error) { + panic("only Seth-based contracts support this method") +} diff --git a/integration-tests/contracts/test_contracts_seth.go b/integration-tests/contracts/test_contracts_seth.go new file mode 100644 index 00000000000..85e76054c71 --- /dev/null +++ b/integration-tests/contracts/test_contracts_seth.go @@ -0,0 +1,123 @@ +package contracts + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/rs/zerolog" + "github.com/smartcontractkit/seth" + + "github.com/smartcontractkit/chainlink/integration-tests/wrappers" + le "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_emitter" +) + +type LogEmitterContract struct { + address common.Address + client *seth.Client + instance *le.LogEmitter + l zerolog.Logger +} + +func (e *LogEmitterContract) Address() common.Address { + return e.address +} + +func (e *LogEmitterContract) EmitLogIntsFromKey(ints []int, keyNum int) (*types.Transaction, error) { + bigInts := make([]*big.Int, len(ints)) + for i, v := range ints { + bigInts[i] = big.NewInt(int64(v)) + } + tx, err := e.client.Decode(e.instance.EmitLog1(e.client.NewTXKeyOpts(keyNum), bigInts)) + if err != nil { + return nil, err + } + + return tx.Transaction, nil +} + +func (e 
*LogEmitterContract) EmitLogInts(ints []int) (*types.Transaction, error) { + return e.EmitLogIntsFromKey(ints, 0) +} + +func (e *LogEmitterContract) EmitLogIntsIndexedFromKey(ints []int, keyNum int) (*types.Transaction, error) { + bigInts := make([]*big.Int, len(ints)) + for i, v := range ints { + bigInts[i] = big.NewInt(int64(v)) + } + tx, err := e.client.Decode(e.instance.EmitLog2(e.client.NewTXKeyOpts(keyNum), bigInts)) + if err != nil { + return nil, err + } + + return tx.Transaction, nil +} + +func (e *LogEmitterContract) EmitLogIntsIndexed(ints []int) (*types.Transaction, error) { + return e.EmitLogIntsIndexedFromKey(ints, 0) +} + +func (e *LogEmitterContract) EmitLogIntMultiIndexedFromKey(ints int, ints2 int, count int, keyNum int) (*types.Transaction, error) { + tx, err := e.client.Decode(e.instance.EmitLog4(e.client.NewTXKeyOpts(keyNum), big.NewInt(int64(ints)), big.NewInt(int64(ints2)), big.NewInt(int64(count)))) + if err != nil { + return nil, err + } + + return tx.Transaction, nil +} + +func (e *LogEmitterContract) EmitLogIntMultiIndexed(ints int, ints2 int, count int) (*types.Transaction, error) { + return e.EmitLogIntMultiIndexedFromKey(ints, ints2, count, 0) +} + +func (e *LogEmitterContract) EmitLogStringsFromKey(strings []string, keyNum int) (*types.Transaction, error) { + tx, err := e.client.Decode(e.instance.EmitLog3(e.client.NewTXKeyOpts(keyNum), strings)) + if err != nil { + return nil, err + } + return tx.Transaction, nil +} + +func (e *LogEmitterContract) EmitLogStrings(strings []string) (*types.Transaction, error) { + return e.EmitLogStringsFromKey(strings, 0) +} + +func (e *LogEmitterContract) EmitLogInt(payload int) (*types.Transaction, error) { + return e.EmitLogInts([]int{payload}) +} + +func (e *LogEmitterContract) EmitLogIntIndexed(payload int) (*types.Transaction, error) { + return e.EmitLogIntsIndexed([]int{payload}) +} + +func (e *LogEmitterContract) EmitLogString(strings string) (*types.Transaction, error) { + return 
e.EmitLogStrings([]string{strings}) +} + +func DeployLogEmitterContract(l zerolog.Logger, client *seth.Client) (LogEmitter, error) { + return DeployLogEmitterContractFromKey(l, client, 0) +} + +func DeployLogEmitterContractFromKey(l zerolog.Logger, client *seth.Client, keyNum int) (LogEmitter, error) { + abi, err := le.LogEmitterMetaData.GetAbi() + if err != nil { + return &LogEmitterContract{}, fmt.Errorf("failed to get LogEmitter ABI: %w", err) + } + data, err := client.DeployContract(client.NewTXKeyOpts(keyNum), "LogEmitter", *abi, common.FromHex(le.LogEmitterMetaData.Bin)) + if err != nil { + return &LogEmitterContract{}, fmt.Errorf("LogEmitter instance deployment have failed: %w", err) + } + + instance, err := le.NewLogEmitter(data.Address, wrappers.MustNewWrappedContractBackend(nil, client)) + if err != nil { + return &LogEmitterContract{}, fmt.Errorf("failed to instantiate LogEmitter instance: %w", err) + } + + return &LogEmitterContract{ + client: client, + instance: instance, + address: data.Address, + l: l, + }, err +} diff --git a/integration-tests/docker/test_env/test_env.go b/integration-tests/docker/test_env/test_env.go index fd1555ec055..7cb618b8dae 100644 --- a/integration-tests/docker/test_env/test_env.go +++ b/integration-tests/docker/test_env/test_env.go @@ -393,7 +393,7 @@ func (te *CLClusterTestEnv) returnFunds() error { } for _, sethClient := range te.sethClients { - if err := actions_seth.ReturnFunds(te.l, sethClient, contracts.ChainlinkClientToChainlinkNodeWithKeysAndAddress(te.ClCluster.NodeAPIs())); err != nil { + if err := actions_seth.ReturnFundsFromNodes(te.l, sethClient, contracts.ChainlinkClientToChainlinkNodeWithKeysAndAddress(te.ClCluster.NodeAPIs())); err != nil { te.l.Error().Err(err).Msg("Error returning funds from node") } } @@ -403,6 +403,10 @@ func (te *CLClusterTestEnv) returnFunds() error { } func (te *CLClusterTestEnv) GetEVMClient(chainId int64) (blockchain.EVMClient, error) { + if len(te.sethClients) > 0 { + return nil, 
fmt.Errorf("Environment is using Seth clients, not EVM clients") + } + if evmClient, ok := te.evmClients[chainId]; ok { return evmClient, nil } @@ -411,6 +415,9 @@ func (te *CLClusterTestEnv) GetEVMClient(chainId int64) (blockchain.EVMClient, e } func (te *CLClusterTestEnv) GetSethClient(chainId int64) (*seth.Client, error) { + if len(te.evmClients) > 0 { + return nil, fmt.Errorf("Environment is using EVMClients, not Seth clients") + } if sethClient, ok := te.sethClients[chainId]; ok { return sethClient, nil } diff --git a/integration-tests/docker/test_env/test_env_builder.go b/integration-tests/docker/test_env/test_env_builder.go index 549fdcafae4..0089ce0778a 100644 --- a/integration-tests/docker/test_env/test_env_builder.go +++ b/integration-tests/docker/test_env/test_env_builder.go @@ -30,7 +30,6 @@ import ( actions_seth "github.com/smartcontractkit/chainlink/integration-tests/actions/seth" "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/types/config/node" - "github.com/smartcontractkit/chainlink/integration-tests/utils" ) type CleanUpType string @@ -66,7 +65,7 @@ type CLTestEnvBuilder struct { cleanUpType CleanUpType cleanUpCustomFn func() chainOptionsFn []ChainOption - evmClientNetworkOption []EVMClientNetworkOption + evmNetworkOption []EVMNetworkOption privateEthereumNetworks []*ctf_config.EthereumNetworkConfig testConfig ctf_config.GlobalTestConfig chainlinkNodeLogScannerSettings *ChainlinkNodeLogScannerSettings @@ -244,11 +243,11 @@ func (b *CLTestEnvBuilder) WithChainOptions(opts ...ChainOption) *CLTestEnvBuild return b } -type EVMClientNetworkOption = func(*blockchain.EVMNetwork) *blockchain.EVMNetwork +type EVMNetworkOption = func(*blockchain.EVMNetwork) *blockchain.EVMNetwork -func (b *CLTestEnvBuilder) EVMClientNetworkOptions(opts ...EVMClientNetworkOption) *CLTestEnvBuilder { - b.evmClientNetworkOption = make([]EVMClientNetworkOption, 0) - b.evmClientNetworkOption = 
append(b.evmClientNetworkOption, opts...) +func (b *CLTestEnvBuilder) EVMNetworkOptions(opts ...EVMNetworkOption) *CLTestEnvBuilder { + b.evmNetworkOption = make([]EVMNetworkOption, 0) + b.evmNetworkOption = append(b.evmNetworkOption, opts...) return b } @@ -421,16 +420,7 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) { } if b.hasSeth { - readSethCfg := b.testConfig.GetSethConfig() - sethCfg, err := utils.MergeSethAndEvmNetworkConfigs(networkConfig, *readSethCfg) - if err != nil { - return nil, err - } - err = utils.ValidateSethNetworkConfig(sethCfg.Network) - if err != nil { - return nil, err - } - seth, err := seth.NewClientWithConfig(&sethCfg) + seth, err := actions_seth.GetChainClientWithConfigFunction(b.testConfig, networkConfig, actions_seth.OneEphemeralKeysLiveTestnetAutoFixFn) if err != nil { return nil, err } @@ -494,8 +484,8 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) { } if !b.isNonEVM { - if b.evmClientNetworkOption != nil && len(b.evmClientNetworkOption) > 0 { - for _, fn := range b.evmClientNetworkOption { + if b.evmNetworkOption != nil && len(b.evmNetworkOption) > 0 { + for _, fn := range b.evmNetworkOption { fn(&networkConfig) } } @@ -523,16 +513,7 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) { if b.hasSeth { b.te.sethClients = make(map[int64]*seth.Client) - readSethCfg := b.testConfig.GetSethConfig() - sethCfg, err := utils.MergeSethAndEvmNetworkConfigs(networkConfig, *readSethCfg) - if err != nil { - return nil, err - } - err = utils.ValidateSethNetworkConfig(sethCfg.Network) - if err != nil { - return nil, err - } - seth, err := seth.NewClientWithConfig(&sethCfg) + seth, err := actions_seth.GetChainClientWithConfigFunction(b.testConfig, networkConfig, actions_seth.OneEphemeralKeysLiveTestnetAutoFixFn) if err != nil { return nil, err } diff --git a/integration-tests/experiments/gas_test.go b/integration-tests/experiments/gas_test.go index ba096b69dbc..640187352ed 100644 --- 
a/integration-tests/experiments/gas_test.go +++ b/integration-tests/experiments/gas_test.go @@ -5,7 +5,6 @@ import ( "testing" "time" - "github.com/smartcontractkit/seth" "github.com/stretchr/testify/require" "github.com/smartcontractkit/chainlink-testing-framework/logging" @@ -13,7 +12,6 @@ import ( actions_seth "github.com/smartcontractkit/chainlink/integration-tests/actions/seth" "github.com/smartcontractkit/chainlink/integration-tests/contracts" tc "github.com/smartcontractkit/chainlink/integration-tests/testconfig" - "github.com/smartcontractkit/chainlink/integration-tests/utils" ) func TestGasExperiment(t *testing.T) { @@ -22,15 +20,7 @@ func TestGasExperiment(t *testing.T) { require.NoError(t, err, "Error getting config") network := networks.MustGetSelectedNetworkConfig(config.GetNetworkConfig())[0] - readSethCfg := config.GetSethConfig() - require.NotNil(t, readSethCfg, "Seth config shouldn't be nil") - - sethCfg, err := utils.MergeSethAndEvmNetworkConfigs(network, *readSethCfg) - require.NoError(t, err, "Error merging seth and evm network configs") - err = utils.ValidateSethNetworkConfig(sethCfg.Network) - require.NoError(t, err, "Error validating seth network config") - - seth, err := seth.NewClientWithConfig(&sethCfg) + seth, err := actions_seth.GetChainClient(&config, network) require.NoError(t, err, "Error creating seth client") _, err = actions_seth.SendFunds(l, seth, actions_seth.FundsToSendPayload{ diff --git a/integration-tests/go.mod b/integration-tests/go.mod index b189bd94e8a..1ae90758975 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -6,11 +6,12 @@ go 1.21.7 replace github.com/smartcontractkit/chainlink/v2 => ../ require ( + github.com/avast/retry-go/v4 v4.5.1 github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df github.com/cli/go-gh/v2 v2.0.0 github.com/ethereum/go-ethereum v1.13.8 github.com/fxamacker/cbor/v2 v2.5.0 - github.com/go-resty/resty/v2 v2.7.0 + github.com/go-resty/resty/v2 v2.11.0 
github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.6.0 github.com/jmoiron/sqlx v1.3.5 @@ -31,8 +32,8 @@ require ( github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868 github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000 github.com/smartcontractkit/libocr v0.0.0-20240419185742-fd3cab206b2c - github.com/smartcontractkit/seth v0.1.6-0.20240429143720-cacb8160ecec - github.com/smartcontractkit/wasp v0.4.5 + github.com/smartcontractkit/seth v1.0.9 + github.com/smartcontractkit/wasp v0.4.7 github.com/spf13/cobra v1.8.0 github.com/stretchr/testify v1.9.0 github.com/test-go/testify v1.1.4 @@ -49,7 +50,6 @@ require ( exclude github.com/hashicorp/consul v1.2.1 replace ( - // Pin K8s versions as their updates are highly disruptive and go mod keeps wanting to update them k8s.io/api => k8s.io/api v0.25.11 k8s.io/client-go => k8s.io/client-go v0.25.11 k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d @@ -76,8 +76,6 @@ require ( github.com/CosmWasm/wasmd v0.40.1 // indirect github.com/CosmWasm/wasmvm v1.2.4 // indirect github.com/DataDog/zstd v1.5.2 // indirect - github.com/K-Phoen/grabana v0.22.1 // indirect - github.com/K-Phoen/sdk v0.12.4 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.2.1 // indirect @@ -92,7 +90,6 @@ require ( github.com/armon/go-metrics v0.4.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/avast/retry-go v3.0.0+incompatible // indirect - github.com/avast/retry-go/v4 v4.5.1 // indirect github.com/aws/aws-sdk-go v1.45.25 // indirect github.com/aws/constructs-go/constructs/v10 v10.1.255 // indirect github.com/aws/jsii-runtime-go v1.75.0 // indirect @@ -234,10 +231,9 @@ require ( github.com/gorilla/securecookie v1.1.2 // indirect github.com/gorilla/sessions v1.2.2 // indirect github.com/gorilla/websocket v1.5.1 // indirect - 
github.com/gosimple/slug v1.13.1 // indirect - github.com/gosimple/unidecode v1.0.1 // indirect github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f // indirect github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 // indirect + github.com/grafana/grafana-foundation-sdk/go v0.0.0-20240326122733-6f96a993222b // indirect github.com/grafana/loki v1.6.2-0.20231215164305-b51b7d7b5503 // indirect github.com/grafana/loki/pkg/push v0.0.0-20231201111602-11ef833ed3e4 // indirect github.com/grafana/pyroscope-go v1.1.1 // indirect @@ -382,6 +378,7 @@ require ( github.com/smartcontractkit/chainlink-feeds v0.0.0-20240422130241-13c17a91b2ab // indirect github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240510181707-46b1311a5a83 // indirect github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240508155030-1024f2b55c69 // indirect + github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240328204215-ac91f55f1449 // indirect github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 // indirect github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 // indirect github.com/smartcontractkit/wsrpc v0.8.1 // indirect diff --git a/integration-tests/go.sum b/integration-tests/go.sum index ca48a3a90b1..2ddfc316840 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -130,10 +130,6 @@ github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtix github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= -github.com/K-Phoen/grabana v0.22.1 h1:b/O+C3H2H6VNYSeMCYUO4X4wYuwFXgBcRkvYa+fjpQA= -github.com/K-Phoen/grabana v0.22.1/go.mod h1:3LTXrTzQzTKTgvKSXdRjlsJbizSOW/V23Q3iX00R5bU= -github.com/K-Phoen/sdk v0.12.4 
h1:j2EYuBJm3zDTD0fGKACVFWxAXtkR0q5QzfVqxmHSeGQ= -github.com/K-Phoen/sdk v0.12.4/go.mod h1:qmM0wO23CtoDux528MXPpYvS4XkRWkWX6rvX9Za8EVU= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= @@ -647,8 +643,8 @@ github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7N github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= -github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= -github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I= +github.com/go-resty/resty/v2 v2.11.0 h1:i7jMfNOJYMp69lq7qozJP+bjgzfAzeOhuGlyDrqxT/8= +github.com/go-resty/resty/v2 v2.11.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= @@ -852,14 +848,12 @@ github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= -github.com/gosimple/slug v1.13.1 h1:bQ+kpX9Qa6tHRaK+fZR0A0M2Kd7Pa5eHPPsb1JpHD+Q= -github.com/gosimple/slug v1.13.1/go.mod h1:UiRaFH+GEilHstLUmcBgWcI42viBN7mAb818JrYOeFQ= -github.com/gosimple/unidecode v1.0.1 
h1:hZzFTMMqSswvf0LBJZCZgThIZrpDHFXux9KeGmn6T/o= -github.com/gosimple/unidecode v1.0.1/go.mod h1:CP0Cr1Y1kogOtx0bJblKzsVWrqYaqfNOnHzpgWw4Awc= github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f h1:gyojr97YeWZ70pKNakWv5/tKwBHuLy3icnIeCo9gQr4= github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f/go.mod h1:8dsy5tQOkeNQyjXpm5mQsbCu3H5uzeBD35MzRQFznKU= github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 h1:/of8Z8taCPftShATouOrBVy6GaTTjgQd/VfNiZp/VXQ= github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= +github.com/grafana/grafana-foundation-sdk/go v0.0.0-20240326122733-6f96a993222b h1:Msqs1nc2qWMxTriDCITKl58Td+7Md/RURmUmH7RXKns= +github.com/grafana/grafana-foundation-sdk/go v0.0.0-20240326122733-6f96a993222b/go.mod h1:WtWosval1KCZP9BGa42b8aVoJmVXSg0EvQXi9LDSVZQ= github.com/grafana/loki v1.6.2-0.20231215164305-b51b7d7b5503 h1:gdrsYbmk8822v6qvPwZO5DC6QjnAW7uKJ9YXnoUmV8c= github.com/grafana/loki v1.6.2-0.20231215164305-b51b7d7b5503/go.mod h1:d8seWXCEXkL42mhuIJYcGi6DxfehzoIpLrMQWJojvOo= github.com/grafana/loki/pkg/push v0.0.0-20231201111602-11ef833ed3e4 h1:wQ0FnSeebhJIBkgYOD06Mxk9HV2KhtEG0hp/7R+5RUQ= @@ -1532,6 +1526,8 @@ github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.202405 github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240508155030-1024f2b55c69/go.mod h1:VsfjhvWgjxqWja4q+FlXEtX5lu8BSxn10xRo6gi948g= github.com/smartcontractkit/chainlink-testing-framework v1.28.12 h1:15ssos9DvWekvj6JjmiPjTYsj/uw12HvTWlm1FHdYaA= github.com/smartcontractkit/chainlink-testing-framework v1.28.12/go.mod h1:x1zDOz8zcLjEvs9fNA9y/DMguLam/2+CJdpxX0+rM8A= +github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240328204215-ac91f55f1449 h1:fX/xmGm1GBsD1ZZnooNT+eWA0hiTAqFlHzOC5CY4dy8= +github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240328204215-ac91f55f1449/go.mod h1:DC8sQMyTlI/44UCTL8QWFwb0bYNoXCfjwCv2hMivYZU= 
github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868 h1:FFdvEzlYwcuVHkdZ8YnZR/XomeMGbz5E2F2HZI3I3w8= github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868/go.mod h1:Kn1Hape05UzFZ7bOUnm3GVsHzP0TNrVmpfXYNHdqGGs= github.com/smartcontractkit/go-plugin v0.0.0-20240208201424-b3b91517de16 h1:TFe+FvzxClblt6qRfqEhUfa4kFQx5UobuoFGO2W4mMo= @@ -1540,14 +1536,14 @@ github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f h1:hgJ github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f/go.mod h1:MvMXoufZAtqExNexqi4cjrNYE9MefKddKylxjS+//n0= github.com/smartcontractkit/libocr v0.0.0-20240419185742-fd3cab206b2c h1:lIyMbTaF2H0Q71vkwZHX/Ew4KF2BxiKhqEXwF8rn+KI= github.com/smartcontractkit/libocr v0.0.0-20240419185742-fd3cab206b2c/go.mod h1:fb1ZDVXACvu4frX3APHZaEBp0xi1DIm34DcA0CwTsZM= -github.com/smartcontractkit/seth v0.1.6-0.20240429143720-cacb8160ecec h1:BT1loU6TT2YqMenD7XE+aw7IeeTiC25+r1TLKAySVIg= -github.com/smartcontractkit/seth v0.1.6-0.20240429143720-cacb8160ecec/go.mod h1:2TMOZQ8WTAw7rR1YBbXpnad6VmT/+xDd/nXLmB7Eero= +github.com/smartcontractkit/seth v1.0.9 h1:v+gxRY5JT9u4Ptk1mg/Sm76aqdG2vFw1zq1Ngwoj6yk= +github.com/smartcontractkit/seth v1.0.9/go.mod h1:2TMOZQ8WTAw7rR1YBbXpnad6VmT/+xDd/nXLmB7Eero= github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 h1:yiKnypAqP8l0OX0P3klzZ7SCcBUxy5KqTAKZmQOvSQE= github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1/go.mod h1:q6f4fe39oZPdsh1i57WznEZgxd8siidMaSFq3wdPmVg= github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 h1:Dai1bn+Q5cpeGMQwRdjOdVjG8mmFFROVkSKuUgBErRQ= github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1/go.mod h1:G5Sd/yzHWf26rQ+X0nG9E0buKPqRGPMJAfk2gwCzOOw= -github.com/smartcontractkit/wasp v0.4.5 h1:pgiXwBci2m15eo33AzspzhpNG/gxg+8QGxl+I5LpfsQ= -github.com/smartcontractkit/wasp v0.4.5/go.mod 
h1:eVhBVLbVv0qORUlN7aR5C4aTN/lTYO3KnN1erO4ROOI= +github.com/smartcontractkit/wasp v0.4.7 h1:7mKJfwzFbuE8xVLUYtLt7Bjw8q/bmVZRW6Ks8kc1LVM= +github.com/smartcontractkit/wasp v0.4.7/go.mod h1:jeabvyXikb2aNoLQwcZGqaz17efrR8NJhpq4seAmdgs= github.com/smartcontractkit/wsrpc v0.8.1 h1:kk0SXLqWrWaZ3J6c7n8D0NZ2uTMBBBpG5dZZXZX8UGE= github.com/smartcontractkit/wsrpc v0.8.1/go.mod h1:yfg8v8fPLXkb6Mcnx6Pm/snP6jJ0r5Kf762Yd1a/KpA= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -1854,6 +1850,7 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1952,7 +1949,6 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod 
h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= @@ -1961,6 +1957,7 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2092,6 +2089,7 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= @@ -2104,6 +2102,7 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= 
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2126,6 +2125,7 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/integration-tests/load/automationv2_1/automationv2_1_test.go b/integration-tests/load/automationv2_1/automationv2_1_test.go index 0304ebd0c71..f8b1f1fc6d3 100644 --- a/integration-tests/load/automationv2_1/automationv2_1_test.go +++ b/integration-tests/load/automationv2_1/automationv2_1_test.go @@ -15,8 +15,6 @@ import ( "github.com/pkg/errors" - "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/wiremock" - geth "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -30,10 +28,10 @@ import ( ocr2keepers30config "github.com/smartcontractkit/chainlink-automation/pkg/v3/config" "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" - "github.com/smartcontractkit/chainlink-testing-framework/blockchain" "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink" 
"github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/ethereum" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/wiremock" "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/networks" @@ -41,14 +39,15 @@ import ( gowiremock "github.com/wiremock/go-wiremock" - "github.com/smartcontractkit/chainlink/integration-tests/actions" "github.com/smartcontractkit/chainlink/integration-tests/actions/automationv2" + actions_seth "github.com/smartcontractkit/chainlink/integration-tests/actions/seth" "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" contractseth "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" tc "github.com/smartcontractkit/chainlink/integration-tests/testconfig" aconfig "github.com/smartcontractkit/chainlink/integration-tests/testconfig/automation" "github.com/smartcontractkit/chainlink/integration-tests/testreporters" + "github.com/smartcontractkit/chainlink/integration-tests/utils" ac "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/automation_compatible_utils" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_emitter" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/simple_log_upkeep_counter_wrapper" @@ -192,7 +191,7 @@ Spec Type: %s Log Level: %s Image: %s Tag: %s - + Load Config: %s` @@ -313,28 +312,24 @@ Load Config: "prometheus": *loadedTestConfig.Automation.General.UsePrometheus, "secretsToml": secretsTOML, }, loadedTestConfig.ChainlinkImage, overrideFn) - testEnvironment.AddHelm(cd) } err = testEnvironment.Run() require.NoError(t, err, "Error running chainlink DON") - chainClient, err := blockchain.NewEVMClient(testNetwork, testEnvironment, l) - require.NoError(t, err, "Error building chain client") + testNetwork = utils.MustReplaceSimulatedNetworkUrlWithK8(l, 
testNetwork, *testEnvironment) - contractDeployer, err := contracts.NewContractDeployer(chainClient, l) - require.NoError(t, err, "Error building contract deployer") + chainClient, err := actions_seth.GetChainClientWithConfigFunction(loadedTestConfig, testNetwork, actions_seth.OneEphemeralKeysLiveTestnetCheckFn) + require.NoError(t, err, "Error creating seth client") chainlinkNodes, err := client.ConnectChainlinkNodes(testEnvironment) require.NoError(t, err, "Error connecting to chainlink nodes") - chainClient.ParallelTransactions(true) - - multicallAddress, err := contractDeployer.DeployMultiCallContract() + multicallAddress, err := contracts.DeployMultiCallContract(chainClient) require.NoError(t, err, "Error deploying multicall contract") - a := automationv2.NewAutomationTestK8s(chainClient, contractDeployer, chainlinkNodes) + a := automationv2.NewAutomationTestK8s(l, chainClient, chainlinkNodes) conf := loadedTestConfig.Automation.AutomationConfig a.RegistrySettings = contracts.KeeperRegistrySettings{ PaymentPremiumPPB: *conf.RegistrySettings.PaymentPremiumPPB, @@ -398,7 +393,7 @@ Load Config: a.SetupAutomationDeployment(t) - err = actions.FundChainlinkNodesAddress(chainlinkNodes[1:], chainClient, big.NewFloat(*loadedTestConfig.Common.ChainlinkNodeFunding), 0) + err = actions_seth.FundChainlinkNodesFromRootAddress(l, a.ChainClient, contracts.ChainlinkK8sClientToChainlinkNodeWithKeysAndAddress(chainlinkNodes[1:]), big.NewFloat(*loadedTestConfig.Common.ChainlinkNodeFunding)) require.NoError(t, err, "Error funding chainlink nodes") consumerContracts := make([]contracts.KeeperConsumer, 0) @@ -422,55 +417,26 @@ Load Config: upkeepConfigs := make([]automationv2.UpkeepConfig, 0) loadConfigs := make([]aconfig.Load, 0) - cEVMClient, err := blockchain.ConcurrentEVMClient(testNetwork, testEnvironment, chainClient, l) - require.NoError(t, err, "Error building concurrent chain client") + expectedTotalUpkeepCount := 0 for _, u := range loadedTestConfig.Automation.Load { - for 
i := 0; i < *u.NumberOfUpkeeps; i++ { - consumerContract, err := contractDeployer.DeployAutomationSimpleLogTriggerConsumer(*u.IsStreamsLookup) - require.NoError(t, err, "Error deploying automation consumer contract") - consumerContracts = append(consumerContracts, consumerContract) - l.Debug(). - Str("Contract Address", consumerContract.Address()). - Int("Number", i+1). - Int("Out Of", *u.NumberOfUpkeeps). - Msg("Deployed Automation Log Trigger Consumer Contract") - - loadCfg := aconfig.Load{ - NumberOfEvents: u.NumberOfEvents, - NumberOfSpamMatchingEvents: u.NumberOfSpamMatchingEvents, - NumberOfSpamNonMatchingEvents: u.NumberOfSpamNonMatchingEvents, - CheckBurnAmount: u.CheckBurnAmount, - PerformBurnAmount: u.PerformBurnAmount, - UpkeepGasLimit: u.UpkeepGasLimit, - SharedTrigger: u.SharedTrigger, - Feeds: []string{}, - } + expectedTotalUpkeepCount += *u.NumberOfUpkeeps + } - if *u.IsStreamsLookup { - loadCfg.Feeds = u.Feeds - } + maxDeploymentConcurrency := 100 - loadConfigs = append(loadConfigs, loadCfg) + for _, u := range loadedTestConfig.Automation.Load { + deploymentData, err := deployConsumerAndTriggerContracts(l, u, a.ChainClient, multicallAddress, maxDeploymentConcurrency, automationDefaultLinkFunds, a.LinkToken) + require.NoError(t, err, "Error deploying consumer and trigger contracts") - if *u.SharedTrigger && i > 0 { - triggerAddresses = append(triggerAddresses, triggerAddresses[len(triggerAddresses)-1]) - continue - } - triggerContract, err := contractDeployer.DeployLogEmitterContract() - require.NoError(t, err, "Error deploying log emitter contract") - triggerContracts = append(triggerContracts, triggerContract) - triggerAddresses = append(triggerAddresses, triggerContract.Address()) - l.Debug(). - Str("Contract Address", triggerContract.Address().Hex()). - Int("Number", i+1). - Int("Out Of", *u.NumberOfUpkeeps). 
- Msg("Deployed Automation Log Trigger Emitter Contract") - } - err = chainClient.WaitForEvents() - require.NoError(t, err, "Failed waiting for contracts to deploy") + consumerContracts = append(consumerContracts, deploymentData.ConsumerContracts...) + triggerContracts = append(triggerContracts, deploymentData.TriggerContracts...) + triggerAddresses = append(triggerAddresses, deploymentData.TriggerAddresses...) + loadConfigs = append(loadConfigs, deploymentData.LoadConfigs...) } + require.Equal(t, expectedTotalUpkeepCount, len(consumerContracts), "Incorrect number of consumer/trigger contracts deployed") + for i, consumerContract := range consumerContracts { logTriggerConfigStruct := ac.IAutomationV21PlusCommonLogTriggerConfig{ ContractAddress: triggerAddresses[i], @@ -504,7 +470,7 @@ Load Config: EncryptedEmail: []byte("test@mail.com"), UpkeepContract: common.HexToAddress(consumerContract.Address()), GasLimit: *loadConfigs[i].UpkeepGasLimit, - AdminAddress: common.HexToAddress(chainClient.GetDefaultWallet().Address()), + AdminAddress: chainClient.MustGetRootKeyAddress(), TriggerType: uint8(1), CheckData: encodedCheckDataStruct, TriggerConfig: encodedLogTriggerConfig, @@ -515,21 +481,20 @@ Load Config: upkeepConfigs = append(upkeepConfigs, upkeepConfig) } - registrationTxHashes, err := a.RegisterUpkeeps(upkeepConfigs) + require.Equal(t, expectedTotalUpkeepCount, len(upkeepConfigs), "Incorrect number of upkeep configs created") + registrationTxHashes, err := a.RegisterUpkeeps(upkeepConfigs, maxDeploymentConcurrency) require.NoError(t, err, "Error registering upkeeps") - err = chainClient.WaitForEvents() - require.NoError(t, err, "Failed waiting for upkeeps to register") - - upkeepIds, err := a.ConfirmUpkeepsRegistered(registrationTxHashes) + upkeepIds, err := a.ConfirmUpkeepsRegistered(registrationTxHashes, maxDeploymentConcurrency) require.NoError(t, err, "Error confirming upkeeps registered") + require.Equal(t, expectedTotalUpkeepCount, len(upkeepIds), "Incorrect 
number of upkeeps registered") l.Info().Msg("Successfully registered all Automation Upkeeps") l.Info().Interface("Upkeep IDs", upkeepIds).Msg("Upkeeps Registered") l.Info().Str("STARTUP_WAIT_TIME", StartupWaitTime.String()).Msg("Waiting for plugin to start") time.Sleep(StartupWaitTime) - startBlock, err := chainClient.LatestBlockNumber(ctx) + startBlock, err := a.ChainClient.Client.BlockNumber(ctx) require.NoError(t, err, "Error getting latest block number") p := wasp.NewProfile() @@ -575,7 +540,7 @@ Load Config: Gun: NewLogTriggerUser( l, configs, - cEVMClient, + a.ChainClient, multicallAddress.Hex(), ), CallResultBufLen: 1000, @@ -601,7 +566,7 @@ Load Config: Msg("Test execution ended") l.Info().Str("Duration", testExDuration.String()).Msg("Test Execution Duration") - endBlock, err := chainClient.LatestBlockNumber(ctx) + endBlock, err := chainClient.Client.BlockNumber(ctx) require.NoError(t, err, "Error getting latest block number") l.Info().Uint64("Starting Block", startBlock).Uint64("Ending Block", endBlock).Msg("Test Block Range") @@ -643,14 +608,14 @@ Load Config: logsInBatch []types.Log ) ctx2, cancel := context.WithTimeout(ctx, timeout) - logsInBatch, err = chainClient.FilterLogs(ctx2, filterQuery) + logsInBatch, err = a.ChainClient.Client.FilterLogs(ctx2, filterQuery) cancel() if err != nil { l.Error().Err(err). Interface("FilterQuery", filterQuery). Str("Contract Address", consumerContract.Address()). Str("Timeout", timeout.String()). - Msg("Error getting logs") + Msg("Error getting consumer contract logs") timeout = time.Duration(math.Min(float64(timeout)*2, float64(2*time.Minute))) continue } @@ -658,7 +623,8 @@ Load Config: Interface("FilterQuery", filterQuery). Str("Contract Address", consumerContract.Address()). Str("Timeout", timeout.String()). - Msg("Collected logs") + Int("Number of Logs", len(logsInBatch)). + Msg("Collected consumer contract logs") logs = append(logs, logsInBatch...) 
} } @@ -670,7 +636,7 @@ Load Config: eventDetails, err := consumerABI.EventByID(log.Topics[0]) require.NoError(t, err, "Error getting event details") consumer, err := simple_log_upkeep_counter_wrapper.NewSimpleLogUpkeepCounter( - address, chainClient.Backend(), + address, a.ChainClient.Client, ) require.NoError(t, err, "Error getting consumer contract") if eventDetails.Name == "PerformingUpkeep" { @@ -707,22 +673,23 @@ Load Config: logsInBatch []types.Log ) ctx2, cancel := context.WithTimeout(ctx, timeout) - logsInBatch, err = chainClient.FilterLogs(ctx2, filterQuery) + logsInBatch, err = chainClient.Client.FilterLogs(ctx2, filterQuery) cancel() if err != nil { l.Error().Err(err). Interface("FilterQuery", filterQuery). - Str("Contract Address", triggerContract.Address().Hex()). + Str("Contract Address", address.Hex()). Str("Timeout", timeout.String()). - Msg("Error getting logs") + Msg("Error getting trigger contract logs") timeout = time.Duration(math.Min(float64(timeout)*2, float64(2*time.Minute))) continue } l.Debug(). Interface("FilterQuery", filterQuery). - Str("Contract Address", triggerContract.Address().Hex()). + Str("Contract Address", address.Hex()). Str("Timeout", timeout.String()). - Msg("Collected logs") + Int("Number of Logs", len(logsInBatch)). + Msg("Collected trigger contract logs") logs = append(logs, logsInBatch...) 
} } @@ -824,14 +791,14 @@ Test Duration: %s` } t.Cleanup(func() { - if err = actions.TeardownRemoteSuite(t, testEnvironment.Cfg.Namespace, chainlinkNodes, nil, &loadedTestConfig, chainClient); err != nil { + if err = actions_seth.TeardownRemoteSuite(t, chainClient, testEnvironment.Cfg.Namespace, chainlinkNodes, nil, &loadedTestConfig); err != nil { l.Error().Err(err).Msg("Error when tearing down remote suite") testEnvironment.Cfg.TTL += time.Hour * 48 err := testEnvironment.Run() if err != nil { l.Error().Err(err).Msg("Error increasing TTL of namespace") } - } else if chainClient.NetworkSimulated() { + } else if chainClient.Cfg.IsSimulatedNetwork() { err := testEnvironment.Client.RemoveNamespace(testEnvironment.Cfg.Namespace) if err != nil { l.Error().Err(err).Msg("Error removing namespace") diff --git a/integration-tests/load/automationv2_1/gun.go b/integration-tests/load/automationv2_1/gun.go index c80c9cf7cc1..162aca251fb 100644 --- a/integration-tests/load/automationv2_1/gun.go +++ b/integration-tests/load/automationv2_1/gun.go @@ -5,9 +5,9 @@ import ( "sync" "github.com/rs/zerolog" + "github.com/smartcontractkit/seth" "github.com/smartcontractkit/wasp" - "github.com/smartcontractkit/chainlink-testing-framework/blockchain" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_emitter" "github.com/smartcontractkit/chainlink/integration-tests/contracts" @@ -24,7 +24,7 @@ type LogTriggerGun struct { data [][]byte addresses []string multiCallAddress string - evmClient blockchain.EVMClient + client *seth.Client logger zerolog.Logger } @@ -43,7 +43,7 @@ func generateCallData(int1 int64, int2 int64, count int64) []byte { func NewLogTriggerUser( logger zerolog.Logger, TriggerConfigs []LogTriggerConfig, - evmClient blockchain.EVMClient, + client *seth.Client, multicallAddress string, ) *LogTriggerGun { var data [][]byte @@ -72,7 +72,7 @@ func NewLogTriggerUser( data: data, logger: logger, multiCallAddress: multicallAddress, - evmClient: evmClient, + 
client: client, } } @@ -88,17 +88,37 @@ func (m *LogTriggerGun) Call(_ *wasp.Generator) *wasp.Response { } dividedData = append(dividedData, d[i:end]) } + + resultCh := make(chan *wasp.Response, len(dividedData)) + for _, a := range dividedData { wg.Add(1) - go func(a [][]byte, m *LogTriggerGun) *wasp.Response { + go func(a [][]byte, m *LogTriggerGun) { defer wg.Done() - _, err := contracts.MultiCallLogTriggerLoadGen(m.evmClient, m.multiCallAddress, m.addresses, a) + + _, err := contracts.MultiCallLogTriggerLoadGen(m.client, m.multiCallAddress, m.addresses, a) if err != nil { - return &wasp.Response{Error: err.Error(), Failed: true} + m.logger.Error().Err(err).Msg("Error calling MultiCallLogTriggerLoadGen") + resultCh <- &wasp.Response{Error: err.Error(), Failed: true} + return } - return &wasp.Response{} + resultCh <- &wasp.Response{} }(a, m) } wg.Wait() - return &wasp.Response{} + close(resultCh) + + r := &wasp.Response{} + for result := range resultCh { + if result.Failed { + r.Failed = true + if r.Error != "" { + r.Error += "; " + result.Error + } else { + r.Error = result.Error + } + } + } + + return r } diff --git a/integration-tests/load/automationv2_1/helpers.go b/integration-tests/load/automationv2_1/helpers.go index 00576c255e4..4fa13149d77 100644 --- a/integration-tests/load/automationv2_1/helpers.go +++ b/integration-tests/load/automationv2_1/helpers.go @@ -2,12 +2,20 @@ package automationv2_1 import ( "fmt" + "math/big" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" "github.com/rs/zerolog" "github.com/slack-go/slack" + "github.com/smartcontractkit/seth" + ctf_concurrency "github.com/smartcontractkit/chainlink-testing-framework/concurrency" reportModel "github.com/smartcontractkit/chainlink-testing-framework/testreporters" + actions_seth "github.com/smartcontractkit/chainlink/integration-tests/actions/seth" + "github.com/smartcontractkit/chainlink/integration-tests/contracts" tc 
"github.com/smartcontractkit/chainlink/integration-tests/testconfig" + aconfig "github.com/smartcontractkit/chainlink/integration-tests/testconfig/automation" ) func extraBlockWithText(text string) slack.Block { @@ -63,3 +71,136 @@ func sendSlackNotification(header string, l zerolog.Logger, config *tc.TestConfi l.Info().Str("ts", ts).Msg("Sent Slack Message") return ts, err } + +type DeploymentData struct { + ConsumerContracts []contracts.KeeperConsumer + TriggerContracts []contracts.LogEmitter + TriggerAddresses []common.Address + LoadConfigs []aconfig.Load +} + +type deployedContractData struct { + consumerContract contracts.KeeperConsumer + triggerContract contracts.LogEmitter + triggerAddress common.Address + loadConfig aconfig.Load +} + +func (d deployedContractData) GetResult() deployedContractData { + return d +} + +type task struct { + deployTrigger bool +} + +func deployConsumerAndTriggerContracts(l zerolog.Logger, loadConfig aconfig.Load, chainClient *seth.Client, multicallAddress common.Address, maxConcurrency int, automationDefaultLinkFunds *big.Int, linkToken contracts.LinkToken) (DeploymentData, error) { + data := DeploymentData{} + + concurrency, err := actions_seth.GetAndAssertCorrectConcurrency(chainClient, 1) + if err != nil { + return DeploymentData{}, err + } + + if concurrency > maxConcurrency { + concurrency = maxConcurrency + l.Debug(). + Msgf("Concurrency is higher than max concurrency, setting concurrency to %d", concurrency) + } + + l.Debug(). + Int("Number of Upkeeps", *loadConfig.NumberOfUpkeeps). + Int("Concurrency", concurrency). 
+ Msg("Deployment parallelisation info") + + tasks := []task{} + for i := 0; i < *loadConfig.NumberOfUpkeeps; i++ { + if *loadConfig.SharedTrigger { + if i == 0 { + tasks = append(tasks, task{deployTrigger: true}) + } else { + tasks = append(tasks, task{deployTrigger: false}) + } + continue + } + tasks = append(tasks, task{deployTrigger: true}) + } + + var deployContractFn = func(deployedCh chan deployedContractData, errorCh chan error, keyNum int, task task) { + data := deployedContractData{} + consumerContract, err := contracts.DeployAutomationSimpleLogTriggerConsumerFromKey(chainClient, *loadConfig.IsStreamsLookup, keyNum) + if err != nil { + errorCh <- errors.Wrapf(err, "Error deploying simple log trigger contract") + return + } + + data.consumerContract = consumerContract + + loadCfg := aconfig.Load{ + NumberOfEvents: loadConfig.NumberOfEvents, + NumberOfSpamMatchingEvents: loadConfig.NumberOfSpamMatchingEvents, + NumberOfSpamNonMatchingEvents: loadConfig.NumberOfSpamNonMatchingEvents, + CheckBurnAmount: loadConfig.CheckBurnAmount, + PerformBurnAmount: loadConfig.PerformBurnAmount, + UpkeepGasLimit: loadConfig.UpkeepGasLimit, + SharedTrigger: loadConfig.SharedTrigger, + Feeds: []string{}, + } + + if *loadConfig.IsStreamsLookup { + loadCfg.Feeds = loadConfig.Feeds + } + + data.loadConfig = loadCfg + + if !task.deployTrigger { + deployedCh <- data + return + } + + triggerContract, err := contracts.DeployLogEmitterContractFromKey(l, chainClient, keyNum) + if err != nil { + errorCh <- errors.Wrapf(err, "Error deploying log emitter contract") + return + } + + data.triggerContract = triggerContract + data.triggerAddress = triggerContract.Address() + deployedCh <- data + } + + executor := ctf_concurrency.NewConcurrentExecutor[deployedContractData, deployedContractData, task](l) + results, err := executor.Execute(concurrency, tasks, deployContractFn) + if err != nil { + return DeploymentData{}, err + } + + for _, result := range results { + if 
result.GetResult().triggerContract != nil { + data.TriggerContracts = append(data.TriggerContracts, result.GetResult().triggerContract) + data.TriggerAddresses = append(data.TriggerAddresses, result.GetResult().triggerAddress) + } + data.ConsumerContracts = append(data.ConsumerContracts, result.GetResult().consumerContract) + data.LoadConfigs = append(data.LoadConfigs, result.GetResult().loadConfig) + } + + // if there's more than 1 upkeep and it's a shared trigger, then we should use only the first address in triggerAddresses + // as triggerAddresses array + if *loadConfig.SharedTrigger { + if len(data.TriggerAddresses) == 0 { + return DeploymentData{}, errors.New("No trigger addresses found") + } + triggerAddress := data.TriggerAddresses[0] + data.TriggerAddresses = make([]common.Address, 0) + for i := 0; i < *loadConfig.NumberOfUpkeeps; i++ { + data.TriggerAddresses = append(data.TriggerAddresses, triggerAddress) + } + } + + sendErr := actions_seth.SendLinkFundsToDeploymentAddresses(chainClient, concurrency, *loadConfig.NumberOfUpkeeps, *loadConfig.NumberOfUpkeeps/concurrency, multicallAddress, automationDefaultLinkFunds, linkToken) + if sendErr != nil { + return DeploymentData{}, sendErr + } + + return data, nil +} diff --git a/integration-tests/load/functions/setup.go b/integration-tests/load/functions/setup.go index 190dbbd8692..e6f96ccbdf2 100644 --- a/integration-tests/load/functions/setup.go +++ b/integration-tests/load/functions/setup.go @@ -14,13 +14,14 @@ import ( "github.com/smartcontractkit/seth" "github.com/smartcontractkit/tdh2/go/tdh2/tdh2easy" - "github.com/smartcontractkit/chainlink-testing-framework/networks" + chainlinkutils "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" ctf_config "github.com/smartcontractkit/chainlink-testing-framework/config" + "github.com/smartcontractkit/chainlink-testing-framework/networks" + + actions_seth "github.com/smartcontractkit/chainlink/integration-tests/actions/seth" 
"github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/types" - "github.com/smartcontractkit/chainlink/integration-tests/utils" - chainlinkutils "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" ) type FunctionsTest struct { @@ -52,16 +53,7 @@ type S4SecretsCfg struct { func SetupLocalLoadTestEnv(globalConfig ctf_config.GlobalTestConfig, functionsConfig types.FunctionsTestConfig) (*FunctionsTest, error) { selectedNetwork := networks.MustGetSelectedNetworkConfig(globalConfig.GetNetworkConfig())[0] - readSethCfg := globalConfig.GetSethConfig() - sethCfg, err := utils.MergeSethAndEvmNetworkConfigs(selectedNetwork, *readSethCfg) - if err != nil { - return nil, err - } - err = utils.ValidateSethNetworkConfig(sethCfg.Network) - if err != nil { - return nil, err - } - seth, err := seth.NewClientWithConfig(&sethCfg) + seth, err := actions_seth.GetChainClient(globalConfig, selectedNetwork) if err != nil { return nil, err } diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod index 4bb7e8c876a..6402912ef6c 100644 --- a/integration-tests/load/go.mod +++ b/integration-tests/load/go.mod @@ -21,9 +21,9 @@ require ( github.com/smartcontractkit/chainlink/integration-tests v0.0.0-20240214231432-4ad5eb95178c github.com/smartcontractkit/chainlink/v2 v2.9.0-beta0.0.20240216210048-da02459ddad8 github.com/smartcontractkit/libocr v0.0.0-20240419185742-fd3cab206b2c - github.com/smartcontractkit/seth v0.1.6-0.20240429143720-cacb8160ecec + github.com/smartcontractkit/seth v1.0.9 github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 - github.com/smartcontractkit/wasp v0.4.6 + github.com/smartcontractkit/wasp v0.4.7 github.com/stretchr/testify v1.9.0 github.com/wiremock/go-wiremock v1.9.0 go.uber.org/ratelimit v0.3.0 @@ -219,6 +219,7 @@ require ( github.com/gosimple/unidecode v1.0.1 // indirect github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f // indirect 
github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 // indirect + github.com/grafana/grafana-foundation-sdk/go v0.0.0-20240326122733-6f96a993222b // indirect github.com/grafana/loki v1.6.2-0.20231215164305-b51b7d7b5503 // indirect github.com/grafana/loki/pkg/push v0.0.0-20231201111602-11ef833ed3e4 // indirect github.com/grafana/pyroscope-go v1.1.1 // indirect @@ -370,7 +371,7 @@ require ( github.com/smartcontractkit/chainlink-feeds v0.0.0-20240422130241-13c17a91b2ab // indirect github.com/smartcontractkit/chainlink-solana v1.0.3-0.20240510181707-46b1311a5a83 // indirect github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240508155030-1024f2b55c69 // indirect - github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240227164431-18a7065e23ea // indirect + github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240328204215-ac91f55f1449 // indirect github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868 // indirect github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 // indirect github.com/smartcontractkit/wsrpc v0.8.1 // indirect diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum index 52b71dadd72..def3bae4118 100644 --- a/integration-tests/load/go.sum +++ b/integration-tests/load/go.sum @@ -850,6 +850,8 @@ github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f h1:gyojr97YeWZ70pKNa github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f/go.mod h1:8dsy5tQOkeNQyjXpm5mQsbCu3H5uzeBD35MzRQFznKU= github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 h1:/of8Z8taCPftShATouOrBVy6GaTTjgQd/VfNiZp/VXQ= github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= +github.com/grafana/grafana-foundation-sdk/go v0.0.0-20240326122733-6f96a993222b h1:Msqs1nc2qWMxTriDCITKl58Td+7Md/RURmUmH7RXKns= +github.com/grafana/grafana-foundation-sdk/go 
v0.0.0-20240326122733-6f96a993222b/go.mod h1:WtWosval1KCZP9BGa42b8aVoJmVXSg0EvQXi9LDSVZQ= github.com/grafana/loki v1.6.2-0.20231215164305-b51b7d7b5503 h1:gdrsYbmk8822v6qvPwZO5DC6QjnAW7uKJ9YXnoUmV8c= github.com/grafana/loki v1.6.2-0.20231215164305-b51b7d7b5503/go.mod h1:d8seWXCEXkL42mhuIJYcGi6DxfehzoIpLrMQWJojvOo= github.com/grafana/loki/pkg/push v0.0.0-20231201111602-11ef833ed3e4 h1:wQ0FnSeebhJIBkgYOD06Mxk9HV2KhtEG0hp/7R+5RUQ= @@ -1514,8 +1516,8 @@ github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.202405 github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20240508155030-1024f2b55c69/go.mod h1:VsfjhvWgjxqWja4q+FlXEtX5lu8BSxn10xRo6gi948g= github.com/smartcontractkit/chainlink-testing-framework v1.28.12 h1:15ssos9DvWekvj6JjmiPjTYsj/uw12HvTWlm1FHdYaA= github.com/smartcontractkit/chainlink-testing-framework v1.28.12/go.mod h1:x1zDOz8zcLjEvs9fNA9y/DMguLam/2+CJdpxX0+rM8A= -github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240227164431-18a7065e23ea h1:ZdLmNAfKRjH8AYUvjiiDGUgiWQfq/7iNpxyTkvjx/ko= -github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240227164431-18a7065e23ea/go.mod h1:gCKC9w6XpNk6jm+XIk2psrkkfxhi421N9NSiFceXW88= +github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240328204215-ac91f55f1449 h1:fX/xmGm1GBsD1ZZnooNT+eWA0hiTAqFlHzOC5CY4dy8= +github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240328204215-ac91f55f1449/go.mod h1:DC8sQMyTlI/44UCTL8QWFwb0bYNoXCfjwCv2hMivYZU= github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868 h1:FFdvEzlYwcuVHkdZ8YnZR/XomeMGbz5E2F2HZI3I3w8= github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868/go.mod h1:Kn1Hape05UzFZ7bOUnm3GVsHzP0TNrVmpfXYNHdqGGs= github.com/smartcontractkit/go-plugin v0.0.0-20231003134350-e49dad63b306 h1:ko88+ZznniNJZbZPWAvHQU8SwKAdHngdDZ+pvVgB5ss= @@ -1524,14 +1526,14 @@ github.com/smartcontractkit/grpc-proxy 
v0.0.0-20230731113816-f1be6620749f h1:hgJ github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f/go.mod h1:MvMXoufZAtqExNexqi4cjrNYE9MefKddKylxjS+//n0= github.com/smartcontractkit/libocr v0.0.0-20240419185742-fd3cab206b2c h1:lIyMbTaF2H0Q71vkwZHX/Ew4KF2BxiKhqEXwF8rn+KI= github.com/smartcontractkit/libocr v0.0.0-20240419185742-fd3cab206b2c/go.mod h1:fb1ZDVXACvu4frX3APHZaEBp0xi1DIm34DcA0CwTsZM= -github.com/smartcontractkit/seth v0.1.6-0.20240429143720-cacb8160ecec h1:BT1loU6TT2YqMenD7XE+aw7IeeTiC25+r1TLKAySVIg= -github.com/smartcontractkit/seth v0.1.6-0.20240429143720-cacb8160ecec/go.mod h1:2TMOZQ8WTAw7rR1YBbXpnad6VmT/+xDd/nXLmB7Eero= +github.com/smartcontractkit/seth v1.0.9 h1:v+gxRY5JT9u4Ptk1mg/Sm76aqdG2vFw1zq1Ngwoj6yk= +github.com/smartcontractkit/seth v1.0.9/go.mod h1:2TMOZQ8WTAw7rR1YBbXpnad6VmT/+xDd/nXLmB7Eero= github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 h1:yiKnypAqP8l0OX0P3klzZ7SCcBUxy5KqTAKZmQOvSQE= github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1/go.mod h1:q6f4fe39oZPdsh1i57WznEZgxd8siidMaSFq3wdPmVg= github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 h1:Dai1bn+Q5cpeGMQwRdjOdVjG8mmFFROVkSKuUgBErRQ= github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1/go.mod h1:G5Sd/yzHWf26rQ+X0nG9E0buKPqRGPMJAfk2gwCzOOw= -github.com/smartcontractkit/wasp v0.4.6 h1:s6J8HgpxMHORl19nCpZPxc5jaVUQv8EXB6QjTuLXXnw= -github.com/smartcontractkit/wasp v0.4.6/go.mod h1:+ViWdUf1ap6powiEiwPskpZfH/Q1sG29YoVav7zGOIo= +github.com/smartcontractkit/wasp v0.4.7 h1:7mKJfwzFbuE8xVLUYtLt7Bjw8q/bmVZRW6Ks8kc1LVM= +github.com/smartcontractkit/wasp v0.4.7/go.mod h1:jeabvyXikb2aNoLQwcZGqaz17efrR8NJhpq4seAmdgs= github.com/smartcontractkit/wsrpc v0.8.1 h1:kk0SXLqWrWaZ3J6c7n8D0NZ2uTMBBBpG5dZZXZX8UGE= github.com/smartcontractkit/wsrpc v0.8.1/go.mod h1:yfg8v8fPLXkb6Mcnx6Pm/snP6jJ0r5Kf762Yd1a/KpA= github.com/smartystreets/assertions 
v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= diff --git a/integration-tests/load/ocr/ocr_test.go b/integration-tests/load/ocr/ocr_test.go index a1388280e55..e836fda39c6 100644 --- a/integration-tests/load/ocr/ocr_test.go +++ b/integration-tests/load/ocr/ocr_test.go @@ -5,14 +5,13 @@ import ( "github.com/stretchr/testify/require" - "github.com/smartcontractkit/seth" "github.com/smartcontractkit/wasp" "github.com/smartcontractkit/chainlink-testing-framework/logging" + actions_seth "github.com/smartcontractkit/chainlink/integration-tests/actions/seth" "github.com/smartcontractkit/chainlink/integration-tests/k8s" tc "github.com/smartcontractkit/chainlink/integration-tests/testconfig" - "github.com/smartcontractkit/chainlink/integration-tests/utils" ) var ( @@ -31,13 +30,7 @@ func TestOCRLoad(t *testing.T) { evmNetwork, msClient, bootstrapNode, workerNodes, err := k8s.ConnectRemote() require.NoError(t, err) - readSethCfg := config.GetSethConfig() - require.NotNil(t, readSethCfg, "Seth config shouldn't be nil") - - sethCfg, err := utils.MergeSethAndEvmNetworkConfigs(*evmNetwork, *readSethCfg) - require.NoError(t, err, "Error merging seth and evm network configs") - - seth, err := seth.NewClientWithConfig(&sethCfg) + seth, err := actions_seth.GetChainClient(config, *evmNetwork) require.NoError(t, err, "Error creating seth client") lta, err := SetupCluster(l, seth, workerNodes) @@ -73,13 +66,7 @@ func TestOCRVolume(t *testing.T) { evmNetwork, msClient, bootstrapNode, workerNodes, err := k8s.ConnectRemote() require.NoError(t, err) - readSethCfg := config.GetSethConfig() - require.NotNil(t, readSethCfg, "Seth config shouldn't be nil") - - sethCfg, err := utils.MergeSethAndEvmNetworkConfigs(*evmNetwork, *readSethCfg) - require.NoError(t, err, "Error merging seth and evm network configs") - - seth, err := seth.NewClientWithConfig(&sethCfg) + seth, err := actions_seth.GetChainClient(config, *evmNetwork) require.NoError(t, err, "Error 
creating seth client") lta, err := SetupCluster(l, seth, workerNodes) diff --git a/integration-tests/smoke/automation_test.go b/integration-tests/smoke/automation_test.go index ee9541926df..1458c2607f5 100644 --- a/integration-tests/smoke/automation_test.go +++ b/integration-tests/smoke/automation_test.go @@ -1,6 +1,7 @@ package smoke import ( + "context" "encoding/json" "fmt" "math/big" @@ -38,6 +39,8 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/gasprice" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams" + + actions_seth "github.com/smartcontractkit/chainlink/integration-tests/actions/seth" ) const ( @@ -78,14 +81,10 @@ func TestMain(m *testing.M) { } func TestAutomationBasic(t *testing.T) { - config, err := tc.GetConfig("Smoke", tc.Automation) - if err != nil { - t.Fatal(err) - } - SetupAutomationBasic(t, false, &config) + SetupAutomationBasic(t, false) } -func SetupAutomationBasic(t *testing.T, nodeUpgrade bool, automationTestConfig types.AutomationTestConfig) { +func SetupAutomationBasic(t *testing.T, nodeUpgrade bool) { t.Parallel() registryVersions := map[string]ethereum.KeeperRegistryVersion{ @@ -106,11 +105,12 @@ func SetupAutomationBasic(t *testing.T, nodeUpgrade bool, automationTestConfig t name := n registryVersion := rv t.Run(name, func(t *testing.T) { - cfg := tc.MustCopy(automationTestConfig) t.Parallel() l := logging.GetTestLogger(t) - var err error + cfg, err := tc.GetConfig("Smoke", tc.Automation) + require.NoError(t, err, "Failed to get config") + if nodeUpgrade { if cfg.GetChainlinkUpgradeImageConfig() == nil { t.Fatal("[ChainlinkUpgradeImage] must be set in TOML config to upgrade nodes") @@ -124,16 +124,18 @@ func SetupAutomationBasic(t *testing.T, nodeUpgrade bool, automationTestConfig t isMercury := isMercuryV02 || 
isMercuryV03 a := setupAutomationTestDocker( - t, registryVersion, automationDefaultRegistryConfig(automationTestConfig), isMercuryV02, isMercuryV03, automationTestConfig, + t, registryVersion, automationDefaultRegistryConfig(cfg), isMercuryV02, isMercuryV03, &cfg, ) - consumers, upkeepIDs := actions.DeployConsumers( + sb, err := a.ChainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get start block") + + consumers, upkeepIDs := actions_seth.DeployConsumers( t, + a.ChainClient, a.Registry, a.Registrar, a.LinkToken, - a.Deployer, - a.ChainClient, defaultAmountOfUpkeeps, big.NewInt(automationDefaultLinkFunds), automationDefaultUpkeepGasLimit, @@ -141,15 +143,10 @@ func SetupAutomationBasic(t *testing.T, nodeUpgrade bool, automationTestConfig t isMercury, ) - for i := 0; i < len(upkeepIDs); i++ { - if isLogTrigger || isMercuryV02 { - if err := consumers[i].Start(); err != nil { - l.Error().Msg("Error when starting consumer") - return - } - } - - if isMercury { + // Do it in two separate loops, so we don't end up setting up one upkeep, but starting the consumer for another one + // since we cannot be sure that consumers and upkeeps at the same index are related + if isMercury { + for i := 0; i < len(upkeepIDs); i++ { // Set privilege config to enable mercury privilegeConfigBytes, _ := json.Marshal(streams.UpkeepPrivilegeConfig{ MercuryEnabled: true, @@ -158,13 +155,30 @@ func SetupAutomationBasic(t *testing.T, nodeUpgrade bool, automationTestConfig t l.Error().Msg("Error when setting upkeep privilege config") return } + l.Info().Int("Upkeep index", i).Msg("Upkeep privilege config set") + } + } + + if isLogTrigger || isMercuryV02 { + for i := 0; i < len(upkeepIDs); i++ { + if err := consumers[i].Start(); err != nil { + l.Error().Msg("Error when starting consumer") + return + } + l.Info().Int("Consumer index", i).Msg("Consumer started") } } - l.Info().Msg("Waiting for all upkeeps to be performed") + l.Info().Msg("Waiting 5m for all 
upkeeps to be performed") gom := gomega.NewGomegaWithT(t) startTime := time.Now() + + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, a.Logger, a.ChainClient, sb, a.Registry, registryVersion)() + }) + // TODO Tune this timeout window after stress testing + l.Info().Msg("Waiting 10m for all upkeeps to perform at least 1 upkeep") gom.Eventually(func(g gomega.Gomega) { // Check if the upkeeps are performing multiple times by analyzing their counters for i := 0; i < len(upkeepIDs); i++ { @@ -207,9 +221,6 @@ func SetupAutomationBasic(t *testing.T, nodeUpgrade bool, automationTestConfig t require.NoError(t, err, "Could not cancel upkeep at index %d", i) } - err = a.ChainClient.WaitForEvents() - require.NoError(t, err, "Error encountered when waiting for upkeeps to be cancelled") - var countersAfterCancellation = make([]*big.Int, len(upkeepIDs)) for i := 0; i < len(upkeepIDs); i++ { @@ -249,21 +260,21 @@ func TestSetUpkeepTriggerConfig(t *testing.T) { t.Run(name, func(t *testing.T) { t.Parallel() config, err := tc.GetConfig("Smoke", tc.Automation) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Failed to get config") a := setupAutomationTestDocker( t, registryVersion, automationDefaultRegistryConfig(config), false, false, &config, ) - consumers, upkeepIDs := actions.DeployConsumers( + sb, err := a.ChainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get start block") + + consumers, upkeepIDs := actions_seth.DeployConsumers( t, + a.ChainClient, a.Registry, a.Registrar, a.LinkToken, - a.Deployer, - a.ChainClient, defaultAmountOfUpkeeps, big.NewInt(automationDefaultLinkFunds), automationDefaultUpkeepGasLimit, @@ -279,6 +290,10 @@ func TestSetUpkeepTriggerConfig(t *testing.T) { } } + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, a.Logger, a.ChainClient, sb, a.Registry, registryVersion)() + }) + l.Info().Msg("Waiting for all upkeeps to perform") gom := gomega.NewGomegaWithT(t) 
gom.Eventually(func(g gomega.Gomega) { @@ -332,9 +347,6 @@ func TestSetUpkeepTriggerConfig(t *testing.T) { require.NoError(t, err, "Could not set upkeep trigger config at index %d", i) } - err = a.ChainClient.WaitForEvents() - require.NoError(t, err, "Error encountered when waiting for setting trigger config for upkeeps") - var countersAfterSetNoMatch = make([]*big.Int, len(upkeepIDs)) // Wait for 10 seconds to let in-flight upkeeps finish @@ -380,9 +392,6 @@ func TestSetUpkeepTriggerConfig(t *testing.T) { require.NoError(t, err, "Could not set upkeep trigger config at index %d", i) } - err = a.ChainClient.WaitForEvents() - require.NoError(t, err, "Error encountered when waiting for setting trigger config for upkeeps") - var countersAfterSetMatch = make([]*big.Int, len(upkeepIDs)) for i := 0; i < len(upkeepIDs); i++ { @@ -431,21 +440,22 @@ func TestAutomationAddFunds(t *testing.T) { registryVersion := rv t.Run(name, func(t *testing.T) { t.Parallel() + l := logging.GetTestLogger(t) config, err := tc.GetConfig("Smoke", tc.Automation) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Failed to get config") a := setupAutomationTestDocker( t, registryVersion, automationDefaultRegistryConfig(config), false, false, &config, ) - consumers, upkeepIDs := actions.DeployConsumers( + sb, err := a.ChainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get start block") + + consumers, upkeepIDs := actions_seth.DeployConsumers( t, + a.ChainClient, a.Registry, a.Registrar, a.LinkToken, - a.Deployer, - a.ChainClient, defaultAmountOfUpkeeps, big.NewInt(1), automationDefaultUpkeepGasLimit, @@ -453,34 +463,43 @@ func TestAutomationAddFunds(t *testing.T) { false, ) + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, a.Logger, a.ChainClient, sb, a.Registry, registryVersion)() + }) + + l.Info().Msg("Making sure for 2m no upkeeps are performed") gom := gomega.NewGomegaWithT(t) // Since the upkeep is currently underfunded, 
check that it doesn't get executed gom.Consistently(func(g gomega.Gomega) { - counter, err := consumers[0].Counter(testcontext.Get(t)) - g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") - g.Expect(counter.Int64()).Should(gomega.Equal(int64(0)), - "Expected consumer counter to remain zero, but got %d", counter.Int64()) + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + g.Expect(counter.Int64()).Should(gomega.Equal(int64(0)), + "Expected consumer counter to remain zero, but got %d", counter.Int64()) + } }, "2m", "1s").Should(gomega.Succeed()) // ~1m for setup, 1m assertion // Grant permission to the registry to fund the upkeep - err = a.LinkToken.Approve(a.Registry.Address(), big.NewInt(9e18)) + err = a.LinkToken.Approve(a.Registry.Address(), big.NewInt(0).Mul(big.NewInt(9e18), big.NewInt(int64(len(upkeepIDs))))) require.NoError(t, err, "Could not approve permissions for the registry on the link token contract") - err = a.ChainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for events") - // Add funds to the upkeep whose ID we know from above - err = a.Registry.AddUpkeepFunds(upkeepIDs[0], big.NewInt(9e18)) - require.NoError(t, err, "Unable to add upkeep") - err = a.ChainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for events") + l.Info().Msg("Adding funds to the upkeeps") + for i := 0; i < len(upkeepIDs); i++ { + // Add funds to the upkeep whose ID we know from above + err = a.Registry.AddUpkeepFunds(upkeepIDs[i], big.NewInt(9e18)) + require.NoError(t, err, "Unable to add upkeep") + } + l.Info().Msg("Waiting for 2m for all contracts to perform at least one upkeep") // Now the new upkeep should be performing because we added enough funds gom.Eventually(func(g gomega.Gomega) { - counter, err := consumers[0].Counter(testcontext.Get(t)) - 
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") - g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), - "Expected newly registered upkeep's counter to be greater than 0, but got %d", counter.Int64()) - }, "2m", "1s").Should(gomega.Succeed()) // ~1m for perform, 1m buffer + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[0].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), + "Expected consumer counter to be greater than 0, but got %d", counter.Int64()) + } + }, "2m", "1s").Should(gomega.Succeed()) // ~1m for setup, 1m assertion }) } } @@ -500,20 +519,21 @@ func TestAutomationPauseUnPause(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) config, err := tc.GetConfig("Smoke", tc.Automation) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Failed to get config") + a := setupAutomationTestDocker( t, registryVersion, automationDefaultRegistryConfig(config), false, false, &config, ) - consumers, upkeepIDs := actions.DeployConsumers( + sb, err := a.ChainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get start block") + + consumers, upkeepIDs := actions_seth.DeployConsumers( t, + a.ChainClient, a.Registry, a.Registrar, a.LinkToken, - a.Deployer, - a.ChainClient, defaultAmountOfUpkeeps, big.NewInt(automationDefaultLinkFunds), automationDefaultUpkeepGasLimit, @@ -521,6 +541,10 @@ func TestAutomationPauseUnPause(t *testing.T) { false, ) + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, a.Logger, a.ChainClient, sb, a.Registry, registryVersion)() + }) + gom := gomega.NewGomegaWithT(t) gom.Eventually(func(g gomega.Gomega) { // Check if the upkeeps are performing multiple times by analyzing their counters and checking they are greater than 5 @@ -539,9 +563,6 @@ func 
TestAutomationPauseUnPause(t *testing.T) { require.NoError(t, err, "Could not pause upkeep at index %d", i) } - err = a.ChainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for upkeeps to be paused") - var countersAfterPause = make([]*big.Int, len(upkeepIDs)) for i := 0; i < len(upkeepIDs); i++ { // Obtain the amount of times the upkeep has been executed so far @@ -568,9 +589,6 @@ func TestAutomationPauseUnPause(t *testing.T) { require.NoError(t, err, "Could not unpause upkeep at index %d", i) } - err = a.ChainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for upkeeps to be unpaused") - gom.Eventually(func(g gomega.Gomega) { // Check if the upkeeps are performing multiple times by analysing their counters and checking they are greater than 5 + numbers of performing before pause for i := 0; i < len(upkeepIDs); i++ { @@ -600,20 +618,21 @@ func TestAutomationRegisterUpkeep(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) config, err := tc.GetConfig("Smoke", tc.Automation) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Failed to get config") + a := setupAutomationTestDocker( t, registryVersion, automationDefaultRegistryConfig(config), false, false, &config, ) - consumers, upkeepIDs := actions.DeployConsumers( + sb, err := a.ChainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get start block") + + consumers, upkeepIDs := actions_seth.DeployConsumers( t, + a.ChainClient, a.Registry, a.Registrar, a.LinkToken, - a.Deployer, - a.ChainClient, defaultAmountOfUpkeeps, big.NewInt(automationDefaultLinkFunds), automationDefaultUpkeepGasLimit, @@ -621,6 +640,10 @@ func TestAutomationRegisterUpkeep(t *testing.T) { false, ) + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, a.Logger, a.ChainClient, sb, a.Registry, registryVersion)() + }) + var initialCounters = make([]*big.Int, len(upkeepIDs)) gom := gomega.NewGomegaWithT(t) // Observe that the upkeeps which are 
initially registered are performing and @@ -639,7 +662,7 @@ func TestAutomationRegisterUpkeep(t *testing.T) { } }, "4m", "1s").Should(gomega.Succeed()) // ~1m for cluster setup, ~1m for performing each upkeep once, ~2m buffer - newConsumers, _ := actions.RegisterNewUpkeeps(t, a.Deployer, a.ChainClient, a.LinkToken, + newConsumers, _ := actions_seth.RegisterNewUpkeeps(t, a.ChainClient, a.LinkToken, a.Registry, a.Registrar, automationDefaultUpkeepGasLimit, 1) // We know that newConsumers has size 1, so we can just use the newly registered upkeep. @@ -687,29 +710,35 @@ func TestAutomationPauseRegistry(t *testing.T) { registryVersion := rv t.Run(name, func(t *testing.T) { t.Parallel() + config, err := tc.GetConfig("Smoke", tc.Automation) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Failed to get config") + a := setupAutomationTestDocker( t, registryVersion, automationDefaultRegistryConfig(config), false, false, &config, ) - consumers, upkeepIDs := actions.DeployConsumers( + sb, err := a.ChainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get start block") + + consumers, upkeepIDs := actions_seth.DeployConsumers( t, + a.ChainClient, a.Registry, a.Registrar, a.LinkToken, - a.Deployer, - a.ChainClient, defaultAmountOfUpkeeps, big.NewInt(automationDefaultLinkFunds), automationDefaultUpkeepGasLimit, false, false, ) - gom := gomega.NewGomegaWithT(t) + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, a.Logger, a.ChainClient, sb, a.Registry, registryVersion)() + }) + + gom := gomega.NewGomegaWithT(t) // Observe that the upkeeps which are initially registered are performing gom.Eventually(func(g gomega.Gomega) { for i := 0; i < len(upkeepIDs); i++ { @@ -723,8 +752,6 @@ func TestAutomationPauseRegistry(t *testing.T) { // Pause the registry err = a.Registry.Pause() require.NoError(t, err, "Error pausing registry") - err = a.ChainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for registry to 
pause") // Store how many times each upkeep performed once the registry was successfully paused var countersAfterPause = make([]*big.Int, len(upkeepIDs)) @@ -763,26 +790,32 @@ func TestAutomationKeeperNodesDown(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) config, err := tc.GetConfig("Smoke", tc.Automation) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Failed to get config") + a := setupAutomationTestDocker( t, registryVersion, automationDefaultRegistryConfig(config), false, false, &config, ) - consumers, upkeepIDs := actions.DeployConsumers( + sb, err := a.ChainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get start block") + + consumers, upkeepIDs := actions_seth.DeployConsumers( t, + a.ChainClient, a.Registry, a.Registrar, a.LinkToken, - a.Deployer, - a.ChainClient, defaultAmountOfUpkeeps, big.NewInt(automationDefaultLinkFunds), automationDefaultUpkeepGasLimit, false, false, ) + + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, a.Logger, a.ChainClient, sb, a.Registry, registryVersion)() + }) + gom := gomega.NewGomegaWithT(t) nodesWithoutBootstrap := a.ChainlinkNodes[1:] @@ -803,8 +836,6 @@ func TestAutomationKeeperNodesDown(t *testing.T) { // Take down 1 node. Currently, using 4 nodes so f=1 and is the max nodes that can go down. 
err = nodesWithoutBootstrap[0].MustDeleteJob("1") require.NoError(t, err, "Error deleting job from Chainlink node") - err = a.ChainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for blockchain events") l.Info().Msg("Successfully managed to take down the first half of the nodes") @@ -825,8 +856,6 @@ func TestAutomationKeeperNodesDown(t *testing.T) { for _, nodeToTakeDown := range restOfNodesDown { err = nodeToTakeDown.MustDeleteJob("1") require.NoError(t, err, "Error deleting job from Chainlink node") - err = a.ChainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for blockchain events") } l.Info().Msg("Successfully managed to take down the second half of the nodes") @@ -867,20 +896,21 @@ func TestAutomationPerformSimulation(t *testing.T) { t.Run(name, func(t *testing.T) { t.Parallel() config, err := tc.GetConfig("Smoke", tc.Automation) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Failed to get config") + a := setupAutomationTestDocker( t, registryVersion, automationDefaultRegistryConfig(config), false, false, &config, ) - consumersPerformance, _ := actions.DeployPerformanceConsumers( + sb, err := a.ChainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get start block") + + consumersPerformance, _ := actions_seth.DeployPerformanceConsumers( t, + a.ChainClient, a.Registry, a.Registrar, a.LinkToken, - a.Deployer, - a.ChainClient, defaultAmountOfUpkeeps, big.NewInt(automationDefaultLinkFunds), automationDefaultUpkeepGasLimit, @@ -889,8 +919,12 @@ func TestAutomationPerformSimulation(t *testing.T) { 100000, // How much gas should be burned on checkUpkeep() calls 4000000, // How much gas should be burned on performUpkeep() calls. 
Initially set higher than defaultUpkeepGasLimit ) - gom := gomega.NewGomegaWithT(t) + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, a.Logger, a.ChainClient, sb, a.Registry, registryVersion)() + }) + + gom := gomega.NewGomegaWithT(t) consumerPerformance := consumersPerformance[0] // Initially performGas is set high, so performUpkeep reverts and no upkeep should be performed @@ -906,8 +940,6 @@ func TestAutomationPerformSimulation(t *testing.T) { // Set performGas on consumer to be low, so that performUpkeep starts becoming successful err = consumerPerformance.SetPerformGasToBurn(testcontext.Get(t), big.NewInt(100000)) require.NoError(t, err, "Perform gas should be set successfully on consumer") - err = a.ChainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for set perform gas tx") // Upkeep should now start performing gom.Eventually(func(g gomega.Gomega) { @@ -936,21 +968,21 @@ func TestAutomationCheckPerformGasLimit(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) config, err := tc.GetConfig("Smoke", tc.Automation) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Failed to get config") a := setupAutomationTestDocker( t, registryVersion, automationDefaultRegistryConfig(config), false, false, &config, ) - consumersPerformance, upkeepIDs := actions.DeployPerformanceConsumers( + sb, err := a.ChainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get start block") + + consumersPerformance, upkeepIDs := actions_seth.DeployPerformanceConsumers( t, + a.ChainClient, a.Registry, a.Registrar, a.LinkToken, - a.Deployer, - a.ChainClient, - defaultAmountOfUpkeeps, + 1, // It was impossible to investigate, why with multiple outputs it fails ONLY in CI and only for 2.1 and 2.2 versions big.NewInt(automationDefaultLinkFunds), automationDefaultUpkeepGasLimit, 10000, // How many blocks this upkeep will be eligible from first upkeep block @@ -958,62 +990,91 @@ func 
TestAutomationCheckPerformGasLimit(t *testing.T) { 100000, // How much gas should be burned on checkUpkeep() calls 4000000, // How much gas should be burned on performUpkeep() calls. Initially set higher than defaultUpkeepGasLimit ) - gom := gomega.NewGomegaWithT(t) + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, a.Logger, a.ChainClient, sb, a.Registry, registryVersion)() + }) + + gom := gomega.NewGomegaWithT(t) nodesWithoutBootstrap := a.ChainlinkNodes[1:] - consumerPerformance := consumersPerformance[0] - upkeepID := upkeepIDs[0] // Initially performGas is set higher than defaultUpkeepGasLimit, so no upkeep should be performed + l.Info().Msg("Making sure for 2m no upkeeps are performed") gom.Consistently(func(g gomega.Gomega) { - cnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) - g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") - g.Expect(cnt.Int64()).Should( - gomega.Equal(int64(0)), - "Expected consumer counter to remain constant at %d, but got %d", 0, cnt.Int64(), - ) + for i := 0; i < len(upkeepIDs); i++ { + cnt, err := consumersPerformance[i].GetUpkeepCount(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + g.Expect(cnt.Int64()).Should( + gomega.Equal(int64(0)), + "Expected consumer counter to remain constant at %d, but got %d", 0, cnt.Int64(), + ) + } }, "2m", "1s").Should(gomega.Succeed()) // ~1m for setup, 1m assertion // Increase gas limit for the upkeep, higher than the performGasBurn - err = a.Registry.SetUpkeepGasLimit(upkeepID, uint32(4500000)) - require.NoError(t, err, "Error setting upkeep gas limit") - err = a.ChainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for SetUpkeepGasLimit tx") + l.Info().Msg("Increasing gas limit for upkeeps") + for i := 0; i < len(upkeepIDs); i++ { + err = a.Registry.SetUpkeepGasLimit(upkeepIDs[i], uint32(4500000)) + require.NoError(t, err, "Error setting 
upkeep gas limit") + } // Upkeep should now start performing + l.Info().Msg("Waiting for 4m for all contracts to perform at least one upkeep after gas limit increase") gom.Eventually(func(g gomega.Gomega) { - cnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) - g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") - g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", int64(0)), - "Expected consumer counter to be greater than 0, but got %d", cnt.Int64(), - ) - }, "2m", "1s").Should(gomega.Succeed()) // ~1m to perform once, 1m buffer + for i := 0; i < len(upkeepIDs); i++ { + cnt, err := consumersPerformance[i].GetUpkeepCount(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + l.Info().Int("Upkeep index", i).Int64("Upkeep counter", cnt.Int64()).Msg("Number of upkeeps performed") + g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", int64(0)), + "Expected consumer counter to be greater than 0, but got %d", cnt.Int64(), + ) + } + }, "4m", "1s").Should(gomega.Succeed()) // ~1m to perform once, 1m buffer // Now increase the checkGasBurn on consumer, upkeep should stop performing - err = consumerPerformance.SetCheckGasToBurn(testcontext.Get(t), big.NewInt(3000000)) - require.NoError(t, err, "Check gas burn should be set successfully on consumer") - err = a.ChainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for SetCheckGasToBurn tx") + l.Info().Msg("Increasing check gas to burn for upkeeps") + for i := 0; i < len(upkeepIDs); i++ { + err = consumersPerformance[i].SetCheckGasToBurn(testcontext.Get(t), big.NewInt(3000000)) + require.NoError(t, err, "Check gas burn should be set successfully on consumer") + } + + countPerID := make(map[*big.Int]*big.Int) // Get existing performed count - existingCnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) - require.NoError(t, err, "Calling consumer's counter shouldn't fail") - 
l.Info().Int64("Upkeep counter", existingCnt.Int64()).Msg("Upkeep counter when check gas increased") + l.Info().Msg("Getting existing performed count") + for i := 0; i < len(upkeepIDs); i++ { + existingCnt, err := consumersPerformance[i].GetUpkeepCount(testcontext.Get(t)) + require.NoError(t, err, "Calling consumer's counter shouldn't fail") + l.Info(). + Str("UpkeepID", upkeepIDs[i].String()). + Int64("Upkeep counter", existingCnt.Int64()). + Msg("Upkeep counter when check gas increased") + countPerID[upkeepIDs[i]] = existingCnt + } // In most cases count should remain constant, but it might increase by upto 1 due to pending perform + l.Info().Msg("Waiting for 1m for all contracts to make sure they perform at maximum 1 upkeep") gom.Consistently(func(g gomega.Gomega) { - cnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) - g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") - g.Expect(cnt.Int64()).Should( - gomega.BeNumerically("<=", existingCnt.Int64()+1), - "Expected consumer counter to remain less than equal %d, but got %d", existingCnt.Int64()+1, cnt.Int64(), - ) + for i := 0; i < len(upkeepIDs); i++ { + cnt, err := consumersPerformance[i].GetUpkeepCount(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + existingCnt := countPerID[upkeepIDs[i]] + g.Expect(cnt.Int64()).Should( + gomega.BeNumerically("<=", existingCnt.Int64()+1), + "Expected consumer counter to remain less than equal %d, but got %d", existingCnt.Int64()+1, cnt.Int64(), + ) + } }, "1m", "1s").Should(gomega.Succeed()) - existingCnt, err = consumerPerformance.GetUpkeepCount(testcontext.Get(t)) - require.NoError(t, err, "Calling consumer's counter shouldn't fail") - existingCntInt := existingCnt.Int64() - l.Info().Int64("Upkeep counter", existingCntInt).Msg("Upkeep counter when consistently block finished") + l.Info().Msg("Getting existing performed count") + for i := 0; i < 
len(upkeepIDs); i++ { + existingCnt, err := consumersPerformance[i].GetUpkeepCount(testcontext.Get(t)) + require.NoError(t, err, "Calling consumer's counter shouldn't fail") + l.Info(). + Str("UpkeepID", upkeepIDs[i].String()). + Int64("Upkeep counter", existingCnt.Int64()).Msg("Upkeep counter when consistently block finished") + countPerID[upkeepIDs[i]] = existingCnt + } // Now increase checkGasLimit on registry highCheckGasLimit := automationDefaultRegistryConfig(config) @@ -1029,16 +1090,18 @@ func TestAutomationCheckPerformGasLimit(t *testing.T) { err = a.Registry.SetConfigTypeSafe(ocrConfig) } require.NoError(t, err, "Registry config should be set successfully!") - err = a.ChainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for set config tx") + l.Info().Msg("Waiting for 3m for all contracts to make sure they perform at maximum 1 upkeep after check gas limit increase") // Upkeep should start performing again, and it should get regularly performed gom.Eventually(func(g gomega.Gomega) { - cnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) - g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's Counter shouldn't fail") - g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", existingCntInt), - "Expected consumer counter to be greater than %d, but got %d", existingCntInt, cnt.Int64(), - ) + for i := 0; i < len(upkeepIDs); i++ { + cnt, err := consumersPerformance[i].GetUpkeepCount(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's Counter shouldn't fail") + existingCnt := countPerID[upkeepIDs[i]] + g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", existingCnt.Int64()), + "Expected consumer counter to be greater than %d, but got %d", existingCnt.Int64(), cnt.Int64(), + ) + } }, "3m", "1s").Should(gomega.Succeed()) // ~1m to setup cluster, 1m to perform once, 1m buffer }) } @@ -1059,28 +1122,32 @@ func TestUpdateCheckData(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) 
config, err := tc.GetConfig("Smoke", tc.Automation) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Failed to get config") a := setupAutomationTestDocker( t, registryVersion, automationDefaultRegistryConfig(config), false, false, &config, ) - performDataChecker, upkeepIDs := actions.DeployPerformDataCheckerConsumers( + sb, err := a.ChainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get start block") + + performDataChecker, upkeepIDs := actions_seth.DeployPerformDataCheckerConsumers( t, + a.ChainClient, a.Registry, a.Registrar, a.LinkToken, - a.Deployer, - a.ChainClient, defaultAmountOfUpkeeps, big.NewInt(automationDefaultLinkFunds), automationDefaultUpkeepGasLimit, []byte(automationExpectedData), ) - gom := gomega.NewGomegaWithT(t) + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, a.Logger, a.ChainClient, sb, a.Registry, registryVersion)() + }) + + gom := gomega.NewGomegaWithT(t) gom.Consistently(func(g gomega.Gomega) { // expect the counter to remain 0 because perform data does not match for i := 0; i < len(upkeepIDs); i++ { @@ -1098,9 +1165,6 @@ func TestUpdateCheckData(t *testing.T) { require.NoError(t, err, "Could not update check data for upkeep at index %d", i) } - err = a.ChainClient.WaitForEvents() - require.NoError(t, err, "Error while waiting for check data update") - // retrieve new check data for all upkeeps for i := 0; i < len(upkeepIDs); i++ { upkeep, err := a.Registry.GetUpkeepInfo(testcontext.Get(t), upkeepIDs[i]) @@ -1145,21 +1209,27 @@ func TestSetOffchainConfigWithMaxGasPrice(t *testing.T) { t, registryVersion, automationDefaultRegistryConfig(config), false, false, &config, ) - consumers, upkeepIDs := actions.DeployConsumers( + sb, err := a.ChainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get start block") + + consumers, upkeepIDs := actions_seth.DeployConsumers( t, + a.ChainClient, a.Registry, a.Registrar, a.LinkToken, - a.Deployer, - 
a.ChainClient, defaultAmountOfUpkeeps, big.NewInt(automationDefaultLinkFunds), automationDefaultUpkeepGasLimit, false, false, ) - gom := gomega.NewGomegaWithT(t) + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, a.Logger, a.ChainClient, sb, a.Registry, registryVersion)() + }) + + gom := gomega.NewGomegaWithT(t) l.Info().Msg("waiting for all upkeeps to be performed at least once") gom.Eventually(func(g gomega.Gomega) { for i := 0; i < len(upkeepIDs); i++ { @@ -1168,7 +1238,7 @@ func TestSetOffchainConfigWithMaxGasPrice(t *testing.T) { g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), "Expected consumer counter to be greater than 0, but got %d") } - }, "3m", "1s").Should(gomega.Succeed()) // ~1m for cluster setup, ~1m for performing each upkeep once, ~2m buffer + }, "2m", "5s").Should(gomega.Succeed()) // ~1m for cluster setup, ~1m for performing each upkeep once, ~2m buffer // set the maxGasPrice to 1 wei uoc, _ := cbor.Marshal(gasprice.UpkeepOffchainConfig{MaxGasPrice: big.NewInt(1)}) @@ -1176,8 +1246,6 @@ func TestSetOffchainConfigWithMaxGasPrice(t *testing.T) { for _, uid := range upkeepIDs { err = a.Registry.SetUpkeepOffchainConfig(uid, uoc) require.NoError(t, err, "Error setting upkeep offchain config") - err = a.ChainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for events from setting upkeep offchain config") } // Store how many times each upkeep performed once their offchain config is set with maxGasPrice = 1 wei @@ -1185,11 +1253,11 @@ func TestSetOffchainConfigWithMaxGasPrice(t *testing.T) { for i := 0; i < len(upkeepIDs); i++ { countersAfterSettingLowMaxGasPrice[i], err = consumers[i].Counter(testcontext.Get(t)) require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) - l.Info().Int64("Upkeep Performed times", countersAfterSettingLowMaxGasPrice[i].Int64()).Int("Upkeep index", i).Msg("Number of upkeeps performed") + l.Info().Int64("Upkeep Performed times", 
countersAfterSettingLowMaxGasPrice[i].Int64()).Int("Upkeep index", i).Msg("Number of upkeeps performed after setting low max gas price") } var latestCounter *big.Int - // the counters of all the upkeeps should stay constant because they are no longer getting serviced + // the upkeepsPerformed of all the upkeeps should stay constant because they are no longer getting serviced gom.Consistently(func(g gomega.Gomega) { for i := 0; i < len(upkeepIDs); i++ { latestCounter, err = consumers[i].Counter(testcontext.Get(t)) @@ -1198,7 +1266,7 @@ func TestSetOffchainConfigWithMaxGasPrice(t *testing.T) { "Expected consumer counter to remain constant at %d, but got %d", countersAfterSettingLowMaxGasPrice[i].Int64(), latestCounter.Int64()) } - }, "2m", "1s").Should(gomega.Succeed()) + }, "2m", "5s").Should(gomega.Succeed()) l.Info().Msg("no upkeeps is performed because their max gas price is only 1 wei") // setting offchain config with a high max gas price for the first upkeep, it should perform again while @@ -1209,26 +1277,61 @@ func TestSetOffchainConfigWithMaxGasPrice(t *testing.T) { err = a.Registry.SetUpkeepOffchainConfig(upkeepIDs[0], uoc) require.NoError(t, err, "Error setting upkeep offchain config") - // the counters of all other upkeeps should stay constant because their max gas price remains very low + upkeepsPerformedBefore := make(map[int]int64) + upkeepsPerformedAfter := make(map[int]int64) + for i := 0; i < len(upkeepIDs); i++ { + latestCounter, err = consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) + upkeepsPerformedBefore[i] = latestCounter.Int64() + upkeepsPerformedAfter[i] = latestCounter.Int64() + + l.Info().Int64("No of Upkeep Performed", latestCounter.Int64()).Str("Consumer address", consumers[i].Address()).Msg("Number of upkeeps performed just after setting offchain config") + } + + // the upkeepsPerformed of all other upkeeps should stay constant because their max gas price 
remains very low. + // consumer at index N, might not be correlated with upkeep at index N, so instead of focusing on one of them + // we iterate over all of them and make sure that at most only one is performing upkeeps gom.Consistently(func(g gomega.Gomega) { - for i := 1; i < len(upkeepIDs); i++ { + activeConsumers := 0 + for i := 0; i < len(upkeepIDs); i++ { latestCounter, err = consumers[i].Counter(testcontext.Get(t)) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) - g.Expect(latestCounter.Int64()).Should(gomega.Equal(countersAfterSettingLowMaxGasPrice[i].Int64()), - "Expected consumer counter to remain constant at %d, but got %d", - countersAfterSettingLowMaxGasPrice[i].Int64(), latestCounter.Int64()) + if latestCounter.Int64() != upkeepsPerformedAfter[i] { + activeConsumers++ + } + upkeepsPerformedAfter[i] = latestCounter.Int64() } - }, "2m", "1s").Should(gomega.Succeed()) + // 0 is also okay, because it means that no upkeep was performed yet + g.Expect(activeConsumers).Should(gomega.BeNumerically("<=", 1), "Only one consumer should have been performing upkeeps, but %d did", activeConsumers) + }, "2m", "5s").Should(gomega.Succeed()) + + performingConsumerIndex := -1 + onlyOneConsumerPerformed := false + for i := 0; i < len(upkeepIDs); i++ { + if upkeepsPerformedAfter[i] > upkeepsPerformedBefore[i] { + onlyOneConsumerPerformed = true + performingConsumerIndex = i + break + } + } + + for i := 0; i < len(upkeepIDs); i++ { + l.Info().Int64("No of Upkeep Performed", latestCounter.Int64()).Str("Consumer address", consumers[i].Address()).Msg("Number of upkeeps performed after waiting for the results of offchain config change") + } + + require.True(t, onlyOneConsumerPerformed, "Only one consumer should have been performing upkeeps") + l.Info().Msg("all the rest upkeeps did not perform again because their max gas price remains 1 wei") + l.Info().Msg("making sure the consumer keeps performing upkeeps 
because its max gas price is 500 gwei") // the first upkeep should start performing again gom.Eventually(func(g gomega.Gomega) { - latestCounter, err = consumers[0].Counter(testcontext.Get(t)) - g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index 0") + latestCounter, err = consumers[performingConsumerIndex].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), fmt.Sprintf("Failed to retrieve consumer counter for upkeep at index %d", performingConsumerIndex)) g.Expect(latestCounter.Int64()).Should(gomega.BeNumerically(">", countersAfterSettingLowMaxGasPrice[0].Int64()), "Expected consumer counter to be greater than %d, but got %d", countersAfterSettingLowMaxGasPrice[0].Int64(), latestCounter.Int64()) - }, "2m", "1s").Should(gomega.Succeed()) // ~1m for cluster setup, ~1m for performing each upkeep once, ~2m buffer + }, "2m", "5s").Should(gomega.Succeed()) // ~1m for cluster setup, ~1m for performing each upkeep once, ~2m buffer l.Info().Int64("Upkeep Performed times", latestCounter.Int64()).Msg("the first upkeep performed again") }) } @@ -1278,9 +1381,9 @@ func setupAutomationTestDocker( WithMockAdapter(). WithFunding(big.NewFloat(*automationTestConfig.GetCommonConfig().ChainlinkNodeFunding)). WithStandardCleanup(). + WithSeth(). Build() require.NoError(t, err, "Error deploying test environment for Mercury") - env.ParallelTransactions(true) secretsConfig := ` [Mercury.Credentials.cred1] @@ -1320,17 +1423,17 @@ func setupAutomationTestDocker( WithCLNodeConfig(clNodeConfig). WithFunding(big.NewFloat(*automationTestConfig.GetCommonConfig().ChainlinkNodeFunding)). WithStandardCleanup(). + WithSeth(). 
Build() require.NoError(t, err, "Error deploying test environment") } - env.ParallelTransactions(true) nodeClients := env.ClCluster.NodeAPIs() - evmClient, err := env.GetEVMClient(network.ChainID) - require.NoError(t, err, "Error getting evm client") + sethClient, err := env.GetSethClient(network.ChainID) + require.NoError(t, err, "Error getting seth client") - a := automationv2.NewAutomationTestDocker(evmClient, env.ContractDeployer, nodeClients) + a := automationv2.NewAutomationTestDocker(l, sethClient, nodeClients) a.SetMercuryCredentialName("cred1") a.RegistrySettings = registryConfig a.RegistrarSettings = contracts.KeeperRegistrarSettings{ diff --git a/integration-tests/smoke/automation_upgrade_test.go b/integration-tests/smoke/automation_upgrade_test.go index 86617a50901..28285543621 100644 --- a/integration-tests/smoke/automation_upgrade_test.go +++ b/integration-tests/smoke/automation_upgrade_test.go @@ -2,14 +2,8 @@ package smoke import ( "testing" - - tc "github.com/smartcontractkit/chainlink/integration-tests/testconfig" ) func TestAutomationNodeUpgrade(t *testing.T) { - config, err := tc.GetConfig("Smoke", tc.Automation) - if err != nil { - t.Fatal(err, "Error getting config") - } - SetupAutomationBasic(t, true, &config) + SetupAutomationBasic(t, true) } diff --git a/integration-tests/smoke/forwarder_ocr_test.go b/integration-tests/smoke/forwarder_ocr_test.go index 1ff132f09ab..0446254362a 100644 --- a/integration-tests/smoke/forwarder_ocr_test.go +++ b/integration-tests/smoke/forwarder_ocr_test.go @@ -45,8 +45,6 @@ func TestForwarderOCRBasic(t *testing.T) { Build() require.NoError(t, err) - env.ParallelTransactions(true) - nodeClients := env.ClCluster.NodeAPIs() bootstrapNode, workerNodes := nodeClients[0], nodeClients[1:] @@ -68,6 +66,9 @@ func TestForwarderOCRBasic(t *testing.T) { operators, authorizedForwarders, _ := actions_seth.DeployForwarderContracts( t, sethClient, common.HexToAddress(lt.Address()), len(workerNodes), ) + + require.Equal(t, 
len(workerNodes), len(operators), "Number of operators should match number of worker nodes") + for i := range workerNodes { actions_seth.AcceptAuthorizedReceiversOperator( t, l, sethClient, operators[i], authorizedForwarders[i], []common.Address{workerNodeAddresses[i]}, diff --git a/integration-tests/smoke/forwarders_ocr2_test.go b/integration-tests/smoke/forwarders_ocr2_test.go index d3aa9e85ce6..9dd5d5c39a4 100644 --- a/integration-tests/smoke/forwarders_ocr2_test.go +++ b/integration-tests/smoke/forwarders_ocr2_test.go @@ -74,6 +74,8 @@ func TestForwarderOCR2Basic(t *testing.T) { t, sethClient, common.HexToAddress(lt.Address()), len(workerNodes), ) + require.Equal(t, len(workerNodes), len(operators), "Number of operators should match number of worker nodes") + for i := range workerNodes { actions_seth.AcceptAuthorizedReceiversOperator( t, l, sethClient, operators[i], authorizedForwarders[i], []common.Address{workerNodeAddresses[i]}, diff --git a/integration-tests/smoke/keeper_test.go b/integration-tests/smoke/keeper_test.go index fbfe4c73c89..00e45256cea 100644 --- a/integration-tests/smoke/keeper_test.go +++ b/integration-tests/smoke/keeper_test.go @@ -1,6 +1,7 @@ package smoke import ( + "context" "fmt" "math/big" "strconv" @@ -10,15 +11,16 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/onsi/gomega" "github.com/rs/zerolog" + "github.com/smartcontractkit/seth" "github.com/stretchr/testify/require" commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config" - "github.com/smartcontractkit/chainlink-testing-framework/blockchain" "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/networks" "github.com/smartcontractkit/chainlink-testing-framework/utils/testcontext" "github.com/smartcontractkit/chainlink/integration-tests/actions" + actions_seth "github.com/smartcontractkit/chainlink/integration-tests/actions/seth" 
"github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" @@ -31,7 +33,7 @@ import ( const ( keeperDefaultUpkeepGasLimit = uint32(2500000) keeperDefaultLinkFunds = int64(9e18) - keeperDefaultUpkeepsToDeploy = 10 + keeperDefaultUpkeepsToDeploy = 2 numUpkeepsAllowedForStragglingTxs = 6 keeperExpectedData = "abcdef" ) @@ -91,29 +93,32 @@ func TestKeeperBasicSmoke(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) config, err := tc.GetConfig("Smoke", tc.Keeper) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Failed to get config") + + chainClient, chainlinkNodes, linkToken, _ := setupKeeperTest(l, t, &config) - chainClient, chainlinkNodes, contractDeployer, linkToken, _ := setupKeeperTest(l, t, &config) - registry, _, consumers, upkeepIDs := actions.DeployKeeperContracts( + sb, err := chainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get start block") + + registry, _, consumers, upkeepIDs := actions_seth.DeployKeeperContracts( t, registryVersion, keeperDefaultRegistryConfig, keeperDefaultUpkeepsToDeploy, keeperDefaultUpkeepGasLimit, linkToken, - contractDeployer, chainClient, big.NewInt(keeperDefaultLinkFunds), ) - gom := gomega.NewGomegaWithT(t) - _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) - require.NoError(t, err, "Error creating keeper jobs") - err = chainClient.WaitForEvents() + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, l, chainClient, sb, registry, registryVersion)() + }) + + _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, fmt.Sprint(chainClient.ChainID)) require.NoError(t, err, "Error creating keeper jobs") + gom := gomega.NewGomegaWithT(t) gom.Eventually(func(g gomega.Gomega) error { // Check if the 
upkeeps are performing multiple times by analyzing their counters and checking they are greater than 10 for i := 0; i < len(upkeepIDs); i++ { @@ -132,9 +137,6 @@ func TestKeeperBasicSmoke(t *testing.T) { require.NoError(t, err, "Could not cancel upkeep at index %d", i) } - err = chainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for upkeeps to be cancelled") - var countersAfterCancellation = make([]*big.Int, len(upkeepIDs)) for i := 0; i < len(upkeepIDs); i++ { @@ -172,98 +174,133 @@ func TestKeeperBlockCountPerTurn(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) config, err := tc.GetConfig("Smoke", tc.Keeper) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Failed to get config") + + chainClient, chainlinkNodes, linkToken, _ := setupKeeperTest(l, t, &config) + + sb, err := chainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get start block") - chainClient, chainlinkNodes, contractDeployer, linkToken, _ := setupKeeperTest(l, t, &config) - registry, _, consumers, upkeepIDs := actions.DeployKeeperContracts( + registry, _, consumers, upkeepIDs := actions_seth.DeployKeeperContracts( t, registryVersion, highBCPTRegistryConfig, keeperDefaultUpkeepsToDeploy, keeperDefaultUpkeepGasLimit, linkToken, - contractDeployer, chainClient, big.NewInt(keeperDefaultLinkFunds), ) - gom := gomega.NewGomegaWithT(t) - _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) - require.NoError(t, err, "Error creating keeper jobs") - err = chainClient.WaitForEvents() + _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, fmt.Sprint(chainClient.ChainID)) require.NoError(t, err, "Error creating keeper jobs") - keepersPerformed := make([]string, 0) - upkeepID := upkeepIDs[0] + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, l, chainClient, sb, registry, registryVersion)() + }) + + 
keepersPerformedLowFreq := map[*big.Int][]string{} + + // gom := gomega.NewGomegaWithT(t) + // Wait for upkeep to be performed by two different keepers that alternate (buddies) + l.Info().Msg("Waiting for 2m for upkeeps to be performed by different keepers") + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + stop := time.After(2 * time.Minute) + + LOW_LOOP: + for { + select { + case <-ticker.C: + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Calling consumer's counter shouldn't fail") + l.Info().Str("UpkeepId", upkeepIDs[i].String()).Int64("Upkeep counter", counter.Int64()).Msg("Number of upkeeps performed") + + upkeepInfo, err := registry.GetUpkeepInfo(testcontext.Get(t), upkeepIDs[i]) + require.NoError(t, err, "Registry's getUpkeep shouldn't fail") + + latestKeeper := upkeepInfo.LastKeeper + if latestKeeper == actions.ZeroAddress.String() { + continue + } + + keepersPerformedLowFreq[upkeepIDs[i]] = append(keepersPerformedLowFreq[upkeepIDs[i]], latestKeeper) + } + case <-stop: + ticker.Stop() + break LOW_LOOP + } + } - // Wait for upkeep to be performed twice by different keepers (buddies) - gom.Eventually(func(g gomega.Gomega) error { - counter, err := consumers[0].Counter(testcontext.Get(t)) - g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") - l.Info().Int64("Upkeep counter", counter.Int64()).Msg("Number of upkeeps performed") + require.GreaterOrEqual(t, 2, len(keepersPerformedLowFreq), "At least 2 different keepers should have been performing upkeeps") - upkeepInfo, err := registry.GetUpkeepInfo(testcontext.Get(t), upkeepID) - g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Registry's getUpkeep shouldn't fail") + // Now set BCPT to be low, so keepers change turn frequently + err = registry.SetConfig(lowBCPTRegistryConfig, contracts.OCRv2Config{}) + require.NoError(t, err, "Error setting registry config") - latestKeeper := 
upkeepInfo.LastKeeper - l.Info().Str("keeper", latestKeeper).Msg("last keeper to perform upkeep") - g.Expect(latestKeeper).ShouldNot(gomega.Equal(actions.ZeroAddress.String()), "Last keeper should be non zero") - g.Expect(latestKeeper).ShouldNot(gomega.BeElementOf(keepersPerformed), "A new keeper node should perform this upkeep") + keepersPerformedHigherFreq := map[*big.Int][]string{} - l.Info().Str("keeper", latestKeeper).Msg("New keeper performed upkeep") - keepersPerformed = append(keepersPerformed, latestKeeper) - return nil - }, "1m", "1s").Should(gomega.Succeed()) + ticker = time.NewTicker(1 * time.Second) + defer ticker.Stop() - gom.Eventually(func(g gomega.Gomega) error { - upkeepInfo, err := registry.GetUpkeepInfo(testcontext.Get(t), upkeepID) - g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Registry's getUpkeep shouldn't fail") + stop = time.After(2 * time.Minute) - latestKeeper := upkeepInfo.LastKeeper - g.Expect(latestKeeper).ShouldNot(gomega.Equal(actions.ZeroAddress.String()), "Last keeper should be non zero") - g.Expect(latestKeeper).ShouldNot(gomega.BeElementOf(keepersPerformed), "A new keeper node should perform this upkeep") + HIGH_LOOP: + for { + select { + case <-ticker.C: + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Calling consumer's counter shouldn't fail") + l.Info().Str("UpkeepId", upkeepIDs[i].String()).Int64("Upkeep counter", counter.Int64()).Msg("Number of upkeeps performed") - l.Info().Str("Keeper", latestKeeper).Msg("New keeper performed upkeep") - keepersPerformed = append(keepersPerformed, latestKeeper) - return nil - }, "1m", "1s").Should(gomega.Succeed()) + upkeepInfo, err := registry.GetUpkeepInfo(testcontext.Get(t), upkeepIDs[i]) + require.NoError(t, err, "Registry's getUpkeep shouldn't fail") - // Expect no new keepers to perform for a while - gom.Consistently(func(g gomega.Gomega) { - upkeepInfo, err := registry.GetUpkeepInfo(testcontext.Get(t), 
upkeepID) - g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Registry's getUpkeep shouldn't fail") + latestKeeper := upkeepInfo.LastKeeper + if latestKeeper == actions.ZeroAddress.String() { + continue + } - latestKeeper := upkeepInfo.LastKeeper - g.Expect(latestKeeper).ShouldNot(gomega.Equal(actions.ZeroAddress.String()), "Last keeper should be non zero") - g.Expect(latestKeeper).Should(gomega.BeElementOf(keepersPerformed), "Existing keepers should alternate turns within BCPT") - }, "1m", "1s").Should(gomega.Succeed()) + keepersPerformedHigherFreq[upkeepIDs[i]] = append(keepersPerformedHigherFreq[upkeepIDs[i]], latestKeeper) + } + case <-stop: + ticker.Stop() + break HIGH_LOOP + } + } - // Now set BCPT to be low, so keepers change turn frequently - err = registry.SetConfig(lowBCPTRegistryConfig, contracts.OCRv2Config{}) - require.NoError(t, err, "Error setting registry config") - err = chainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for set config tx") + require.GreaterOrEqual(t, 3, len(keepersPerformedHigherFreq), "At least 3 different keepers should have been performing upkeeps after BCPT change") - // Expect a new keeper to perform - gom.Eventually(func(g gomega.Gomega) error { - counter, err := consumers[0].Counter(testcontext.Get(t)) - g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") - l.Info().Int64("Upkeep counter", counter.Int64()).Msg("Num upkeeps performed") + var countFreq = func(keepers []string, freqMap map[string]int) { + for _, keeper := range keepers { + freqMap[keeper]++ + } + } - upkeepInfo, err := registry.GetUpkeepInfo(testcontext.Get(t), upkeepID) - g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Registry's getUpkeep shouldn't fail") + for i := 0; i < len(upkeepIDs); i++ { + lowFreqMap := make(map[string]int) + highFreqMap := make(map[string]int) - latestKeeper := upkeepInfo.LastKeeper - l.Info().Str("keeper", latestKeeper).Msg("last keeper to perform upkeep") - 
g.Expect(latestKeeper).ShouldNot(gomega.Equal(actions.ZeroAddress.String()), "Last keeper should be non zero") - g.Expect(latestKeeper).ShouldNot(gomega.BeElementOf(keepersPerformed), "A new keeper node should perform this upkeep") + countFreq(keepersPerformedLowFreq[upkeepIDs[i]], lowFreqMap) + countFreq(keepersPerformedHigherFreq[upkeepIDs[i]], highFreqMap) - l.Info().Str("keeper", latestKeeper).Msg("New keeper performed upkeep") - keepersPerformed = append(keepersPerformed, latestKeeper) - return nil - }, "1m", "1s").Should(gomega.Succeed()) + require.Greater(t, len(highFreqMap), len(lowFreqMap), "High frequency map should have more keepers than low frequency map") + + l.Info().Interface("Low BCPT", lowFreqMap).Interface("High BCPT", highFreqMap).Str("UpkeepID", upkeepIDs[i].String()).Msg("Keeper frequency map") + + for lowKeeper, lowFreq := range lowFreqMap { + highFreq, ok := highFreqMap[lowKeeper] + // it might happen due to fluke that a keeper is not found in high frequency map + if !ok { + continue + } + // require.True(t, ok, "Keeper %s not found in high frequency map. 
This should not happen", lowKeeper) + require.GreaterOrEqual(t, lowFreq, highFreq, "Keeper %s should have performed less times with high BCPT than with low BCPT", lowKeeper) + } + } }) } } @@ -281,19 +318,20 @@ func TestKeeperSimulation(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) config, err := tc.GetConfig("Smoke", tc.Keeper) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Failed to get config") + + chainClient, chainlinkNodes, linkToken, _ := setupKeeperTest(l, t, &config) + + sb, err := chainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get start block") - chainClient, chainlinkNodes, contractDeployer, linkToken, _ := setupKeeperTest(l, t, &config) - registry, _, consumersPerformance, upkeepIDs := actions.DeployPerformanceKeeperContracts( + registry, _, consumersPerformance, upkeepIDs := actions_seth.DeployPerformanceKeeperContracts( t, + chainClient, registryVersion, keeperDefaultUpkeepsToDeploy, keeperDefaultUpkeepGasLimit, linkToken, - contractDeployer, - chainClient, &keeperDefaultRegistryConfig, big.NewInt(keeperDefaultLinkFunds), 10000, // How many blocks this upkeep will be eligible from first upkeep block @@ -301,16 +339,18 @@ func TestKeeperSimulation(t *testing.T) { 100000, // How much gas should be burned on checkUpkeep() calls 4000000, // How much gas should be burned on performUpkeep() calls. 
Initially set higher than defaultUpkeepGasLimit ) - gom := gomega.NewGomegaWithT(t) - _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) - require.NoError(t, err, "Error creating keeper jobs") - err = chainClient.WaitForEvents() + _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, fmt.Sprint(chainClient.ChainID)) require.NoError(t, err, "Error creating keeper jobs") + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, l, chainClient, sb, registry, registryVersion)() + }) + consumerPerformance := consumersPerformance[0] upkeepID := upkeepIDs[0] + gom := gomega.NewGomegaWithT(t) // Initially performGas is set high, so performUpkeep reverts and no upkeep should be performed gom.Consistently(func(g gomega.Gomega) { // Consumer count should remain at 0 @@ -330,8 +370,6 @@ func TestKeeperSimulation(t *testing.T) { // Set performGas on consumer to be low, so that performUpkeep starts becoming successful err = consumerPerformance.SetPerformGasToBurn(testcontext.Get(t), big.NewInt(100000)) require.NoError(t, err, "Error setting PerformGasToBurn") - err = chainClient.WaitForEvents() - require.NoError(t, err, "Error waiting to set PerformGasToBurn") // Upkeep should now start performing gom.Eventually(func(g gomega.Gomega) error { @@ -359,18 +397,20 @@ func TestKeeperCheckPerformGasLimit(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) config, err := tc.GetConfig("Smoke", tc.Keeper) - if err != nil { - t.Fatal(err) - } - chainClient, chainlinkNodes, contractDeployer, linkToken, _ := setupKeeperTest(l, t, &config) - registry, _, consumersPerformance, upkeepIDs := actions.DeployPerformanceKeeperContracts( + require.NoError(t, err, "Failed to get config") + + chainClient, chainlinkNodes, linkToken, _ := setupKeeperTest(l, t, &config) + + sb, err := chainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to 
get start block") + + registry, _, consumersPerformance, upkeepIDs := actions_seth.DeployPerformanceKeeperContracts( t, + chainClient, registryVersion, keeperDefaultUpkeepsToDeploy, keeperDefaultUpkeepGasLimit, linkToken, - contractDeployer, - chainClient, &keeperDefaultRegistryConfig, big.NewInt(keeperDefaultLinkFunds), 10000, // How many blocks this upkeep will be eligible from first upkeep block @@ -378,85 +418,103 @@ func TestKeeperCheckPerformGasLimit(t *testing.T) { 100000, // How much gas should be burned on checkUpkeep() calls 4000000, // How much gas should be burned on performUpkeep() calls. Initially set higher than defaultUpkeepGasLimit ) - gom := gomega.NewGomegaWithT(t) - _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) - require.NoError(t, err, "Error creating keeper jobs") - err = chainClient.WaitForEvents() + _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, fmt.Sprint(chainClient.ChainID)) require.NoError(t, err, "Error creating keeper jobs") - consumerPerformance := consumersPerformance[0] - upkeepID := upkeepIDs[0] + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, l, chainClient, sb, registry, registryVersion)() + }) + gom := gomega.NewGomegaWithT(t) // Initially performGas is set higher than defaultUpkeepGasLimit, so no upkeep should be performed + l.Info().Msg("Waiting for 1m for upkeeps to be performed") gom.Consistently(func(g gomega.Gomega) { - cnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) - g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") - g.Expect(cnt.Int64()).Should( - gomega.Equal(int64(0)), - "Expected consumer counter to remain constant at %d, but got %d", 0, cnt.Int64(), - ) + for i := 0; i < len(upkeepIDs); i++ { + cnt, err := consumersPerformance[i].GetUpkeepCount(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), 
"Calling consumer's counter shouldn't fail") + g.Expect(cnt.Int64()).Should( + gomega.Equal(int64(0)), + "Expected consumer counter to remain constant at %d, but got %d", 0, cnt.Int64(), + ) + } }, "1m", "1s").Should(gomega.Succeed()) // Increase gas limit for the upkeep, higher than the performGasBurn - err = registry.SetUpkeepGasLimit(upkeepID, uint32(4500000)) - require.NoError(t, err, "Error setting Upkeep gas limit") - err = chainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for SetUpkeepGasLimit tx") + l.Info().Msg("Setting upkeep gas limit higher than performGasBurn") + for i := 0; i < len(upkeepIDs); i++ { + err = registry.SetUpkeepGasLimit(upkeepIDs[i], uint32(4500000)) + require.NoError(t, err, "Error setting Upkeep gas limit") + } // Upkeep should now start performing - gom.Eventually(func(g gomega.Gomega) error { - cnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) - g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") - g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", int64(0)), - "Expected consumer counter to be greater than 0, but got %d", cnt.Int64(), - ) - return nil + l.Info().Msg("Waiting for 1m for upkeeps to be performed") + gom.Eventually(func(g gomega.Gomega) { + for i := 0; i < len(upkeepIDs); i++ { + cnt, err := consumersPerformance[i].GetUpkeepCount(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", int64(0)), + "Expected consumer counter to be greater than 0, but got %d", cnt.Int64(), + ) + } }, "1m", "1s").Should(gomega.Succeed()) // Now increase the checkGasBurn on consumer, upkeep should stop performing - err = consumerPerformance.SetCheckGasToBurn(testcontext.Get(t), big.NewInt(3000000)) - require.NoError(t, err, "Error setting CheckGasToBurn") - err = chainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for 
SetCheckGasToBurn tx") + l.Info().Msg("Setting checkGasBurn higher than performGasBurn") + for i := 0; i < len(upkeepIDs); i++ { + err = consumersPerformance[i].SetCheckGasToBurn(testcontext.Get(t), big.NewInt(3000000)) + require.NoError(t, err, "Error setting CheckGasToBurn") + } // Get existing performed count - existingCnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) - require.NoError(t, err, "Error calling consumer's counter") - l.Info().Int64("Upkeep counter", existingCnt.Int64()).Msg("Check Gas Increased") + existingCnts := make(map[*big.Int]*big.Int) + for i := 0; i < len(upkeepIDs); i++ { + existingCnt, err := consumersPerformance[i].GetUpkeepCount(testcontext.Get(t)) + existingCnts[upkeepIDs[i]] = existingCnt + require.NoError(t, err, "Error calling consumer's counter") + l.Info().Int64("Upkeep counter", existingCnt.Int64()).Msg("Check Gas Increased") + } // In most cases count should remain constant, but there might be a straggling perform tx which // gets committed later. Since every keeper node cannot have more than 1 straggling tx, it // is sufficient to check that the upkeep count does not increase by more than 6. 
+ l.Info().Msg("Waiting for 3m to make sure no more than 6 upkeeps are performed") gom.Consistently(func(g gomega.Gomega) { - cnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) - g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") - g.Expect(cnt.Int64()).Should( - gomega.BeNumerically("<=", existingCnt.Int64()+numUpkeepsAllowedForStragglingTxs), - "Expected consumer counter to remain constant at %d, but got %d", existingCnt.Int64(), cnt.Int64(), - ) + for i := 0; i < len(upkeepIDs); i++ { + cnt, err := consumersPerformance[i].GetUpkeepCount(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + existingCnt := existingCnts[upkeepIDs[i]] + g.Expect(cnt.Int64()).Should( + gomega.BeNumerically("<=", existingCnt.Int64()+numUpkeepsAllowedForStragglingTxs), + "Expected consumer counter to remain constant at %d, but got %d", existingCnt.Int64(), cnt.Int64(), + ) + } }, "3m", "1s").Should(gomega.Succeed()) - existingCnt, err = consumerPerformance.GetUpkeepCount(testcontext.Get(t)) - require.NoError(t, err, "Error calling consumer's counter") - existingCntInt := existingCnt.Int64() - l.Info().Int64("Upkeep counter", existingCntInt).Msg("Upkeep counter when consistently block finished") + for i := 0; i < len(upkeepIDs); i++ { + existingCnt, err := consumersPerformance[i].GetUpkeepCount(testcontext.Get(t)) + existingCnts[upkeepIDs[i]] = existingCnt + require.NoError(t, err, "Error calling consumer's counter") + l.Info().Int64("Upkeep counter", existingCnt.Int64()).Msg("Upkeep counter when consistently block finished") + } // Now increase checkGasLimit on registry highCheckGasLimit := keeperDefaultRegistryConfig highCheckGasLimit.CheckGasLimit = uint32(5000000) err = registry.SetConfig(highCheckGasLimit, contracts.OCRv2Config{}) require.NoError(t, err, "Error setting registry config") - err = chainClient.WaitForEvents() - require.NoError(t, err, "Error waiting 
for set config tx") // Upkeep should start performing again, and it should get regularly performed + l.Info().Msg("Waiting for 1m for upkeeps to be performed") gom.Eventually(func(g gomega.Gomega) { - cnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) - g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's Counter shouldn't fail") - g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", existingCntInt), - "Expected consumer counter to be greater than %d, but got %d", existingCntInt, cnt.Int64(), - ) + for i := 0; i < len(upkeepIDs); i++ { + cnt, err := consumersPerformance[i].GetUpkeepCount(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's Counter shouldn't fail") + existingCnt := existingCnts[upkeepIDs[i]] + g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", existingCnt.Int64()), + "Expected consumer counter to be greater than %d, but got %d", existingCnt.Int64(), cnt.Int64(), + ) + } }, "1m", "1s").Should(gomega.Succeed()) }) } @@ -476,30 +534,33 @@ func TestKeeperRegisterUpkeep(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) config, err := tc.GetConfig("Smoke", tc.Keeper) - if err != nil { - t.Fatal(err) - } - chainClient, chainlinkNodes, contractDeployer, linkToken, _ := setupKeeperTest(l, t, &config) - registry, registrar, consumers, upkeepIDs := actions.DeployKeeperContracts( + require.NoError(t, err, "Failed to get config") + + chainClient, chainlinkNodes, linkToken, _ := setupKeeperTest(l, t, &config) + + sb, err := chainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get start block") + + registry, registrar, consumers, upkeepIDs := actions_seth.DeployKeeperContracts( t, registryVersion, keeperDefaultRegistryConfig, keeperDefaultUpkeepsToDeploy, keeperDefaultUpkeepGasLimit, linkToken, - contractDeployer, chainClient, big.NewInt(keeperDefaultLinkFunds), ) - gom := gomega.NewGomegaWithT(t) - - _, err = actions.CreateKeeperJobsLocal(l, 
chainlinkNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) - require.NoError(t, err, "Error creating keeper jobs") - err = chainClient.WaitForEvents() + _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, fmt.Sprint(chainClient.ChainID)) require.NoError(t, err, "Error creating keeper jobs") + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, l, chainClient, sb, registry, registryVersion)() + }) + var initialCounters = make([]*big.Int, len(upkeepIDs)) + gom := gomega.NewGomegaWithT(t) // Observe that the upkeeps which are initially registered are performing and // store the value of their initial counters in order to compare later on that the value increased. gom.Eventually(func(g gomega.Gomega) error { @@ -518,7 +579,7 @@ func TestKeeperRegisterUpkeep(t *testing.T) { return nil }, "1m", "1s").Should(gomega.Succeed()) - newConsumers, _ := actions.RegisterNewUpkeeps(t, contractDeployer, chainClient, linkToken, + newConsumers, _ := actions_seth.RegisterNewUpkeeps(t, chainClient, linkToken, registry, registrar, keeperDefaultUpkeepGasLimit, 1) // We know that newConsumers has size 1, so we can just use the newly registered upkeep. 
@@ -569,54 +630,62 @@ func TestKeeperAddFunds(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) config, err := tc.GetConfig("Smoke", tc.Keeper) - if err != nil { - t.Fatal(err) - } - chainClient, chainlinkNodes, contractDeployer, linkToken, _ := setupKeeperTest(l, t, &config) - registry, _, consumers, upkeepIDs := actions.DeployKeeperContracts( + require.NoError(t, err, "Failed to get config") + + chainClient, chainlinkNodes, linkToken, _ := setupKeeperTest(l, t, &config) + + sb, err := chainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get start block") + + registry, _, consumers, upkeepIDs := actions_seth.DeployKeeperContracts( t, registryVersion, keeperDefaultRegistryConfig, keeperDefaultUpkeepsToDeploy, keeperDefaultUpkeepGasLimit, linkToken, - contractDeployer, chainClient, big.NewInt(1), ) - gom := gomega.NewGomegaWithT(t) - _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) - require.NoError(t, err, "Error creating keeper jobs") - err = chainClient.WaitForEvents() + _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, fmt.Sprint(chainClient.ChainID)) require.NoError(t, err, "Error creating keeper jobs") + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, l, chainClient, sb, registry, registryVersion)() + }) + // Since the upkeep is currently underfunded, check that it doesn't get executed + gom := gomega.NewGomegaWithT(t) + l.Info().Msg("Waiting for 1m to make sure no upkeeps are performed") gom.Consistently(func(g gomega.Gomega) { - counter, err := consumers[0].Counter(testcontext.Get(t)) - g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") - g.Expect(counter.Int64()).Should(gomega.Equal(int64(0)), - "Expected consumer counter to remain zero, but got %d", counter.Int64()) + for i := 0; i < len(upkeepIDs); i++ { + counter, err := 
consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + g.Expect(counter.Int64()).Should(gomega.Equal(int64(0)), + "Expected consumer counter to remain zero, but got %d", counter.Int64()) + } }, "1m", "1s").Should(gomega.Succeed()) // Grant permission to the registry to fund the upkeep - err = linkToken.Approve(registry.Address(), big.NewInt(9e18)) + err = linkToken.Approve(registry.Address(), big.NewInt(0).Mul(big.NewInt(9e18), big.NewInt(int64(len(upkeepIDs))))) require.NoError(t, err, "Error approving permissions for registry") - err = chainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for events") // Add funds to the upkeep whose ID we know from above - err = registry.AddUpkeepFunds(upkeepIDs[0], big.NewInt(9e18)) - require.NoError(t, err, "Error funding upkeep") - err = chainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for events") + l.Info().Msg("Adding funds to upkeeps") + for i := 0; i < len(upkeepIDs); i++ { + err = registry.AddUpkeepFunds(upkeepIDs[i], big.NewInt(9e18)) + require.NoError(t, err, "Error funding upkeep") + } // Now the new upkeep should be performing because we added enough funds gom.Eventually(func(g gomega.Gomega) { - counter, err := consumers[0].Counter(testcontext.Get(t)) - g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") - g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), - "Expected newly registered upkeep's counter to be greater than 0, but got %d", counter.Int64()) + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), + "Expected newly registered upkeep's counter to be greater than 0, but got %d", counter.Int64()) + } }, "1m", 
"1s").Should(gomega.Succeed()) }) } @@ -636,29 +705,34 @@ func TestKeeperRemove(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) config, err := tc.GetConfig("Smoke", tc.Keeper) - if err != nil { - t.Fatal(err) - } - chainClient, chainlinkNodes, contractDeployer, linkToken, _ := setupKeeperTest(l, t, &config) - registry, _, consumers, upkeepIDs := actions.DeployKeeperContracts( + require.NoError(t, err, "Failed to get config") + + chainClient, chainlinkNodes, linkToken, _ := setupKeeperTest(l, t, &config) + + sb, err := chainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get start block") + + registry, _, consumers, upkeepIDs := actions_seth.DeployKeeperContracts( t, registryVersion, keeperDefaultRegistryConfig, keeperDefaultUpkeepsToDeploy, keeperDefaultUpkeepGasLimit, linkToken, - contractDeployer, chainClient, big.NewInt(keeperDefaultLinkFunds), ) - gom := gomega.NewGomegaWithT(t) - _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) - require.NoError(t, err, "Error creating keeper jobs") - err = chainClient.WaitForEvents() + _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, fmt.Sprint(chainClient.ChainID)) require.NoError(t, err, "Error creating keeper jobs") + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, l, chainClient, sb, registry, registryVersion)() + }) + var initialCounters = make([]*big.Int, len(upkeepIDs)) + + gom := gomega.NewGomegaWithT(t) // Make sure the upkeeps are running before we remove a keeper gom.Eventually(func(g gomega.Gomega) error { for upkeepID := 0; upkeepID < len(upkeepIDs); upkeepID++ { @@ -687,8 +761,6 @@ func TestKeeperRemove(t *testing.T) { err = registry.SetKeepers(newKeeperList, payees, contracts.OCRv2Config{}) require.NoError(t, err, "Error setting new list of Keepers") - err = chainClient.WaitForEvents() - require.NoError(t, err, "Error waiting 
for events") l.Info().Msg("Successfully removed keeper at address " + keepers[0] + " from the list of Keepers") // The upkeeps should still perform and their counters should have increased compared to the first check @@ -718,28 +790,32 @@ func TestKeeperPauseRegistry(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) config, err := tc.GetConfig("Smoke", tc.Keeper) - if err != nil { - t.Fatal(err) - } - chainClient, chainlinkNodes, contractDeployer, linkToken, _ := setupKeeperTest(l, t, &config) - registry, _, consumers, upkeepIDs := actions.DeployKeeperContracts( + require.NoError(t, err, "Failed to get config") + + chainClient, chainlinkNodes, linkToken, _ := setupKeeperTest(l, t, &config) + + sb, err := chainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get start block") + + registry, _, consumers, upkeepIDs := actions_seth.DeployKeeperContracts( t, registryVersion, keeperDefaultRegistryConfig, keeperDefaultUpkeepsToDeploy, keeperDefaultUpkeepGasLimit, linkToken, - contractDeployer, chainClient, big.NewInt(keeperDefaultLinkFunds), ) gom := gomega.NewGomegaWithT(t) - _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) - require.NoError(t, err, "Error creating keeper jobs") - err = chainClient.WaitForEvents() + _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, fmt.Sprint(chainClient.ChainID)) require.NoError(t, err, "Error creating keeper jobs") + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, l, chainClient, sb, registry, registryVersion)() + }) + // Observe that the upkeeps which are initially registered are performing gom.Eventually(func(g gomega.Gomega) error { for i := 0; i < len(upkeepIDs); i++ { @@ -754,8 +830,6 @@ func TestKeeperPauseRegistry(t *testing.T) { // Pause the registry err = registry.Pause() require.NoError(t, err, "Error pausing the registry") - err = 
chainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for events") // Store how many times each upkeep performed once the registry was successfully paused var countersAfterPause = make([]*big.Int, len(upkeepIDs)) @@ -783,85 +857,92 @@ func TestKeeperMigrateRegistry(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) config, err := tc.GetConfig("Smoke", tc.Keeper) - if err != nil { - t.Fatal(err) - } - chainClient, chainlinkNodes, contractDeployer, linkToken, _ := setupKeeperTest(l, t, &config) - registry, _, consumers, upkeepIDs := actions.DeployKeeperContracts( + require.NoError(t, err, "Error getting config") + chainClient, chainlinkNodes, linkToken, _ := setupKeeperTest(l, t, &config) + + sb, err := chainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get start block") + + registry, _, consumers, upkeepIDs := actions_seth.DeployKeeperContracts( t, ethereum.RegistryVersion_1_2, keeperDefaultRegistryConfig, keeperDefaultUpkeepsToDeploy, keeperDefaultUpkeepGasLimit, linkToken, - contractDeployer, chainClient, big.NewInt(keeperDefaultLinkFunds), ) - gom := gomega.NewGomegaWithT(t) - _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) - require.NoError(t, err, "Error creating keeper jobs") - err = chainClient.WaitForEvents() + _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, fmt.Sprint(chainClient.ChainID)) require.NoError(t, err, "Error creating keeper jobs") + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, l, chainClient, sb, registry, ethereum.RegistryVersion_1_2)() + }) + // Deploy the second registry, second registrar, and the same number of upkeeps as the first one - secondRegistry, _, _, _ := actions.DeployKeeperContracts( + secondRegistry, _, _, _ := actions_seth.DeployKeeperContracts( t, ethereum.RegistryVersion_1_2, keeperDefaultRegistryConfig, 
keeperDefaultUpkeepsToDeploy, keeperDefaultUpkeepGasLimit, linkToken, - contractDeployer, chainClient, big.NewInt(keeperDefaultLinkFunds), ) // Set the jobs for the second registry - _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, secondRegistry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) - require.NoError(t, err, "Error creating keeper jobs") - err = chainClient.WaitForEvents() + _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, secondRegistry, contracts.OCRv2Config{}, fmt.Sprint(chainClient.ChainID)) require.NoError(t, err, "Error creating keeper jobs") err = registry.SetMigrationPermissions(common.HexToAddress(secondRegistry.Address()), 3) require.NoError(t, err, "Error setting bidirectional permissions for first registry") err = secondRegistry.SetMigrationPermissions(common.HexToAddress(registry.Address()), 3) require.NoError(t, err, "Error setting bidirectional permissions for second registry") - err = chainClient.WaitForEvents() - require.NoError(t, err, "Error waiting to set permissions") + + gom := gomega.NewGomegaWithT(t) // Check that the first upkeep from the first registry is performing (before being migrated) - gom.Eventually(func(g gomega.Gomega) error { - counterBeforeMigration, err := consumers[0].Counter(testcontext.Get(t)) - g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") - g.Expect(counterBeforeMigration.Int64()).Should(gomega.BeNumerically(">", int64(0)), - "Expected consumer counter to be greater than 0, but got %s", counterBeforeMigration) - return nil + l.Info().Msg("Waiting for 1m for upkeeps to be performed before migration") + gom.Eventually(func(g gomega.Gomega) { + for i := 0; i < len(upkeepIDs); i++ { + counterBeforeMigration, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + g.Expect(counterBeforeMigration.Int64()).Should(gomega.BeNumerically(">", int64(0)), + 
"Expected consumer counter to be greater than 0, but got %s", counterBeforeMigration) + } }, "1m", "1s").Should(gomega.Succeed()) - // Migrate the upkeep with index 0 from the first to the second registry - err = registry.Migrate([]*big.Int{upkeepIDs[0]}, common.HexToAddress(secondRegistry.Address())) - require.NoError(t, err, "Error migrating first upkeep") - err = chainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for migration") + // Migrate the upkeeps from the first to the second registry + for i := 0; i < len(upkeepIDs); i++ { + err = registry.Migrate([]*big.Int{upkeepIDs[i]}, common.HexToAddress(secondRegistry.Address())) + require.NoError(t, err, "Error migrating first upkeep") + } // Pause the first registry, in that way we make sure that the upkeep is being performed by the second one err = registry.Pause() require.NoError(t, err, "Error pausing registry") - err = chainClient.WaitForEvents() - require.NoError(t, err, "Error waiting to pause first registry") - counterAfterMigration, err := consumers[0].Counter(testcontext.Get(t)) - require.NoError(t, err, "Error calling consumer's counter") + counterAfterMigrationPerUpkeep := make(map[*big.Int]*big.Int) + + for i := 0; i < len(upkeepIDs); i++ { + counterAfterMigration, err := consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Error calling consumer's counter") + counterAfterMigrationPerUpkeep[upkeepIDs[i]] = counterAfterMigration + } // Check that once we migrated the upkeep, the counter has increased - gom.Eventually(func(g gomega.Gomega) error { - currentCounter, err := consumers[0].Counter(testcontext.Get(t)) - g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") - g.Expect(currentCounter.Int64()).Should(gomega.BeNumerically(">", counterAfterMigration.Int64()), - "Expected counter to have increased, but stayed constant at %s", counterAfterMigration) - return nil + l.Info().Msg("Waiting for 1m for upkeeps to be performed after 
migration") + gom.Eventually(func(g gomega.Gomega) { + for i := 0; i < len(upkeepIDs); i++ { + currentCounter, err := consumers[i].Counter(testcontext.Get(t)) + counterAfterMigration := counterAfterMigrationPerUpkeep[upkeepIDs[i]] + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + g.Expect(currentCounter.Int64()).Should(gomega.BeNumerically(">", counterAfterMigration.Int64()), + "Expected counter to have increased, but stayed constant at %s", counterAfterMigration) + } }, "1m", "1s").Should(gomega.Succeed()) } @@ -879,30 +960,34 @@ func TestKeeperNodeDown(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) config, err := tc.GetConfig("Smoke", tc.Keeper) - if err != nil { - t.Fatal(err) - } - chainClient, chainlinkNodes, contractDeployer, linkToken, _ := setupKeeperTest(l, t, &config) - registry, _, consumers, upkeepIDs := actions.DeployKeeperContracts( + require.NoError(t, err, "Failed to get config") + + chainClient, chainlinkNodes, linkToken, _ := setupKeeperTest(l, t, &config) + + sb, err := chainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get start block") + + registry, _, consumers, upkeepIDs := actions_seth.DeployKeeperContracts( t, registryVersion, lowBCPTRegistryConfig, keeperDefaultUpkeepsToDeploy, keeperDefaultUpkeepGasLimit, linkToken, - contractDeployer, chainClient, big.NewInt(keeperDefaultLinkFunds), ) - gom := gomega.NewGomegaWithT(t) - jobs, err := actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) - require.NoError(t, err, "Error creating keeper jobs") - err = chainClient.WaitForEvents() + jobs, err := actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, fmt.Sprint(chainClient.ChainID)) require.NoError(t, err, "Error creating keeper jobs") + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, l, chainClient, sb, registry, registryVersion)() + }) + 
var initialCounters = make([]*big.Int, len(upkeepIDs)) + gom := gomega.NewGomegaWithT(t) // Watch upkeeps being performed and store their counters in order to compare them later in the test gom.Eventually(func(g gomega.Gomega) error { for i := 0; i < len(upkeepIDs); i++ { @@ -920,8 +1005,6 @@ func TestKeeperNodeDown(t *testing.T) { for i, nodeToTakeDown := range firstHalfToTakeDown { err = nodeToTakeDown.MustDeleteJob(jobs[0].Data.ID) require.NoError(t, err, "Error deleting job from node %d", i) - err = chainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for events") } l.Info().Msg("Successfully managed to take down the first half of the nodes") @@ -946,8 +1029,6 @@ func TestKeeperNodeDown(t *testing.T) { for i, nodeToTakeDown := range secondHalfToTakeDown { err = nodeToTakeDown.node.MustDeleteJob(nodeToTakeDown.job.Data.ID) require.NoError(t, err, "Error deleting job from node %d", i) - err = chainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for events") } l.Info().Msg("Successfully managed to take down the second half of the nodes") @@ -989,28 +1070,32 @@ func TestKeeperPauseUnPauseUpkeep(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) config, err := tc.GetConfig("Smoke", tc.Keeper) - if err != nil { - t.Fatal(err) - } - chainClient, chainlinkNodes, contractDeployer, linkToken, _ := setupKeeperTest(l, t, &config) - registry, _, consumers, upkeepIDs := actions.DeployKeeperContracts( + require.NoError(t, err, "Failed to get config") + + chainClient, chainlinkNodes, linkToken, _ := setupKeeperTest(l, t, &config) + + sb, err := chainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get start block") + + registry, _, consumers, upkeepIDs := actions_seth.DeployKeeperContracts( t, ethereum.RegistryVersion_1_3, lowBCPTRegistryConfig, keeperDefaultUpkeepsToDeploy, keeperDefaultUpkeepGasLimit, linkToken, - contractDeployer, chainClient, big.NewInt(keeperDefaultLinkFunds), ) - gom := 
gomega.NewGomegaWithT(t) - _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) - require.NoError(t, err, "Error creating keeper jobs") - err = chainClient.WaitForEvents() + _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, fmt.Sprint(chainClient.ChainID)) require.NoError(t, err, "Error creating keeper jobs") + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, l, chainClient, sb, registry, ethereum.RegistryVersion_1_3)() + }) + + gom := gomega.NewGomegaWithT(t) gom.Eventually(func(g gomega.Gomega) error { // Check if the upkeeps are performing multiple times by analysing their counters and checking they are greater than 5 for i := 0; i < len(upkeepIDs); i++ { @@ -1029,9 +1114,6 @@ func TestKeeperPauseUnPauseUpkeep(t *testing.T) { require.NoError(t, err, "Error pausing upkeep at index %d", i) } - err = chainClient.WaitForEvents() - require.NoError(t, err, "Error waiting to pause upkeeps") - var countersAfterPause = make([]*big.Int, len(upkeepIDs)) for i := 0; i < len(upkeepIDs); i++ { // Obtain the amount of times the upkeep has been executed so far @@ -1062,9 +1144,6 @@ func TestKeeperPauseUnPauseUpkeep(t *testing.T) { require.NoError(t, err, "Error un-pausing upkeep at index %d", i) } - err = chainClient.WaitForEvents() - require.NoError(t, err, "Error waiting to un-pause upkeeps") - gom.Eventually(func(g gomega.Gomega) error { // Check if the upkeeps are performing multiple times by analysing their counters and checking they are greater than 5 + numbers of performing before pause for i := 0; i < len(upkeepIDs); i++ { @@ -1083,29 +1162,33 @@ func TestKeeperUpdateCheckData(t *testing.T) { t.Parallel() l := logging.GetTestLogger(t) config, err := tc.GetConfig("Smoke", tc.Keeper) - if err != nil { - t.Fatal(err) - } - chainClient, chainlinkNodes, contractDeployer, linkToken, _ := setupKeeperTest(l, t, &config) - registry, _, 
performDataChecker, upkeepIDs := actions.DeployPerformDataCheckerContracts( + require.NoError(t, err, "Failed to get config") + + chainClient, chainlinkNodes, linkToken, _ := setupKeeperTest(l, t, &config) + + sb, err := chainClient.Client.BlockNumber(context.Background()) + require.NoError(t, err, "Failed to get start block") + + registry, _, performDataChecker, upkeepIDs := actions_seth.DeployPerformDataCheckerContracts( t, + chainClient, ethereum.RegistryVersion_1_3, keeperDefaultUpkeepsToDeploy, keeperDefaultUpkeepGasLimit, linkToken, - contractDeployer, - chainClient, &lowBCPTRegistryConfig, big.NewInt(keeperDefaultLinkFunds), []byte(keeperExpectedData), ) - gom := gomega.NewGomegaWithT(t) - _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) - require.NoError(t, err, "Error creating keeper jobs") - err = chainClient.WaitForEvents() + _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, fmt.Sprint(chainClient.ChainID)) require.NoError(t, err, "Error creating keeper jobs") + t.Cleanup(func() { + actions_seth.GetStalenessReportCleanupFn(t, l, chainClient, sb, registry, ethereum.RegistryVersion_1_3)() + }) + + gom := gomega.NewGomegaWithT(t) gom.Consistently(func(g gomega.Gomega) { // expect the counter to remain 0 because perform data does not match for i := 0; i < len(upkeepIDs); i++ { @@ -1122,9 +1205,6 @@ func TestKeeperUpdateCheckData(t *testing.T) { require.NoError(t, err, "Error updating check data at index %d", i) } - err = chainClient.WaitForEvents() - require.NoError(t, err, "Error waiting for updated check data") - // retrieve new check data for all upkeeps for i := 0; i < len(upkeepIDs); i++ { upkeep, err := registry.GetUpkeepInfo(testcontext.Get(t), upkeepIDs[i]) @@ -1146,9 +1226,8 @@ func TestKeeperUpdateCheckData(t *testing.T) { } func setupKeeperTest(l zerolog.Logger, t *testing.T, config *tc.TestConfig) ( - blockchain.EVMClient, + 
*seth.Client, []*client.ChainlinkClient, - contracts.ContractDeployer, contracts.LinkToken, *test_env.CLClusterTestEnv, ) { @@ -1171,22 +1250,19 @@ func setupKeeperTest(l zerolog.Logger, t *testing.T, config *tc.TestConfig) ( WithCLNodeConfig(clNodeConfig). WithFunding(big.NewFloat(.5)). WithStandardCleanup(). + WithSeth(). Build() require.NoError(t, err, "Error deploying test environment") - env.ParallelTransactions(true) - - linkTokenContract, err := env.ContractDeployer.DeployLinkTokenContract() - require.NoError(t, err, "Deploying Link Token Contract shouldn't fail") - network := networks.MustGetSelectedNetworkConfig(config.GetNetworkConfig())[0] - evmClient, err := env.GetEVMClient(network.ChainID) + + sethClient, err := env.GetSethClient(network.ChainID) require.NoError(t, err, "Getting EVM client shouldn't fail") - err = evmClient.WaitForEvents() - require.NoError(t, err, "Error waiting for events") + linkTokenContract, err := contracts.DeployLinkTokenContract(l, sethClient) + require.NoError(t, err, "Deploying Link Token Contract shouldn't fail") - return evmClient, env.ClCluster.NodeAPIs(), env.ContractDeployer, linkTokenContract, env + return sethClient, env.ClCluster.NodeAPIs(), linkTokenContract, env } func TestKeeperJobReplacement(t *testing.T) { @@ -1194,27 +1270,22 @@ func TestKeeperJobReplacement(t *testing.T) { l := logging.GetTestLogger(t) registryVersion := ethereum.RegistryVersion_1_3 config, err := tc.GetConfig("Smoke", tc.Keeper) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Failed to get config") - chainClient, chainlinkNodes, contractDeployer, linkToken, _ := setupKeeperTest(l, t, &config) - registry, _, consumers, upkeepIDs := actions.DeployKeeperContracts( + chainClient, chainlinkNodes, linkToken, _ := setupKeeperTest(l, t, &config) + registry, _, consumers, upkeepIDs := actions_seth.DeployKeeperContracts( t, registryVersion, keeperDefaultRegistryConfig, keeperDefaultUpkeepsToDeploy, keeperDefaultUpkeepGasLimit, 
linkToken, - contractDeployer, chainClient, big.NewInt(keeperDefaultLinkFunds), ) gom := gomega.NewGomegaWithT(t) - _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) - require.NoError(t, err, "Error creating keeper jobs") - err = chainClient.WaitForEvents() + _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, fmt.Sprint(chainClient.ChainID)) require.NoError(t, err, "Error creating keeper jobs") gom.Eventually(func(g gomega.Gomega) error { @@ -1241,9 +1312,7 @@ func TestKeeperJobReplacement(t *testing.T) { } } - _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) - require.NoError(t, err, "Error creating keeper jobs") - err = chainClient.WaitForEvents() + _, err = actions.CreateKeeperJobsLocal(l, chainlinkNodes, registry, contracts.OCRv2Config{}, fmt.Sprint(chainClient.ChainID)) require.NoError(t, err, "Error creating keeper jobs") gom.Eventually(func(g gomega.Gomega) error { diff --git a/integration-tests/smoke/log_poller_test.go b/integration-tests/smoke/log_poller_test.go index 09b601395c0..8a95b040031 100644 --- a/integration-tests/smoke/log_poller_test.go +++ b/integration-tests/smoke/log_poller_test.go @@ -17,7 +17,7 @@ import ( "github.com/smartcontractkit/chainlink-testing-framework/networks" "github.com/smartcontractkit/chainlink-testing-framework/testreporters" "github.com/smartcontractkit/chainlink-testing-framework/utils/testcontext" - "github.com/smartcontractkit/chainlink/integration-tests/actions" + actions_seth "github.com/smartcontractkit/chainlink/integration-tests/actions/seth" "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" @@ -97,6 +97,7 @@ func TestLogPollerReplayFinalityTag(t 
*testing.T) { func executeBasicLogPollerTest(t *testing.T, logScannerSettings test_env.ChainlinkNodeLogScannerSettings) { testConfig, err := tc.GetConfig(t.Name(), tc.LogPoller) require.NoError(t, err, "Error getting config") + overrideEphemeralAddressesCount(&testConfig) eventsToEmit := []abi.Event{} for _, event := range logpoller.EmitterABI.Events { @@ -122,17 +123,14 @@ func executeBasicLogPollerTest(t *testing.T, logScannerSettings test_env.Chainli l.Info().Msg("No duplicate filters found. OK!") network := networks.MustGetSelectedNetworkConfig(testConfig.GetNetworkConfig())[0] - evmClient, err := testEnv.GetEVMClient(network.ChainID) - require.NoError(t, err, "Getting EVM client shouldn't fail") - - err = evmClient.WaitForEvents() - require.NoError(t, err, "Error encountered when waiting for setting trigger config for upkeeps") + sethClient, err := testEnv.GetSethClient(network.ChainID) + require.NoError(t, err, "Getting Seth client shouldn't fail") expectedFilters := logpoller.GetExpectedFilters(lpTestEnv.logEmitters, cfg) waitForAllNodesToHaveExpectedFiltersRegisteredOrFail(ctx, l, coreLogger, t, testEnv, &testConfig, expectedFilters) // Save block number before starting to emit events, so that we can later use it when querying logs - sb, err := evmClient.LatestBlockNumber(testcontext.Get(t)) + sb, err := sethClient.Client.BlockNumber(testcontext.Get(t)) require.NoError(t, err, "Error getting latest block number") startBlock := int64(sb) @@ -145,14 +143,14 @@ func executeBasicLogPollerTest(t *testing.T, logScannerSettings test_env.Chainli logpoller.ExecuteChaosExperiment(l, testEnv, &testConfig, chaosDoneCh) }() - totalLogsEmitted, err := logpoller.ExecuteGenerator(t, cfg, lpTestEnv.logEmitters) + totalLogsEmitted, err := logpoller.ExecuteGenerator(t, cfg, sethClient, lpTestEnv.logEmitters) endTime := time.Now() require.NoError(t, err, "Error executing event generator") expectedLogsEmitted := logpoller.GetExpectedLogCount(cfg) duration := 
int(endTime.Sub(startTime).Seconds()) - eb, err := evmClient.LatestBlockNumber(testcontext.Get(t)) + eb, err := sethClient.Client.BlockNumber(testcontext.Get(t)) require.NoError(t, err, "Error getting latest block number") l.Info(). @@ -171,7 +169,7 @@ func executeBasicLogPollerTest(t *testing.T, logScannerSettings test_env.Chainli // as that's not trivial to do (i.e. just because chain was at block X when log emission ended it doesn't mean all events made it to that block) endBlock := int64(eb) + 10000 - allNodesLogCountMatches, err := logpoller.FluentlyCheckIfAllNodesHaveLogCount("5m", startBlock, endBlock, totalLogsEmitted, expectedFilters, l, coreLogger, testEnv, evmClient.GetChainID().Int64()) + allNodesLogCountMatches, err := logpoller.FluentlyCheckIfAllNodesHaveLogCount("5m", startBlock, endBlock, totalLogsEmitted, expectedFilters, l, coreLogger, testEnv, sethClient.ChainID) require.NoError(t, err, "Error checking if CL nodes have expected log count") conditionallyWaitUntilNodesHaveTheSameLogsAsEvm(l, coreLogger, t, allNodesLogCountMatches, lpTestEnv, &testConfig, startBlock, endBlock, "5m") @@ -180,6 +178,7 @@ func executeBasicLogPollerTest(t *testing.T, logScannerSettings test_env.Chainli func executeLogPollerReplay(t *testing.T, consistencyTimeout string) { testConfig, err := tc.GetConfig(t.Name(), tc.LogPoller) require.NoError(t, err, "Error getting config") + overrideEphemeralAddressesCount(&testConfig) eventsToEmit := []abi.Event{} for _, event := range logpoller.EmitterABI.Events { @@ -197,34 +196,34 @@ func executeLogPollerReplay(t *testing.T, consistencyTimeout string) { ctx := testcontext.Get(t) network := networks.MustGetSelectedNetworkConfig(testConfig.GetNetworkConfig())[0] - evmClient, err := testEnv.GetEVMClient(network.ChainID) - require.NoError(t, err, "Getting EVM client shouldn't fail") + sethClient, err := testEnv.GetSethClient(network.ChainID) + require.NoError(t, err, "Getting Seth client shouldn't fail") // Save block number before 
starting to emit events, so that we can later use it when querying logs - sb, err := evmClient.LatestBlockNumber(testcontext.Get(t)) + sb, err := sethClient.Client.BlockNumber(testcontext.Get(t)) require.NoError(t, err, "Error getting latest block number") startBlock := int64(sb) l.Info().Int64("Starting Block", startBlock).Msg("STARTING EVENT EMISSION") startTime := time.Now() - totalLogsEmitted, err := logpoller.ExecuteGenerator(t, cfg, lpTestEnv.logEmitters) + totalLogsEmitted, err := logpoller.ExecuteGenerator(t, cfg, sethClient, lpTestEnv.logEmitters) endTime := time.Now() require.NoError(t, err, "Error executing event generator") expectedLogsEmitted := logpoller.GetExpectedLogCount(cfg) duration := int(endTime.Sub(startTime).Seconds()) // Save block number after finishing to emit events, so that we can later use it when querying logs - eb, err := evmClient.LatestBlockNumber(testcontext.Get(t)) + eb, err := sethClient.Client.BlockNumber(testcontext.Get(t)) require.NoError(t, err, "Error getting latest block number") - endBlock, err := logpoller.GetEndBlockToWaitFor(int64(eb), evmClient.GetChainID().Int64(), cfg) + endBlock, err := logpoller.GetEndBlockToWaitFor(int64(eb), sethClient.ChainID, cfg) require.NoError(t, err, "Error getting end block to wait for") l.Info().Int64("Ending Block", endBlock).Int("Total logs emitted", totalLogsEmitted).Int64("Expected total logs emitted", expectedLogsEmitted).Str("Duration", fmt.Sprintf("%d sec", duration)).Str("LPS", fmt.Sprintf("%d/sec", totalLogsEmitted/duration)).Msg("FINISHED EVENT EMISSION") // Lets make sure no logs are in DB yet expectedFilters := logpoller.GetExpectedFilters(lpTestEnv.logEmitters, cfg) - logCountMatches, err := logpoller.ClNodesHaveExpectedLogCount(startBlock, endBlock, evmClient.GetChainID(), 0, expectedFilters, l, coreLogger, testEnv.ClCluster) + logCountMatches, err := logpoller.ClNodesHaveExpectedLogCount(startBlock, endBlock, big.NewInt(sethClient.ChainID), 0, expectedFilters, l, 
coreLogger, testEnv.ClCluster) require.NoError(t, err, "Error checking if CL nodes have expected log count") require.True(t, logCountMatches, "Some CL nodes already had logs in DB") l.Info().Msg("No logs were saved by CL nodes yet, as expected. Proceeding.") @@ -234,16 +233,13 @@ func executeLogPollerReplay(t *testing.T, consistencyTimeout string) { err = logpoller.RegisterFiltersAndAssertUniquness(l, lpTestEnv.registry, lpTestEnv.upkeepIDs, lpTestEnv.logEmitters, cfg, lpTestEnv.upKeepsNeeded) require.NoError(t, err, "Error registering filters") - err = evmClient.WaitForEvents() - require.NoError(t, err, "Error encountered when waiting for setting trigger config for upkeeps") - waitForAllNodesToHaveExpectedFiltersRegisteredOrFail(ctx, l, coreLogger, t, testEnv, &testConfig, expectedFilters) blockFinalisationWaitDuration := "5m" l.Warn().Str("Duration", blockFinalisationWaitDuration).Msg("Waiting for all CL nodes to have end block finalised") gom := gomega.NewGomegaWithT(t) gom.Eventually(func(g gomega.Gomega) { - hasFinalised, err := logpoller.LogPollerHasFinalisedEndBlock(endBlock, evmClient.GetChainID(), l, coreLogger, testEnv.ClCluster) + hasFinalised, err := logpoller.LogPollerHasFinalisedEndBlock(endBlock, big.NewInt(sethClient.ChainID), l, coreLogger, testEnv.ClCluster) if err != nil { l.Warn().Err(err).Msg("Error checking if nodes have finalised end block. 
Retrying...") } @@ -254,7 +250,7 @@ func executeLogPollerReplay(t *testing.T, consistencyTimeout string) { l.Info().Msg("Triggering log poller's replay") for i := 1; i < len(testEnv.ClCluster.Nodes); i++ { nodeName := testEnv.ClCluster.Nodes[i].ContainerName - response, _, err := testEnv.ClCluster.Nodes[i].API.ReplayLogPollerFromBlock(startBlock, evmClient.GetChainID().Int64()) + response, _, err := testEnv.ClCluster.Nodes[i].API.ReplayLogPollerFromBlock(startBlock, sethClient.ChainID) require.NoError(t, err, "Error triggering log poller's replay on node %s", nodeName) require.Equal(t, "Replay started", response.Data.Attributes.Message, "Unexpected response message from log poller's replay") } @@ -264,7 +260,7 @@ func executeLogPollerReplay(t *testing.T, consistencyTimeout string) { l.Warn().Str("Duration", consistencyTimeout).Msg("Waiting for replay logs to be processed by all nodes") // logCountWaitDuration, err := time.ParseDuration("5m") - allNodesLogCountMatches, err := logpoller.FluentlyCheckIfAllNodesHaveLogCount("5m", startBlock, endBlock, totalLogsEmitted, expectedFilters, l, coreLogger, testEnv, evmClient.GetChainID().Int64()) + allNodesLogCountMatches, err := logpoller.FluentlyCheckIfAllNodesHaveLogCount("5m", startBlock, endBlock, totalLogsEmitted, expectedFilters, l, coreLogger, testEnv, sethClient.ChainID) require.NoError(t, err, "Error checking if CL nodes have expected log count") conditionallyWaitUntilNodesHaveTheSameLogsAsEvm(l, coreLogger, t, allNodesLogCountMatches, lpTestEnv, &testConfig, startBlock, endBlock, "5m") @@ -296,7 +292,7 @@ func prepareEnvironment(l zerolog.Logger, t *testing.T, testConfig *tc.TestConfi upKeepsNeeded = *cfg.General.Contracts * len(cfg.General.EventsToEmit) ) - chainClient, _, contractDeployer, linkToken, registry, registrar, testEnv := logpoller.SetupLogPollerTestDocker( + chainClient, _, linkToken, registry, registrar, testEnv, _ := logpoller.SetupLogPollerTestDocker( t, ethereum.RegistryVersion_2_1, 
logpoller.DefaultOCRRegistryConfig, @@ -308,13 +304,12 @@ func prepareEnvironment(l zerolog.Logger, t *testing.T, testConfig *tc.TestConfi logScannerSettings, ) - _, upkeepIDs := actions.DeployConsumers( + _, upkeepIDs := actions_seth.DeployConsumers( t, + chainClient, registry, registrar, linkToken, - contractDeployer, - chainClient, upKeepsNeeded, big.NewInt(int64(9e18)), uint32(2500000), @@ -327,7 +322,7 @@ func prepareEnvironment(l zerolog.Logger, t *testing.T, testConfig *tc.TestConfi l.Info().Msg("No duplicate upkeep IDs found. OK!") // Deploy Log Emitter contracts - logEmitters := logpoller.UploadLogEmitterContractsAndWaitForFinalisation(l, t, testEnv, testConfig) + logEmitters := logpoller.UploadLogEmitterContracts(l, t, chainClient, testConfig) err = logpoller.AssertContractAddressUniquneness(logEmitters) require.NoError(t, err, "Error asserting contract addresses uniqueness") l.Info().Msg("No duplicate contract addresses found. OK!") @@ -346,8 +341,8 @@ func waitForAllNodesToHaveExpectedFiltersRegisteredOrFail(ctx context.Context, l // Make sure that all nodes have expected filters registered before starting to emit events network := networks.MustGetSelectedNetworkConfig(testConfig.GetNetworkConfig())[0] - evmClient, err := testEnv.GetEVMClient(network.ChainID) - require.NoError(t, err, "Getting EVM client shouldn't fail") + sethClient, err := testEnv.GetSethClient(network.ChainID) + require.NoError(t, err, "Getting Seth client shouldn't fail") gom := gomega.NewGomegaWithT(t) gom.Eventually(func(g gomega.Gomega) { @@ -360,7 +355,7 @@ func waitForAllNodesToHaveExpectedFiltersRegisteredOrFail(ctx context.Context, l var message string var err error - hasFilters, message, err = logpoller.NodeHasExpectedFilters(ctx, expectedFilters, coreLogger, evmClient.GetChainID(), testEnv.ClCluster.Nodes[i].PostgresDb) + hasFilters, message, err = logpoller.NodeHasExpectedFilters(ctx, expectedFilters, coreLogger, big.NewInt(sethClient.ChainID), 
testEnv.ClCluster.Nodes[i].PostgresDb) if !hasFilters || err != nil { l.Warn(). Str("Details", message). @@ -385,12 +380,12 @@ func conditionallyWaitUntilNodesHaveTheSameLogsAsEvm(l zerolog.Logger, coreLogge require.NoError(t, err, "Error parsing log count wait duration") network := networks.MustGetSelectedNetworkConfig(testConfig.GetNetworkConfig())[0] - evmClient, err := lpTestEnv.testEnv.GetEVMClient(network.ChainID) - require.NoError(t, err, "Getting EVM client shouldn't fail") + chainClient, err := lpTestEnv.testEnv.GetSethClient(network.ChainID) + require.NoError(t, err, "Getting Seth client shouldn't fail") allNodesHaveAllExpectedLogs := false if !allNodesLogCountMatches { - missingLogs, err := logpoller.GetMissingLogs(startBlock, endBlock, lpTestEnv.logEmitters, evmClient, lpTestEnv.testEnv.ClCluster, l, coreLogger, testConfig.LogPoller) + missingLogs, err := logpoller.GetMissingLogs(startBlock, endBlock, lpTestEnv.logEmitters, chainClient, lpTestEnv.testEnv.ClCluster, l, coreLogger, testConfig.LogPoller) if err == nil { if !missingLogs.IsEmpty() { logpoller.PrintMissingLogsInfo(missingLogs, l, testConfig.LogPoller) @@ -414,7 +409,7 @@ func conditionallyWaitUntilNodesHaveTheSameLogsAsEvm(l zerolog.Logger, coreLogge gom := gomega.NewGomegaWithT(t) gom.Eventually(func(g gomega.Gomega) { - missingLogs, err := logpoller.GetMissingLogs(startBlock, endBlock, lpTestEnv.logEmitters, evmClient, lpTestEnv.testEnv.ClCluster, l, coreLogger, testConfig.LogPoller) + missingLogs, err := logpoller.GetMissingLogs(startBlock, endBlock, lpTestEnv.logEmitters, chainClient, lpTestEnv.testEnv.ClCluster, l, coreLogger, testConfig.LogPoller) if err != nil { l.Warn(). Err(err). 
@@ -428,3 +423,14 @@ func conditionallyWaitUntilNodesHaveTheSameLogsAsEvm(l zerolog.Logger, coreLogge }, logConsistencyWaitDuration, "10s").Should(gomega.Succeed()) } } + +func overrideEphemeralAddressesCount(testConfig *tc.TestConfig) { + // override whatever is in the config file to avoid a situation where we don't have enough ephemeral addresses + // to emit events from all contracts + minContracts := int64(*testConfig.LogPoller.General.Contracts * 20) + if testConfig.Seth.EphemeralAddrs != nil && *testConfig.Seth.EphemeralAddrs > minContracts { + return + } + + testConfig.Seth.EphemeralAddrs = &minContracts +} diff --git a/integration-tests/soak/ocr_test.go b/integration-tests/soak/ocr_test.go index 100bc6f7ef8..70cd7974373 100644 --- a/integration-tests/soak/ocr_test.go +++ b/integration-tests/soak/ocr_test.go @@ -3,7 +3,6 @@ package soak import ( "testing" - "github.com/smartcontractkit/seth" "github.com/stretchr/testify/require" "github.com/smartcontractkit/chainlink-testing-framework/logging" @@ -12,7 +11,6 @@ import ( actions_seth "github.com/smartcontractkit/chainlink/integration-tests/actions/seth" tc "github.com/smartcontractkit/chainlink/integration-tests/testconfig" "github.com/smartcontractkit/chainlink/integration-tests/testsetups" - "github.com/smartcontractkit/chainlink/integration-tests/utils" ) func TestOCRSoak(t *testing.T) { @@ -29,16 +27,8 @@ func TestOCRSoak(t *testing.T) { require.NoError(t, err, "Error getting config") // validate Seth config before anything else - readSethCfg := config.GetSethConfig() - require.NotNil(t, readSethCfg, "Seth config shouldn't be nil") - network := networks.MustGetSelectedNetworkConfig(config.GetNetworkConfig())[0] - sethCfg, err := utils.MergeSethAndEvmNetworkConfigs(network, *readSethCfg) - require.NoError(t, err, "Error merging seth and evm network configs") - err = utils.ValidateSethNetworkConfig(sethCfg.Network) - require.NoError(t, err, "Error validating seth network config") - - _, err = 
seth.NewClientWithConfig(&sethCfg) + _, err = actions_seth.GetChainClient(config, network) require.NoError(t, err, "Error creating seth client") ocrSoakTest, err := testsetups.NewOCRSoakTest(t, &config, false) diff --git a/integration-tests/testconfig/automation/automation.toml b/integration-tests/testconfig/automation/automation.toml index a774a622123..4df2cbebc52 100644 --- a/integration-tests/testconfig/automation/automation.toml +++ b/integration-tests/testconfig/automation/automation.toml @@ -59,6 +59,9 @@ chainlink_node_log_level="info" use_prometheus=false # load test specific overrides +[Load.Seth] +ephemeral_addresses_number = 100 + [Load.Common] chainlink_node_funding = 100 diff --git a/integration-tests/testconfig/default.toml b/integration-tests/testconfig/default.toml index ef3f60d4282..6de19ee57d1 100644 --- a/integration-tests/testconfig/default.toml +++ b/integration-tests/testconfig/default.toml @@ -30,18 +30,25 @@ addresses_to_fund=["0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"] Deneb=500 [Seth] -# enables automatic tracing of all transactions that are decoded via Decode() method -tracing_enabled = false -# saves each tracing result to json file in ./traces/.json -trace_to_json = false +# controls which transactions are decoded/traced. Possible values are: none, all, reverted (default). +# if transaction level doesn't match, then calling Decode() does nothing. It's advised to keep it set +# to 'reverted' to limit noise. If you combine it with 'trace_to_json' it will save all possible data +# in JSON files for reverted transactions. +tracing_level = "reverted" +# saves each decoding/tracing results to JSON files; what exactly is saved depends on what we +# were able to decode, we try to save maximum information possible. It can either be: +# just tx hash, decoded transaction or call trace. Which transactions traces are saved depends +# on 'tracing_level'. 
# number of addresses to be generated and runtime, if set to 0, no addresses will be generated # each generated address will receive a proportion of native tokens from root private key's balance # with the value equal to (root_balance / ephemeral_addresses_number) - transfer_fee * ephemeral_addresses_number -ephemeral_addresses_number = 0 +ephemeral_addresses_number = 10 # If enabled we will panic when getting transaction options if current key/address has a pending transaction # That's because the one we are about to send would get queued, possibly for a very long time -pending_nonce_protection_enabled = false +pending_nonce_protection_enabled = true + +root_key_funds_buffer = 1000 # when enabled when creating a new Seth client we will send 10k wei from root address to root address # to make sure transaction can be submitted and mined check_rpc_health_on_start = false [Seth.nonce_manager] key_sync_rate_limit_per_sec = 10 -key_sync_timeout = "2s" +key_sync_timeout = "100s" key_sync_retry_delay = "1s" key_sync_retries = 10 @@ -57,21 +64,20 @@ key_sync_retries = 10 name = "Geth" chain_id = "1337" transaction_timeout = "30s" -eip_1559_dynamic_fees = false # gas limits transfer_gas_fee = 21_000 # gas limit should be explicitly set only if you are connecting to a node that's incapable of estimating gas limit itself (should only happen for very old versions) -# gas_limit = 8_000_000 +gas_limit = 8_000_000 # manual settings, used when gas_price_estimation_enabled is false or when it fails # legacy transactions gas_price = 1_000_000_000 # EIP-1559 transactions -#eip_1559_dynamic_fees = true -gas_fee_cap = 10_000_000_000 -gas_tip_cap = 3_000_000_000 +eip_1559_dynamic_fees = true +gas_fee_cap = 15_000_000_000 +gas_tip_cap = 5_000_000_000 [[Seth.networks]] name = "Fuji" diff --git a/integration-tests/testconfig/keeper/config.go b/integration-tests/testconfig/keeper/config.go index da6cd7acc98..0e11266d39f 100644 --- 
a/integration-tests/testconfig/keeper/config.go +++ b/integration-tests/testconfig/keeper/config.go @@ -2,17 +2,26 @@ package keeper import ( "errors" + + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" ) type Config struct { - Common *Common `toml:"Common"` + Common *Common `toml:"Common"` + Resiliency *ResiliencyConfig `toml:"Resiliency"` } func (c *Config) Validate() error { if c.Common == nil { return nil } - return c.Common.Validate() + if err := c.Common.Validate(); err != nil { + return err + } + if c.Resiliency == nil { + return nil + } + return c.Resiliency.Validate() } type Common struct { @@ -83,3 +92,19 @@ func (c *Common) Validate() error { } return nil } + +type ResiliencyConfig struct { + ContractCallLimit *uint `toml:"contract_call_limit"` + ContractCallInterval *blockchain.StrDuration `toml:"contract_call_interval"` +} + +func (c *ResiliencyConfig) Validate() error { + if c.ContractCallLimit == nil { + return errors.New("contract_call_limit must be set") + } + if c.ContractCallInterval == nil { + return errors.New("contract_call_interval must be set") + } + + return nil +} diff --git a/integration-tests/testconfig/keeper/example.toml b/integration-tests/testconfig/keeper/example.toml index d76fff343e7..0bda9982988 100644 --- a/integration-tests/testconfig/keeper/example.toml +++ b/integration-tests/testconfig/keeper/example.toml @@ -85,4 +85,13 @@ max_perform_gas = 5000000 block_range = 3600 block_interval = 20 forces_single_tx_key = false -delete_jobs_on_end = true \ No newline at end of file +delete_jobs_on_end = true + +# If present will wrap keeper benchmark consumers in retrying contract backend +# that retries read-only operations on failure related to network issues or node unavailability +# To disable simply remove this section or set any of the values to 0 +[Keeper.Resiliency] +# number of retries before giving up +contract_call_limit = 500 +# static interval between retries +contract_call_interval = "5s" \ No newline 
at end of file diff --git a/integration-tests/testconfig/keeper/keeper.toml b/integration-tests/testconfig/keeper/keeper.toml index 516dbb35a63..228ea077bd3 100644 --- a/integration-tests/testconfig/keeper/keeper.toml +++ b/integration-tests/testconfig/keeper/keeper.toml @@ -11,7 +11,18 @@ upkeep_gas_limit = 1500000 check_gas_to_burn = 100000 perform_gas_to_burn = 50000 max_perform_gas = 5000000 -block_range = 3600 +block_range = 100 block_interval = 20 forces_single_tx_key = false -delete_jobs_on_end = true \ No newline at end of file +delete_jobs_on_end = true + +# will retry roughly for 1h before giving up (900 * 4s) +[Keeper.Resiliency] +# number of retries before giving up +contract_call_limit = 900 +# static interval between retries +contract_call_interval = "4s" + +[Seth] +# keeper benchmark running on simulated network requires 100k per node +root_key_funds_buffer = 700_000 \ No newline at end of file diff --git a/integration-tests/testconfig/log_poller/log_poller.toml b/integration-tests/testconfig/log_poller/log_poller.toml index 5ead6c91e9c..0e87c0e1e01 100644 --- a/integration-tests/testconfig/log_poller/log_poller.toml +++ b/integration-tests/testconfig/log_poller/log_poller.toml @@ -6,6 +6,9 @@ consensus_layer="prysm" seconds_per_slot=4 slots_per_epoch=2 +[Seth] +ephemeral_addresses_number = 50 + # product defaults [LogPoller] [LogPoller.General] diff --git a/integration-tests/testconfig/testconfig.go b/integration-tests/testconfig/testconfig.go index fbaf80a2c88..abeca8e6eb2 100644 --- a/integration-tests/testconfig/testconfig.go +++ b/integration-tests/testconfig/testconfig.go @@ -4,6 +4,7 @@ import ( "embed" "encoding/base64" "fmt" + "math/big" "os" "slices" "strings" @@ -21,6 +22,7 @@ import ( ctf_config "github.com/smartcontractkit/chainlink-testing-framework/config" k8s_config "github.com/smartcontractkit/chainlink-testing-framework/k8s/config" "github.com/smartcontractkit/chainlink-testing-framework/logging" + 
"github.com/smartcontractkit/chainlink-testing-framework/utils/conversions" "github.com/smartcontractkit/chainlink-testing-framework/utils/osutil" a_config "github.com/smartcontractkit/chainlink/integration-tests/testconfig/automation" f_config "github.com/smartcontractkit/chainlink/integration-tests/testconfig/functions" @@ -369,6 +371,54 @@ func GetConfig(configurationName string, product Product) (TestConfig, error) { testConfig.Common = &Common{} } + isAnySimulated := false + for _, network := range testConfig.Network.SelectedNetworks { + if strings.Contains(strings.ToUpper(network), "SIMULATED") { + isAnySimulated = true + break + } + } + + if testConfig.Seth != nil && !isAnySimulated && (testConfig.Seth.EphemeralAddrs != nil && *testConfig.Seth.EphemeralAddrs != 0) { + testConfig.Seth.EphemeralAddrs = new(int64) + logger.Warn(). + Msg("Ephemeral addresses were enabled, but test was setup to run on a live network. Ephemeral addresses will be disabled.") + } + + if testConfig.Seth != nil && (testConfig.Seth.EphemeralAddrs != nil && *testConfig.Seth.EphemeralAddrs != 0) { + rootBuffer := testConfig.Seth.RootKeyFundsBuffer + if rootBuffer == nil { + rootBuffer = big.NewInt(0) + } + clNodeFunding := testConfig.Common.ChainlinkNodeFunding + if clNodeFunding == nil { + zero := 0.0 + clNodeFunding = &zero + } + minRequiredFunds := big.NewFloat(0).Mul(big.NewFloat(*clNodeFunding), big.NewFloat(6.0)) + + //add buffer to the minimum required funds, this isn't even a rough estimate, because we don't know how many contracts will be deployed from root key, but it's here to let you know that you should have some buffer + minRequiredFundsBuffered := big.NewFloat(0).Mul(minRequiredFunds, big.NewFloat(1.2)) + minRequiredFundsBufferedInt, _ := minRequiredFundsBuffered.Int(nil) + + rootBuffer64, _ := rootBuffer.Float64() + + if big.NewFloat(rootBuffer64).Cmp(minRequiredFundsBuffered) <= 0 { + msg := ` +The funds allocated to the root key buffer are below the minimum requirement, 
which could lead to insufficient funds for performing contract deployments. Please review and adjust your TOML configuration file to ensure that the root key buffer has adequate funds. Increase the fund settings as necessary to meet this requirement. + +Example: +[Seth] +root_key_funds_buffer = 1_000 +` + + logger.Warn(). + Str("Root key buffer (wei/ether)", fmt.Sprintf("%s/%s", rootBuffer.String(), conversions.WeiToEther(rootBuffer).Text('f', -1))). + Str("Minimum required funds (wei/ether)", fmt.Sprintf("%s/%s", minRequiredFundsBuffered.String(), conversions.WeiToEther(minRequiredFundsBufferedInt).Text('f', -1))). + Msg(msg) + } + } + logger.Debug().Msg("Correct test config constructed successfully") return testConfig, nil } diff --git a/integration-tests/testreporters/keeper_benchmark.go b/integration-tests/testreporters/keeper_benchmark.go index b878ff67a31..6d6221fac89 100644 --- a/integration-tests/testreporters/keeper_benchmark.go +++ b/integration-tests/testreporters/keeper_benchmark.go @@ -223,8 +223,9 @@ func (k *KeeperBenchmarkTestReporter) WriteReport(folderLocation string) error { k.Summary.Metrics.TotalTimesPerformed = totalPerformed k.Summary.Metrics.TotalStaleReports = totalStaleReports k.Summary.Metrics.PercentStale = pctStale - k.Summary.Metrics.AverageActualPerformsPerBlock = float64(totalPerformed) / float64(k.Summary.TestInputs["BlockRange"].(int64)) - + if k.Summary.TestInputs["BlockRange"] != nil { + k.Summary.Metrics.AverageActualPerformsPerBlock = float64(totalPerformed) / float64(k.Summary.TestInputs["BlockRange"].(int64)) + } // TODO: Set test expectations /* Expect(int64(pctWithinSLA)).Should(BeNumerically(">=", int64(80)), "Expected PercentWithinSLA to be greater than or equal to 80, but got %f", pctWithinSLA) Expect(int64(pctReverted)).Should(BeNumerically("<=", int64(10)), "Expected PercentRevert to be less than or equal to 10, but got %f", pctReverted) diff --git a/integration-tests/testsetups/keeper_benchmark.go 
b/integration-tests/testsetups/keeper_benchmark.go index 61a20ee9cd8..18c12d35c92 100644 --- a/integration-tests/testsetups/keeper_benchmark.go +++ b/integration-tests/testsetups/keeper_benchmark.go @@ -7,6 +7,7 @@ import ( "math/big" "os" "os/signal" + "sync/atomic" "syscall" "testing" "time" @@ -16,29 +17,29 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/slack-go/slack" + "github.com/smartcontractkit/seth" "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" "github.com/smartcontractkit/chainlink-testing-framework/blockchain" "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" "github.com/smartcontractkit/chainlink-testing-framework/logging" reportModel "github.com/smartcontractkit/chainlink-testing-framework/testreporters" + "github.com/smartcontractkit/chainlink-testing-framework/utils/ptr" "github.com/smartcontractkit/chainlink-testing-framework/utils/testcontext" "github.com/smartcontractkit/chainlink/integration-tests/actions" + actions_seth "github.com/smartcontractkit/chainlink/integration-tests/actions/seth" "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" + keepertestconfig "github.com/smartcontractkit/chainlink/integration-tests/testconfig/keeper" "github.com/smartcontractkit/chainlink/integration-tests/testreporters" tt "github.com/smartcontractkit/chainlink/integration-tests/types" - iregistry22 "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/i_automation_registry_master_wrapper_2_2" - iregistry21 "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" - 
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_wrapper1_1" - "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_wrapper1_2" - "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_wrapper1_3" - "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_wrapper2_0" ) // KeeperBenchmarkTest builds a test to check that chainlink nodes are able to upkeep a specified amount of Upkeep @@ -56,12 +57,11 @@ type KeeperBenchmarkTest struct { keeperConsumerContracts []contracts.AutomationConsumerBenchmark upkeepIDs [][]*big.Int - env *environment.Environment - namespace string - chainlinkNodes []*client.ChainlinkK8sClient - chainClient blockchain.EVMClient - testConfig tt.KeeperBenchmarkTestConfig - contractDeployer contracts.ContractDeployer + env *environment.Environment + namespace string + chainlinkNodes []*client.ChainlinkK8sClient + chainClient *seth.Client + testConfig tt.KeeperBenchmarkTestConfig linkToken contracts.LinkToken ethFeed contracts.MockETHLINKFeed @@ -91,7 +91,7 @@ type PreDeployedContracts struct { // KeeperBenchmarkTestInputs are all the required inputs for a Keeper Benchmark Test type KeeperBenchmarkTestInputs struct { - BlockchainClient blockchain.EVMClient // Client for the test to connect to the blockchain with + BlockchainClient *seth.Client // Client for the test to connect to the blockchain with KeeperRegistrySettings *contracts.KeeperRegistrySettings // Settings of each keeper contract Upkeeps *UpkeepConfig Contracts *PreDeployedContracts @@ -130,19 +130,24 @@ func (k *KeeperBenchmarkTest) Setup(env *environment.Environment, config tt.Keep k.upkeepIDs = make([][]*big.Int, len(inputs.RegistryVersions)) k.log.Debug().Interface("TestInputs", inputs).Msg("Setting up benchmark test") + // if not present disable it + if k.testConfig.GetKeeperConfig().Resiliency == nil { + k.testConfig.GetKeeperConfig().Resiliency = 
&keepertestconfig.ResiliencyConfig{ + ContractCallLimit: ptr.Ptr(uint(0)), + ContractCallInterval: ptr.Ptr(blockchain.StrDuration{Duration: 0 * time.Second}), + } + } + var err error // Connect to networks and prepare for contract deployment - k.contractDeployer, err = contracts.NewContractDeployer(k.chainClient, k.log) - require.NoError(k.t, err, "Building a new contract deployer shouldn't fail") k.chainlinkNodes, err = client.ConnectChainlinkNodes(k.env) require.NoError(k.t, err, "Connecting to chainlink nodes shouldn't fail") - k.chainClient.ParallelTransactions(true) if len(inputs.RegistryVersions) > 1 && !inputs.ForceSingleTxnKey { for nodeIndex, node := range k.chainlinkNodes { for registryIndex := 1; registryIndex < len(inputs.RegistryVersions); registryIndex++ { k.log.Debug().Str("URL", node.URL()).Int("NodeIndex", nodeIndex).Int("RegistryIndex", registryIndex).Msg("Create Tx key") - _, _, err := node.CreateTxKey("evm", k.Inputs.BlockchainClient.GetChainID().String()) + _, _, err := node.CreateTxKey("evm", fmt.Sprint(k.Inputs.BlockchainClient.ChainID)) require.NoError(k.t, err, "Creating transaction key shouldn't fail") } } @@ -151,41 +156,31 @@ func (k *KeeperBenchmarkTest) Setup(env *environment.Environment, config tt.Keep c := inputs.Contracts if common.IsHexAddress(c.LinkTokenAddress) { - k.linkToken, err = k.contractDeployer.LoadLinkToken(common.HexToAddress(c.LinkTokenAddress)) + _, err = contracts.LoadLinkTokenContract(k.log, k.chainClient, common.HexToAddress(c.LinkTokenAddress)) require.NoError(k.t, err, "Loading Link Token Contract shouldn't fail") } else { - k.linkToken, err = k.contractDeployer.DeployLinkTokenContract() - require.NoError(k.t, err, "Deploying Link Token Contract shouldn't fail") - err = k.chainClient.WaitForEvents() - require.NoError(k.t, err, "Failed waiting for LINK Contract deployment") + k.linkToken, err = contracts.DeployLinkTokenContract(k.log, k.chainClient) + require.NoError(k.t, err, "Deploying mock Link Token Contract 
feed shouldn't fail") } if common.IsHexAddress(c.EthFeedAddress) { - k.ethFeed, err = k.contractDeployer.LoadETHLINKFeed(common.HexToAddress(c.EthFeedAddress)) + _, err = contracts.LoadMockETHLINKFeed(k.chainClient, common.HexToAddress(c.EthFeedAddress)) require.NoError(k.t, err, "Loading ETH-Link feed Contract shouldn't fail") } else { - k.ethFeed, err = k.contractDeployer.DeployMockETHLINKFeed(big.NewInt(2e18)) + k.ethFeed, err = contracts.DeployMockETHLINKFeed(k.chainClient, big.NewInt(2e18)) require.NoError(k.t, err, "Deploying mock ETH-Link feed shouldn't fail") - err = k.chainClient.WaitForEvents() - require.NoError(k.t, err, "Failed waiting for ETH-Link feed Contract deployment") } if common.IsHexAddress(c.GasFeedAddress) { - k.gasFeed, err = k.contractDeployer.LoadGasFeed(common.HexToAddress(c.GasFeedAddress)) + k.gasFeed, err = contracts.LoadMockGASFeed(k.chainClient, common.HexToAddress(c.GasFeedAddress)) require.NoError(k.t, err, "Loading Gas feed Contract shouldn't fail") } else { - k.gasFeed, err = k.contractDeployer.DeployMockGasFeed(big.NewInt(2e11)) + k.gasFeed, err = contracts.DeployMockGASFeed(k.chainClient, big.NewInt(2e11)) require.NoError(k.t, err, "Deploying mock gas feed shouldn't fail") - err = k.chainClient.WaitForEvents() - require.NoError(k.t, err, "Failed waiting for mock gas feed Contract deployment") } - err = k.chainClient.WaitForEvents() - require.NoError(k.t, err, "Failed waiting for mock feeds to deploy") - for index := range inputs.RegistryVersions { k.log.Info().Int("Index", index).Msg("Starting Test Setup") - k.DeployBenchmarkKeeperContracts(index) } @@ -200,7 +195,7 @@ func (k *KeeperBenchmarkTest) Setup(env *environment.Environment, config tt.Keep if inputs.RegistryVersions[index] == ethereum.RegistryVersion_2_0 || inputs.RegistryVersions[index] == ethereum.RegistryVersion_2_1 || inputs.RegistryVersions[index] == ethereum.RegistryVersion_2_2 { nodesToFund = k.chainlinkNodes[1:] } - err = 
actions.FundChainlinkNodesAddress(nodesToFund, k.chainClient, k.Inputs.ChainlinkNodeFunding, index) + err = actions_seth.FundChainlinkNodesAtKeyIndexFromRootAddress(k.log, k.chainClient, contracts.ChainlinkK8sClientToChainlinkNodeWithKeysAndAddress(nodesToFund), k.Inputs.ChainlinkNodeFunding, index) require.NoError(k.t, err, "Funding Chainlink nodes shouldn't fail") } @@ -233,7 +228,7 @@ func (k *KeeperBenchmarkTest) Run() { "NumberOfRegistries": len(k.keeperRegistries), } inputs := k.Inputs - startingBlock, err := k.chainClient.LatestBlockNumber(testcontext.Get(k.t)) + startingBlock, err := k.chainClient.Client.BlockNumber(testcontext.Get(k.t)) require.NoError(k.t, err, "Error getting latest block number") k.startingBlock = big.NewInt(0).SetUint64(startingBlock) startTime := time.Now() @@ -241,7 +236,6 @@ func (k *KeeperBenchmarkTest) Run() { nodesWithoutBootstrap := k.chainlinkNodes[1:] for rIndex := range k.keeperRegistries { - var txKeyId = rIndex if inputs.ForceSingleTxnKey { txKeyId = 0 @@ -255,7 +249,7 @@ func (k *KeeperBenchmarkTest) Run() { rv := inputs.RegistryVersions[rIndex] // Send keeper jobs to registry and chainlink nodes if rv == ethereum.RegistryVersion_2_0 || rv == ethereum.RegistryVersion_2_1 || rv == ethereum.RegistryVersion_2_2 { - actions.CreateOCRKeeperJobs(k.t, k.chainlinkNodes, kr.Address(), k.chainClient.GetChainID().Int64(), txKeyId, rv) + actions.CreateOCRKeeperJobs(k.t, k.chainlinkNodes, kr.Address(), k.chainClient.ChainID, txKeyId, rv) if rv == ethereum.RegistryVersion_2_0 { err = kr.SetConfig(*inputs.KeeperRegistrySettings, ocrConfig) } else { @@ -265,41 +259,145 @@ func (k *KeeperBenchmarkTest) Run() { // Give time for OCR nodes to bootstrap time.Sleep(1 * time.Minute) } else { - actions.CreateKeeperJobsWithKeyIndex(k.t, k.chainlinkNodes, kr, txKeyId, ocrConfig, k.chainClient.GetChainID().String()) + actions.CreateKeeperJobsWithKeyIndex(k.t, k.chainlinkNodes, kr, txKeyId, ocrConfig, fmt.Sprint(k.chainClient.ChainID)) } - err = 
k.chainClient.WaitForEvents() - require.NoError(k.t, err, "Error waiting for registry setConfig") } + k.log.Info().Msgf("Waiting for %d blocks for all upkeeps to be performed", inputs.Upkeeps.BlockRange+inputs.UpkeepSLA) + + errgroup, errCtx := errgroup.WithContext(context.Background()) + + var startedObservations = atomic.Int32{} + var finishedObservations = atomic.Int32{} + + // We create as many channels as listening goroutines (1 per upkeep). In the background we will be fanning out + // headers that we get from a single channel connected to EVM node to all upkeep-specific channels. + headerCh := make(chan *blockchain.SafeEVMHeader, 10) + sub, err := k.chainClient.Client.Client().EthSubscribe(context.Background(), headerCh, "newHeads") + require.NoError(k.t, err, "Subscribing to new headers for upkeep observation shouldn't fail") + + totalNumberOfChannels := 0 + for rIndex := range k.keeperRegistries { + totalNumberOfChannels += len(k.upkeepIDs[rIndex]) + } + + contractChannels := make([]chan *blockchain.SafeEVMHeader, totalNumberOfChannels) + for idx := 0; idx < totalNumberOfChannels; idx++ { + contractChannels[idx] = make(chan *blockchain.SafeEVMHeader, 10) // Buffered just in case processing is slow + } + + // signals all goroutines to stop when subscription error occurs + stopAllGoroutinesCh := make(chan struct{}) + + // this goroutine fans out headers to goroutines in the background + // and exits when all goroutines are done or when an error occurs + go func() { + defer func() { + // close all fanning out channels at the very end + for _, ch := range contractChannels { + close(ch) + } + k.log.Debug().Msg("Closed header distribution channels") + }() + for { + select { + case header := <-headerCh: + k.log.Trace().Int64("Number", header.Number.Int64()).Msg("Fanning out new header") + for _, ch := range contractChannels { + ch <- header + } + // we don't really care if it was a success or an error, we just want to exit + // if it was an error, we will have 
an error in the main goroutine + case <-errCtx.Done(): + k.log.Debug().Msg("All goroutines finished.") + sub.Unsubscribe() + return + case err := <-sub.Err(): + // no need to unsubscribe, subscription errored + k.log.Error().Err(err).Msg("header subscription failed. Trying to reconnect...") + connectionLostAt := time.Now() + // we use infinite loop here on purposes, these nodes can be down for extended periods of time ¯\_(ツ)_/¯ + RECONNECT: + for { + sub, err = k.chainClient.Client.Client().EthSubscribe(context.Background(), headerCh, "newHeads") + if err == nil { + break RECONNECT + } + + time.Sleep(5 * time.Second) + } + k.log.Info().Str("Reconnect Time", time.Since(connectionLostAt).String()).Msg("Reconnected to header subscription") + } + } + }() + + currentChannelIndex := 0 for rIndex := range k.keeperRegistries { for index, upkeepID := range k.upkeepIDs[rIndex] { - k.chainClient.AddHeaderEventSubscription(fmt.Sprintf("Keeper Tracker %d %d", rIndex, index), - contracts.NewKeeperConsumerBenchmarkRoundConfirmer( - k.keeperConsumerContracts[rIndex], - k.keeperRegistries[rIndex], - upkeepID, + chIndex := currentChannelIndex + currentChannelIndex++ + upkeepIDCopy := upkeepID + registryIndex := rIndex + upkeepIndex := int64(index) + errgroup.Go(func() error { + startedObservations.Add(1) + k.log.Info().Int("Channel index", chIndex).Str("UpkeepID", upkeepIDCopy.String()).Msg("Starting upkeep observation") + + confirmer := contracts.NewKeeperConsumerBenchmarkUpkeepObserver( + k.keeperConsumerContracts[registryIndex], + k.keeperRegistries[registryIndex], + upkeepIDCopy, inputs.Upkeeps.BlockRange+inputs.UpkeepSLA, inputs.UpkeepSLA, &k.TestReporter, - int64(index), + upkeepIndex, inputs.Upkeeps.FirstEligibleBuffer, k.log, - ), - ) + ) + + k.log.Debug().Str("UpkeepID", upkeepIDCopy.String()).Msg("Stared listening to new headers for upkeep observation") + + for { + select { + case <-stopAllGoroutinesCh: // header listening failed, exit + return errors.New("header 
distribution channel closed") + case <-errCtx.Done(): //one of goroutines errored, shut down gracefully, no need to return error + k.log.Error().Err(errCtx.Err()).Str("UpkeepID", upkeepIDCopy.String()).Msg("Stopping obervations due to error in one of the goroutines") + return nil + case header := <-contractChannels[chIndex]: // new block, check if upkeep was performed + k.log.Trace().Interface("Header number", header.Number).Str("UpkeepID", upkeepIDCopy.String()).Msg("Started processing new header") + finished, headerErr := confirmer.ReceiveHeader(header) + if headerErr != nil { + k.log.Err(headerErr).Str("UpkeepID", upkeepIDCopy.String()).Msg("Error processing header") + return errors.Wrapf(headerErr, "error processing header for upkeep %s", upkeepIDCopy.String()) + } + + if finished { // observations should be completed as we are beyond block range, if there are not there's a bug in test code + finishedObservations.Add(1) + k.log.Info().Str("Done/Total", fmt.Sprintf("%d/%d", finishedObservations.Load(), startedObservations.Load())).Str("UpkeepID", upkeepIDCopy.String()).Msg("Upkeep observation completed") + + if confirmer.Complete() { + confirmer.LogDetails() + return nil + } + return fmt.Errorf("confimer has finished, but without completing observation, this should never happen. Review your code. 
UpkdeepID: %s", upkeepIDCopy.String()) + } + k.log.Trace().Interface("Header number", header.Number).Str("UpkeepID", upkeepIDCopy.String()).Msg("Finished processing new header") + } + } + }) } } - defer func() { // Cleanup the subscriptions - for rIndex := range k.keeperRegistries { - for index := range k.upkeepIDs[rIndex] { - k.chainClient.DeleteHeaderEventSubscription(fmt.Sprintf("Keeper Tracker %d %d", rIndex, index)) - } - } - }() + + if err := errgroup.Wait(); err != nil { + k.t.Fatalf("errored when waiting for upkeeps: %v", err) + } + + // Close header distribution channel once all observations are done + close(stopAllGoroutinesCh) // Main test loop k.observeUpkeepEvents() - err = k.chainClient.WaitForEvents() - require.NoError(k.t, err, "Error waiting for keeper subscriptions") // Collect logs for each registry to calculate test metrics // This test generates a LOT of logs, and we need to break up our reads, or risk getting rate-limited by the node @@ -329,7 +427,7 @@ func (k *KeeperBenchmarkTest) Run() { err = fmt.Errorf("initial error") // to ensure our for loop runs at least once for err != nil { ctx, cancel := context.WithTimeout(testcontext.Get(k.t), timeout) - logs, err = k.chainClient.FilterLogs(ctx, filterQuery) + logs, err = k.chainClient.Client.FilterLogs(ctx, filterQuery) cancel() if err != nil { k.log.Error(). 
@@ -391,7 +489,7 @@ func (k *KeeperBenchmarkTest) Run() { } k.TestReporter.Summary.Config.Geth, err = k.env.ResourcesSummary("app=geth") - if err != nil && k.Inputs.BlockchainClient.NetworkSimulated() { + if err != nil && k.Inputs.BlockchainClient.Cfg.IsSimulatedNetwork() { k.log.Error().Err(err).Msg("Error getting resource summary of geth node") } @@ -411,13 +509,13 @@ func (k *KeeperBenchmarkTest) Run() { // TearDownVals returns the networks that the test is running on func (k *KeeperBenchmarkTest) TearDownVals(t *testing.T) ( *testing.T, + *seth.Client, string, []*client.ChainlinkK8sClient, reportModel.TestReporter, reportModel.GrafanaURLProvider, - blockchain.EVMClient, ) { - return t, k.namespace, k.chainlinkNodes, &k.TestReporter, k.testConfig, k.chainClient + return t, k.chainClient, k.namespace, k.chainlinkNodes, &k.TestReporter, k.testConfig } // ********************* @@ -441,7 +539,7 @@ func (k *KeeperBenchmarkTest) observeUpkeepEvents() { } ctx, cancel := context.WithTimeout(testcontext.Get(k.t), 5*time.Second) - sub, err := k.chainClient.SubscribeFilterLogs(ctx, filterQuery, eventLogs) + sub, err := k.chainClient.Client.SubscribeFilterLogs(ctx, filterQuery, eventLogs) cancel() require.NoError(k.t, err, "Subscribing to upkeep performed events log shouldn't fail") @@ -464,7 +562,7 @@ func (k *KeeperBenchmarkTest) observeUpkeepEvents() { Msg("Error while subscribing to Keeper Event Logs. Resubscribing...") ctx, cancel := context.WithTimeout(testcontext.Get(k.t), backoff) - sub, err = k.chainClient.SubscribeFilterLogs(ctx, filterQuery, eventLogs) + sub, err = k.chainClient.Client.SubscribeFilterLogs(ctx, filterQuery, eventLogs) cancel() if err != nil { time.Sleep(backoff) @@ -518,10 +616,6 @@ func (k *KeeperBenchmarkTest) observeUpkeepEvents() { Str("Registry", k.keeperRegistries[rIndex].Address()). 
Msg("Got stale Upkeep report log on Registry") } - case <-k.chainClient.ConnectionIssue(): - k.log.Warn().Msg("RPC connection issue detected.") - case <-k.chainClient.ConnectionRestored(): - k.log.Info().Msg("RPC connection restored.") } } }() @@ -529,26 +623,7 @@ func (k *KeeperBenchmarkTest) observeUpkeepEvents() { // contractABI returns the ABI of the proper keeper registry contract func (k *KeeperBenchmarkTest) contractABI(rIndex int) *abi.ABI { - var ( - contractABI *abi.ABI - err error - ) - switch k.Inputs.RegistryVersions[rIndex] { - case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: - contractABI, err = keeper_registry_wrapper1_1.KeeperRegistryMetaData.GetAbi() - case ethereum.RegistryVersion_1_2: - contractABI, err = keeper_registry_wrapper1_2.KeeperRegistryMetaData.GetAbi() - case ethereum.RegistryVersion_1_3: - contractABI, err = keeper_registry_wrapper1_3.KeeperRegistryMetaData.GetAbi() - case ethereum.RegistryVersion_2_0: - contractABI, err = keeper_registry_wrapper2_0.KeeperRegistryMetaData.GetAbi() - case ethereum.RegistryVersion_2_1: - contractABI, err = iregistry21.IKeeperRegistryMasterMetaData.GetAbi() - case ethereum.RegistryVersion_2_2: - contractABI, err = iregistry22.IAutomationRegistryMasterMetaData.GetAbi() - default: - contractABI, err = keeper_registry_wrapper2_0.KeeperRegistryMetaData.GetAbi() - } + contractABI, err := contracts.GetRegistryContractABI(k.Inputs.RegistryVersions[rIndex]) require.NoError(k.t, err, "Getting contract ABI shouldn't fail") return contractABI } @@ -625,21 +700,21 @@ func (k *KeeperBenchmarkTest) DeployBenchmarkKeeperContracts(index int) { var ( registry contracts.KeeperRegistry registrar contracts.KeeperRegistrar + err error ) // Contract deployment is different for legacy keepers and OCR automation if registryVersion <= ethereum.RegistryVersion_1_3 { // Legacy keeper - v1.X - registry = actions.DeployKeeperRegistry(k.t, k.contractDeployer, k.chainClient, - &contracts.KeeperRegistryOpts{ - 
RegistryVersion: registryVersion, - LinkAddr: k.linkToken.Address(), - ETHFeedAddr: k.ethFeed.Address(), - GasFeedAddr: k.gasFeed.Address(), - TranscoderAddr: actions.ZeroAddress.Hex(), - RegistrarAddr: actions.ZeroAddress.Hex(), - Settings: *k.Inputs.KeeperRegistrySettings, - }, - ) + registry, err = contracts.DeployKeeperRegistry(k.chainClient, &contracts.KeeperRegistryOpts{ + RegistryVersion: registryVersion, + LinkAddr: k.linkToken.Address(), + ETHFeedAddr: k.ethFeed.Address(), + GasFeedAddr: k.gasFeed.Address(), + TranscoderAddr: actions.ZeroAddress.Hex(), + RegistrarAddr: actions.ZeroAddress.Hex(), + Settings: *k.Inputs.KeeperRegistrySettings, + }) + require.NoError(k.t, err, "Deploying registry contract shouldn't fail") // Fund the registry with 1 LINK * amount of AutomationConsumerBenchmark contracts err := k.linkToken.Transfer(registry.Address(), big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(int64(k.Inputs.Upkeeps.NumberOfUpkeeps)))) @@ -651,16 +726,19 @@ func (k *KeeperBenchmarkTest) DeployBenchmarkKeeperContracts(index int) { RegistryAddr: registry.Address(), MinLinkJuels: big.NewInt(0), } - registrar = actions.DeployKeeperRegistrar(k.t, registryVersion, k.linkToken, registrarSettings, k.contractDeployer, k.chainClient, registry) + + registrar, err = contracts.DeployKeeperRegistrar(k.chainClient, registryVersion, k.linkToken.Address(), registrarSettings) + require.NoError(k.t, err, "Funding keeper registrar contract shouldn't fail") } else { // OCR automation - v2.X - registry, registrar = actions.DeployAutoOCRRegistryAndRegistrar( - k.t, registryVersion, *k.Inputs.KeeperRegistrySettings, k.linkToken, k.contractDeployer, k.chainClient, + registry, registrar = actions_seth.DeployAutoOCRRegistryAndRegistrar( + k.t, k.chainClient, registryVersion, *k.Inputs.KeeperRegistrySettings, k.linkToken, ) // Fund the registry with LINK err := k.linkToken.Transfer(registry.Address(), big.NewInt(0).Mul(big.NewInt(1e18), 
big.NewInt(int64(k.Inputs.Upkeeps.NumberOfUpkeeps)))) require.NoError(k.t, err, "Funding keeper registry contract shouldn't fail") ocrConfig, err := actions.BuildAutoOCR2ConfigVars(k.t, k.chainlinkNodes[1:], *k.Inputs.KeeperRegistrySettings, registrar.Address(), k.Inputs.DeltaStage, registry.ChainModuleAddress(), registry.ReorgProtectionEnabled()) + require.NoError(k.t, err, "Building OCR config shouldn't fail") k.log.Debug().Interface("KeeperRegistrySettings", *k.Inputs.KeeperRegistrySettings).Interface("OCRConfig", ocrConfig).Msg("Config") require.NoError(k.t, err, "Error building OCR config vars") if registryVersion == ethereum.RegistryVersion_2_0 { @@ -669,7 +747,6 @@ func (k *KeeperBenchmarkTest) DeployBenchmarkKeeperContracts(index int) { err = registry.SetConfigTypeSafe(ocrConfig) } require.NoError(k.t, err, "Registry config should be be set successfully") - } consumer := k.DeployKeeperConsumersBenchmark() @@ -700,6 +777,7 @@ func (k *KeeperBenchmarkTest) DeployBenchmarkKeeperContracts(index int) { Type: uint256Ty, }, } + for i := 0; i < upkeep.NumberOfUpkeeps; i++ { upkeepAddresses = append(upkeepAddresses, consumer.Address()) // Compute check data @@ -721,7 +799,10 @@ func (k *KeeperBenchmarkTest) DeployBenchmarkKeeperContracts(index int) { linkFunds = big.NewInt(0).Add(linkFunds, minLinkBalance) - upkeepIds := actions.RegisterUpkeepContractsWithCheckData(k.t, k.linkToken, linkFunds, k.chainClient, uint32(upkeep.UpkeepGasLimit), registry, registrar, upkeep.NumberOfUpkeeps, upkeepAddresses, checkData, false, false) + err = actions_seth.DeployMultiCallAndFundDeploymentAddresses(k.chainClient, k.linkToken, upkeep.NumberOfUpkeeps, linkFunds) + require.NoError(k.t, err, "Sending link funds to deployment addresses shouldn't fail") + + upkeepIds := actions_seth.RegisterUpkeepContractsWithCheckData(k.t, k.chainClient, k.linkToken, linkFunds, uint32(upkeep.UpkeepGasLimit), registry, registrar, upkeep.NumberOfUpkeeps, upkeepAddresses, checkData, false, false) 
k.keeperRegistries[index] = registry k.keeperRegistrars[index] = registrar @@ -731,18 +812,29 @@ func (k *KeeperBenchmarkTest) DeployBenchmarkKeeperContracts(index int) { func (k *KeeperBenchmarkTest) DeployKeeperConsumersBenchmark() contracts.AutomationConsumerBenchmark { // Deploy consumer - keeperConsumerInstance, err := k.contractDeployer.DeployKeeperConsumerBenchmark() - if err != nil { - k.log.Error().Err(err).Msg("Deploying AutomationConsumerBenchmark instance %d shouldn't fail") - keeperConsumerInstance, err = k.contractDeployer.DeployKeeperConsumerBenchmark() - require.NoError(k.t, err, "Error deploying AutomationConsumerBenchmark") + var err error + var keeperConsumerInstance contracts.AutomationConsumerBenchmark + if *k.testConfig.GetKeeperConfig().Resiliency.ContractCallLimit != 0 && k.testConfig.GetKeeperConfig().Resiliency.ContractCallInterval.Duration != 0 { + maxRetryAttempts := *k.testConfig.GetKeeperConfig().Resiliency.ContractCallLimit + callRetryDelay := k.testConfig.GetKeeperConfig().Resiliency.ContractCallInterval.Duration + keeperConsumerInstance, err = contracts.DeployKeeperConsumerBenchmarkWithRetry(k.chainClient, k.log, maxRetryAttempts, callRetryDelay) + if err != nil { + k.log.Error().Err(err).Msg("Deploying AutomationConsumerBenchmark instance shouldn't fail") + keeperConsumerInstance, err = contracts.DeployKeeperConsumerBenchmarkWithRetry(k.chainClient, k.log, maxRetryAttempts, callRetryDelay) + require.NoError(k.t, err, "Error deploying AutomationConsumerBenchmark") + } + } else { + keeperConsumerInstance, err = contracts.DeployKeeperConsumerBenchmark(k.chainClient) + if err != nil { + k.log.Error().Err(err).Msg("Deploying AutomationConsumerBenchmark instance %d shouldn't fail") + keeperConsumerInstance, err = contracts.DeployKeeperConsumerBenchmark(k.chainClient) + require.NoError(k.t, err, "Error deploying AutomationConsumerBenchmark") + } } k.log.Debug(). Str("Contract Address", keeperConsumerInstance.Address()). 
Msg("Deployed Keeper Benchmark Contract") - err = k.chainClient.WaitForEvents() - require.NoError(k.t, err, "Failed waiting for to deploy all keeper consumer contracts") k.log.Info().Msg("Successfully deployed all Keeper Consumer Contracts") return keeperConsumerInstance diff --git a/integration-tests/testsetups/ocr.go b/integration-tests/testsetups/ocr.go index caf9da48e21..aaff28fcb10 100644 --- a/integration-tests/testsetups/ocr.go +++ b/integration-tests/testsetups/ocr.go @@ -168,15 +168,7 @@ func (o *OCRSoakTest) Setup(ocrTestConfig tt.OcrTestConfig) { ) network = utils.MustReplaceSimulatedNetworkUrlWithK8(o.log, network, *o.testEnvironment) - readSethCfg := ocrTestConfig.GetSethConfig() - require.NotNil(o.t, readSethCfg, "Seth config shouldn't be nil") - - sethCfg, err := utils.MergeSethAndEvmNetworkConfigs(network, *readSethCfg) - require.NoError(o.t, err, "Error merging seth and evm network configs") - err = utils.ValidateSethNetworkConfig(sethCfg.Network) - require.NoError(o.t, err, "Error validating seth network config") - - seth, err := seth.NewClientWithConfig(&sethCfg) + seth, err := actions_seth.GetChainClient(o.Config, network) require.NoError(o.t, err, "Error creating seth client") o.seth = seth diff --git a/integration-tests/universal/log_poller/helpers.go b/integration-tests/universal/log_poller/helpers.go index 950a4d6de9d..d732ad4af5d 100644 --- a/integration-tests/universal/log_poller/helpers.go +++ b/integration-tests/universal/log_poller/helpers.go @@ -9,6 +9,7 @@ import ( "sort" "strings" "sync" + "sync/atomic" "testing" "time" @@ -17,11 +18,11 @@ import ( "github.com/ethereum/go-ethereum/common" geth_types "github.com/ethereum/go-ethereum/core/types" "github.com/jmoiron/sqlx" - "github.com/onsi/gomega" "github.com/rs/zerolog" "github.com/scylladb/go-reflectx" "github.com/stretchr/testify/require" + "github.com/smartcontractkit/seth" "github.com/smartcontractkit/wasp" commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config" @@ 
-30,9 +31,9 @@ import ( "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/networks" "github.com/smartcontractkit/chainlink-testing-framework/utils/ptr" - "github.com/smartcontractkit/chainlink-testing-framework/utils/testcontext" "github.com/smartcontractkit/chainlink/integration-tests/actions" + actions_seth "github.com/smartcontractkit/chainlink/integration-tests/actions/seth" "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum" @@ -232,28 +233,40 @@ func getStringSlice(length int) []string { } // emitEvents emits events from the provided log emitter concurrently according to the provided config -func emitEvents(ctx context.Context, l zerolog.Logger, logEmitter *contracts.LogEmitter, cfg *lp_config.Config, wg *sync.WaitGroup, results chan LogEmitterChannel) { +func emitEvents(ctx context.Context, l zerolog.Logger, client *seth.Client, logEmitter *contracts.LogEmitter, cfg *lp_config.Config, wg *sync.WaitGroup, results chan LogEmitterChannel) { address := (*logEmitter).Address().String() - localCounter := 0 defer wg.Done() + + var executionGroup sync.WaitGroup + + // Atomic counter is used to keep track of the number of logs emitted + var atomicCounter = atomic.Int32{} + for i := 0; i < *cfg.LoopedConfig.ExecutionCount; i++ { + executionGroup.Add(1) + } + + var emitAllEvents = func() { + defer executionGroup.Done() + current := atomicCounter.Add(1) + for _, event := range cfg.General.EventsToEmit { select { case <-ctx.Done(): l.Warn().Str("Emitter address", address).Msg("Context cancelled, not emitting events") return default: - l.Debug().Str("Emitter address", address).Str("Event type", event.Name).Str("index", fmt.Sprintf("%d/%d", (i+1), cfg.LoopedConfig.ExecutionCount)).Msg("Emitting log from emitter") + l.Debug().Str("Emitter 
address", address).Str("Event type", event.Name).Str("index", fmt.Sprintf("%d/%d", current, *cfg.LoopedConfig.ExecutionCount)).Msg("Emitting log from emitter") var err error switch event.Name { case "Log1": - _, err = (*logEmitter).EmitLogInts(getIntSlice(*cfg.General.EventsPerTx)) + _, err = client.Decode((*logEmitter).EmitLogIntsFromKey(getIntSlice(*cfg.General.EventsPerTx), client.AnySyncedKey())) case "Log2": - _, err = (*logEmitter).EmitLogIntsIndexed(getIntSlice(*cfg.General.EventsPerTx)) + _, err = client.Decode((*logEmitter).EmitLogIntsIndexedFromKey(getIntSlice(*cfg.General.EventsPerTx), client.AnySyncedKey())) case "Log3": - _, err = (*logEmitter).EmitLogStrings(getStringSlice(*cfg.General.EventsPerTx)) + _, err = client.Decode((*logEmitter).EmitLogStringsFromKey(getStringSlice(*cfg.General.EventsPerTx), client.AnySyncedKey())) case "Log4": - _, err = (*logEmitter).EmitLogIntMultiIndexed(1, 1, *cfg.General.EventsPerTx) + _, err = client.Decode((*logEmitter).EmitLogIntMultiIndexedFromKey(1, 1, *cfg.General.EventsPerTx, client.AnySyncedKey())) default: err = fmt.Errorf("unknown event name: %s", event.Name) } @@ -264,17 +277,42 @@ func emitEvents(ctx context.Context, l zerolog.Logger, logEmitter *contracts.Log } return } - localCounter += *cfg.General.EventsPerTx - randomWait(*cfg.LoopedConfig.MinEmitWaitTimeMs, *cfg.LoopedConfig.MaxEmitWaitTimeMs) } - if (i+1)%10 == 0 { - l.Info().Str("Emitter address", address).Str("Index", fmt.Sprintf("%d/%d", i+1, *cfg.LoopedConfig.ExecutionCount)).Msg("Emitted all three events") + if (current)%10 == 0 { + l.Info().Str("Emitter address", address).Str("Index", fmt.Sprintf("%d/%d", current, *cfg.LoopedConfig.ExecutionCount)).Msgf("Emitted all %d events", len(cfg.General.EventsToEmit)) } } } + clientNumber := int(*client.Cfg.EphemeralAddrs) + emissionsPerClient := *cfg.LoopedConfig.ExecutionCount / clientNumber + extraEmissions := *cfg.LoopedConfig.ExecutionCount % clientNumber + + l.Debug().Str("Emitter address", address). 
+ Int("Total logs to emit", *cfg.LoopedConfig.ExecutionCount*len(cfg.General.EventsToEmit)*(*cfg.General.EventsPerTx)). + Int("Total clients", clientNumber). + Int("Emissions per client", emissionsPerClient). + Int("Extra emissions", extraEmissions). + Msg("Starting to emit events") + + for i := 0; i < clientNumber; i++ { + go func(key int) { + numTasks := emissionsPerClient + if key < extraEmissions { + numTasks++ + } + + for idx := 0; idx < numTasks; idx++ { + emitAllEvents() + } + }(i) + } + + executionGroup.Wait() + + localCounter := int(atomicCounter.Load()) * *cfg.General.EventsPerTx * len(cfg.General.EventsToEmit) l.Info().Str("Emitter address", address).Int("Total logs emitted", localCounter).Msg("Finished emitting events") results <- LogEmitterChannel{ @@ -498,7 +536,7 @@ func (m *MissingLogs) IsEmpty() bool { } // GetMissingLogs returns a map of CL node name to missing logs in that node compared to EVM node to which the provided evm client is connected -func GetMissingLogs(startBlock, endBlock int64, logEmitters []*contracts.LogEmitter, evmClient blockchain.EVMClient, clnodeCluster *test_env.ClCluster, l zerolog.Logger, coreLogger core_logger.SugaredLogger, cfg *lp_config.Config) (MissingLogs, error) { +func GetMissingLogs(startBlock, endBlock int64, logEmitters []*contracts.LogEmitter, client *seth.Client, clnodeCluster *test_env.ClCluster, l zerolog.Logger, coreLogger core_logger.SugaredLogger, cfg *lp_config.Config) (MissingLogs, error) { wg := &sync.WaitGroup{} type dbQueryResult struct { @@ -523,7 +561,7 @@ func GetMissingLogs(startBlock, endBlock int64, logEmitters []*contracts.LogEmit nodeName := clnodeCluster.Nodes[i].ContainerName l.Debug().Str("Node name", nodeName).Msg("Fetching log poller logs") - orm, db, err := NewORM(coreLogger, evmClient.GetChainID(), clnodeCluster.Nodes[i].PostgresDb) + orm, db, err := NewORM(coreLogger, big.NewInt(client.ChainID), clnodeCluster.Nodes[i].PostgresDb) if err != nil { r <- dbQueryResult{ err: err, @@ -594,7 
+632,7 @@ func GetMissingLogs(startBlock, endBlock int64, logEmitters []*contracts.LogEmit return nil, dbError } - allLogsInEVMNode, err := getEVMLogs(ctx, startBlock, endBlock, logEmitters, evmClient, l, cfg) + allLogsInEVMNode, err := getEVMLogs(ctx, startBlock, endBlock, logEmitters, client, l, cfg) if err != nil { return nil, err } @@ -641,7 +679,7 @@ func GetMissingLogs(startBlock, endBlock int64, logEmitters []*contracts.LogEmit if len(missingLogs) > 0 { l.Warn().Int("Count", len(missingLogs)).Str("Node name", nodeName).Msg("Some EMV logs were missing from CL node") } else { - l.Info().Str("Node name", nodeName).Msg("All EVM logs were found in CL node") + l.Info().Str("Node name", nodeName).Str("Missing/Total logs", fmt.Sprintf("%d/%d", len(missingLogs), evmLogCount)).Msg("All EVM logs were found in CL node") } result <- missingLogResult{ @@ -722,13 +760,13 @@ func PrintMissingLogsInfo(missingLogs map[string][]geth_types.Log, l zerolog.Log // getEVMLogs returns a slice of all logs emitted by the provided log emitters in the provided block range, // which are present in the EVM node to which the provided evm client is connected -func getEVMLogs(ctx context.Context, startBlock, endBlock int64, logEmitters []*contracts.LogEmitter, evmClient blockchain.EVMClient, l zerolog.Logger, cfg *lp_config.Config) ([]geth_types.Log, error) { +func getEVMLogs(ctx context.Context, startBlock, endBlock int64, logEmitters []*contracts.LogEmitter, client *seth.Client, l zerolog.Logger, cfg *lp_config.Config) ([]geth_types.Log, error) { allLogsInEVMNode := make([]geth_types.Log, 0) for j := 0; j < len(logEmitters); j++ { address := (*logEmitters[j]).Address() for _, event := range cfg.General.EventsToEmit { l.Debug().Str("Event name", event.Name).Str("Emitter address", address.String()).Msg("Fetching logs from EVM node") - logsInEVMNode, err := evmClient.FilterLogs(ctx, geth.FilterQuery{ + logsInEVMNode, err := client.Client.FilterLogs(ctx, geth.FilterQuery{ Addresses: 
[]common.Address{(address)}, Topics: [][]common.Hash{{event.ID}}, FromBlock: big.NewInt(startBlock), @@ -753,12 +791,12 @@ func getEVMLogs(ctx context.Context, startBlock, endBlock int64, logEmitters []* } // ExecuteGenerator executes the configured generator and returns the total number of logs emitted -func ExecuteGenerator(t *testing.T, cfg *lp_config.Config, logEmitters []*contracts.LogEmitter) (int, error) { +func ExecuteGenerator(t *testing.T, cfg *lp_config.Config, client *seth.Client, logEmitters []*contracts.LogEmitter) (int, error) { if *cfg.General.Generator == lp_config.GeneratorType_WASP { return runWaspGenerator(t, cfg, logEmitters) } - return runLoopedGenerator(t, cfg, logEmitters) + return runLoopedGenerator(t, cfg, client, logEmitters) } // runWaspGenerator runs the wasp generator and returns the total number of logs emitted @@ -820,7 +858,7 @@ func runWaspGenerator(t *testing.T, cfg *lp_config.Config, logEmitters []*contra } // runLoopedGenerator runs the looped generator and returns the total number of logs emitted -func runLoopedGenerator(t *testing.T, cfg *lp_config.Config, logEmitters []*contracts.LogEmitter) (int, error) { +func runLoopedGenerator(t *testing.T, cfg *lp_config.Config, client *seth.Client, logEmitters []*contracts.LogEmitter) (int, error) { l := logging.GetTestLogger(t) // Start emitting events in parallel, each contract is emitting events in a separate goroutine @@ -833,7 +871,9 @@ func runLoopedGenerator(t *testing.T, cfg *lp_config.Config, logEmitters []*cont for i := 0; i < len(logEmitters); i++ { wg.Add(1) - go emitEvents(ctx, l, logEmitters[i], cfg, wg, emitterCh) + go func(idx int) { + emitEvents(ctx, l, client, logEmitters[idx], cfg, wg, emitterCh) + }(i) } var emitErr error @@ -893,10 +933,10 @@ type PauseData struct { var ChaosPauses = []PauseData{} // chaosPauseSyncFn pauses ranom container of the provided type for a random amount of time between 5 and 20 seconds -func chaosPauseSyncFn(l zerolog.Logger, testEnv 
*test_env.CLClusterTestEnv, testConfig *tc.TestConfig, targetComponent string) ChaosPauseData { +func chaosPauseSyncFn(l zerolog.Logger, client *seth.Client, cluster *test_env.ClCluster, targetComponent string) ChaosPauseData { rand.New(rand.NewSource(time.Now().UnixNano())) - randomNode := testEnv.ClCluster.Nodes[rand.Intn(len(testEnv.ClCluster.Nodes)-1)+1] + randomNode := cluster.Nodes[rand.Intn(len(cluster.Nodes)-1)+1] var component ctf_test_env.EnvComponent switch strings.ToLower(targetComponent) { @@ -908,15 +948,9 @@ func chaosPauseSyncFn(l zerolog.Logger, testEnv *test_env.CLClusterTestEnv, test return ChaosPauseData{Err: fmt.Errorf("unknown component %s", targetComponent)} } - network := networks.MustGetSelectedNetworkConfig(testConfig.GetNetworkConfig())[0] - evmClient, err := testEnv.GetEVMClient(network.ChainID) - if err != nil { - return ChaosPauseData{Err: err} - } - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - pauseStartBlock, err := evmClient.LatestBlockNumber(ctx) + pauseStartBlock, err := client.Client.BlockNumber(ctx) if err != nil { return ChaosPauseData{Err: err} } @@ -932,7 +966,7 @@ func chaosPauseSyncFn(l zerolog.Logger, testEnv *test_env.CLClusterTestEnv, test ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - pauseEndBlock, err := evmClient.LatestBlockNumber(ctx) + pauseEndBlock, err := client.Client.BlockNumber(ctx) if err != nil { return ChaosPauseData{Err: err} } @@ -960,6 +994,12 @@ func ExecuteChaosExperiment(l zerolog.Logger, testEnv *test_env.CLClusterTestEnv chaosChan := make(chan ChaosPauseData, *testConfig.LogPoller.ChaosConfig.ExperimentCount) wg := &sync.WaitGroup{} + selectedNetwork := networks.MustGetSelectedNetworkConfig(testConfig.Network)[0] + sethClient, err := testEnv.GetSethClient(selectedNetwork.ChainID) + if err != nil { + errorCh <- err + } + go func() { // if we wanted to have more than 1 container paused, we'd need to make sure we 
aren't trying to pause an already paused one guardChan := make(chan struct{}, 1) @@ -975,7 +1015,7 @@ func ExecuteChaosExperiment(l zerolog.Logger, testEnv *test_env.CLClusterTestEnv current := i + 1 l.Info().Str("Current/Total", fmt.Sprintf("%d/%d", current, *testConfig.LogPoller.ChaosConfig.ExperimentCount)).Msg("Done with experiment") }() - chaosChan <- chaosPauseSyncFn(l, testEnv, testConfig, *testConfig.LogPoller.ChaosConfig.TargetComponent) + chaosChan <- chaosPauseSyncFn(l, sethClient, testEnv.ClCluster, *testConfig.LogPoller.ChaosConfig.TargetComponent) time.Sleep(10 * time.Second) }() } @@ -1077,13 +1117,13 @@ func SetupLogPollerTestDocker( testConfig *tc.TestConfig, logScannerSettings test_env.ChainlinkNodeLogScannerSettings, ) ( - blockchain.EVMClient, + *seth.Client, []*client.ChainlinkClient, - contracts.ContractDeployer, contracts.LinkToken, contracts.KeeperRegistry, contracts.KeeperRegistrar, *test_env.CLClusterTestEnv, + *blockchain.EVMNetwork, ) { l := logging.GetTestLogger(t) @@ -1120,12 +1160,14 @@ func SetupLogPollerTestDocker( return chain } - var evmClientSettingsFn = func(network *blockchain.EVMNetwork) *blockchain.EVMNetwork { + var evmNetworkSettingsFn = func(network *blockchain.EVMNetwork) *blockchain.EVMNetwork { network.FinalityDepth = uint64(finalityDepth) network.FinalityTag = finalityTagEnabled return network } + evmNetworkSettingsFn(&network) + privateNetwork, err := actions.EthereumNetworkConfigFromConfig(l, testConfig) require.NoError(t, err, "Error building ethereum network config") @@ -1137,13 +1179,17 @@ func SetupLogPollerTestDocker( WithCLNodeConfig(clNodeConfig). WithFunding(big.NewFloat(chainlinkNodeFunding)). WithChainOptions(logPolllerSettingsFn). - EVMClientNetworkOptions(evmClientSettingsFn). + EVMNetworkOptions(evmNetworkSettingsFn). WithChainlinkNodeLogScanner(logScannerSettings). WithStandardCleanup(). + WithSeth(). 
Build() require.NoError(t, err, "Error deploying test environment") - env.ParallelTransactions(true) + selectedNetwork := networks.MustGetSelectedNetworkConfig(testConfig.Network)[0] + chainClient, err := env.GetSethClient(selectedNetwork.ChainID) + require.NoError(t, err, "Error getting seth client") + nodeClients := env.ClCluster.NodeAPIs() workerNodes := nodeClients[1:] @@ -1152,7 +1198,7 @@ func SetupLogPollerTestDocker( switch network.ChainID { // Simulated case 1337: - linkToken, err = env.ContractDeployer.DeployLinkTokenContract() + linkToken, err = contracts.DeployLinkTokenContract(l, chainClient) // Ethereum Sepolia case 11155111: linkToken, err = env.ContractLoader.LoadLINKToken("0x779877A7B0D9E8603169DdbD7836e478b4624789") @@ -1164,26 +1210,22 @@ func SetupLogPollerTestDocker( } require.NoError(t, err, "Error loading/deploying LINK token") - evmClient, err := env.GetEVMClient(network.ChainID) - require.NoError(t, err, "Getting EVM client shouldn't fail") - - linkBalance, err := evmClient.BalanceAt(context.Background(), common.HexToAddress(linkToken.Address())) + linkBalance, err := linkToken.BalanceOf(context.Background(), chainClient.MustGetRootKeyAddress().Hex()) require.NoError(t, err, "Error getting LINK balance") l.Info().Str("Balance", big.NewInt(0).Div(linkBalance, big.NewInt(1e18)).String()).Msg("LINK balance") minLinkBalanceSingleNode := big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(9)) minLinkBalance := big.NewInt(0).Mul(minLinkBalanceSingleNode, big.NewInt(int64(upkeepsNeeded))) - if minLinkBalance.Cmp(linkBalance) < 0 { - require.FailNowf(t, "Not enough LINK", "Not enough LINK to run the test. Need at least %s", big.NewInt(0).Div(minLinkBalance, big.NewInt(1e18)).String()) + if linkBalance.Cmp(minLinkBalance) < 0 { + require.FailNowf(t, "Not enough LINK", "Not enough LINK to run the test. Need at least %s. 
but has only %s", big.NewInt(0).Div(minLinkBalance, big.NewInt(1e18)).String(), big.NewInt(0).Div(linkBalance, big.NewInt(1e18)).String()) } - registry, registrar := actions.DeployAutoOCRRegistryAndRegistrar( + registry, registrar := actions_seth.DeployAutoOCRRegistryAndRegistrar( t, + chainClient, registryVersion, registryConfig, linkToken, - env.ContractDeployer, - evmClient, ) // Fund the registry with LINK @@ -1196,46 +1238,21 @@ func SetupLogPollerTestDocker( require.NoError(t, err, "Error building OCR config vars") err = registry.SetConfigTypeSafe(ocrConfig) require.NoError(t, err, "Registry config should be set successfully") - require.NoError(t, evmClient.WaitForEvents(), "Waiting for config to be set") - return evmClient, nodeClients, env.ContractDeployer, linkToken, registry, registrar, env + return chainClient, nodeClients, linkToken, registry, registrar, env, &network } -// UploadLogEmitterContractsAndWaitForFinalisation uploads the configured number of log emitter contracts and waits for the upload blocks to be finalised -func UploadLogEmitterContractsAndWaitForFinalisation(l zerolog.Logger, t *testing.T, testEnv *test_env.CLClusterTestEnv, testConfig *tc.TestConfig) []*contracts.LogEmitter { +// UploadLogEmitterContracts uploads the configured number of log emitter contracts +func UploadLogEmitterContracts(l zerolog.Logger, t *testing.T, client *seth.Client, testConfig *tc.TestConfig) []*contracts.LogEmitter { logEmitters := make([]*contracts.LogEmitter, 0) for i := 0; i < *testConfig.LogPoller.General.Contracts; i++ { - logEmitter, err := testEnv.ContractDeployer.DeployLogEmitterContract() + logEmitter, err := contracts.DeployLogEmitterContract(l, client) logEmitters = append(logEmitters, &logEmitter) require.NoError(t, err, "Error deploying log emitter contract") l.Info().Str("Contract address", logEmitter.Address().Hex()).Msg("Log emitter contract deployed") time.Sleep(200 * time.Millisecond) } - network := 
networks.MustGetSelectedNetworkConfig(testConfig.GetNetworkConfig())[0] - evmClient, err := testEnv.GetEVMClient(network.ChainID) - require.NoError(t, err, "Error getting EVM client") - - afterUploadBlock, err := evmClient.LatestBlockNumber(testcontext.Get(t)) - require.NoError(t, err, "Error getting latest block number") - - gom := gomega.NewGomegaWithT(t) - gom.Eventually(func(g gomega.Gomega) { - targetBlockNumber := int64(afterUploadBlock + 1) - finalized, err := evmClient.GetLatestFinalizedBlockHeader(testcontext.Get(t)) - if err != nil { - l.Warn().Err(err).Msg("Error checking if contract were uploaded. Retrying...") - return - } - finalizedBlockNumber := finalized.Number.Int64() - - if finalizedBlockNumber < targetBlockNumber { - l.Debug().Int64("Finalized block", finalized.Number.Int64()).Int64("After upload block", int64(afterUploadBlock+1)).Msg("Waiting for contract upload to finalise") - } - - g.Expect(finalizedBlockNumber >= targetBlockNumber).To(gomega.BeTrue(), "Contract upload did not finalize in time") - }, "2m", "10s").Should(gomega.Succeed()) - return logEmitters } diff --git a/integration-tests/utils/seth.go b/integration-tests/utils/seth.go index ef9b331a447..cc5f1c60485 100644 --- a/integration-tests/utils/seth.go +++ b/integration-tests/utils/seth.go @@ -67,13 +67,25 @@ func MustReplaceSimulatedNetworkUrlWithK8(l zerolog.Logger, network blockchain.E return network } - if _, ok := testEnvironment.URLs["Simulated Geth"]; !ok { + networkKeys := []string{"Simulated Geth", "Simulated-Geth"} + var keyToUse string + + for _, key := range networkKeys { + _, ok := testEnvironment.URLs[key] + if ok { + keyToUse = key + break + } + } + + if keyToUse == "" { for k := range testEnvironment.URLs { l.Info().Str("Network", k).Msg("Available networks") } panic("no network settings for Simulated Geth") } - network.URLs = testEnvironment.URLs["Simulated Geth"] + + network.URLs = testEnvironment.URLs[keyToUse] return network } diff --git 
a/integration-tests/wrappers/contract_caller.go b/integration-tests/wrappers/contract_caller.go index 4be76ee74a1..0eea760e024 100644 --- a/integration-tests/wrappers/contract_caller.go +++ b/integration-tests/wrappers/contract_caller.go @@ -2,18 +2,25 @@ package wrappers import ( "context" + "fmt" "math/big" + "strings" + "time" + "github.com/avast/retry-go/v4" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/rpc" + "github.com/pkg/errors" + "github.com/rs/zerolog" "github.com/smartcontractkit/seth" - evmClient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" - "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + + evmClient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" ) // WrappedContractBackend is a wrapper around the go-ethereum ContractBackend interface. It's a thin wrapper @@ -21,8 +28,12 @@ import ( // methods that send data both in "input" and "data" field for backwards compatibility with older clients. Other methods // are passed through to the underlying client. type WrappedContractBackend struct { - evmClient blockchain.EVMClient - sethClient *seth.Client + evmClient blockchain.EVMClient + sethClient *seth.Client + logger zerolog.Logger + maxAttempts uint + retryDelay time.Duration + withRetries bool } // MustNewWrappedContractBackend creates a new WrappedContractBackend with the given clients @@ -37,6 +48,22 @@ func MustNewWrappedContractBackend(evmClient blockchain.EVMClient, sethClient *s } } +// MustNewRetryingWrappedContractBackend creates a new WrappedContractBackend, which retries read-only operations every 'retryDelay' until +// 'maxAttempts' are reached. It works only with Seth, because EVMClient has some retrying capability already included. 
+func MustNewRetryingWrappedContractBackend(sethClient *seth.Client, logger zerolog.Logger, maxAttempts uint, retryDelay time.Duration) *WrappedContractBackend { + if sethClient == nil { + panic("Must provide a Seth client reference") + } + + return &WrappedContractBackend{ + sethClient: sethClient, + logger: logger, + maxAttempts: maxAttempts, + retryDelay: retryDelay, + withRetries: true, + } +} + func (w *WrappedContractBackend) getGethClient() *ethclient.Client { if w.sethClient != nil { return w.sethClient.Client @@ -50,81 +77,261 @@ func (w *WrappedContractBackend) getGethClient() *ethclient.Client { } func (w *WrappedContractBackend) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) { - client := w.getGethClient() - return client.CodeAt(ctx, contract, blockNumber) + if ctxErr := w.getErrorFromContext(ctx); ctxErr != nil { + return nil, errors.Wrapf(ctxErr, "the context you passed had an error set. Won't call CodeAt") + } + + var fn = func() ([]byte, error) { + client := w.getGethClient() + return client.CodeAt(ctx, contract, blockNumber) + } + + ethHeadBanger := newEthHeadBangerFromWrapper[[]byte](w) + return ethHeadBanger.retry("CodeAt", fn) } func (w *WrappedContractBackend) PendingCodeAt(ctx context.Context, contract common.Address) ([]byte, error) { - client := w.getGethClient() - return client.PendingCodeAt(ctx, contract) + if ctxErr := w.getErrorFromContext(ctx); ctxErr != nil { + return nil, errors.Wrapf(ctxErr, "the context you passed had an error set. 
Won't call PendingCodeAt") + } + + var fn = func() ([]byte, error) { + client := w.getGethClient() + return client.PendingCodeAt(ctx, contract) + } + + ethHeadBanger := newEthHeadBangerFromWrapper[[]byte](w) + return ethHeadBanger.retry("PendingCodeAt", fn) } func (w *WrappedContractBackend) CodeAtHash(ctx context.Context, contract common.Address, blockHash common.Hash) ([]byte, error) { - client := w.getGethClient() - return client.CodeAtHash(ctx, contract, blockHash) + if ctxErr := w.getErrorFromContext(ctx); ctxErr != nil { + return nil, errors.Wrapf(ctxErr, "the context you passed had an error set. Won't call CodeAtHash") + } + + var fn = func() ([]byte, error) { + client := w.getGethClient() + return client.CodeAtHash(ctx, contract, blockHash) + } + + ethHeadBanger := newEthHeadBangerFromWrapper[[]byte](w) + return ethHeadBanger.retry("CodeAtHash", fn) } func (w *WrappedContractBackend) CallContractAtHash(ctx context.Context, call ethereum.CallMsg, blockHash common.Hash) ([]byte, error) { - client := w.getGethClient() - return client.CallContractAtHash(ctx, call, blockHash) + if ctxErr := w.getErrorFromContext(ctx); ctxErr != nil { + return nil, errors.Wrapf(ctxErr, "the context you passed had an error set. Won't call CallContractAtHash") + } + + var fn = func() ([]byte, error) { + client := w.getGethClient() + return client.CallContractAtHash(ctx, call, blockHash) + } + + ethHeadBanger := newEthHeadBangerFromWrapper[[]byte](w) + return ethHeadBanger.retry("CallContractAtHash", fn) } func (w *WrappedContractBackend) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { - client := w.getGethClient() - return client.HeaderByNumber(ctx, number) + if ctxErr := w.getErrorFromContext(ctx); ctxErr != nil { + return nil, errors.Wrapf(ctxErr, "the context you passed had an error set. 
Won't call HeaderByNumber") + } + + var fn = func() (*types.Header, error) { + client := w.getGethClient() + return client.HeaderByNumber(ctx, number) + } + + ethHeadBanger := newEthHeadBangerFromWrapper[*types.Header](w) + return ethHeadBanger.retry("HeaderByNumber", fn) } func (w *WrappedContractBackend) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { - client := w.getGethClient() - return client.PendingNonceAt(ctx, account) + if ctxErr := w.getErrorFromContext(ctx); ctxErr != nil { + return 0, errors.Wrapf(ctxErr, "the context you passed had an error set. Won't call PendingNonceAt") + } + + var fn = func() (uint64, error) { + client := w.getGethClient() + return client.PendingNonceAt(ctx, account) + } + + ethHeadBanger := newEthHeadBangerFromWrapper[uint64](w) + return ethHeadBanger.retry("PendingNonceAt", fn) } func (w *WrappedContractBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) { - client := w.getGethClient() - return client.SuggestGasPrice(ctx) + if ctxErr := w.getErrorFromContext(ctx); ctxErr != nil { + return nil, errors.Wrapf(ctxErr, "the context you passed had an error set. Won't call SuggestGasPrice") + } + + var fn = func() (*big.Int, error) { + client := w.getGethClient() + return client.SuggestGasPrice(ctx) + } + + ethHeadBanger := newEthHeadBangerFromWrapper[*big.Int](w) + return ethHeadBanger.retry("SuggestGasPrice", fn) } func (w *WrappedContractBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { - client := w.getGethClient() - return client.SuggestGasTipCap(ctx) + if ctxErr := w.getErrorFromContext(ctx); ctxErr != nil { + return nil, errors.Wrapf(ctxErr, "the context you passed had an error set. 
Won't call SuggestGasTipCap") + } + + var fn = func() (*big.Int, error) { + client := w.getGethClient() + return client.SuggestGasTipCap(ctx) + } + + ethHeadBanger := newEthHeadBangerFromWrapper[*big.Int](w) + return ethHeadBanger.retry("SuggestGasTipCap", fn) } func (w *WrappedContractBackend) EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) { - client := w.getGethClient() - return client.EstimateGas(ctx, call) + if ctxErr := w.getErrorFromContext(ctx); ctxErr != nil { + return 0, errors.Wrapf(ctxErr, "the context you passed had an error set. Won't call EstimateGas") + } + + var fn = func() (uint64, error) { + client := w.getGethClient() + return client.EstimateGas(ctx, call) + } + + ethHeadBanger := newEthHeadBangerFromWrapper[uint64](w) + return ethHeadBanger.retry("EstimateGas", fn) } func (w *WrappedContractBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error { + if ctxErr := w.getErrorFromContext(ctx); ctxErr != nil { + return errors.Wrapf(ctxErr, "the context you passed had an error set. Won't call SendTransaction") + } + client := w.getGethClient() return client.SendTransaction(ctx, tx) } func (w *WrappedContractBackend) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) { - client := w.getGethClient() - return client.FilterLogs(ctx, query) + if ctxErr := w.getErrorFromContext(ctx); ctxErr != nil { + return nil, errors.Wrapf(ctxErr, "the context you passed had an error set. 
Won't call FilterLogs") + } + + var fn = func() ([]types.Log, error) { + client := w.getGethClient() + return client.FilterLogs(ctx, query) + } + + ethHeadBanger := newEthHeadBangerFromWrapper[[]types.Log](w) + return ethHeadBanger.retry("FilterLogs", fn) } func (w *WrappedContractBackend) SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { - client := w.getGethClient() - return client.SubscribeFilterLogs(ctx, query, ch) + if ctxErr := w.getErrorFromContext(ctx); ctxErr != nil { + return nil, errors.Wrapf(ctxErr, "the context you passed had an error set. Won't call SubscribeFilterLogs") + } + + var fn = func() (ethereum.Subscription, error) { + client := w.getGethClient() + return client.SubscribeFilterLogs(ctx, query, ch) + } + + ethHeadBanger := newEthHeadBangerFromWrapper[ethereum.Subscription](w) + return ethHeadBanger.retry("SubscribeFilterLogs", fn) } func (w *WrappedContractBackend) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { - var hex hexutil.Bytes - client := w.getGethClient() - err := client.Client().CallContext(ctx, &hex, "eth_call", evmClient.ToBackwardCompatibleCallArg(msg), evmClient.ToBackwardCompatibleBlockNumArg(blockNumber)) - if err != nil { - return nil, err + if ctxErr := w.getErrorFromContext(ctx); ctxErr != nil { + return nil, errors.Wrapf(ctxErr, "the context you passed had an error set. 
Won't call CallContract") } - return hex, nil + + var fn = func() ([]byte, error) { + var hex hexutil.Bytes + client := w.getGethClient() + err := client.Client().CallContext(ctx, &hex, "eth_call", evmClient.ToBackwardCompatibleCallArg(msg), evmClient.ToBackwardCompatibleBlockNumArg(blockNumber)) + if err != nil { + return nil, err + } + return hex, nil + } + + ethHeadBanger := newEthHeadBangerFromWrapper[[]byte](w) + return ethHeadBanger.retry("CallContract", fn) } func (w *WrappedContractBackend) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) { - var hex hexutil.Bytes - client := w.getGethClient() - err := client.Client().CallContext(ctx, &hex, "eth_call", evmClient.ToBackwardCompatibleCallArg(msg), "pending") - if err != nil { - return nil, err + if ctxErr := w.getErrorFromContext(ctx); ctxErr != nil { + return nil, errors.Wrapf(ctxErr, "the context you passed had an error set. Won't call PendingCallContract") + } + + var fn = func() ([]byte, error) { + var hex hexutil.Bytes + client := w.getGethClient() + err := client.Client().CallContext(ctx, &hex, "eth_call", evmClient.ToBackwardCompatibleCallArg(msg), "pending") + if err != nil { + return nil, err + } + return hex, nil + } + + ethHeadBanger := newEthHeadBangerFromWrapper[[]byte](w) + return ethHeadBanger.retry("PendingCallContract", fn) +} + +func (w *WrappedContractBackend) getErrorFromContext(ctx context.Context) error { + if ctxErr := ctx.Value(seth.ContextErrorKey{}); ctxErr != nil { + if v, ok := ctxErr.(error); ok { + return v + } + return errors.Wrapf(errors.New("unknown error type"), "error in context: %v", ctxErr) + } + + return nil +} + +// ethHeadBanger is just a fancy name for a struct that retries a function a number of times with a delay between each attempt +type ethHeadBanger[ReturnType any] struct { + logger zerolog.Logger + maxAttempts uint + retryDelay time.Duration +} + +func newEthHeadBangerFromWrapper[ResultType any](wrapper *WrappedContractBackend) 
ethHeadBanger[ResultType] { + return ethHeadBanger[ResultType]{ + logger: wrapper.logger, + maxAttempts: wrapper.maxAttempts, + retryDelay: wrapper.retryDelay, } - return hex, nil +} + +func (e ethHeadBanger[ReturnType]) retry(functionName string, fnToRetry func() (ReturnType, error)) (ReturnType, error) { + var result ReturnType + err := retry.Do(func() error { + var err error + result, err = fnToRetry() + + return err + }, + retry.RetryIf(func(err error) bool { + if err.Error() == rpc.ErrClientQuit.Error() || + err.Error() == rpc.ErrBadResult.Error() || + strings.Contains(err.Error(), "connection") || + strings.Contains(err.Error(), "EOF") { + return true + } + + e.logger.Error().Err(err).Msgf("Error in %s. Not retrying.", functionName) + + return false + }), + retry.Attempts(e.maxAttempts), + retry.Delay(e.retryDelay), + retry.OnRetry(func(n uint, err error) { + e.logger.Info(). + Str("Attempt", fmt.Sprintf("%d/%d", n+1, e.maxAttempts)). + Str("Error", err.Error()). + Msgf("Retrying %s", functionName) + }), + ) + + return result, err }