diff --git a/.github/actions/setup-create-base64-config-ccip/action.yml b/.github/actions/setup-create-base64-config-ccip/action.yml new file mode 100644 index 00000000000..88d9fe8078c --- /dev/null +++ b/.github/actions/setup-create-base64-config-ccip/action.yml @@ -0,0 +1,187 @@ +name: Create Base64 Config for CCIP Tests +description: A composite action that creates a base64-encoded config to be used by ccip integration tests + +inputs: + runId: + description: The run id + existingNamespace: + description: If test needs to run against already deployed namespace + testLogCollect: + description: Whether to always collect logs, even for passing tests + default: "false" + selectedNetworks: + description: The networks to run tests against + chainlinkImage: + description: The chainlink image to use + default: "public.ecr.aws/chainlink/chainlink" + chainlinkVersion: + description: The git commit sha to use for the image tag + upgradeImage: + description: The chainlink image to upgrade to + default: "" + upgradeVersion: + description: The git commit sha to use for the image tag + lokiEndpoint: + description: Loki push endpoint + lokiTenantId: + description: Loki tenant id + lokiBasicAuth: + description: Loki basic auth + logstreamLogTargets: + description: Where to send logs (e.g. file, loki) + grafanaUrl: + description: Grafana URL + grafanaDashboardUrl: + description: Grafana dashboard URL + grafanaBearerToken: + description: Grafana bearer token + customEvmNodes: + description: Custom EVM nodes to use in key=value format, where key is chain id and value is docker image to use. If they are provided the number of networksSelected must be equal to the number of customEvmNodes + evmNodeLogLevel: + description: Log level for the custom EVM nodes + default: "info" + +runs: + using: composite + steps: + - name: Prepare Base64 TOML override + shell: bash + id: base64-config-override + env: + RUN_ID: ${{ inputs.runId }} + SELECTED_NETWORKS: ${{ inputs.selectedNetworks }} + EXISTING_NAMESPACE: ${{ inputs.existingNamespace }} + TEST_LOG_COLLECT: ${{ inputs.testLogCollect }} + CHAINLINK_IMAGE: ${{ inputs.chainlinkImage }} + CHAINLINK_VERSION: ${{ inputs.chainlinkVersion }} + UPGRADE_IMAGE: ${{ inputs.upgradeImage }} + UPGRADE_VERSION: ${{ inputs.upgradeVersion }} + LOKI_ENDPOINT: ${{ inputs.lokiEndpoint }} + LOKI_TENANT_ID: ${{ inputs.lokiTenantId }} + LOKI_BASIC_AUTH: ${{ inputs.lokiBasicAuth }} + LOGSTREAM_LOG_TARGETS: ${{ inputs.logstreamLogTargets }} + GRAFANA_URL: ${{ inputs.grafanaUrl }} + GRAFANA_DASHBOARD_URL: ${{ inputs.grafanaDashboardUrl }} + GRAFANA_BEARER_TOKEN: ${{ inputs.grafanaBearerToken }} + CUSTOM_EVM_NODES: ${{ inputs.customEvmNodes }} + EVM_NODE_LOG_LEVEL: ${{ inputs.evmNodeLogLevel }} + run: | + echo ::add-mask::$CHAINLINK_IMAGE + function convert_to_toml_array() { + local IFS=',' + local input_array=($1) + local toml_array_format="[" + + for element in "${input_array[@]}"; do + toml_array_format+="\"$element\"," + done + + toml_array_format="${toml_array_format%,}]" + echo "$toml_array_format" + } + + selected_networks=$(convert_to_toml_array "$SELECTED_NETWORKS") + log_targets=$(convert_to_toml_array "$LOGSTREAM_LOG_TARGETS") + + if [ -n "$TEST_LOG_COLLECT" ]; then + test_log_collect=true + else + test_log_collect=false + fi + + # make sure the number of networks and nodes match + IFS=',' read -r -a networks_array <<< "$SELECTED_NETWORKS" + IFS=',' read -r -a nodes_array <<< "$CUSTOM_EVM_NODES" + + networks_count=${#networks_array[@]} + nodes_count=${#nodes_array[@]} + + # 
Initialize or clear the custom_nodes_toml variable + custom_nodes_toml="" + + # Check if the number of CUSTOM_EVM_NODES is zero + if [ $nodes_count -eq 0 ]; then + echo "The number of CUSTOM_EVM_NODES is zero, won't output any custom private Ethereum network configurations." + else + if [ $networks_count -ne $nodes_count ]; then + echo "The number of elements in SELECTED_NETWORKS (${networks_count}) does not match the number of elements in CUSTOM_EVM_NODES (${nodes_count})." + exit 1 + else + for i in "${!networks_array[@]}"; do + IFS='=' read -r chain_id docker_image <<< "${nodes_array[i]}" + custom_nodes_toml+=" + [CCIP.Env.PrivateEthereumNetworks.${networks_array[i]}] + ethereum_version=\"\" + execution_layer=\"\" + + [CCIP.Env.PrivateEthereumNetworks.${networks_array[i]}.EthereumChainConfig] + seconds_per_slot=3 + slots_per_epoch=2 + genesis_delay=15 + validator_count=4 + chain_id=${chain_id} + addresses_to_fund=[\"0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266\", \"0x70997970C51812dc3A010C7d01b50e0d17dc79C8\"] + node_log_level=\"${EVM_NODE_LOG_LEVEL}\" + + [CCIP.Env.PrivateEthereumNetworks.${networks_array[i]}.EthereumChainConfig.HardForkEpochs] + Deneb=500 + + [CCIP.Env.PrivateEthereumNetworks.${networks_array[i]}.CustomDockerImages] + execution_layer=\"${docker_image}\" + " + done + fi + fi + + grafana_bearer_token="" + if [ -n "$GRAFANA_BEARER_TOKEN" ]; then + grafana_bearer_token="bearer_token_secret=\"$GRAFANA_BEARER_TOKEN\"" + fi + + cat << EOF > config.toml + [CCIP] + [CCIP.Env] + EnvToConnect="$EXISTING_NAMESPACE" + [CCIP.Env.Network] + selected_networks = $selected_networks + [CCIP.Env.NewCLCluster] + [CCIP.Env.NewCLCluster.Common] + [CCIP.Env.NewCLCluster.Common.ChainlinkImage] + image="$CHAINLINK_IMAGE" + version="$CHAINLINK_VERSION" + + [CCIP.Env.NewCLCluster.Common.ChainlinkUpgradeImage] + image="$UPGRADE_IMAGE" + version="$UPGRADE_VERSION" + + $custom_nodes_toml + + [CCIP.Env.Logging] + test_log_collect=$test_log_collect + run_id="$RUN_ID" + + [CCIP.Env.Logging.LogStream] + log_targets=$log_targets + + [CCIP.Env.Logging.Loki] + tenant_id="$LOKI_TENANT_ID" + endpoint="$LOKI_ENDPOINT" + basic_auth_secret="$LOKI_BASIC_AUTH" + + [CCIP.Env.Logging.Grafana] + base_url="$GRAFANA_URL" + dashboard_url="$GRAFANA_DASHBOARD_URL" + $grafana_bearer_token + + [CCIP.Groups.load] + TestRunName = '$EXISTING_NAMESPACE' + + [CCIP.Groups.smoke] + TestRunName = '$EXISTING_NAMESPACE' + + EOF + + BASE64_CCIP_SECRETS_CONFIG=$(cat config.toml | base64 -w 0) + echo ::add-mask::$BASE64_CCIP_SECRETS_CONFIG + echo "BASE64_CCIP_SECRETS_CONFIG=$BASE64_CCIP_SECRETS_CONFIG" >> $GITHUB_ENV + echo "TEST_BASE64_CCIP_SECRETS_CONFIG=$BASE64_CCIP_SECRETS_CONFIG" >> $GITHUB_ENV diff --git a/.github/workflows/ccip-chaos-tests.yml b/.github/workflows/ccip-chaos-tests.yml new file mode 100644 index 00000000000..493322ae420 --- /dev/null +++ b/.github/workflows/ccip-chaos-tests.yml @@ -0,0 +1,253 @@ +name: CCIP Chaos Tests +on: + workflow_run: + workflows: [ CCIP Load Test ] + types: [ completed ] + branches: [ develop ] + workflow_dispatch: + + + +# Only run 1 of this workflow at a time per PR +concurrency: + group: chaos-ccip-tests-chainlink-${{ github.ref }} + cancel-in-progress: true + +env: + # TODO: TT-1470 - Update image names as we solidify new release strategy + CL_ECR: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink + ENV_JOB_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink-ccip-tests:${{ github.sha }} + MOD_CACHE_VERSION: 
1 + +jobs: + build-chainlink: + environment: integration + permissions: + id-token: write + contents: read + name: Build Chainlink Image + runs-on: ubuntu20.04-16cores-64GB + steps: + - name: Checkout the repo + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - name: Check if image exists + id: check-image + uses: smartcontractkit/chainlink-github-actions/docker/image-exists@b49a9d04744b0237908831730f8553f26d73a94b # v2.3.17 + with: + repository: chainlink + tag: ${{ github.sha }} + AWS_REGION: ${{ secrets.QA_AWS_REGION }} + AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + - name: Build Image + if: steps.check-image.outputs.exists == 'false' + uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/build-image@b49a9d04744b0237908831730f8553f26d73a94b # v2.3.17 + env: + GH_TOKEN: ${{ github.token }} + with: + cl_repo: smartcontractkit/chainlink + cl_ref: ${{ github.sha }} + push_tag: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink:${{ github.sha }} + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 + with: + id: ccip-chaos-tests-build-chainlink-image + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Build Chainlink Image + continue-on-error: true + + build-test-image: + environment: integration + permissions: + id-token: write + contents: read + name: Build Test Image + runs-on: ubuntu20.04-16cores-64GB + steps: + - name: Collect Metrics + id: collect-gha-metrics + uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 + with: + id: ccip-chaos-tests-build-test-image + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Build Test Image + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - name: Build Test Image + uses: ./.github/actions/build-test-image + with: + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + + ccip-chaos-tests: + environment: integration + permissions: + issues: read + checks: write + pull-requests: write + id-token: write + contents: read + name: CCIP Chaos Tests + runs-on: ubuntu-latest + needs: [ build-chainlink, build-test-image ] + env: + TEST_SUITE: chaos + TEST_ARGS: -test.timeout 30m + CHAINLINK_COMMIT_SHA: ${{ github.sha }} + CHAINLINK_ENV_USER: ${{ github.actor }} + TEST_TRIGGERED_BY: ccip-cron-chaos-eth + TEST_LOG_LEVEL: debug + DATABASE_URL: postgresql://postgres:node@localhost:5432/chainlink_test?sslmode=disable + GH_TOKEN: ${{ github.token }} + steps: + - name: Collect Metrics + id: collect-gha-metrics + uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 + with: + id: ccip-chaos-tests + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: CCIP Chaos Tests + test-results-file: 
'{"testType":"go","filePath":"/tmp/gotest.log"}' + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - name: Prepare Base64 TOML override for CCIP secrets + uses: ./.github/actions/setup-create-base64-config-ccip + with: + runId: ${{ github.run_id }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + chainlinkImage: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink + chainlinkVersion: ${{ github.sha }} + lokiEndpoint: ${{ secrets.LOKI_URL }} + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: ${{ vars.GRAFANA_URL }} + grafanaDashboardUrl: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" + - name: Run Chaos Tests + uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@b49a9d04744b0237908831730f8553f26d73a94b # v2.3.17 + with: + test_command_to_run: cd ./integration-tests && go test -timeout 1h -count=1 -json -test.parallel 11 -run 'TestChaosCCIP' ./chaos 2>&1 | tee /tmp/gotest.log | gotestloghelper -ci + test_download_vendor_packages_command: make gomod + cl_repo: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink + cl_image_tag: ${{ github.sha }} + artifacts_location: ./integration-tests/chaos/logs + publish_check_name: CCIP Chaos Test Results + publish_report_paths: ./tests-chaos-report.xml + triggered_by: ${{ env.TEST_TRIGGERED_BY }} + token: ${{ secrets.GITHUB_TOKEN }} + go_mod_path: ./integration-tests/go.mod + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + CGO_ENABLED: "1" + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + cache_key_id: ccip-load-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + ## Notify in slack if the job fails + - name: Notify Slack + if: failure() && github.event_name != 'workflow_dispatch' + uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 + env: + SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} + with: + channel-id: "#ccip-testing" + slack-message: ":x: :mild-panic-intensifies: CCIP chaos tests failed: \n${{ format('https://github.com/{0}/actions/runs/{1}', github.repository, github.run_id) }}" + ## Run Cleanup if the job succeeds + - name: cleanup + if: always() + uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/cleanup@b49a9d04744b0237908831730f8553f26d73a94b # v2.3.17 + with: + triggered_by: ${{ env.TEST_TRIGGERED_BY }} + + ccip-chaos-with-load-tests: + environment: integration + permissions: + issues: read + checks: write + pull-requests: write + id-token: write + contents: read + name: CCIP Load With Chaos Tests + if: false # Disabled until CCIP-2555 is resolved + runs-on: ubuntu-latest + needs: [ build-chainlink, build-test-image ] + env: + TEST_SUITE: load + TEST_ARGS: -test.timeout 1h + CHAINLINK_COMMIT_SHA: ${{ github.sha }} + CHAINLINK_ENV_USER: ${{ github.actor }} + TEST_TRIGGERED_BY: ccip-cron-chaos-and-load-eth + TEST_LOG_LEVEL: debug + DATABASE_URL: postgresql://postgres:node@localhost:5432/chainlink_test?sslmode=disable + GH_TOKEN: ${{ github.token }} + steps: + - name: Collect Metrics + id: collect-gha-metrics + uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 + with: + id: ccip-chaos-tests-with-load-test + org-id: ${{ 
secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: CCIP load with chaos test + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - name: Prepare Base64 TOML override for CCIP secrests + uses: ./.github/actions/setup-create-base64-config-ccip + with: + runId: ${{ github.run_id }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + chainlinkImage: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink + chainlinkVersion: ${{ github.sha }} + lokiEndpoint: ${{ secrets.LOKI_URL }} + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: ${{ vars.GRAFANA_URL }} + grafanaDashboardUrl: "/d/6vjVx-1V8/ccip-long-running-tests" + - name: Run Load With Chaos Tests + uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@b49a9d04744b0237908831730f8553f26d73a94b # v2.3.17 + with: + test_command_to_run: cd ./integration-tests/ccip-tests && go test -timeout 2h -count=1 -json -test.parallel 4 -run '^TestLoadCCIPStableWithPodChaosDiffCommitAndExec' ./load 2>&1 | tee /tmp/gotest.log | gotestfmt + test_download_vendor_packages_command: make gomod + cl_repo: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink + cl_image_tag: ${{ github.sha }} + artifacts_location: ./integration-tests/load/logs + publish_check_name: CCIP Chaos With Load Test Results + publish_report_paths: ./tests-chaos-with-load-report.xml + triggered_by: ${{ env.TEST_TRIGGERED_BY }} + token: ${{ secrets.GITHUB_TOKEN }} + go_mod_path: ./integration-tests/go.mod + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + CGO_ENABLED: "1" + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + cache_key_id: ccip-load-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + ## Notify in slack if the job fails + - name: Notify Slack + if: failure() && github.event_name != 'workflow_dispatch' + uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 + env: + SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} + with: + channel-id: "#ccip-testing" + slack-message: ":x: :mild-panic-intensifies: CCIP chaos with load tests failed: \n${{ format('https://github.com/{0}/actions/runs/{1}', github.repository, github.run_id) }}" + ## Run Cleanup if the job succeeds + - name: cleanup + if: always() + uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/cleanup@b49a9d04744b0237908831730f8553f26d73a94b # v2.3.17 + with: + triggered_by: ${{ env.TEST_TRIGGERED_BY }} diff --git a/.github/workflows/ccip-client-compatibility-tests.yml b/.github/workflows/ccip-client-compatibility-tests.yml new file mode 100644 index 00000000000..ff0e4be25c6 --- /dev/null +++ b/.github/workflows/ccip-client-compatibility-tests.yml @@ -0,0 +1,743 @@ +name: CCIP Client Compatibility Tests +on: + schedule: + - cron: "30 5 * * TUE,FRI" # Run every Tuesday and Friday at midnight + 30min EST + push: + tags: + - "*" + merge_group: + pull_request: + workflow_dispatch: + inputs: + chainlinkVersion: + description: commit SHA or tag of the Chainlink version to test + required: true + type: string + evmImplementations: + description: comma separated list of EVM implementations to test (ignored 
if base64TestList is used) + required: true + type: string + default: "geth,besu,nethermind,erigon" + latestVersionsNumber: + description: how many of latest images of EVM implementations to test with (ignored if base64TestList is used) + required: true + type: number + default: 3 + base64TestList: + description: base64 encoded list of tests to run (same as base64-ed output of testlistgenerator tool) + required: false + type: string + +env: + # TODO: TT-1470 - Update image names as we solidify new realease strategy + CHAINLINK_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/ccip + INTERNAL_DOCKER_REPO: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com + MOD_CACHE_VERSION: 2 + +jobs: + # Build Test Dependencies + + check-dependency-bump: + name: Check for go-ethereum dependency bump + if: github.event_name == 'pull_request' || github.event_name == 'merge_queue' + runs-on: ubuntu-latest + outputs: + dependency_changed: ${{ steps.changes.outputs.dependency_changed }} + steps: + - name: Checkout code + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + with: + repository: smartcontractkit/chainlink + fetch-depth: 0 + - name: Check for go.mod changes + id: changes + run: | + git fetch origin ${{ github.base_ref }} + # if no match is found then grep exits with code 1, but if there is a match it exits with code 0 + # this will return a match if there are any changes on that corresponding line, for example if spacing was changed + DEPENDENCY_CHANGED=$(git diff -U0 origin/${{ github.base_ref }}...HEAD -- go.mod | grep -q 'github.com/ethereum/go-ethereum'; echo $?) + PR_VERSION=$(grep 'github.com/ethereum/go-ethereum' go.mod | awk '{print $2}') + + # here 0 means a match was found, 1 means no match was found + if [ "$DEPENDENCY_CHANGED" -eq 0 ]; then + # Dependency was changed in the PR, now compare with the base branch + git fetch origin ${{ github.base_ref }} + BASE_VERSION=$(git show origin/${{ github.base_ref }}:go.mod | grep 'github.com/ethereum/go-ethereum' | awk '{print $2}') + + echo "Base branch version: $BASE_VERSION" + echo "PR branch version: $PR_VERSION" + + echo "Dependency version changed in the PR compared to the base branch." + echo "dependency_changed=true" >> $GITHUB_OUTPUT + else + echo "No changes to ethereum/go-ethereum dependency in the PR." 
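+ # even when the dependency is unchanged, the version pinned by the PR is logged below so the job output records it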
+ echo "PR branch version: $PR_VERSION" + echo "dependency_changed=false" >> $GITHUB_OUTPUT + fi + + should-run: + if: always() + name: Check if the job should run + needs: check-dependency-bump + runs-on: ubuntu-latest + outputs: + should_run: ${{ steps.should-run.outputs.should_run }} + eth_implementations : ${{ steps.should-run.outputs.eth_implementations }} + env: + GITHUB_REF_TYPE: ${{ github.ref_type }} + steps: + - name: Check if the job should run + id: should-run + run: | + if [ "${{ needs.check-dependency-bump.outputs.dependency_changed }}" == "true" ]; then + echo "Will run tests, because go-ethereum dependency was bumped" + echo "should_run=true" >> $GITHUB_OUTPUT + elif [ "$GITHUB_EVENT_NAME" = "schedule" ]; then + echo "Will run tests, because trigger event was $GITHUB_EVENT_NAME" + echo "should_run=true" >> $GITHUB_OUTPUT + elif [ "$GITHUB_EVENT_NAME" = "workflow_dispatch" ]; then + echo "Will run tests, because trigger event was $GITHUB_EVENT_NAME" + echo "should_run=true" >> $GITHUB_OUTPUT + elif [ "$GITHUB_REF_TYPE" = "tag" ]; then + echo "Will run tests, because new tag was created" + echo "should_run=true" >> $GITHUB_OUTPUT + else + echo "Will not run tests" + echo "should_run=false" >> $GITHUB_OUTPUT + fi + + select-versions: + if: always() && needs.should-run.outputs.should_run == 'true' + name: Select Versions + needs: should-run + runs-on: ubuntu-latest + env: + RELEASED_DAYS_AGO: 4 + GITHUB_REF_TYPE: ${{ github.ref_type }} + outputs: + evm_implementations : ${{ steps.select-implementations.outputs.evm_implementations }} + chainlink_version: ${{ steps.select-chainlink-version.outputs.chainlink_version }} + latest_image_count: ${{ steps.get-image-count.outputs.image_count }} + steps: + # ghlatestreleasechecker is a tool to check if new release is available for a given repo + - name: Set Up ghlatestreleasechecker + shell: bash + run: | + go install github.com/smartcontractkit/chainlink-testing-framework/tools/ghlatestreleasechecker@v1.0.0 + - name: Select EVM implementations to test + id: select-implementations + run: | + PATH=$PATH:$(go env GOPATH)/bin + export PATH + + if [ "$GITHUB_EVENT_NAME" = "schedule" ]; then + echo "Checking for new releases" + implementations_arr=() + new_geth=$(ghlatestreleasechecker "ethereum/go-ethereum" $RELEASED_DAYS_AGO) + if [ "$new_geth" != "none" ]; then + echo "New geth release found: $new_geth" + implementations_arr+=("geth") + fi + new_besu=$(ghlatestreleasechecker "hyperledger/besu" $RELEASED_DAYS_AGO) + if [ "new_besu" != "none" ]; then + echo "New besu release found: $new_besu" + implementations_arr+=("besu") + fi + new_erigon=$(ghlatestreleasechecker "ledgerwatch/erigon" $RELEASED_DAYS_AGO) + if [ "new_erigon" != "none" ]; then + echo "New erigon release found: $new_erigon" + implementations_arr+=("erigon") + fi + new_nethermind=$(ghlatestreleasechecker "nethermindEth/nethermind" $RELEASED_DAYS_AGO) + if [ "new_nethermind" != "none" ]; then + echo "New nethermind release found: $new_nethermind" + implementations_arr+=("nethermind") + fi + IFS=',' + eth_implementations="${implementations_arr[*]}" + echo "Found new releases for: $eth_implementations" + echo "evm_implementations=$eth_implementations" >> $GITHUB_OUTPUT + elif [ "$GITHUB_EVENT_NAME" = "workflow_dispatch" ]; then + if [ -n "${{ github.event.inputs.base64TestList }}" ]; then + echo "Base64-ed Test Input provided, ignoring EVM implementations" + else + echo "Will test following EVM implementations: ${{ github.event.inputs.evmImplementations }}" + echo 
"evm_implementations=${{ github.event.inputs.evmImplementations }}" >> $GITHUB_OUTPUT + fi + else + echo "Will test all EVM implementations" + echo "evm_implementations=geth,besu,nethermind,erigon" >> $GITHUB_OUTPUT + fi + - name: Select Chainlink CCIP version + id: select-chainlink-version + run: | + PATH=$PATH:$(go env GOPATH)/bin + export PATH + + if [ "$GITHUB_EVENT_NAME" = "schedule" ]; then + echo "Fetching latest Chainlink CCIP stable version" + implementations_arr=() + # we use 100 days since we really want the latest one, and it's highly improbable there won't be a release in last 100 days + chainlink_version=$(ghlatestreleasechecker "smartcontractkit/ccip" 100) + echo "chainlink_version=$chainlink_version" >> $GITHUB_OUTPUT + elif [ "$GITHUB_EVENT_NAME" = "workflow_dispatch" ]; then + echo "Fetching Chainlink version from input" + if [ -n "${{ github.event.inputs.chainlinkVersion }}" ]; then + echo "Chainlink version provided in input" + chainlink_version="${{ github.event.inputs.chainlinkVersion }}" + else + echo "Chainlink version not provided in input. Using latest commit SHA." + chainlink_version=${{ github.sha }} + fi + echo "chainlink_version=$chainlink_version" >> $GITHUB_OUTPUT + elif [ "$GITHUB_EVENT_NAME" = "pull_request" ]; then + echo "Fetching Chainlink version from PR's head commit" + chainlink_version="${{ github.event.pull_request.head.sha }}" + echo "chainlink_version=$chainlink_version" >> $GITHUB_OUTPUT + elif [ "$GITHUB_EVENT_NAME" = "merge_queue" ]; then + echo "Fetching Chainlink version from merge queue's head commit" + chainlink_version="${{ github.event.merge_group.head_sha }}" + echo "chainlink_version=$chainlink_version" >> $GITHUB_OUTPUT + elif [ "$GITHUB_REF_TYPE" = "tag" ]; then + echo "Fetching Chainlink version from tag" + chainlink_version="${{ github.ref_name }}" + echo "chainlink_version=$chainlink_version" >> $GITHUB_OUTPUT + else + echo "Unsupported trigger event. It's probably an issue with the pipeline definition. Please reach out to the Test Tooling team." + exit 1 + fi + echo "Will use following Chainlink version: $chainlink_version" + - name: Get image count + id: get-image-count + run: | + if [ "$GITHUB_EVENT_NAME" = "workflow_dispatch" ]; then + echo "Fetching latest image count from input" + if [ -n "${{ github.event.inputs.base64TestList }}" ]; then + echo "Base64-ed Test Input provided, ignoring latest image count" + else + image_count="${{ github.event.inputs.latestVersionsNumber }}" + echo "image_count=$image_count" >> $GITHUB_OUTPUT + fi + else + echo "Fetching default latest image count" + image_count=3 + echo "image_count=$image_count" >> $GITHUB_OUTPUT + fi + echo "Will use following latest image count: $image_count" + + check-ecr-images-exist: + name: Check images used as test dependencies exist in ECR + if: always() && needs.should-run.outputs.should_run == 'true' + environment: integration + permissions: + id-token: write + contents: read + needs: [should-run] + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + mirror: + - name: ethereum/client-go + expression: '^(alltools-v|v)[0-9]\.[0-9]+\.[0-9]+$' + - name: hyperledger/besu + expression: '^[0-9]+\.[0-9]+(\.[0-9]+)?$' + page_size: 300 + - name: thorax/erigon + expression: '^v[0-9]+\.[0-9]+\.[0-9]+$' + - name: nethermind/nethermind + expression: '^[0-9]+\.[0-9]+\.[0-9]+$' + - name: tofelb/ethereum-genesis-generator + expression: '^[0-9]+\.[0-9]+\.[0-9]+(\-slots\-per\-epoch)?' 
+ steps: + - name: Update internal ECR if the latest Ethereum client image does not exist + uses: smartcontractkit/chainlink-testing-framework/.github/actions/update-internal-mirrors@5eea86ee4f7742b4e944561a570a6b268e712d9e # v1.30.3 + with: + aws_region: ${{ secrets.QA_AWS_REGION }} + role_to_assume: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + aws_account_number: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + image_name: ${{matrix.mirror.name}} + expression: ${{matrix.mirror.expression}} + page_size: ${{matrix.mirror.page_size}} + + build-chainlink: + if: always() && needs.should-run.outputs.should_run == 'true' + environment: integration + permissions: + id-token: write + contents: read + name: Build Chainlink Image + needs: [should-run, select-versions] + runs-on: ubuntu-latest + steps: + - name: Collect Metrics + id: collect-gha-metrics + uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 + with: + id: client-compatablility-build-chainlink + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Build Chainlink Image + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + with: + repository: smartcontractkit/chainlink + ref: ${{ needs.select-versions.outputs.chainlink_version }} + - name: Build Chainlink Image + uses: ./.github/actions/build-chainlink-image + with: + tag_suffix: "" + dockerfile: core/chainlink.Dockerfile + git_commit_sha: ${{ needs.select-versions.outputs.chainlink_version }} + check_image_exists: 'true' + AWS_REGION: ${{ secrets.QA_AWS_REGION }} + AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + + get-latest-available-images: + name: Get Latest EVM Implementation's Images + if: always() && needs.should-run.outputs.should_run == 'true' + environment: integration + runs-on: ubuntu-latest + needs: [check-ecr-images-exist, should-run, select-versions] + permissions: + id-token: write + contents: read + env: + LATEST_IMAGE_COUNT: ${{ needs.select-versions.outputs.latest_image_count }} + outputs: + geth_images: ${{ env.GETH_IMAGES }} + nethermind_images: ${{ env.NETHERMIND_IMAGES }} + besu_images: ${{ env.BESU_IMAGES }} + erigon_images: ${{ env.ERIGON_IMAGES }} + steps: + # Setup AWS creds + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 + with: + aws-region: ${{ secrets.QA_AWS_REGION }} + role-to-assume: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + role-duration-seconds: 3600 + # Login to ECR + - name: Login to Amazon ECR + id: login-ecr + uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1 + with: + mask-password: "true" + env: + AWS_REGION: ${{ secrets.QA_AWS_REGION }} + # ecrimagefetcher is a tool to get latest images from ECR + - name: Set Up ecrimagefetcher + shell: bash + run: | + go install github.com/smartcontractkit/chainlink-testing-framework/tools/ecrimagefetcher@v1.0.1 + - name: Get latest docker images from ECR + if: ${{ github.event.inputs.base64TestList == '' }} + env: + AWS_REGION: ${{ secrets.QA_AWS_REGION }} + ETH_IMPLEMENTATIONS: ${{ needs.select-versions.outputs.evm_implementations }} + run: | + PATH=$PATH:$(go env GOPATH)/bin + export PATH + if [[ "$ETH_IMPLEMENTATIONS" == *"geth"* ]]; then + geth_images=$(ecrimagefetcher 'ethereum/client-go' '^v[0-9]+\.[0-9]+\.[0-9]+$' ${{ env.LATEST_IMAGE_COUNT }}) + echo 
"GETH_IMAGES=$geth_images" >> $GITHUB_ENV + echo "Geth latest images: $geth_images" + fi + + if [[ "$ETH_IMPLEMENTATIONS" == *"nethermind"* ]]; then + nethermind_images=$(ecrimagefetcher 'nethermind/nethermind' '^[0-9]+\.[0-9]+\.[0-9]+$' ${{ env.LATEST_IMAGE_COUNT }}) + echo "NETHERMIND_IMAGES=$nethermind_images" >> $GITHUB_ENV + echo "Nethermind latest images: $nethermind_images" + fi + + if [[ "$ETH_IMPLEMENTATIONS" == *"besu"* ]]; then + # 24.3.3 is ignored as it doesn't support data & input fields in eth_call + besu_images=$(ecrimagefetcher 'hyperledger/besu' '^[0-9]+\.[0-9]+(\.[0-9]+)?$' ${{ env.LATEST_IMAGE_COUNT }} ">=24.5.1") + echo "BESU_IMAGES=$besu_images" >> $GITHUB_ENV + echo "Besu latest images: $besu_images" + fi + + if [[ "$ETH_IMPLEMENTATIONS" == *"erigon"* ]]; then + # 2.60.0 and 2.60.1 are ignored as they stopped working with CL node + erigon_images=$(ecrimagefetcher 'thorax/erigon' '^v[0-9]+\.[0-9]+\.[0-9]+$' ${{ env.LATEST_IMAGE_COUNT }} "> $GITHUB_ENV + echo "Erigon latest images: $erigon_images" + fi + + # End Build Test Dependencies + + prepare-compatibility-matrix: + name: Prepare Compatibility Matrix + if: always() && needs.should-run.outputs.should_run == 'true' + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + needs: [get-latest-available-images,should-run,select-versions] + runs-on: ubuntu-latest + env: + ETH_IMPLEMENTATIONS: ${{ needs.select-versions.outputs.evm_implementations }} + BASE64_TEST_LIST: ${{ github.event.inputs.base64TestList }} + outputs: + matrix: ${{ env.JOB_MATRIX_JSON }} + steps: + - name: Decode Base64 Test List Input if Set + if: env.BASE64_TEST_LIST != '' + run: | + echo "Decoding base64 tests list from the input" + DECODED_BASE64_TEST_LIST=$(echo $BASE64_TEST_LIST | base64 -d) + echo "Decoded input:" + echo "$DECODED_BASE64_TEST_LIST" + is_valid=$(echo "$DECODED_BASE64_TEST_LIST" | jq . > /dev/null 2>&1; echo $?) + if [ "$is_valid" -ne 0 ]; then + echo "Invalid base64 input. Please provide a valid base64 encoded JSON list of tests." 
+ echo "Here is an example of valid JSON:" + cat <> $GITHUB_ENV + # testlistgenerator is a tool that builds a matrix of tests to run + - name: Set Up testlistgenerator + if: env.BASE64_TEST_LIST == '' + shell: bash + run: | + go install github.com/smartcontractkit/chainlink-testing-framework/tools/testlistgenerator@v1.1.0 + - name: Prepare matrix input + if: env.BASE64_TEST_LIST == '' + run: | + PATH=$PATH:$(go env GOPATH)/bin + export PATH + + if [[ "$ETH_IMPLEMENTATIONS" == *"geth"* ]]; then + echo "Will test compatibility with geth" + testlistgenerator -o compatibility_test_list.json -p ccip -r TestSmokeCCIPForBidirectionalLane -f './ccip-tests/smoke/ccip_test.go' -e geth -d "${{ needs.get-latest-available-images.outputs.geth_images }}" -t "ccip-geth-compatibility-test" -w "SIMULATED_1,SIMULATED_2" -c 1337,2337 -n ubuntu-latest + else + echo "Will not test compatibility with geth" + fi + + if [[ "$ETH_IMPLEMENTATIONS" == *"besu"* ]]; then + echo "Will test compatibility with besu" + testlistgenerator -o compatibility_test_list.json -p ccip -r TestSmokeCCIPForBidirectionalLane -f './ccip-tests/smoke/ccip_test.go' -e besu -d "${{ needs.get-latest-available-images.outputs.besu_images }}" -t "ccip-besu-compatibility-test" -w "SIMULATED_BESU_NONDEV_1,SIMULATED_BESU_NONDEV_2" -c 1337,2337 -n ubuntu-latest + else + echo "Will not test compatibility with besu" + fi + + # TODO: Waiting for CCIP-2255 to be resolved + if [[ "$ETH_IMPLEMENTATIONS" == *"erigon"* ]]; then + echo "Will test compatibility with erigon" + testlistgenerator -o compatibility_test_list.json -p ccip -r TestSmokeCCIPForBidirectionalLane -f './ccip-tests/smoke/ccip_test.go' -e erigon -d "${{ needs.get-latest-available-images.outputs.erigon_images }}" -t "ccip-erigon-compatibility-test" -w "SIMULATED_1,SIMULATED_2" -c 1337,2337 -n ubuntu-latest + else + echo "Will not test compatibility with erigon" + fi + + # TODO: uncomment when nethermind flake reason is addressed + if [[ "$ETH_IMPLEMENTATIONS" == *"nethermind"* ]]; then + echo "Will not test compatibility with nethermind due to flakiness" + # echo "Will test compatibility with nethermind" + # testlistgenerator -o compatibility_test_list.json -p ccip -r TestSmokeCCIPForBidirectionalLane -f './ccip-tests/smoke/ccip_test.go' -e nethermind -d "${{ needs.get-latest-available-images.outputs.nethermind_images }}" -t "ccip-nethermind-compatibility-test" -w "SIMULATED_1,SIMULATED_2" -c 1337,2337 -n ubuntu-latest + else + echo "Will not test compatibility with nethermind" + fi + + jq . compatibility_test_list.json + echo "Adding human-readable name" + jq 'map(. + {visible_name: (.docker_image | split(",")[0] | split("=")[1])})' compatibility_test_list.json > compatibility_test_list_modified.json + jq . compatibility_test_list_modified.json + JOB_MATRIX_JSON=$(jq -c . 
compatibility_test_list_modified.json) + echo "JOB_MATRIX_JSON=${JOB_MATRIX_JSON}" >> $GITHUB_ENV + + run-client-compatibility-matrix: + name: CCIP Compatibility with ${{ matrix.evm_node.visible_name }} + if: always() && needs.should-run.outputs.should_run == 'true' + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + needs: [build-chainlink, prepare-compatibility-matrix, should-run, select-versions] + env: + CHAINLINK_COMMIT_SHA: ${{ needs.select-versions.outputs.chainlink_version }} + CHAINLINK_ENV_USER: ${{ github.actor }} + TEST_LOG_LEVEL: debug + strategy: + fail-fast: false + matrix: + evm_node: ${{fromJson(needs.prepare-compatibility-matrix.outputs.matrix)}} + runs-on: ubuntu-latest + steps: + - name: Checkout the repo + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + with: + repository: smartcontractkit/chainlink + ref: ${{ needs.select-versions.outputs.chainlink_version }} + - name: Prepare Base64 TOML override + uses: ./.github/actions/setup-create-base64-config + with: + runId: ${{ github.run_id }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + selectedNetworks: ${{ matrix.evm_node.networks }} + chainlinkImage: ${{ env.CHAINLINK_IMAGE }} + chainlinkVersion: ${{ needs.select-versions.outputs.chainlink_version }} + pyroscopeServer: ${{ !startsWith(github.ref, 'refs/tags/') && '' || secrets.QA_PYROSCOPE_INSTANCE }} # Avoid sending blank envs https://github.com/orgs/community/discussions/25725 + pyroscopeEnvironment: ci-ccip-bidirectional-lane-${{ matrix.evm_node.name }} + pyroscopeKey: ${{ secrets.QA_PYROSCOPE_KEY }} + lokiEndpoint: ${{ secrets.LOKI_URL_CI }} + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + lokiBasicAuth: ${{ secrets.LOKI_BASIC_AUTH }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: ${{ vars.GRAFANA_URL }} + grafanaDashboardUrl: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" + - name: Prepare Base64 TOML override for CCIP secrets + uses: ./.github/actions/setup-create-base64-config-ccip + with: + runId: ${{ github.run_id }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + selectedNetworks: ${{ matrix.evm_node.networks }} + chainlinkImage: ${{ env.CHAINLINK_IMAGE }} + chainlinkVersion: ${{ needs.select-versions.outputs.chainlink_version }} + lokiEndpoint: ${{ secrets.LOKI_URL_CI }} + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + lokiBasicAuth: ${{ secrets.LOKI_BASIC_AUTH }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: ${{ vars.GRAFANA_URL }} + grafanaDashboardUrl: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" + customEvmNodes: ${{ matrix.evm_node.docker_image }} + evmNodeLogLevel: "trace" + - name: Prepare test log name + run: | + replace_special_chars() { + if [ -z "$1" ]; then + echo "Please provide a string as an argument." + return 1 + fi + + local input_string="$1" + + # Replace '/' with '-' + local modified_string="${input_string//\//-}" + + # Replace ':' with '-' + modified_string="${modified_string//:/-}" + + # Replace '.' 
with '-' + modified_string="${modified_string//./-}" + + echo "$modified_string" + } + echo "TEST_LOG_NAME=$(replace_special_chars "ccip-${{ matrix.evm_node.name }}-test-logs")" >> $GITHUB_ENV + - name: Print Test details - ${{ matrix.evm_node.docker_image }} + run: | + echo "EVM Implementation Docker Image: ${{ matrix.evm_node.docker_image }}" + echo "EVM Implementation Networks: ${{ matrix.evm_node.networks }}" + echo "Test identifier: ${{ matrix.evm_node.name }}" + - name: Run Tests + uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@fc3e0df622521019f50d772726d6bf8dc919dd38 # v2.3.19 + with: + test_command_to_run: cd ./integration-tests && go test -timeout 30m -count=1 -json -test.parallel=2 ${{ matrix.evm_node.run }} 2>&1 | tee /tmp/gotest.log | gotestloghelper -ci + test_download_vendor_packages_command: cd ./integration-tests && go mod download + cl_repo: ${{ env.CHAINLINK_IMAGE }} + cl_image_tag: ${{ needs.select-versions.outputs.chainlink_version }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + artifacts_name: ${{ env.TEST_LOG_NAME }} + artifacts_location: | + ./integration-tests/smoke/logs/ + ./integration-tests/ccip-tests/smoke/logs/* + /tmp/gotest.log + publish_check_name: ${{ matrix.evm_node.name }} + token: ${{ secrets.GITHUB_TOKEN }} + go_mod_path: ./integration-tests/go.mod + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: "" + should_tidy: "false" + - name: Print failed test summary + if: always() + uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/show-test-summary@1587f59bfd626b668d303abbc90fee41b12397e6 # v2.3.23 + with: + test_directories: ./integration-tests/smoke/,./integration-tests/ccip-tests/smoke/ + + start-slack-thread: + name: Start Slack Thread + if: ${{ always() && needs.*.result != 'skipped' && needs.*.result != 'cancelled' && needs.should-run.outputs.should_run == 'true' }} + environment: integration + outputs: + thread_ts: ${{ steps.slack.outputs.thread_ts }} + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + runs-on: ubuntu-latest + needs: [run-client-compatibility-matrix,should-run,select-versions] + steps: + - name: Debug Result + run: echo ${{ join(needs.*.result, ',') }} + - name: Main Slack Notification + uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 + id: slack + with: + channel-id: ${{ secrets.QA_CCIP_SLACK_CHANNEL }} + payload: | + { + "attachments": [ + { + "color": "${{ contains(join(needs.*.result, ','), 'failure') && '#C62828' || '#2E7D32' }}", + "blocks": [ + { + "type": "header", + "text": { + "type": "plain_text", + "text": "CCIP Compatibility Test Results ${{ contains(join(needs.*.result, ','), 'failure') && ':x:' || ':white_check_mark:'}}", + "emoji": true + } + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "${{ contains(join(needs.*.result, ','), 'failure') && 'Some tests failed! Notifying ' || 'All Good!' 
}}" + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "<${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}|${{ github.ref_name }}> | <${{ github.server_url }}/${{ github.repository }}/commit/${{ needs.select-versions.outputs.chainlink_version }}|${{ needs.select-versions.outputs.chainlink_version }}> | <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|Run>" + } + } + ] + } + ] + } + env: + SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} + + parse-test-results: + name: Parse Test Results + if: always() && needs.*.result != 'skipped' && needs.*.result != 'cancelled' && needs.should-run.outputs.should_run == 'true' + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + runs-on: ubuntu-latest + needs: [run-client-compatibility-matrix,should-run] + outputs: + base64_parsed_results: ${{ steps.get-test-results.outputs.base64_parsed_results }} + steps: + # workflowresultparser is a tool to get job results from a workflow run + - name: Set Up workflowresultparser + shell: bash + run: | + go install github.com/smartcontractkit/chainlink-testing-framework/tools/workflowresultparser@v1.0.0 + - name: Get and parse Test Results + shell: bash + id: get-test-results + run: | + PATH=$PATH:$(go env GOPATH)/bin + export PATH + + workflowresultparser -workflowRunID ${{ github.run_id }} -githubToken ${{ github.token }} -githubRepo "${{ github.repository }}" -jobNameRegex "^CCIP Compatibility with (.*)$" -namedKey="CCIP" -outputFile=output.json + + echo "base64_parsed_results=$(base64 -w 0 output.json)" >> $GITHUB_OUTPUT + + display-test-results: + name: Aggregated test results + if: always() && needs.*.result != 'skipped' && needs.*.result != 'cancelled' && needs.should-run.outputs.should_run == 'true' && needs.parse-test-results.result == 'success' + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + runs-on: ubuntu-latest + needs: [start-slack-thread, should-run, select-versions, parse-test-results] + steps: + # asciitable is a tool that prints results in a nice ASCII table + - name: Set Up asciitable + shell: bash + run: | + go install github.com/smartcontractkit/chainlink-testing-framework/tools/asciitable@v1.0.2 + - name: Print aggregated test results + shell: bash + run: | + PATH=$PATH:$(go env GOPATH)/bin + export PATH + + raw_results="$(echo ${{ needs.parse-test-results.outputs.base64_parsed_results }} | base64 -d)" + echo $raw_results > input.json + asciitable --firstColumn "EVM Implementation" --secondColumn Result --jsonfile input.json --outputFile output.txt --section "CCIP" --namedKey "CCIP" + + echo + echo "AGGREGATED RESULTS" + cat output.txt + + echo "## Aggregated EVM Implementations compatibility results summary" >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + cat output.txt >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + + post-test-results-to-slack: + name: Post Test Results + if: ${{ always() && needs.*.result != 'skipped' && needs.*.result != 'cancelled' && needs.should-run.outputs.should_run == 'true' }} + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + runs-on: ubuntu-latest + needs: [start-slack-thread,should-run,select-versions] + steps: + - name: Checkout the repo + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 
+ with: + ref: ${{ needs.select-versions.outputs.chainlink_version }} + - name: Get test results for CCIP + id: get-product-results + shell: bash + run: | + raw_results="$(echo ${{ needs.parse-test-results.outputs.base64_parsed_results }} | base64 -d)" + product_result=$(echo "$raw_results" | jq -c "select(has(\"CCIP\")) | .CCIP[]") + if [ -n "$product_result" ]; then + base64_result=$(echo $product_result | base64 -w 0) + echo "base64_result=$base64_result" >> $GITHUB_OUTPUT + else + echo "No results found for CCIP" + echo "base64_result=" >> $GITHUB_OUTPUT + fi + - name: Post Test Results to Slack + uses: ./.github/actions/notify-slack-jobs-result + with: + github_token: ${{ github.token }} + github_repository: ${{ github.repository }} + workflow_run_id: ${{ github.run_id }} + github_job_name_regex: ^CCIP Compatibility with (.*?)$ + message_title: CCIP Compatibility Test Results + slack_channel_id: ${{ secrets.QA_CCIP_SLACK_CHANNEL }} + slack_bot_token: ${{ secrets.QA_SLACK_API_KEY }} + slack_thread_ts: ${{ needs.start-slack-thread.outputs.thread_ts }} + base64_parsed_results: ${{ steps.get-product-results.outputs.base64_result }} diff --git a/.github/workflows/ccip-live-network-tests.yml b/.github/workflows/ccip-live-network-tests.yml new file mode 100644 index 00000000000..fa24614c8eb --- /dev/null +++ b/.github/workflows/ccip-live-network-tests.yml @@ -0,0 +1,312 @@ +name: CCIP Live Network Tests +on: + schedule: + - cron: '0 */6 * * *' + workflow_dispatch: + inputs: + base64_test_input : # base64 encoded toml for test input + description: 'Base64 encoded toml test input' + required: false + slackMemberID: + description: 'Slack member ID to notify' + required: false + test_type: + description: 'Type of test to run' + required: false + type: choice + options: + - 'load' + - 'smoke' + +# Only run 1 of this workflow at a time per PR +concurrency: + group: live-testnet-tests + cancel-in-progress: true + +env: + # TODO: TT-1470 - Update image names as we solidify new realease strategy + CHAINLINK_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink + CHAINLINK_VERSION: ${{ github.sha}} + CHAINLINK_TEST_VERSION: ${{ github.sha}} + ENV_JOB_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink-ccip-tests:${{ github.sha }} + INTERNAL_DOCKER_REPO: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com + AWS_ECR_REPO_PUBLIC_REGISTRY: public.ecr.aws + +jobs: + build-chainlink: + environment: integration + permissions: + id-token: write + contents: read + name: Build Chainlink Image + runs-on: ubuntu20.04-16cores-64GB + steps: + - name: Checkout the repo + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - name: Check if image exists + id: check-image + uses: smartcontractkit/chainlink-github-actions/docker/image-exists@5dd916d08c03cb5f9a97304f4f174820421bb946 # v2.3.11 + with: + repository: chainlink + tag: ${{ env.CHAINLINK_VERSION }} + AWS_REGION: ${{ secrets.QA_AWS_REGION }} + AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + - name: Build Image + if: steps.check-image.outputs.exists == 'false' + uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/build-image@5dd916d08c03cb5f9a97304f4f174820421bb946 # v2.3.11 + env: + GH_TOKEN: ${{ github.token }} + with: + cl_repo: smartcontractkit/chainlink-ccip + cl_ref: ${{ env.CHAINLINK_VERSION }} + push_tag: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ 
secrets.QA_AWS_REGION }}.amazonaws.com/chainlink:${{ env.CHAINLINK_VERSION }} + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 + with: + id: ccip-on-demand-live-testnet-tests-build-chainlink-image + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Build Chainlink Image + continue-on-error: true + + build-test-image: + environment: integration + permissions: + id-token: write + contents: read + name: Build Test Image + runs-on: ubuntu20.04-16cores-64GB + steps: + - name: Collect Metrics + id: collect-gha-metrics + uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 + with: + id: ccip-on-demand-live-testnet-tests-build-test-image + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Build Test Image + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - name: Build Test Image + uses: ./.github/actions/build-test-image + with: + tag: ${{ env.CHAINLINK_TEST_VERSION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + + ccip-load-test: + name: CCIP Load Test + environment: integration + runs-on: ubuntu-latest + strategy: + matrix: + config: [mainnet.toml] + needs: [ build-chainlink, build-test-image ] + # if the event is a scheduled event or the test type is load and no previous job failed + if: ${{ (github.event_name == 'schedule' || inputs.test_type == 'load') && !contains(needs.*.result, 'failure') }} + permissions: + issues: read + checks: write + pull-requests: write + id-token: write + contents: read + env: + CHAINLINK_ENV_USER: ${{ github.actor }} + SLACK_API_KEY: ${{ secrets.QA_SLACK_API_KEY }} + SLACK_CHANNEL: ${{ secrets.QA_SLACK_CHANNEL }} + TEST_LOG_LEVEL: info + REF_NAME: ${{ github.head_ref || github.ref_name }} + ENV_JOB_IMAGE_BASE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink-ccip-tests + BASE64_NETWORK_CONFIG: ${{ secrets.BASE64_NETWORK_CONFIG }} + + steps: + - name: Collect Metrics + id: collect-gha-metrics + uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 + with: + id: ccip-on-demand-live-testnet-tests-load-tests + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: CCIP Load Test + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + with: + ref: ${{ env.REF_NAME }} + - name: Prepare Base64 TOML override + shell: bash + run: | + # this key secrets.QA_SHARED_803C_KEY has a story behind it. To know more, see CCIP-2875 and SECHD-16575 tickets. 
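+ # the next line decodes the stored network config, substitutes the evm_key placeholder with the shared QA key, re-encodes it, and the add-mask below keeps the resulting value out of the workflow logs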
+ BASE64_NETWORK_CONFIG=$(echo $BASE64_NETWORK_CONFIG | base64 -w 0 -d | sed -e 's/evm_key/${{ secrets.QA_SHARED_803C_KEY }}/g' | base64 -w 0) + echo ::add-mask::$BASE64_NETWORK_CONFIG + echo "BASE64_NETWORK_CONFIG=$BASE64_NETWORK_CONFIG" >> "$GITHUB_ENV" + SLACK_USER=$(jq -r '.inputs.slackMemberID' $GITHUB_EVENT_PATH) + echo ::add-mask::$SLACK_USER + echo "SLACK_USER=$SLACK_USER" >> "$GITHUB_ENV" + if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then + BASE64_CCIP_CONFIG_OVERRIDE=$(jq -r '.inputs.base64_test_input' $GITHUB_EVENT_PATH) + echo ::add-mask::$BASE64_CCIP_CONFIG_OVERRIDE + echo "BASE64_CCIP_CONFIG_OVERRIDE=$BASE64_CCIP_CONFIG_OVERRIDE" >> $GITHUB_ENV + echo "TEST_BASE64_CCIP_CONFIG_OVERRIDE=$BASE64_CCIP_CONFIG_OVERRIDE" >> $GITHUB_ENV + fi + if [[ "${{ github.event_name }}" == "schedule" ]]; then + BASE64_CCIP_CONFIG_OVERRIDE=$(base64 -w 0 -i ./integration-tests/ccip-tests/testconfig/override/${{ matrix.config }}) + echo ::add-mask::$BASE64_CCIP_CONFIG_OVERRIDE + echo "BASE64_CCIP_CONFIG_OVERRIDE=$BASE64_CCIP_CONFIG_OVERRIDE" >> $GITHUB_ENV + echo "TEST_BASE64_CCIP_CONFIG_OVERRIDE=$BASE64_CCIP_CONFIG_OVERRIDE" >> $GITHUB_ENV + echo "SLACK_USER=${{ secrets.QA_SLACK_USER }}" >> $GITHUB_ENV + fi + - name: step summary + shell: bash + run: | + echo "### chainlink image used for this test run :link:" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.CHAINLINK_VERSION }}\`" >> $GITHUB_STEP_SUMMARY + echo "### chainlink-tests image tag for this test run :ship:" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.CHAINLINK_TEST_VERSION }}\`" >> $GITHUB_STEP_SUMMARY + - name: Prepare Base64 TOML override for CCIP secrets + uses: ./.github/actions/setup-create-base64-config-ccip + with: + runId: ${{ github.run_id }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + chainlinkImage: ${{ env.CHAINLINK_IMAGE }} + chainlinkVersion: ${{ github.sha }} + lokiEndpoint: ${{ secrets.LOKI_URL }} + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + lokiBasicAuth: ${{ secrets.LOKI_BASIC_AUTH }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: ${{ vars.GRAFANA_URL }} + grafanaDashboardUrl: "/d/6vjVx-1V8/ccip-long-running-tests" + - name: Run Tests + uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@5dd916d08c03cb5f9a97304f4f174820421bb946 # v2.3.11 + env: + TEST_SUITE: load + TEST_ARGS: -test.timeout 900h + DATABASE_URL: postgresql://postgres:node@localhost:5432/chainlink_test?sslmode=disable + RR_MEM: 8Gi + RR_CPU: 4 + DETACH_RUNNER: true + TEST_TRIGGERED_BY: ccip-load-test-ci + with: + test_command_to_run: cd ./integration-tests/ccip-tests && go test -v -timeout 70m -count=1 -json -run ^TestLoadCCIPStableRPS$ ./load 2>&1 | tee /tmp/gotest.log | gotestfmt + test_download_vendor_packages_command: cd ./integration-tests && go mod download + cl_repo: ${{ env.CHAINLINK_IMAGE }} + cl_image_tag: ${{ env.CHAINLINK_VERSION }} + token: ${{ secrets.GITHUB_TOKEN }} + go_mod_path: ./integration-tests/go.mod + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + triggered_by: ${{ env.TEST_TRIGGERED_BY }} + artifacts_location: ./integration-tests/load/logs/payload_ccip.json + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + cache_key_id: ccip-load-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + should_cleanup: false + + ccip-smoke-test: + name: CCIP smoke Test + environment: integration + runs-on: ubuntu-latest + needs: [ build-chainlink, build-test-image ] 
+ # if the event is a scheduled event or the test type is load and no previous job failed + if: ${{ github.event_name == 'workflow_dispatch' && inputs.test_type == 'smoke' && !contains(needs.*.result, 'failure') }} + permissions: + issues: read + checks: write + pull-requests: write + id-token: write + contents: read + env: + CHAINLINK_ENV_USER: ${{ github.actor }} + SLACK_API_KEY: ${{ secrets.QA_SLACK_API_KEY }} + SLACK_CHANNEL: ${{ secrets.QA_SLACK_CHANNEL }} + TEST_LOG_LEVEL: info + REF_NAME: ${{ github.head_ref || github.ref_name }} + ENV_JOB_IMAGE_BASE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink-ccip-tests + BASE64_NETWORK_CONFIG: ${{ secrets.BASE64_NETWORK_CONFIG }} + + steps: + - name: Collect Metrics + id: collect-gha-metrics + uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 + with: + id: ccip-on-demand-live-testnet-tests-smoke-test + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: CCIP Smoke Test + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + with: + ref: ${{ env.REF_NAME }} + - name: Prepare Base64 TOML override + shell: bash + run: | + BASE64_NETWORK_CONFIG=$(echo $BASE64_NETWORK_CONFIG | base64 -w 0 -d | sed -e 's/evm_key/${{ secrets.QA_SHARED_803C_KEY }}/g' | base64 -w 0) + echo ::add-mask::$BASE64_NETWORK_CONFIG + echo "BASE64_NETWORK_CONFIG=$BASE64_NETWORK_CONFIG" >> "$GITHUB_ENV" + SLACK_USER=$(jq -r '.inputs.slackMemberID' $GITHUB_EVENT_PATH) + echo ::add-mask::$SLACK_USER + echo "SLACK_USER=$SLACK_USER" >> "$GITHUB_ENV" + if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then + BASE64_CCIP_CONFIG_OVERRIDE=$(jq -r '.inputs.base64_test_input' $GITHUB_EVENT_PATH) + echo ::add-mask::$BASE64_CCIP_CONFIG_OVERRIDE + echo "BASE64_CCIP_CONFIG_OVERRIDE=$BASE64_CCIP_CONFIG_OVERRIDE" >> $GITHUB_ENV + echo "TEST_BASE64_CCIP_CONFIG_OVERRIDE=$BASE64_CCIP_CONFIG_OVERRIDE" >> $GITHUB_ENV + fi + - name: step summary + shell: bash + run: | + echo "### chainlink image used for this test run :link:" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.CHAINLINK_VERSION }}\`" >> $GITHUB_STEP_SUMMARY + echo "### chainlink-tests image tag for this test run :ship:" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.CHAINLINK_TEST_VERSION }}\`" >> $GITHUB_STEP_SUMMARY + - name: Prepare Base64 TOML override for CCIP secrets + uses: ./.github/actions/setup-create-base64-config-ccip + with: + runId: ${{ github.run_id }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + chainlinkImage: ${{ env.CHAINLINK_IMAGE }} + chainlinkVersion: ${{ github.sha }} + lokiEndpoint: ${{ secrets.LOKI_URL }} + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + lokiBasicAuth: ${{ secrets.LOKI_BASIC_AUTH }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: ${{ vars.GRAFANA_URL }} + grafanaDashboardUrl: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" + - name: Run Tests + uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@5dd916d08c03cb5f9a97304f4f174820421bb946 # v2.3.11 + env: + TEST_SUITE: smoke + TEST_ARGS: -test.timeout 900h + DETACH_RUNNER: true + DATABASE_URL: postgresql://postgres:node@localhost:5432/chainlink_test?sslmode=disable + RR_MEM: 8Gi + RR_CPU: 4 + TEST_TRIGGERED_BY: ccip-smoke-test-ci + with: + test_command_to_run: cd ./integration-tests/ccip-tests && 
go test -v -timeout 70m -count=1 -p 30 -json -run ^TestSmokeCCIPForBidirectionalLane$ ./smoke 2>&1 | tee /tmp/gotest.log | gotestfmt + test_download_vendor_packages_command: cd ./integration-tests && go mod download + cl_repo: ${{ env.CHAINLINK_IMAGE }} + cl_image_tag: ${{ env.CHAINLINK_VERSION }} + token: ${{ secrets.GITHUB_TOKEN }} + go_mod_path: ./integration-tests/go.mod + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + triggered_by: ${{ env.TEST_TRIGGERED_BY }} + artifacts_location: ./integration-tests/smoke/logs/payload_ccip.json + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + cache_key_id: ccip-smoke-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + should_cleanup: false \ No newline at end of file diff --git a/.github/workflows/ccip-load-tests.yml b/.github/workflows/ccip-load-tests.yml new file mode 100644 index 00000000000..8ddaba1199c --- /dev/null +++ b/.github/workflows/ccip-load-tests.yml @@ -0,0 +1,289 @@ +name: CCIP Load Test +on: + push: + branches: + - develop + tags: + - '*' + workflow_dispatch: + inputs: + base64_test_input: # base64 encoded toml for test input + description: 'Base64 encoded toml test input' + required: false + +# Only run 1 of this workflow at a time per PR +concurrency: + group: load-ccip-tests-chainlink-${{ github.ref }} + cancel-in-progress: true + +env: + # TODO: TT-1470 - Update image names as we solidify new realease strategy + CHAINLINK_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink + CHAINLINK_VERSION: ${{ github.sha}} + INPUT_CHAINLINK_TEST_VERSION: ${{ github.sha}} + ENV_JOB_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink-ccip-tests:${{ github.sha }} + INTERNAL_DOCKER_REPO: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com + AWS_ECR_REPO_PUBLIC_REGISTRY: public.ecr.aws + MOD_CACHE_VERSION: 1 + +jobs: + build-chainlink: + environment: integration + permissions: + id-token: write + contents: read + name: Build Chainlink Image + runs-on: ubuntu20.04-16cores-64GB + steps: + - name: Checkout the repo + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - name: Check if image exists + id: check-image + uses: smartcontractkit/chainlink-github-actions/docker/image-exists@b49a9d04744b0237908831730f8553f26d73a94b # v2.3.17 + with: + repository: chainlink + tag: ${{ env.CHAINLINK_VERSION }} + AWS_REGION: ${{ secrets.QA_AWS_REGION }} + AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + - name: Build Image + if: steps.check-image.outputs.exists == 'false' + uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/build-image@b49a9d04744b0237908831730f8553f26d73a94b # v2.3.17 + env: + GH_TOKEN: ${{ github.token }} + with: + cl_repo: smartcontractkit/chainlink + cl_ref: ${{ env.CHAINLINK_VERSION }} + push_tag: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink:${{ env.CHAINLINK_VERSION }} + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 + with: + id: ccip-load-test-build-chainlink-image + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ 
secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Build Chainlink Image + continue-on-error: true + + build-test-image: + environment: integration + permissions: + id-token: write + contents: read + name: Build Test Image + runs-on: ubuntu20.04-16cores-64GB + steps: + - name: Collect Metrics + id: collect-gha-metrics + uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 + with: + id: ccip-load-test-build-test-image + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Build Test Image + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - name: Build Test Image + uses: ./.github/actions/build-test-image + with: + tag: ${{ env.INPUT_CHAINLINK_TEST_VERSION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + + ccip-load-test: + environment: integration + needs: [ build-chainlink, build-test-image ] + if: ${{ always() && !contains(needs.*.result, 'failure') }} + permissions: + issues: read + checks: write + pull-requests: write + id-token: write + contents: read + env: + CHAINLINK_ENV_USER: ${{ github.actor }} + SLACK_USER: ${{ inputs.slackMemberID }} + SLACK_API_KEY: ${{ secrets.QA_SLACK_API_KEY }} + SLACK_CHANNEL: ${{ secrets.QA_SLACK_CHANNEL }} + TEST_LOG_LEVEL: info + REF_NAME: ${{ github.head_ref || github.ref_name }} + BASE64_NETWORK_CONFIG: ${{ secrets.BASE64_NETWORK_CONFIG }} + strategy: + fail-fast: false + matrix: + type: + - name: stable-load + run: ^TestLoadCCIPStableRPS$ + os: ubuntu-latest + - name: load-with-arm-curse-uncurse + run: ^TestLoadCCIPStableRPSAfterARMCurseAndUncurse$ + config_path: ./integration-tests/ccip-tests/testconfig/tomls/load-with-arm-curse-uncurse.toml + os: ubuntu-latest + runs-on: ${{ matrix.type.os }} + name: CCIP ${{ matrix.type.name }} + steps: + - name: Collect Metrics + id: collect-gha-metrics + uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 + with: + id: ccip-load-test-${{ matrix.type.name }} + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: CCIP ${{ matrix.type.name }} + test-results-file: '{"testType":"go","filePath":"/tmp/gotest.log"}' + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + with: + ref: ${{ env.REF_NAME }} + - name: Sets env vars + shell: bash + run: | + # if the matrix.type.config_path is set, use it as the override config + if [ -n "${{ matrix.type.config_path }}" ]; then + echo "BASE64_CCIP_CONFIG_OVERRIDE=$(base64 -w 0 -i ${{ matrix.type.config_path }})" >> $GITHUB_ENV + echo "TEST_BASE64_CCIP_CONFIG_OVERRIDE=$(base64 -w 0 -i ${{ matrix.type.config_path }})" >> $GITHUB_ENV + fi + if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then + BASE64_CCIP_CONFIG_OVERRIDE=$(jq -r '.inputs.base64_test_input' $GITHUB_EVENT_PATH) + echo ::add-mask::$BASE64_CCIP_CONFIG_OVERRIDE + if [ -n "${BASE64_CCIP_CONFIG_OVERRIDE}" && "$BASE64_CCIP_CONFIG_OVERRIDE" != "null"]; then + echo "BASE64_CCIP_CONFIG_OVERRIDE=$BASE64_CCIP_CONFIG_OVERRIDE" >> $GITHUB_ENV + echo 
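
> The override exported by this step is simply a TOML document pushed through base64 so it fits in one environment variable. The sketch below shows how a consumer could decode and inspect it; the `OverrideConfig` struct shape and the use of `github.com/pelletier/go-toml/v2` are assumptions for illustration only, not the project's actual testconfig loader.

```go
// Illustrative only: decode BASE64_CCIP_CONFIG_OVERRIDE and read one field.
package main

import (
	"encoding/base64"
	"fmt"
	"log"
	"os"

	"github.com/pelletier/go-toml/v2" // assumed TOML decoder for this sketch
)

// OverrideConfig mirrors just the field used here; the real schema is larger.
type OverrideConfig struct {
	CCIP struct {
		Env struct {
			Network struct {
				SelectedNetworks []string `toml:"selected_networks"`
			} `toml:"Network"`
		} `toml:"Env"`
	} `toml:"CCIP"`
}

func main() {
	raw := os.Getenv("BASE64_CCIP_CONFIG_OVERRIDE")
	if raw == "" {
		fmt.Println("no override set, defaults will be used")
		return
	}
	decoded, err := base64.StdEncoding.DecodeString(raw)
	if err != nil {
		log.Fatalf("override is not valid base64: %v", err)
	}
	var cfg OverrideConfig
	if err := toml.Unmarshal(decoded, &cfg); err != nil {
		log.Fatalf("override is not valid TOML: %v", err)
	}
	fmt.Println("override selects networks:", cfg.CCIP.Env.Network.SelectedNetworks)
}
```
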
"TEST_BASE64_CCIP_CONFIG_OVERRIDE=$BASE64_CCIP_CONFIG_OVERRIDE" >> $GITHUB_ENV + fi + fi + - name: step summary + shell: bash + run: | + echo "### chainlink image used for this test run :link:" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.CHAINLINK_VERSION }}\`" >> $GITHUB_STEP_SUMMARY + echo "### chainlink-tests image tag for this test run :ship:" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.INPUT_CHAINLINK_TEST_VERSION }}\`" >> $GITHUB_STEP_SUMMARY + - name: Prepare Base64 TOML override for CCIP secrets + uses: ./.github/actions/setup-create-base64-config-ccip + with: + runId: ${{ github.run_id }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + chainlinkImage: ${{ env.CHAINLINK_IMAGE }} + chainlinkVersion: ${{ github.sha }} + lokiEndpoint: ${{ secrets.LOKI_URL }} + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: ${{ vars.GRAFANA_URL }} + grafanaDashboardUrl: "/d/6vjVx-1V8/ccip-long-running-tests" + - name: Run Tests + uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@b49a9d04744b0237908831730f8553f26d73a94b # v2.3.17 + env: + TEST_SUITE: load + TEST_ARGS: -test.timeout 900h + DATABASE_URL: postgresql://postgres:node@localhost:5432/chainlink_test?sslmode=disable + RR_MEM: 8Gi + RR_CPU: 4 + TEST_TRIGGERED_BY: ccip-load-test-ci-${{ matrix.type.name }} + with: + test_command_to_run: cd ./integration-tests/ccip-tests && go test -v -timeout 70m -count=1 -json -run ${{ matrix.type.run }} ./load 2>&1 | tee /tmp/gotest.log | gotestloghelper -ci + test_download_vendor_packages_command: cd ./integration-tests && go mod download + cl_repo: ${{ env.CHAINLINK_IMAGE }} + cl_image_tag: ${{ env.CHAINLINK_VERSION }} + token: ${{ secrets.GITHUB_TOKEN }} + go_mod_path: ./integration-tests/go.mod + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + triggered_by: ${{ env.TEST_TRIGGERED_BY }} + publish_check_name: ${{ matrix.type.name }} + artifacts_location: ./integration-tests/load/logs/payload_ccip.json + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + cache_key_id: ccip-load-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + should_cleanup: "true" + + # Reporting Jobs + start-slack-thread: + name: Start Slack Thread + if: ${{ failure() && needs.ccip-load-test.result != 'skipped' && needs.ccip-load-test.result != 'cancelled' }} + environment: integration + outputs: + thread_ts: ${{ steps.slack.outputs.thread_ts }} + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + runs-on: ubuntu-latest + needs: [ccip-load-test] + steps: + - name: Debug Result + run: echo ${{ join(needs.*.result, ',') }} + - name: Main Slack Notification + uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 + id: slack + with: + channel-id: "#ccip-testing" + payload: | + { + "attachments": [ + { + "color": "${{ contains(join(needs.*.result, ','), 'failure') && '#C62828' || '#2E7D32' }}", + "blocks": [ + { + "type": "header", + "text": { + "type": "plain_text", + "text": "CCIP load tests results ${{ contains(join(needs.*.result, ','), 'failure') && ':x:' || ':white_check_mark:'}}", + "emoji": true + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "<${{ github.server_url }}/${{ github.repository }}/${{contains(github.ref_name, 'release') && 'releases/tag' || 'tree'}}/${{ github.ref_name }}|${{ 
github.ref_name }}> | <${{ github.server_url }}/${{ github.repository }}/commit/${{ github.sha }}|${{ github.sha }}> | <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|Run>" + } + } + ] + } + ] + } + env: + SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} + + post-test-results-to-slack: + name: Post Test Results + if: ${{ failure() && needs.start-slack-thread.result != 'skipped' && needs.start-slack-thread.result != 'cancelled' }} + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + runs-on: ubuntu-latest + needs: start-slack-thread + steps: + - name: Checkout the repo + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + with: + ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }} + - name: Post Test Results + uses: ./.github/actions/notify-slack-jobs-result + with: + github_token: ${{ github.token }} + github_repository: ${{ github.repository }} + workflow_run_id: ${{ github.run_id }} + github_job_name_regex: ^CCIP (.*)$ + message_title: CCIP Jobs + slack_channel_id: "#ccip-testing" + slack_bot_token: ${{ secrets.QA_SLACK_API_KEY }} + slack_thread_ts: ${{ needs.start-slack-thread.outputs.thread_ts }} + + # End Reporting Jobs diff --git a/.github/workflows/chain-selectors-check.yml b/.github/workflows/chain-selectors-check.yml new file mode 100644 index 00000000000..c2b58e68d44 --- /dev/null +++ b/.github/workflows/chain-selectors-check.yml @@ -0,0 +1,39 @@ +name: Chain Selectors Version Check + +on: + push: + branches: + - develop + - release/* + tags: + - v* + pull_request: + branches: + - release/* + + +jobs: + verify-version: + runs-on: ubuntu-latest + steps: + - name: Checkout Repo + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + + - name: Setup Go + uses: ./.github/actions/setup-go + with: + only-modules: true + go-version-file: "go.mod" + + - name: Get chain-selectors version + id: get-chain-selectors-version + shell: bash + env: + GH_TOKEN: ${{ github.token }} + run: | + current_chain_selector_version=$(go list -m -f '{{.Version}}' github.com/smartcontractkit/chain-selectors) + latest_chain_selector_version=$(gh release view -R smartcontractkit/chain-selectors --json tagName --jq '.tagName') + if [[ "$current_chain_selector_version" != "$latest_chain_selector_version" ]]; then + echo "::error:: Chain Selectors version mismatch. 
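
> This version check compares the `go.mod` pin of `chain-selectors` against the latest release tag via `go list` and `gh`. For local debugging, the same comparison can be reproduced in Go; the sketch below uses `golang.org/x/mod/modfile` and takes the latest tag as a plain argument instead of querying the GitHub API, which is an assumption of this example.

```go
// Sketch: compare the go.mod pin of chain-selectors against a tag passed on
// the command line (e.g. the output of `gh release view --json tagName`).
package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/mod/modfile" // assumed helper dependency for this sketch
)

func main() {
	if len(os.Args) != 3 {
		log.Fatal("usage: checkselectors <path-to-go.mod> <latest-tag>")
	}
	data, err := os.ReadFile(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	f, err := modfile.Parse(os.Args[1], data, nil)
	if err != nil {
		log.Fatal(err)
	}
	const mod = "github.com/smartcontractkit/chain-selectors"
	for _, req := range f.Require {
		if req.Mod.Path != mod {
			continue
		}
		if req.Mod.Version != os.Args[2] {
			log.Fatalf("chain-selectors mismatch: pinned %s, latest %s", req.Mod.Version, os.Args[2])
		}
		fmt.Println("chain-selectors is up to date:", req.Mod.Version)
		return
	}
	log.Fatalf("%s not found in %s", mod, os.Args[1])
}
```
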
Current version: $current_chain_selector_version, Latest version: $latest_chain_selector_version" + exit 1 + fi diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index fd5784df8c0..d821b20a30b 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -511,7 +511,323 @@ jobs: - name: Print failed test summary if: always() - uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/show-test-summary@70ccaef155381025e411cf7cd1fa5ef8f668ed75 # v2.3.25 + uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/show-test-summary@75a9005952a9e905649cfb5a6971fd9429436acd # v2.3.25 + + eth-smoke-tests-matrix-ccip: + if: ${{ !contains(join(github.event.pull_request.labels.*.name, ' '), 'skip-smoke-tests') }} + environment: integration + permissions: + actions: read + checks: write + pull-requests: write + id-token: write + contents: read + needs: [build-chainlink, changes, build-lint-integration-tests] + env: + SELECTED_NETWORKS: SIMULATED + CHAINLINK_COMMIT_SHA: ${{ inputs.evm-ref || github.sha }} + CHAINLINK_ENV_USER: ${{ github.actor }} + TEST_LOG_LEVEL: debug + strategy: + fail-fast: false + matrix: + product: + - name: ccip-smoke + nodes: 1 + os: ubuntu-latest + file: ccip + dir: ccip-tests/smoke + run: -run ^TestSmokeCCIPForBidirectionalLane$ + - name: ccip-smoke-1.4-pools + nodes: 1 + os: ubuntu-latest + file: ccip + dir: ccip-tests/smoke + run: -run ^TestSmokeCCIPForBidirectionalLane$ + config_path: ./integration-tests/ccip-tests/testconfig/tomls/contract-version1.4.toml + - name: ccip-smoke-usdc + nodes: 1 + os: ubuntu-latest + file: ccip + dir: ccip-tests/smoke + run: -run ^TestSmokeCCIPForBidirectionalLane$ + config_path: ./integration-tests/ccip-tests/testconfig/tomls/usdc_mock_deployment.toml + - name: ccip-smoke-db-compatibility + nodes: 1 + os: ubuntu-latest + file: ccip + dir: ccip-tests/smoke + run: -run ^TestSmokeCCIPForBidirectionalLane$ + config_path: ./integration-tests/ccip-tests/testconfig/tomls/db-compatibility.toml + - name: ccip-smoke-rate-limit + nodes: 1 + dir: ccip-tests/smoke + os: ubuntu-latest + file: ccip + run: -run ^TestSmokeCCIPRateLimit$ + - name: ccip-smoke-rate-limit + nodes: 1 + dir: ccip-tests/smoke + os: ubuntu-latest + file: ccip + run: -run ^TestSmokeCCIPTokenPoolRateLimits$ + - name: ccip-smoke-multicall + nodes: 1 + dir: ccip-tests/smoke + os: ubuntu-latest + file: ccip + run: -run ^TestSmokeCCIPMulticall$ + - name: ccip-smoke-manual-exec + nodes: 1 + dir: ccip-tests/smoke + os: ubuntu-latest + file: ccip + run: -run ^TestSmokeCCIPManuallyExecuteAfterExecutionFailingDueToInsufficientGas$ + - name: ccip-smoke-on-ramp-limits + nodes: 1 + dir: ccip-tests/smoke + os: ubuntu-latest + file: ccip + run: -run ^TestSmokeCCIPOnRampLimits$ + - name: ccip-smoke-off-ramp-capacity + nodes: 1 + dir: ccip-tests/smoke + os: ubuntu-latest + file: ccip + run: -run ^TestSmokeCCIPOffRampCapacityLimit$ + - name: ccip-smoke-off-ramp-agg-rate-limit + nodes: 1 + dir: ccip-tests/smoke + os: ubuntu-latest + file: ccip + run: -run ^TestSmokeCCIPOffRampAggRateLimit$ + - name: ccip-smoke-leader-lane + nodes: 15 + dir: ccip-tests/smoke + os: ubuntu-latest + file: ccip + run: -run ^TestSmokeCCIPForBidirectionalLane$ + config_path: ./integration-tests/ccip-tests/testconfig/tomls/leader-lane.toml + runs-on: ${{ matrix.product.os }} + name: ETH Smoke Tests ${{ matrix.product.name }}${{ matrix.product.tag_suffix }} + steps: + # Handy for debugging resource usage + # 
- name: Collect Workflow Telemetry + # uses: catchpoint/workflow-telemetry-action@v2 + - name: Collect Metrics + if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' + id: collect-gha-metrics + uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 + with: + id: ${{ env.COLLECTION_ID }}-matrix-${{ matrix.product.id }} + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: ETH Smoke Tests ${{ matrix.product.name }}${{ matrix.product.tag_suffix }} + test-results-file: '{"testType":"go","filePath":"/tmp/gotest.log"}' + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + with: + repository: smartcontractkit/chainlink + ref: ${{ inputs.cl_ref || github.event.pull_request.head.sha || github.event.merge_group.head_sha }} + - name: Build Go Test Command + id: build-go-test-command + run: | + # if dir is provided use it, otherwise use the smoke dir + if [ "${{ matrix.product.dir }}" != "" ]; then + dir=${{ matrix.product.dir }} + else + dir=smoke + fi + # if the matrix.product.run is set, use it for a different command + if [ "${{ matrix.product.run }}" != "" ]; then + echo "run_command=${{ matrix.product.run }} ./${dir}/${{ matrix.product.file }}_test.go" >> "$GITHUB_OUTPUT" + else + echo "run_command=./${dir}/${{ matrix.product.name }}_test.go" >> "$GITHUB_OUTPUT" + fi + - name: Check for "enable tracing" label + id: check-label + run: | + label=$(jq -r '.pull_request.labels[]?.name // empty' "$GITHUB_EVENT_PATH") + + if [[ -n "$label" ]]; then + if [[ "$label" == "enable tracing" ]]; then + echo "Enable tracing label found." + echo "trace=true" >> $GITHUB_OUTPUT + else + echo "Enable tracing label not found." + echo "trace=false" >> $GITHUB_OUTPUT + fi + else + echo "No labels present or labels are null." 
+ echo "trace=false" >> $GITHUB_OUTPUT + fi + + - name: Setup Grafana and OpenTelemetry + id: docker-setup + if: steps.check-label.outputs.trace == 'true' && matrix.product.name == 'ocr2' && matrix.product.tag_suffix == '-plugins' + run: | + # Create network + docker network create --driver bridge tracing + + # Make trace directory + cd integration-tests/smoke/ + mkdir ./traces + chmod -R 777 ./traces + + # Switch directory + cd ../../.github/tracing + + # Create a Docker volume for traces + # docker volume create otel-traces + + # Start OpenTelemetry Collector + # Note the user must be set to the same user as the runner for the trace data to be accessible + docker run -d --network=tracing --name=otel-collector \ + -v $PWD/otel-collector-ci.yaml:/etc/otel-collector.yaml \ + -v $PWD/../../integration-tests/smoke/traces:/tracing \ + --user "$(id -u):$(id -g)" \ + -p 4317:4317 otel/opentelemetry-collector:0.88.0 --config=/etc/otel-collector.yaml + + - name: Locate Docker Volume + id: locate-volume + if: false + run: | + echo "VOLUME_PATH=$(docker volume inspect --format '{{ .Mountpoint }}' otel-traces)" >> $GITHUB_OUTPUT + + - name: Show Otel-Collector Logs + if: steps.check-label.outputs.trace == 'true' && matrix.product.name == 'ocr2' && matrix.product.tag_suffix == '-plugins' + run: | + docker logs otel-collector + + - name: Set Override Config + id: set_override_config + run: | + # if the matrix.product.config_path is set, use it as the override config + if [ "${{ matrix.product.config_path }}" != "" ]; then + echo "base_64_override=$(base64 -w 0 -i ${{ matrix.product.config_path }})" >> "$GITHUB_OUTPUT" + fi + + - name: Setup GAP for Grafana + uses: smartcontractkit/.github/actions/setup-gap@d316f66b2990ea4daa479daa3de6fc92b00f863e # setup-gap@0.3.2 + id: setup-gap + with: + # aws inputs + aws-region: ${{ secrets.AWS_REGION }} + aws-role-arn: ${{ secrets.AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN }} + api-gateway-host: ${{ secrets.AWS_API_GW_HOST_GRAFANA }} + # other inputs + duplicate-authorization-header: "true" + + - name: Prepare Base64 CCIP TOML secrets + uses: ./.github/actions/setup-create-base64-config-ccip + with: + runId: ${{ github.run_id }} + pyroscopeServer: ${{ matrix.product.pyroscope_env == '' && '' || !startsWith(github.ref, 'refs/tags/') && '' || secrets.QA_PYROSCOPE_INSTANCE }} # Avoid sending blank envs https://github.com/orgs/community/discussions/25725 + pyroscopeEnvironment: ${{ matrix.product.pyroscope_env }} + pyroscopeKey: ${{ secrets.QA_PYROSCOPE_KEY }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + selectedNetworks: SIMULATED_1,SIMULATED_2 + chainlinkImage: ${{ env.CHAINLINK_IMAGE }} + chainlinkVersion: ${{ github.sha }} + lokiEndpoint: https://${{ secrets.GRAFANA_INTERNAL_HOST }}/loki/api/v1/push + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + lokiBasicAuth: ${{ secrets.LOKI_BASIC_AUTH }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: "http://localhost:8080/primary" + grafanaDashboardUrl: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" + grafanaBearerToken: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }} + + ## Run this step when changes that require tests to be run are made + - name: Run Tests + if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' + uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@aa8eea635029ab8d95abd3c206f56dae1e22e623 # v2.3.28 + with: + test_command_to_run: cd ./integration-tests && go test -timeout 30m -count=1 -json -test.parallel=${{ 
matrix.product.nodes }} ${{ steps.build-go-test-command.outputs.run_command }} 2>&1 | tee /tmp/gotest.log | gotestloghelper -ci -singlepackage -hidepassingtests=false -hidepassinglogs + test_download_vendor_packages_command: cd ./integration-tests && go mod download + test_config_chainlink_version: ${{ inputs.evm-ref || github.sha }} + test_config_selected_networks: ${{ env.SELECTED_NETWORKS }} + test_config_logging_run_id: ${{ github.run_id }} + test_config_logstream_log_targets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + test_config_test_log_collect: ${{ vars.TEST_LOG_COLLECT }} + cl_repo: ${{ env.CHAINLINK_IMAGE }} + cl_image_tag: ${{ inputs.evm-ref || github.sha }}${{ matrix.product.tag_suffix }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + artifacts_name: ${{ matrix.product.name }}${{ matrix.product.tag_suffix }}-test-logs + artifacts_location: | + ./integration-tests/smoke/logs/ + ./integration-tests/smoke/db_dumps/ + /tmp/gotest.log + publish_check_name: ${{ matrix.product.name }} + token: ${{ secrets.GITHUB_TOKEN }} + go_mod_path: ./integration-tests/go.mod + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: "" + should_tidy: "false" + go_coverage_src_dir: /var/tmp/go-coverage + go_coverage_dest_dir: ${{ github.workspace }}/.covdata + DEFAULT_CHAINLINK_IMAGE: ${{ env.CHAINLINK_IMAGE }} + DEFAULT_LOKI_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + DEFAULT_LOKI_ENDPOINT: https://${{ secrets.GRAFANA_INTERNAL_HOST }}/loki/api/v1/push + DEFAULT_LOKI_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + DEFAULT_GRAFANA_BASE_URL: "http://localhost:8080/primary" + DEFAULT_GRAFANA_DASHBOARD_URL: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" + DEFAULT_GRAFANA_BEARER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }} + DEFAULT_PYROSCOPE_SERVER_URL: ${{ matrix.product.pyroscope_env == '' && '' || !startsWith(github.ref, 'refs/tags/') && '' || secrets.QA_PYROSCOPE_INSTANCE }} # Avoid sending blank envs https://github.com/orgs/community/discussions/25725 + DEFAULT_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} + DEFAULT_PYROSCOPE_ENVIRONMENT: ${{ matrix.product.pyroscope_env }} + DEFAULT_PYROSCOPE_ENABLED: ${{ matrix.product.pyroscope_env == '' || !startsWith(github.ref, 'refs/tags/') && 'false' || 'true' }} + + - name: Upload Coverage Data + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + timeout-minutes: 2 + continue-on-error: true + with: + name: cl-node-coverage-data-${{ matrix.product.name }}-${{ matrix.product.tag_suffix }} + path: .covdata + retention-days: 1 + + # Run this step when changes that do not need the test to run are made + - name: Run Setup + if: needs.changes.outputs.src == 'false' + uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/setup-run-tests-environment@75a9005952a9e905649cfb5a6971fd9429436acd # v2.3.25 + with: + test_download_vendor_packages_command: cd ./integration-tests && go mod download + go_mod_path: ./integration-tests/go.mod + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: "" + should_tidy: "false" + + - name: Show Otel-Collector Logs + if: steps.check-label.outputs.trace == 'true' && matrix.product.name == 'ocr2' && matrix.product.tag_suffix == 
'-plugins' + run: | + docker logs otel-collector + + - name: Permissions on traces + if: steps.check-label.outputs.trace == 'true' && matrix.product.name == 'ocr2' && matrix.product.tag_suffix == '-plugins' + run: | + ls -l ./integration-tests/smoke/traces + + - name: Upload Trace Data + if: steps.check-label.outputs.trace == 'true' && matrix.product.name == 'ocr2' && matrix.product.tag_suffix == '-plugins' + uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + with: + name: trace-data + path: ./integration-tests/smoke/traces/trace-data.json + + - name: Print failed test summary + if: always() + uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/show-test-summary@75a9005952a9e905649cfb5a6971fd9429436acd # v2.3.25 + with: + test_directories: ./integration-tests/smoke/ + eth-smoke-tests-matrix: if: ${{ !contains(join(github.event.pull_request.labels.*.name, ' '), 'skip-smoke-tests') }} @@ -622,11 +938,17 @@ jobs: - name: Build Go Test Command id: build-go-test-command run: | + # if dir is provided use it, otherwise use the smoke dir + if [ "${{ matrix.product.dir }}" != "" ]; then + dir=${{ matrix.product.dir }} + else + dir=smoke + fi # if the matrix.product.run is set, use it for a different command if [ "${{ matrix.product.run }}" != "" ]; then - echo "run_command=${{ matrix.product.run }} ./smoke/${{ matrix.product.file }}_test.go" >> "$GITHUB_OUTPUT" + echo "run_command=${{ matrix.product.run }} ./${dir}/${{ matrix.product.file }}_test.go" >> "$GITHUB_OUTPUT" else - echo "run_command=./smoke/${{ matrix.product.name }}_test.go" >> "$GITHUB_OUTPUT" + echo "run_command=./${dir}/${{ matrix.product.name }}_test.go" >> "$GITHUB_OUTPUT" fi - name: Check for "enable tracing" label id: check-label @@ -682,6 +1004,14 @@ jobs: if: steps.check-label.outputs.trace == 'true' && matrix.product.name == 'ocr2' && matrix.product.tag_suffix == '-plugins' run: | docker logs otel-collector + + - name: Set Override Config + id: set_override_config + run: | + # if the matrix.product.config_path is set, use it as the override config + if [ "${{ matrix.product.config_path }}" != "" ]; then + echo "base_64_override=$(base64 -w 0 -i ${{ matrix.product.config_path }})" >> "$GITHUB_OUTPUT" + fi - name: Setup GAP for Grafana uses: smartcontractkit/.github/actions/setup-gap@d316f66b2990ea4daa479daa3de6fc92b00f863e # setup-gap@0.3.2 @@ -694,6 +1024,25 @@ jobs: # other inputs duplicate-authorization-header: "true" + - name: Prepare Base64 CCIP TOML secrets + uses: ./.github/actions/setup-create-base64-config-ccip + with: + runId: ${{ github.run_id }} + pyroscopeServer: ${{ matrix.product.pyroscope_env == '' && '' || !startsWith(github.ref, 'refs/tags/') && '' || secrets.QA_PYROSCOPE_INSTANCE }} # Avoid sending blank envs https://github.com/orgs/community/discussions/25725 + pyroscopeEnvironment: ${{ matrix.product.pyroscope_env }} + pyroscopeKey: ${{ secrets.QA_PYROSCOPE_KEY }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + selectedNetworks: SIMULATED_1,SIMULATED_2 + chainlinkImage: ${{ env.CHAINLINK_IMAGE }} + chainlinkVersion: ${{ github.sha }} + lokiEndpoint: https://${{ secrets.GRAFANA_INTERNAL_HOST }}/loki/api/v1/push + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + lokiBasicAuth: ${{ secrets.LOKI_BASIC_AUTH }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: "http://localhost:8080/primary" + grafanaDashboardUrl: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" + grafanaBearerToken: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN 
}} + ## Run this step when changes that require tests to be run are made - name: Run Tests if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' @@ -811,6 +1160,30 @@ jobs: matrix-aggregator-status: ${{ needs.eth-smoke-tests-matrix.result }} continue-on-error: true + eth-smoke-tests-ccip: + if: always() + runs-on: ubuntu-latest + name: ETH Smoke Tests CCIP + needs: eth-smoke-tests-matrix-ccip + steps: + - name: Check smoke test matrix status + if: needs.eth-smoke-tests-matrix-ccip.result != 'success' + run: | + echo "ETH Smoke Tests CCIP: ${{ needs.eth-smoke-tests-matrix-ccip.result }}" + exit 1 + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 + with: + id: ${{ env.COLLECTION_ID }}-matrix-results-ccip + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: ETH Smoke Tests CCIP + matrix-aggregator-status: ${{ needs.eth-smoke-tests-matrix-ccip.result }} + continue-on-error: true + cleanup: name: Clean up integration environment deployments if: always() diff --git a/.gitignore b/.gitignore index 07dc8baa13a..00962a94a39 100644 --- a/.gitignore +++ b/.gitignore @@ -75,6 +75,8 @@ integration-tests/**/traces/ benchmark_report.csv benchmark_summary.json integration-tests/citool/output.csv +secrets.toml +tmp_laneconfig/ # goreleaser builds cosign.* diff --git a/integration-tests/ccip-tests/Makefile b/integration-tests/ccip-tests/Makefile new file mode 100644 index 00000000000..5a40f7ca0f6 --- /dev/null +++ b/integration-tests/ccip-tests/Makefile @@ -0,0 +1,70 @@ +## To Override the default config, and secret config: +# example usage: make set_config override_toml=../config/config.toml secret_toml=../config/secret.toml network_config_toml=../config/network.toml +.PHONY: set_config +set_config: + if [ -s "$(override_toml)" ]; then \ + echo "Overriding config with $(override_toml)"; \ + echo "export BASE64_CCIP_CONFIG_OVERRIDE=$$(base64 -i $(override_toml))" > ./testconfig/override/.env; \ + echo "export TEST_BASE64_CCIP_CONFIG_OVERRIDE=$$(base64 -i $(override_toml))" >> ./testconfig/override/.env; \ + else \ + echo "No override config found, using default config"; \ + echo > ./testconfig/override/.env; \ + fi + if [ -s "$(network_config_toml)" ]; then \ + echo "Overriding network config with $(network_config_toml)"; \ + echo "export BASE64_NETWORK_CONFIG=$$(base64 -i $(network_config_toml))" >> ./testconfig/override/.env; \ + fi + + @echo "setting secret config with $(secret_toml)" + @echo "export BASE64_CCIP_SECRETS_CONFIG=$$(base64 -i $(secret_toml))" >> ./testconfig/override/.env + @echo "export TEST_BASE64_CCIP_SECRETS_CONFIG=$$(base64 -i $(secret_toml))" >> ./testconfig/override/.env + @echo "BASE64_CCIP_SECRETS_CONFIG=$$(base64 -i $(secret_toml))" > ./testconfig/override/debug.env + @echo "TEST_BASE64_CCIP_SECRETS_CONFIG=$$(base64 -i $(secret_toml))" >> ./testconfig/override/debug.env + + +# example usage: make test_load_ccip testimage=chainlink-ccip-tests:latest testname=TestLoadCCIPStableRPS override_toml=./testconfig/override/config.toml secret_toml=./testconfig/tomls/secrets.toml network_config_toml=../config/network.toml +.PHONY: test_load_ccip +test_load_ccip: set_config + source ./testconfig/override/.env && \ + DATABASE_URL=postgresql://postgres:node@localhost:5432/chainlink_test?sslmode=disable \ + 
ENV_JOB_IMAGE=$(testimage) \ + TEST_SUITE=load \ + TEST_ARGS="-test.timeout 900h" \ + DETACH_RUNNER=true \ + RR_MEM=16Gi \ + RR_CPU=4 \ + go test -timeout 24h -count=1 -v -run ^$(testname)$$ ./load + + +# example usage: make test_smoke_ccip testimage=chainlink-ccip-tests:latest testname=TestSmokeCCIPForBidirectionalLane override_toml=../testconfig/override/config.toml secret_toml=./testconfig/tomls/secrets.toml network_config_toml=../config/network.toml +.PHONY: test_smoke_ccip +test_smoke_ccip: set_config + source ./testconfig/override/.env && \ + DATABASE_URL=postgresql://postgres:node@localhost:5432/chainlink_test?sslmode=disable \ + ENV_JOB_IMAGE=$(testimage) \ + TEST_SUITE=smoke \ + TEST_ARGS="-test.timeout 900h" \ + DETACH_RUNNER=true \ + go test -timeout 24h -count=1 -v -run ^$(testname)$$ ./smoke + +# run ccip smoke tests with default config; explicitly sets the override config to empty +# example usage: make test_smoke_ccip_default testname=TestSmokeCCIPForBidirectionalLane secret_toml=./testconfig/tomls/secrets.toml +.PHONY: test_smoke_ccip_default +test_smoke_ccip_default: set_config + source ./testconfig/override/.env && \ + DATABASE_URL=postgresql://postgres:node@localhost:5432/chainlink_test?sslmode=disable \ + BASE64_CCIP_CONFIG_OVERRIDE="" \ + TEST_BASE64_CCIP_CONFIG_OVERRIDE="" \ + ENV_JOB_IMAGE="" \ + TEST_SUITE=smoke \ + TEST_ARGS="-test.timeout 900h" \ + DETACH_RUNNER=true \ + go test -timeout 24h -count=1 -v -run ^$(testname)$$ ./smoke + + +# image: the name for the chainlink image being built, example: image=chainlink +# tag: the tag for the chainlink image being built, example: tag=latest +# example usage: make build_ccip_image image=chainlink-ccip tag=latest +.PHONY: build_ccip_image +build_ccip_image: + docker build -f ../../core/chainlink.Dockerfile --build-arg COMMIT_SHA=$(git rev-parse HEAD) --build-arg CHAINLINK_USER=chainlink -t $(image):$(tag) ../../ diff --git a/integration-tests/ccip-tests/README.md b/integration-tests/ccip-tests/README.md new file mode 100644 index 00000000000..0e000561fa4 --- /dev/null +++ b/integration-tests/ccip-tests/README.md @@ -0,0 +1,138 @@ +# CCIP Tests + +Here lives the integration tests for ccip, utilizing our [chainlink-testing-framework](https://github.com/smartcontractkit/chainlink-testing-framework) and [integration-tests](https://github.com/smartcontractkit/ccip/tree/ccip-develop/integration-tests) + +## Setup the Tests + +CCIP tests are designed to be highly configurable. Instead of writing many tests to check specific scenarios, the philosophy is to write a few unique tests and make them adjustable through the use of test inputs and configurations. There are a few different ways to set this configuration: + +1. Default test input - set via TOML - If no specific input is set; the tests will run with default inputs mentioned in [default.toml](./testconfig/tomls/ccip-default.toml). +Please refer to the [testconfig README](../testconfig/README.md) for a more detailed look at how testconfig works. +2. If you want to run your test with a different config, you can override the default inputs. You can either write an [overrides.toml](../testconfig/README.md#configuration-and-overrides) file, or set the env var `BASE64_CCIP_CONFIG_OVERRIDE` containing the base64 encoded TOML file content with updated test input parameters. +For example, if you want to override the `Network` input in test and want to run your test on `avalanche testnet` and `arbitrum goerli` network, you need to: + 1. 
Create a TOML file with the following content: + + ```toml + [CCIP] + [CCIP.Env] + [CCIP.Env.Network] + selected_networks= ['AVALANCHE_FUJI', 'ARBITRUM_GOERLI'] + ``` + + 2. Encode it using the `base64` command + 3. Set the env var `BASE64_CCIP_CONFIG_OVERRIDE` with the encoded content. + + ```bash + export BASE64_CCIP_CONFIG_OVERRIDE=$(base64 -i ) + ``` + + [mainnet.toml](./testconfig/override/mainnet.toml), [override.toml](./testconfig/examples/override.toml.example) are some of the sample override TOML files. + + For example - In order to run the smoke test (TestSmokeCCIPForBidirectionalLane) on mainnet, run the test with following env var set: + + ```bash + export BASE64_CCIP_CONFIG_OVERRIDE=$(base64 -i ./testconfig/override/mainnet.toml) + ``` + +3. Secrets - You also need to set some secrets. This is a mandatory step needed to run the tests. Please refer to [sample-secrets.toml](./testconfig/examples/secrets.toml.example) for the list of secrets that are mandatory to run the tests. + - The chainlink image and tag are required secrets for all the tests. + - If you are running tests in live networks like testnet and mainnet, you need to set the secrets (rpc urls and private keys) for the respective networks. + - If you are running tests in simulated networks no network specific secrets are required. + here is a sample secrets.toml file, for running the tests in simulated networks, with the chainlink image and tag set as secrets: + + ```toml + [CCIP] + [CCIP.Env] + # ChainlinkImage is mandatory for all tests. + [CCIP.Env.NewCLCluster] + [CCIP.Env.NewCLCluster.Common] + [CCIP.Env.NewCLCluster.Common.ChainlinkImage] + image = "chainlink-ccip" + version = "latest" + ``` + + We consider secrets similar to test input overrides and encode them using `base64` command. + Once you have the secrets.toml file, you can encode it using `base64` command (similar to step 2) and set the env var `BASE64_CCIP_SECRETS_CONFIG` with the encoded content. + + ```bash + export BASE64_CCIP_SECRETS_CONFIG=$(base64 -i ./testconfig/tomls/secrets.toml) + ``` + +**Please note that the secrets should NOT be checked in to the repo and should be kept locally.** + +We recommend against changing the content of [sample-secrets.toml](./testconfig/examples/secrets.toml.example). Please create a new file and set it as the secrets file. +You can run the command to ignore the changes to the file. + +```bash +git update-index --skip-worktree +``` + +## Running the Tests + +There are two ways to run the tests: + +1. Using local docker containers +2. Using a remote kubernetes cluster + +### Using Local Docker Containers + +In order to run the tests locally, you need to have docker installed and running on your machine. +You can use a specific chainlink image and tag (if you already have one) for the tests. Otherwise, you can build the image using the following command: + +```bash +make build_ccip_image image=chainlink-ccip tag=latest-dev # please choose the image and tag name as per your choice +``` + +For a local run, tests creates two private geth networks and runs the tests on them. Running tests on testnet and mainnet is not supported yet for local docker tests and must be run in a kubernetes environment. + +1. [Setting the test inputs](#setup-the-tests) + 1. If required, create an `override.toml` with the required test inputs. If you want to run the tests with default parameters, you can skip this step. + 2. Create a TOML file with the secrets. +2. 
Run the following command to run the smoke tests with your custom override toml and secrets. + +```bash +# mark the testimage as empty for running the tests in local docker containers +make test_smoke_ccip testimage="" testname=TestSmokeCCIPForBidirectionalLane override_toml="" secret_toml="" +``` + +If you don't want to bother with any overrides, you can run with the default TOML settings with the below command. + +```bash +make test_smoke_ccip_default testname=TestSmokeCCIPForBidirectionalLane secret_toml="" +``` + +```mermaid +--- +title: Basic Docker Test Environment +--- +flowchart + subgraph SD[DON] + CL1[Node 1] + CL2[Node 2] + CL3[Node 3] + CL4[Node 4] + CL5[Node 5] + CL6[Node 6] + CL1---CL2 + CL2---CL3 + CL3---CL4 + CL4---CL5 + CL5---CL6 + end + subgraph Chains + SC1[[Private Chain 1]] + SC2[[Private Chain 2]] + end + SC1<-->SD + SC2<-->SD + MS([Mock Server]) + MS-->SD + TC[/Test Code\] + TC<-->MS + TC<-->Chains + TC<-->SD +``` + +### Using Remote Kubernetes Cluster + +For running more complex and intensive tests (like load and chaos tests) you need to connect the test to a Kubernetes cluster. These tests have more complex setup and running instructions. We endeavor to make these easier to run and configure, but for the time being please seek a member of the QA/Test Tooling team if you want to run these. diff --git a/integration-tests/ccip-tests/actions/ccip_helpers.go b/integration-tests/ccip-tests/actions/ccip_helpers.go new file mode 100644 index 00000000000..7594a9dc447 --- /dev/null +++ b/integration-tests/ccip-tests/actions/ccip_helpers.go @@ -0,0 +1,4380 @@ +package actions + +import ( + "context" + crypto_rand "crypto/rand" + "encoding/base64" + "encoding/json" + "fmt" + "math/big" + "net/http" + "runtime" + "strings" + "sync" + "testing" + "time" + + "dario.cat/mergo" + "github.com/AlekSi/pointer" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/pkg/errors" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" + "golang.org/x/exp/rand" + "golang.org/x/sync/errgroup" + + "github.com/smartcontractkit/chainlink-testing-framework/utils/ptr" + + chainselectors "github.com/smartcontractkit/chain-selectors" + + commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config" + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + ctfClient "github.com/smartcontractkit/chainlink-testing-framework/client" + ctftestenv "github.com/smartcontractkit/chainlink-testing-framework/docker/test_env" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/foundry" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/reorg" + "github.com/smartcontractkit/chainlink-testing-framework/networks" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/contracts" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/contracts/laneconfig" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testconfig" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testreporters" + testutils 
"github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/utils" + "github.com/smartcontractkit/chainlink/integration-tests/client" + "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/arm_contract" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/mock_arm_contract" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/token_pool" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers" + integrationtesthelpers "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers/integration" + bigmath "github.com/smartcontractkit/chainlink/v2/core/utils/big_math" +) + +const ( + ChaosGroupExecution = "ExecutionNodesAll" // all execution nodes + ChaosGroupCommit = "CommitNodesAll" // all commit nodes + ChaosGroupCommitFaultyPlus = "CommitMajority" // >f number of nodes + ChaosGroupCommitFaulty = "CommitMinority" // f number of nodes + ChaosGroupExecutionFaultyPlus = "ExecutionNodesMajority" // > f number of nodes + ChaosGroupExecutionFaulty = "ExecutionNodesMinority" // f number of nodes + + ChaosGroupCommitAndExecFaulty = "CommitAndExecutionNodesMinority" // f number of nodes + ChaosGroupCommitAndExecFaultyPlus = "CommitAndExecutionNodesMajority" // >f number of nodes + ChaosGroupCCIPGeth = "CCIPGeth" // both source and destination simulated geth networks + ChaosGroupNetworkACCIPGeth = "CCIPNetworkAGeth" + ChaosGroupNetworkBCCIPGeth = "CCIPNetworkBGeth" + + defaultUSDCDestBytesOverhead = 640 + defaultUSDCDestGasOverhead = 150_000 + DefaultDestinationGasLimit = 600_000 + // DefaultResubscriptionTimeout denotes the max backoff duration for resubscription for various watch events + // if the subscription keeps failing even after this duration, the test will fail + DefaultResubscriptionTimeout = 2 * time.Hour +) + +// TODO: These should be refactored along with the default CCIP test setup to use optional config functions +var ( + // DefaultPermissionlessExecThreshold denotes how long the DON will retry a transaction before giving up, + // otherwise known as the "Smart Execution Time Window". 
If a transaction fails to execute within this time window, + // the DON will give up and the transaction will need Manual Execution as detailed here: https://docs.chain.link/ccip/concepts/manual-execution#manual-execution + // For performance tests: the higher the load/throughput, the higher value we might need here to guarantee that nonces are not blocked + // 1 day should be enough for most of the cases + DefaultPermissionlessExecThreshold = time.Hour * 8 + DefaultMaxNoOfTokensInMsg uint16 = 50 +) + +type CCIPTOMLEnv struct { + Networks []blockchain.EVMNetwork +} + +var ( + NetworkChart = reorg.TXNodesAppLabel + NetworkName = func(name string) string { + return strings.ReplaceAll(strings.ToLower(name), " ", "-") + } + InflightExpiryExec = 3 * time.Minute + InflightExpiryCommit = 3 * time.Minute + BatchGasLimit = uint32(7_000_000) + + MaxDataBytes = uint32(50_000) + + RootSnoozeTime = 3 * time.Minute + GethLabel = func(name string) string { + name = NetworkName(name) + switch NetworkChart { + case reorg.TXNodesAppLabel: + return fmt.Sprintf("%s-ethereum-geth", name) + case foundry.ChartName: + return name + } + return "" + } + // ApprovedAmountToRouter is the default amount which gets approved for router so that it can transfer token and use the fee token for fee payment + ApprovedAmountToRouter = new(big.Int).Mul(big.NewInt(1e18), big.NewInt(1)) + ApprovedFeeAmountToRouter = new(big.Int).Mul(big.NewInt(int64(GasFeeMultiplier)), big.NewInt(1e5)) + GasFeeMultiplier uint64 = 12e17 + LinkToUSD = new(big.Int).Mul(big.NewInt(1e18), big.NewInt(20)) + WrappedNativeToUSD = new(big.Int).Mul(big.NewInt(1e18), big.NewInt(1.7e3)) +) + +func GetUSDCDomain(networkName string, simulated bool) (uint32, error) { + if simulated { + // generate a random domain for simulated networks + return rand.Uint32(), nil + } + lookup := map[string]uint32{ + networks.AvalancheFuji.Name: 1, + networks.OptimismGoerli.Name: 2, + networks.ArbitrumGoerli.Name: 3, + networks.BaseGoerli.Name: 6, + networks.PolygonMumbai.Name: 7, + } + if val, ok := lookup[networkName]; ok { + return val, nil + } + return 0, fmt.Errorf("USDC domain not found for chain %s", networkName) +} + +type CCIPCommon struct { + Logger *zerolog.Logger + ChainClient blockchain.EVMClient + // Deployer deploys all CCIP contracts + Deployer *contracts.CCIPContractsDeployer + // tokenDeployer is used exclusively for deploying self-serve tokens and their pools + tokenDeployer *contracts.CCIPContractsDeployer + FeeToken *contracts.LinkToken + BridgeTokens []*contracts.ERC20Token + PriceAggregators map[common.Address]*contracts.MockAggregator + NoOfTokensNeedingDynamicPrice int + BridgeTokenPools []*contracts.TokenPool + RateLimiterConfig contracts.RateLimiterConfig + ARMContract *common.Address + ARM *contracts.ARM // populate only if the ARM contracts is not a mock and can be used to verify various ARM events; keep this nil for mock ARM + Router *contracts.Router + PriceRegistry *contracts.PriceRegistry + TokenAdminRegistry *contracts.TokenAdminRegistry + WrappedNative common.Address + MulticallEnabled bool + MulticallContract common.Address + ExistingDeployment bool + USDCMockDeployment *bool + TokenMessenger *common.Address + TokenTransmitter *contracts.TokenTransmitter + IsConnectionRestoredRecently *atomic.Bool + + poolFunds *big.Int + tokenPriceUpdateWatcherMu *sync.Mutex + tokenPriceUpdateWatcher map[common.Address]*big.Int // key - token; value - timestamp of update + gasUpdateWatcherMu *sync.Mutex + gasUpdateWatcher map[uint64]*big.Int // key - 
destchain id; value - timestamp of update + GasUpdateEvents []contracts.GasUpdateEvent +} + +// FreeUpUnusedSpace sets nil to various elements of ccipModule which are only used +// during lane set up and not used for rest of the test duration +// this is called mainly by load test to keep the memory usage minimum for high number of lanes +func (ccipModule *CCIPCommon) FreeUpUnusedSpace() { + ccipModule.PriceAggregators = nil + ccipModule.BridgeTokenPools = nil + ccipModule.TokenMessenger = nil + ccipModule.TokenTransmitter = nil + runtime.GC() +} + +func (ccipModule *CCIPCommon) UnvoteToCurseARM() error { + if ccipModule.ARM != nil { + return fmt.Errorf("real ARM deployed. cannot curse through test") + } + if ccipModule.ARMContract == nil { + return fmt.Errorf("no ARM contract is set") + } + arm, err := mock_arm_contract.NewMockARMContract(*ccipModule.ARMContract, ccipModule.ChainClient.Backend()) + if err != nil { + return fmt.Errorf("error instantiating arm %w", err) + } + opts, err := ccipModule.ChainClient.TransactionOpts(ccipModule.ChainClient.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("error getting owners for ARM OwnerUnvoteToCurse %w", err) + } + tx, err := arm.OwnerUnvoteToCurse0(opts, []mock_arm_contract.RMNUnvoteToCurseRecord{}) + if err != nil { + return fmt.Errorf("error in calling OwnerUnvoteToCurse %w", err) + } + err = ccipModule.ChainClient.ProcessTransaction(tx) + if err != nil { + return err + } + log.Info(). + Str("ARM", arm.Address().Hex()). + Msg("ARM is uncursed") + return ccipModule.ChainClient.WaitForEvents() +} + +func (ccipModule *CCIPCommon) IsCursed() (bool, error) { + if ccipModule.ARM != nil { + return false, fmt.Errorf("real ARM deployed. cannot validate cursing") + } + if ccipModule.ARMContract == nil { + return false, fmt.Errorf("no ARM contract is set") + } + arm, err := mock_arm_contract.NewMockARMContract(*ccipModule.ARMContract, ccipModule.ChainClient.Backend()) + if err != nil { + return false, fmt.Errorf("error instantiating arm %w", err) + } + return arm.IsCursed0(nil) +} + +func (ccipModule *CCIPCommon) CurseARM() (*types.Transaction, error) { + if ccipModule.ARM != nil { + return nil, fmt.Errorf("real ARM deployed. cannot curse through test") + } + if ccipModule.ARMContract == nil { + return nil, fmt.Errorf("no ARM contract is set") + } + arm, err := mock_arm_contract.NewMockARMContract(*ccipModule.ARMContract, ccipModule.ChainClient.Backend()) + if err != nil { + return nil, fmt.Errorf("error instantiating arm %w", err) + } + opts, err := ccipModule.ChainClient.TransactionOpts(ccipModule.ChainClient.GetDefaultWallet()) + if err != nil { + return nil, fmt.Errorf("error getting owners for ARM VoteToCurse %w", err) + } + tx, err := arm.VoteToCurse(opts, [32]byte{}) + if err != nil { + return nil, fmt.Errorf("error in calling VoteToCurse %w", err) + } + err = ccipModule.ChainClient.ProcessTransaction(tx) + if err != nil { + return tx, err + } + log.Info(). + Str("ARM", arm.Address().Hex()). + Str("Network", ccipModule.ChainClient.GetNetworkName()). 
+ Msg("ARM is cursed") + return tx, ccipModule.ChainClient.WaitForEvents() +} + +func (ccipModule *CCIPCommon) LoadContractAddresses(conf *laneconfig.LaneConfig, noOfTokens *int) { + if conf != nil { + if common.IsHexAddress(conf.FeeToken) { + ccipModule.FeeToken = &contracts.LinkToken{ + EthAddress: common.HexToAddress(conf.FeeToken), + } + } + if conf.IsNativeFeeToken { + ccipModule.FeeToken = &contracts.LinkToken{ + EthAddress: common.HexToAddress("0x0"), + } + } + + if common.IsHexAddress(conf.Router) { + ccipModule.Router = &contracts.Router{ + EthAddress: common.HexToAddress(conf.Router), + } + } + if common.IsHexAddress(conf.ARM) { + addr := common.HexToAddress(conf.ARM) + ccipModule.ARMContract = &addr + if !conf.IsMockARM { + ccipModule.ARM = &contracts.ARM{ + EthAddress: addr, + } + } + } + if common.IsHexAddress(conf.PriceRegistry) { + ccipModule.PriceRegistry = &contracts.PriceRegistry{ + EthAddress: common.HexToAddress(conf.PriceRegistry), + } + } + if common.IsHexAddress(conf.WrappedNative) { + ccipModule.WrappedNative = common.HexToAddress(conf.WrappedNative) + } + if common.IsHexAddress(conf.Multicall) { + ccipModule.MulticallContract = common.HexToAddress(conf.Multicall) + } + if common.IsHexAddress(conf.TokenMessenger) { + addr := common.HexToAddress(conf.TokenMessenger) + ccipModule.TokenMessenger = &addr + } + if common.IsHexAddress(conf.TokenTransmitter) { + ccipModule.TokenTransmitter = &contracts.TokenTransmitter{ + ContractAddress: common.HexToAddress(conf.TokenTransmitter), + } + } + if len(conf.BridgeTokens) > 0 { + // if noOfTokens is set, then only take that many tokens from the list + // the lane config can have more tokens than required for the test + if noOfTokens != nil { + if len(conf.BridgeTokens) > *noOfTokens { + conf.BridgeTokens = conf.BridgeTokens[:*noOfTokens] + } + } + var tokens []*contracts.ERC20Token + for _, token := range conf.BridgeTokens { + if common.IsHexAddress(token) { + tokens = append(tokens, &contracts.ERC20Token{ + ContractAddress: common.HexToAddress(token), + }) + } + } + ccipModule.BridgeTokens = tokens + } + if len(conf.BridgeTokenPools) > 0 { + // if noOfTokens is set, then only take that many tokenpools from the list + // the lane config can have more tokenpools than required for the test + if noOfTokens != nil { + if len(conf.BridgeTokenPools) > *noOfTokens { + conf.BridgeTokenPools = conf.BridgeTokenPools[:*noOfTokens] + } + } + var pools []*contracts.TokenPool + for _, pool := range conf.BridgeTokenPools { + if common.IsHexAddress(pool) { + pools = append(pools, &contracts.TokenPool{ + EthAddress: common.HexToAddress(pool), + }) + } + } + ccipModule.BridgeTokenPools = pools + } + if len(conf.PriceAggregators) > 0 { + priceAggrs := make(map[common.Address]*contracts.MockAggregator) + for token, aggr := range conf.PriceAggregators { + if common.IsHexAddress(aggr) { + priceAggrs[common.HexToAddress(token)] = &contracts.MockAggregator{ + ContractAddress: common.HexToAddress(aggr), + } + } + } + ccipModule.PriceAggregators = priceAggrs + } + if common.IsHexAddress(conf.TokenAdminRegistry) { + ccipModule.TokenAdminRegistry = &contracts.TokenAdminRegistry{ + EthAddress: common.HexToAddress(conf.TokenAdminRegistry), + } + } + } +} + +// ApproveTokens approves tokens for the router to send usually a massive amount of tokens enough to cover all the ccip transfers +// to be triggered by the test. 
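
> The mock-ARM helpers above (`CurseARM`, `IsCursed`, `UnvoteToCurseARM`) are what curse/uncurse-style tests use to simulate an ARM curse and then clear it. A compressed usage sketch follows; it assumes an already initialised `*actions.CCIPCommon` wired to a mock ARM, which is set up elsewhere in the test.

```go
// Sketch: how a test might drive the mock ARM curse helpers defined above.
package mytests

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/actions"
)

func curseThenUncurse(t *testing.T, ccipModule *actions.CCIPCommon) {
	// Vote to curse on the mock ARM contract.
	_, err := ccipModule.CurseARM()
	require.NoError(t, err)

	cursed, err := ccipModule.IsCursed()
	require.NoError(t, err)
	require.True(t, cursed, "mock ARM should report cursed after VoteToCurse")

	// ... send CCIP requests here and assert that execution is paused ...

	// Clear the curse so later assertions see normal execution again.
	require.NoError(t, ccipModule.UnvoteToCurseARM())
}
```
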
+// Also, if the test is using self-serve tokens and pools deployed by a separate `tokenDeployer` address, this sends some of those tokens +// to the default `ccipOwner` address to be used for the test. +func (ccipModule *CCIPCommon) ApproveTokens() error { + isApproved := false + for _, token := range ccipModule.BridgeTokens { + // TODO: We send half of token funds back to the CCIP Deployer account, which isn't particularly realistic. + // See CCIP-2477 + if token.OwnerWallet.Address() != ccipModule.ChainClient.GetDefaultWallet().Address() && + !ccipModule.ExistingDeployment { + tokenBalance, err := token.BalanceOf(context.Background(), token.OwnerWallet.Address()) + if err != nil { + return fmt.Errorf("failed to get balance of token %s: %w", token.ContractAddress.Hex(), err) + } + tokenBalance.Div(tokenBalance, big.NewInt(2)) // Send half of the balance to the default wallet + err = token.Transfer(token.OwnerWallet, ccipModule.ChainClient.GetDefaultWallet().Address(), tokenBalance) + if err != nil { + return fmt.Errorf("failed to transfer token from '%s' to '%s' %s: %w", + token.ContractAddress.Hex(), token.OwnerAddress.Hex(), ccipModule.ChainClient.GetDefaultWallet().Address(), err, + ) + } + } + + err := token.Approve(ccipModule.ChainClient.GetDefaultWallet(), ccipModule.Router.Address(), ApprovedAmountToRouter) + if err != nil { + return fmt.Errorf("failed to approve token %s: %w", token.ContractAddress.Hex(), err) + } + if token.ContractAddress == ccipModule.FeeToken.EthAddress { + isApproved = true + } + } + if ccipModule.FeeToken.EthAddress != common.HexToAddress("0x0") { + amount := ApprovedFeeAmountToRouter + if isApproved { + amount = new(big.Int).Add(ApprovedAmountToRouter, ApprovedFeeAmountToRouter) + } + allowance, err := ccipModule.FeeToken.Allowance(ccipModule.ChainClient.GetDefaultWallet().Address(), ccipModule.Router.Address()) + if err != nil { + return fmt.Errorf("failed to get allowance for token %s: %w", ccipModule.FeeToken.Address(), err) + } + if allowance.Cmp(amount) < 0 { + err := ccipModule.FeeToken.Approve(ccipModule.Router.Address(), amount) + if err != nil { + return fmt.Errorf("failed to approve fee token %s: %w", ccipModule.FeeToken.EthAddress.String(), err) + } + } + } + ccipModule.Logger.Info().Msg("Tokens approved") + + return nil +} + +func (ccipModule *CCIPCommon) CleanUp() error { + if !ccipModule.ExistingDeployment { + for i, pool := range ccipModule.BridgeTokenPools { + if !pool.IsLockRelease() { + continue + } + bal, err := ccipModule.BridgeTokens[i].BalanceOf(context.Background(), pool.Address()) + if err != nil { + return fmt.Errorf("error in getting pool balance %w", err) + } + if bal.Cmp(big.NewInt(0)) == 0 { + continue + } + err = pool.RemoveLiquidity(bal) + if err != nil { + return fmt.Errorf("error in removing liquidity %w", err) + } + } + err := ccipModule.ChainClient.WaitForEvents() + if err != nil { + return fmt.Errorf("error in waiting for events %wfmt.Sprintf(\"Setting mockserver response\")", err) + } + } + return nil +} + +func (ccipModule *CCIPCommon) WaitForPriceUpdates( + ctx context.Context, + lggr *zerolog.Logger, + timeout time.Duration, + destChainId uint64, + allTokens []common.Address, +) error { + destChainSelector, err := chainselectors.SelectorFromChainId(destChainId) + if err != nil { + return err + } + // check if price is already updated + price, err := ccipModule.PriceRegistry.Instance.GetDestinationChainGasPrice(nil, destChainSelector) + if err != nil { + return err + } + + if price.Timestamp > 0 && 
price.Value.Cmp(big.NewInt(0)) > 0 { + lggr.Info(). + Str("Price Registry", ccipModule.PriceRegistry.Address()). + Uint64("dest chain", destChainId). + Str("source chain", ccipModule.ChainClient.GetNetworkName()). + Msg("Price already updated") + return nil + } + // if not, wait for price update + lggr.Info().Msgf("Waiting for UsdPerUnitGas and UsdPerTokenUpdated for dest chain %d Price Registry %s", destChainId, ccipModule.PriceRegistry.Address()) + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + localCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + var tokensMissingForUpdate common.Address + for { + select { + case <-ticker.C: + ccipModule.gasUpdateWatcherMu.Lock() + timestampOfUpdate, ok := ccipModule.gasUpdateWatcher[destChainId] + ccipModule.gasUpdateWatcherMu.Unlock() + tokenPricesUpdated := false + if len(allTokens) > 0 { + ccipModule.tokenPriceUpdateWatcherMu.Lock() + for _, token := range allTokens { + timestampOfTokenUpdate, okToken := ccipModule.tokenPriceUpdateWatcher[token] + // we consider token prices updated only if all tokens have been updated + // if any token is missing, we retry + if !okToken || timestampOfTokenUpdate.Cmp(big.NewInt(0)) < 1 { + tokenPricesUpdated = false + tokensMissingForUpdate = token + break + } + tokenPricesUpdated = true + } + ccipModule.tokenPriceUpdateWatcherMu.Unlock() + } + + if tokenPricesUpdated && ok && timestampOfUpdate.Cmp(big.NewInt(0)) == 1 { + lggr.Info(). + Str("Price Registry", ccipModule.PriceRegistry.Address()). + Uint64("dest chain", destChainId). + Str("source chain", ccipModule.ChainClient.GetNetworkName()). + Msg("Price updated") + return nil + } + case <-localCtx.Done(): + if tokensMissingForUpdate != (common.Address{}) { + return fmt.Errorf("price Updates not found for token %s", tokensMissingForUpdate.Hex()) + } + return fmt.Errorf("price Updates not found for chain %d", destChainId) + } + } +} + +// WatchForPriceUpdates helps to ensure the price updates are happening in price registry by subscribing to a couple +// of price update events and add the event details to watchers. It subscribes to 'UsdPerUnitGasUpdated' +// and 'UsdPerTokenUpdated' event. 
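Editor's note: the two price helpers in this hunk are meant to be used together — the watcher defined below is started first, then `WaitForPriceUpdates` above blocks until the prices land. A minimal, hypothetical usage sketch, assuming it lives in the same package as `CCIPCommon`; `t`, `lggr`, `ccipCommon`, `destChainID`, the token list, and the 10-minute timeout are placeholders, and imports (`context`, `testing`, `time`, `zerolog`, go-ethereum `common`) are elided:

```go
func waitForLanePrices(t *testing.T, lggr zerolog.Logger, ccipCommon *CCIPCommon, destChainID uint64) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Start the watchers first so no UsdPerUnitGasUpdated/UsdPerTokenUpdated event is missed.
	if err := ccipCommon.WatchForPriceUpdates(ctx, &lggr); err != nil {
		t.Fatalf("failed to subscribe to price update events: %v", err)
	}

	// ... traffic is generated on the lane here ...

	// Block until the destination chain's gas price and the listed token prices
	// have been observed, or give up after the timeout.
	tokens := []common.Address{ccipCommon.FeeToken.EthAddress, ccipCommon.WrappedNative}
	if err := ccipCommon.WaitForPriceUpdates(ctx, &lggr, 10*time.Minute, destChainID, tokens); err != nil {
		t.Fatalf("price updates not observed: %v", err)
	}
}
```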
+func (ccipModule *CCIPCommon) WatchForPriceUpdates(ctx context.Context, lggr *zerolog.Logger) error { + gasUpdateEventLatest := make(chan *price_registry.PriceRegistryUsdPerUnitGasUpdated) + tokenUpdateEvent := make(chan *price_registry.PriceRegistryUsdPerTokenUpdated) + sub := event.Resubscribe(DefaultResubscriptionTimeout, func(_ context.Context) (event.Subscription, error) { + lggr.Info().Msg("Subscribing to UsdPerUnitGasUpdated event") + eventSub, err := ccipModule.PriceRegistry.WatchUsdPerUnitGasUpdated(nil, gasUpdateEventLatest, nil) + if err != nil { + log.Error().Err(err).Msg("error in subscribing to UsdPerUnitGasUpdated event") + } + return eventSub, err + }) + if sub == nil { + return fmt.Errorf("no event subscription found") + } + tokenUpdateSub := event.Resubscribe(DefaultResubscriptionTimeout, func(_ context.Context) (event.Subscription, error) { + lggr.Info().Msg("Subscribing to UsdPerTokenUpdated event") + eventSub, err := ccipModule.PriceRegistry.WatchUsdPerTokenUpdated(nil, tokenUpdateEvent) + if err != nil { + log.Error().Err(err).Msg("error in subscribing to UsdPerTokenUpdated event") + } + return eventSub, err + }) + if tokenUpdateSub == nil { + return fmt.Errorf("no event subscription found") + } + processEvent := func(value, timestamp *big.Int, destChainSelector uint64, raw types.Log) error { + destChain, err := chainselectors.ChainIdFromSelector(destChainSelector) + if err != nil { + return err + } + ccipModule.gasUpdateWatcherMu.Lock() + ccipModule.gasUpdateWatcher[destChain] = timestamp + + ccipModule.GasUpdateEvents = append(ccipModule.GasUpdateEvents, contracts.GasUpdateEvent{ + Sender: raw.Address.Hex(), + Tx: raw.TxHash.Hex(), + Value: value, + DestChain: destChain, + Source: ccipModule.ChainClient.GetNetworkName(), + }) + ccipModule.gasUpdateWatcherMu.Unlock() + lggr.Info(). + Uint64("chainSelector", destChainSelector). + Uint64("dest_chain", destChain). + Str("price_registry", ccipModule.PriceRegistry.Address()). + Str("tx hash", raw.TxHash.Hex()). + Msgf("UsdPerUnitGasUpdated event received for dest chain: %d, source chain: %s", + destChain, ccipModule.ChainClient.GetNetworkName()) + return nil + } + go func() { + defer func() { + sub.Unsubscribe() + tokenUpdateSub.Unsubscribe() + ccipModule.gasUpdateWatcher = nil + ccipModule.gasUpdateWatcherMu = nil + ccipModule.GasUpdateEvents = nil + ccipModule.tokenPriceUpdateWatcher = nil + ccipModule.tokenPriceUpdateWatcherMu = nil + }() + for { + select { + case e := <-gasUpdateEventLatest: + err := processEvent(e.Value, e.Timestamp, e.DestChain, e.Raw) + if err != nil { + continue + } + case tk := <-tokenUpdateEvent: + ccipModule.tokenPriceUpdateWatcherMu.Lock() + ccipModule.tokenPriceUpdateWatcher[tk.Token] = tk.Timestamp + ccipModule.tokenPriceUpdateWatcherMu.Unlock() + lggr.Info(). + Str("token", tk.Token.Hex()). + Str("chain", ccipModule.ChainClient.GetNetworkName()). + Str("price_registry", ccipModule.PriceRegistry.Address()). + Msg("UsdPerTokenUpdated event received") + case <-ctx.Done(): + return + } + } + }() + + return nil +} + +// UpdateTokenPricesAtRegularInterval updates aggregator contract with updated answer at regular interval. +// At each iteration of ticker it chooses one of the aggregator contracts and updates its round answer. 
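Editor's note: a hypothetical call site for the helper defined below, assuming a load test already holds `ccipCommon`, `lggr`, and the lane config `laneCfg`; the 30-second interval is arbitrary. The goroutine stops when the context is cancelled:

```go
// Keep aggregator answers moving for the duration of the test run.
priceCtx, stopPriceUpdates := context.WithCancel(context.Background())
defer stopPriceUpdates()

if err := ccipCommon.UpdateTokenPricesAtRegularInterval(priceCtx, &lggr, 30*time.Second, laneCfg); err != nil {
	t.Fatalf("failed to start periodic aggregator price updates: %v", err)
}
```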
+func (ccipModule *CCIPCommon) UpdateTokenPricesAtRegularInterval(ctx context.Context, lggr *zerolog.Logger, interval time.Duration, conf *laneconfig.LaneConfig) error { + if ccipModule.ExistingDeployment { + return nil + } + var aggregators []*contracts.MockAggregator + for _, aggregatorContract := range conf.PriceAggregators { + contract, err := ccipModule.Deployer.NewMockAggregator(common.HexToAddress(aggregatorContract)) + if err != nil { + return err + } + aggregators = append(aggregators, contract) + } + go func(aggregators []*contracts.MockAggregator) { + rand.NewSource(uint64(time.Now().UnixNano())) + ticker := time.NewTicker(interval) + for { + select { + case <-ticker.C: + // randomly choose an aggregator contract from slice of aggregators + randomIndex := rand.Intn(len(aggregators)) + err := aggregators[randomIndex].UpdateRoundData(nil, ptr.Ptr(-5), ptr.Ptr(2)) + if err != nil { + lggr.Error().Err(err).Msg("error in updating round data") + continue + } + case <-ctx.Done(): + return + } + } + }(aggregators) + return nil +} + +// SyncUSDCDomain makes domain updates to Source usdc pool domain with - +// 1. USDC domain from destination chain's token transmitter contract +// 2. Destination pool address as allowed caller +func (ccipModule *CCIPCommon) SyncUSDCDomain(destTransmitter *contracts.TokenTransmitter, destPools []*contracts.TokenPool, destChainID uint64) error { + // if not USDC new deployment, return + // if existing deployment, consider that no syncing is required and return + if ccipModule.ExistingDeployment || !ccipModule.IsUSDCDeployment() { + return nil + } + if destTransmitter == nil { + return fmt.Errorf("invalid address") + } + destChainSelector, err := chainselectors.SelectorFromChainId(destChainID) + if err != nil { + return fmt.Errorf("invalid chain id %w", err) + } + + // sync USDC domain + for i, pool := range ccipModule.BridgeTokenPools { + if !pool.IsUSDC() { + continue + } + if destPools[i] == nil { + return fmt.Errorf("invalid pool address") + } + if !destPools[i].IsUSDC() { + return fmt.Errorf("corresponding dest pool is not USDC pool") + } + err = pool.SyncUSDCDomain(destTransmitter, destPools[i].EthAddress, destChainSelector) + if err != nil { + return err + } + err = destPools[i].MintUSDCToUSDCPool() + if err != nil { + return err + } + } + + return ccipModule.ChainClient.WaitForEvents() +} + +func (ccipModule *CCIPCommon) PollRPCConnection(ctx context.Context, lggr *zerolog.Logger) { + for { + select { + case reconnectTime := <-ccipModule.ChainClient.ConnectionRestored(): + if ccipModule.IsConnectionRestoredRecently == nil { + ccipModule.IsConnectionRestoredRecently = atomic.NewBool(true) + } else { + ccipModule.IsConnectionRestoredRecently.Store(true) + } + lggr.Info().Time("Restored At", reconnectTime).Str("Network", ccipModule.ChainClient.GetNetworkName()).Msg("Connection Restored") + case issueTime := <-ccipModule.ChainClient.ConnectionIssue(): + if ccipModule.IsConnectionRestoredRecently == nil { + ccipModule.IsConnectionRestoredRecently = atomic.NewBool(false) + } else { + ccipModule.IsConnectionRestoredRecently.Store(false) + } + lggr.Info().Time("Started At", issueTime).Str("Network", ccipModule.ChainClient.GetNetworkName()).Msg("RPC Disconnected") + case <-ctx.Done(): + return + } + } +} + +func (ccipModule *CCIPCommon) IsUSDCDeployment() bool { + return pointer.GetBool(ccipModule.USDCMockDeployment) +} + +func (ccipModule *CCIPCommon) WriteLaneConfig(conf *laneconfig.LaneConfig) { + var btAddresses, btpAddresses []string + priceAggrs := 
make(map[string]string) + for i, bt := range ccipModule.BridgeTokens { + btAddresses = append(btAddresses, bt.Address()) + btpAddresses = append(btpAddresses, ccipModule.BridgeTokenPools[i].Address()) + } + for k, v := range ccipModule.PriceAggregators { + priceAggrs[k.Hex()] = v.ContractAddress.Hex() + } + conf.CommonContracts = laneconfig.CommonContracts{ + FeeToken: ccipModule.FeeToken.Address(), + BridgeTokens: btAddresses, + BridgeTokenPools: btpAddresses, + ARM: ccipModule.ARMContract.Hex(), + Router: ccipModule.Router.Address(), + PriceRegistry: ccipModule.PriceRegistry.Address(), + PriceAggregators: priceAggrs, + WrappedNative: ccipModule.WrappedNative.Hex(), + Multicall: ccipModule.MulticallContract.Hex(), + } + if ccipModule.TokenAdminRegistry != nil { + conf.CommonContracts.TokenAdminRegistry = ccipModule.TokenAdminRegistry.Address() + } + if ccipModule.TokenTransmitter != nil { + conf.CommonContracts.TokenTransmitter = ccipModule.TokenTransmitter.ContractAddress.Hex() + } + if ccipModule.TokenMessenger != nil { + conf.CommonContracts.TokenMessenger = ccipModule.TokenMessenger.Hex() + } + if ccipModule.ARM == nil { + conf.CommonContracts.IsMockARM = true + } +} + +func (ccipModule *CCIPCommon) AddPriceAggregatorToken(token common.Address, initialAns *big.Int) error { + // check if dynamic price update is enabled + if ccipModule.NoOfTokensNeedingDynamicPrice <= 0 { + return nil + } + var err error + if aggregator, ok := ccipModule.PriceAggregators[token]; !ok { + ccipModule.PriceAggregators[token], err = ccipModule.Deployer.DeployMockAggregator(18, initialAns) + if err != nil { + return fmt.Errorf("deploying mock aggregator contract shouldn't fail %w", err) + } + } else { + ccipModule.PriceAggregators[token], err = ccipModule.Deployer.NewMockAggregator(aggregator.ContractAddress) + if err != nil { + return fmt.Errorf("error instantiating price aggregator for token %s", token.Hex()) + } + } + ccipModule.NoOfTokensNeedingDynamicPrice-- + return nil +} + +// DeployContracts deploys the contracts which are necessary in both source and dest chain +// This reuses common contracts for bidirectional lanes +func (ccipModule *CCIPCommon) DeployContracts( + noOfTokens int, + tokenDeployerFns []blockchain.ContractDeployer, + conf *laneconfig.LaneConfig, +) error { + var err error + cd := ccipModule.Deployer + + ccipModule.LoadContractAddresses(conf, &noOfTokens) + if ccipModule.ARM != nil { + arm, err := cd.NewARMContract(ccipModule.ARM.EthAddress) + if err != nil { + return fmt.Errorf("getting new ARM contract shouldn't fail %w", err) + } + ccipModule.ARM = arm + } else { + // deploy a mock ARM contract + if ccipModule.ARMContract == nil { + if ccipModule.ExistingDeployment { + return fmt.Errorf("ARM contract address is not provided in lane config") + } + ccipModule.ARMContract, err = cd.DeployMockARMContract() + if err != nil { + return fmt.Errorf("deploying mock ARM contract shouldn't fail %w", err) + } + err = ccipModule.ChainClient.WaitForEvents() + if err != nil { + return fmt.Errorf("error in waiting for mock ARM deployment %w", err) + } + } + } + if ccipModule.WrappedNative == common.HexToAddress("0x0") { + if ccipModule.ExistingDeployment { + return fmt.Errorf("wrapped native contract address is not provided in lane config") + } + weth9addr, err := cd.DeployWrappedNative() + if err != nil { + return fmt.Errorf("deploying wrapped native shouldn't fail %w", err) + } + err = ccipModule.AddPriceAggregatorToken(*weth9addr, WrappedNativeToUSD) + if err != nil { + return 
fmt.Errorf("deploying mock aggregator contract shouldn't fail %w", err) + } + err = ccipModule.ChainClient.WaitForEvents() + if err != nil { + return fmt.Errorf("waiting for deploying wrapped native shouldn't fail %w", err) + } + ccipModule.WrappedNative = *weth9addr + } + + if ccipModule.Router == nil { + if ccipModule.ExistingDeployment { + return fmt.Errorf("router contract address is not provided in lane config") + } + ccipModule.Router, err = cd.DeployRouter(ccipModule.WrappedNative, *ccipModule.ARMContract) + if err != nil { + return fmt.Errorf("deploying router shouldn't fail %w", err) + } + err = ccipModule.ChainClient.WaitForEvents() + if err != nil { + return fmt.Errorf("error in waiting for router deployment %w", err) + } + } else { + r, err := cd.NewRouter(ccipModule.Router.EthAddress) + if err != nil { + return fmt.Errorf("getting new router contract shouldn't fail %w", err) + } + ccipModule.Router = r + } + if ccipModule.FeeToken == nil { + if ccipModule.ExistingDeployment { + return fmt.Errorf("FeeToken contract address is not provided in lane config") + } + // deploy link token + token, err := cd.DeployLinkTokenContract() + if err != nil { + return fmt.Errorf("deploying fee token contract shouldn't fail %w", err) + } + + ccipModule.FeeToken = token + err = ccipModule.AddPriceAggregatorToken(ccipModule.FeeToken.EthAddress, LinkToUSD) + if err != nil { + return fmt.Errorf("deploying mock aggregator contract shouldn't fail %w", err) + } + err = ccipModule.ChainClient.WaitForEvents() + if err != nil { + return fmt.Errorf("error in waiting for feetoken deployment %w", err) + } + } else { + token, err := cd.NewLinkTokenContract(common.HexToAddress(ccipModule.FeeToken.Address())) + if err != nil { + return fmt.Errorf("getting fee token contract shouldn't fail %w", err) + } + ccipModule.FeeToken = token + } + + // If the number of deployed bridge tokens does not match noOfTokens, deploy rest of the tokens in case ExistingDeployment is false + // In case of ExistingDeployment as true use whatever is provided in laneconfig + if len(ccipModule.BridgeTokens) < noOfTokens && !ccipModule.ExistingDeployment { + // deploy bridge token. 
+ for i := len(ccipModule.BridgeTokens); i < noOfTokens; i++ { + var token *contracts.ERC20Token + + if len(tokenDeployerFns) != noOfTokens { + if ccipModule.IsUSDCDeployment() && i == 0 { + // if it's USDC deployment, we deploy the burn mint token 677 with decimal 6 and cast it to ERC20Token + usdcToken, err := ccipModule.tokenDeployer.DeployBurnMintERC677(new(big.Int).Mul(big.NewInt(1e6), big.NewInt(1e18))) + if err != nil { + return fmt.Errorf("deploying bridge usdc token contract shouldn't fail %w", err) + } + token, err = ccipModule.tokenDeployer.NewERC20TokenContract(usdcToken.ContractAddress) + if err != nil { + return fmt.Errorf("getting new bridge usdc token contract shouldn't fail %w", err) + } + if ccipModule.TokenTransmitter == nil { + domain, err := GetUSDCDomain(ccipModule.ChainClient.GetNetworkName(), ccipModule.ChainClient.NetworkSimulated()) + if err != nil { + return fmt.Errorf("error in getting USDC domain %w", err) + } + + ccipModule.TokenTransmitter, err = ccipModule.tokenDeployer.DeployTokenTransmitter(domain, usdcToken.ContractAddress) + if err != nil { + return fmt.Errorf("deploying token transmitter shouldn't fail %w", err) + } + } + if ccipModule.TokenMessenger == nil { + if ccipModule.TokenTransmitter == nil { + return fmt.Errorf("TokenTransmitter contract address is not provided") + } + ccipModule.TokenMessenger, err = ccipModule.tokenDeployer.DeployTokenMessenger(ccipModule.TokenTransmitter.ContractAddress) + if err != nil { + return fmt.Errorf("deploying token messenger shouldn't fail %w", err) + } + err = ccipModule.ChainClient.WaitForEvents() + if err != nil { + return fmt.Errorf("error in waiting for mock TokenMessenger and Transmitter deployment %w", err) + } + } + + // grant minter role to token messenger + err = usdcToken.GrantMintAndBurn(*ccipModule.TokenMessenger) + if err != nil { + return fmt.Errorf("granting minter role to token messenger shouldn't fail %w", err) + } + err = usdcToken.GrantMintAndBurn(ccipModule.TokenTransmitter.ContractAddress) + if err != nil { + return fmt.Errorf("granting minter role to token transmitter shouldn't fail %w", err) + } + } else { + // otherwise we deploy link token and cast it to ERC20Token + linkToken, err := ccipModule.tokenDeployer.DeployLinkTokenContract() + if err != nil { + return fmt.Errorf("deploying bridge token contract shouldn't fail %w", err) + } + token, err = ccipModule.tokenDeployer.NewERC20TokenContract(common.HexToAddress(linkToken.Address())) + if err != nil { + return fmt.Errorf("getting new bridge token contract shouldn't fail %w", err) + } + err = ccipModule.AddPriceAggregatorToken(linkToken.EthAddress, LinkToUSD) + if err != nil { + return fmt.Errorf("deploying mock aggregator contract shouldn't fail %w", err) + } + } + } else { + token, err = ccipModule.tokenDeployer.DeployERC20TokenContract(tokenDeployerFns[i]) + if err != nil { + return fmt.Errorf("deploying bridge token contract shouldn't fail %w", err) + } + err = ccipModule.AddPriceAggregatorToken(token.ContractAddress, LinkToUSD) + if err != nil { + return fmt.Errorf("deploying mock aggregator contract shouldn't fail %w", err) + } + } + ccipModule.BridgeTokens = append(ccipModule.BridgeTokens, token) + + } + if err = ccipModule.ChainClient.WaitForEvents(); err != nil { + return fmt.Errorf("error in waiting for bridge token deployment %w", err) + } + } + + var tokens []*contracts.ERC20Token + for _, token := range ccipModule.BridgeTokens { + newToken, err := 
ccipModule.tokenDeployer.NewERC20TokenContract(common.HexToAddress(token.Address())) + if err != nil { + return fmt.Errorf("getting new bridge token contract shouldn't fail %w", err) + } + tokens = append(tokens, newToken) + } + ccipModule.BridgeTokens = tokens + if len(ccipModule.BridgeTokenPools) != len(ccipModule.BridgeTokens) { + if ccipModule.ExistingDeployment { + return fmt.Errorf("bridge token pool contract address is not provided in lane config") + } + // deploy native token pool + for i := len(ccipModule.BridgeTokenPools); i < len(ccipModule.BridgeTokens); i++ { + token := ccipModule.BridgeTokens[i] + // usdc pool need to be the first one in the slice + if ccipModule.IsUSDCDeployment() && i == 0 { + // deploy usdc token pool in case of usdc deployment + if ccipModule.TokenMessenger == nil { + return fmt.Errorf("TokenMessenger contract address is not provided") + } + if ccipModule.TokenTransmitter == nil { + return fmt.Errorf("TokenTransmitter contract address is not provided") + } + usdcPool, err := ccipModule.tokenDeployer.DeployUSDCTokenPoolContract(token.Address(), *ccipModule.TokenMessenger, *ccipModule.ARMContract, ccipModule.Router.Instance.Address()) + if err != nil { + return fmt.Errorf("deploying bridge Token pool(usdc) shouldn't fail %w", err) + } + + ccipModule.BridgeTokenPools = append(ccipModule.BridgeTokenPools, usdcPool) + } else { + // deploy lock release token pool in case of non-usdc deployment + btp, err := ccipModule.tokenDeployer.DeployLockReleaseTokenPoolContract(token.Address(), *ccipModule.ARMContract, ccipModule.Router.Instance.Address()) + if err != nil { + return fmt.Errorf("deploying bridge Token pool(lock&release) shouldn't fail %w", err) + } + ccipModule.BridgeTokenPools = append(ccipModule.BridgeTokenPools, btp) + + err = btp.AddLiquidity(token, token.OwnerWallet, ccipModule.poolFunds) + if err != nil { + return fmt.Errorf("adding liquidity token to dest pool shouldn't fail %w", err) + } + } + } + } else { + var pools []*contracts.TokenPool + for _, pool := range ccipModule.BridgeTokenPools { + newPool, err := ccipModule.tokenDeployer.NewLockReleaseTokenPoolContract(pool.EthAddress) + if err != nil { + return fmt.Errorf("getting new bridge token pool contract shouldn't fail %w", err) + } + pools = append(pools, newPool) + } + ccipModule.BridgeTokenPools = pools + } + + // no need to have price registry for existing deployment, we consider that it's already deployed + if !ccipModule.ExistingDeployment { + if ccipModule.PriceRegistry == nil { + // we will update the price updates later based on source and dest PriceUpdates + ccipModule.PriceRegistry, err = cd.DeployPriceRegistry( + []common.Address{ + common.HexToAddress(ccipModule.FeeToken.Address()), + common.HexToAddress(ccipModule.WrappedNative.Hex()), + }) + if err != nil { + return fmt.Errorf("deploying PriceRegistry shouldn't fail %w", err) + } + err = ccipModule.ChainClient.WaitForEvents() + if err != nil { + return fmt.Errorf("error in waiting for PriceRegistry deployment %w", err) + } + } else { + ccipModule.PriceRegistry, err = cd.NewPriceRegistry(ccipModule.PriceRegistry.EthAddress) + if err != nil { + return fmt.Errorf("getting new PriceRegistry contract shouldn't fail %w", err) + } + } + } + if ccipModule.MulticallContract == (common.Address{}) && ccipModule.MulticallEnabled { + ccipModule.MulticallContract, err = cd.DeployMultiCallContract() + if err != nil { + return fmt.Errorf("deploying multicall contract shouldn't fail %w", err) + } + } + + // if the version is after 1.4.0, we need 
to deploy TokenAdminRegistry + // no need to have token admin registry for existing deployment, we consider that it's already deployed + if contracts.NeedTokenAdminRegistry() && !ccipModule.ExistingDeployment { + if ccipModule.TokenAdminRegistry == nil { + // deploy token admin registry + ccipModule.TokenAdminRegistry, err = cd.DeployTokenAdminRegistry() + if err != nil { + return fmt.Errorf("deploying token admin registry shouldn't fail %w", err) + } + err = ccipModule.ChainClient.WaitForEvents() + if err != nil { + return fmt.Errorf("error in waiting for token admin registry deployment %w", err) + } + + if len(ccipModule.BridgeTokens) != len(ccipModule.BridgeTokenPools) { + return fmt.Errorf("tokens number %d and pools number %d do not match", len(ccipModule.BridgeTokens), len(ccipModule.BridgeTokenPools)) + } + // add all pools to registry + for i, pool := range ccipModule.BridgeTokenPools { + token := ccipModule.BridgeTokens[i] + err := ccipModule.TokenAdminRegistry.SetAdminAndRegisterPool(token.ContractAddress, pool.EthAddress) + if err != nil { + return fmt.Errorf("error setting up token %s and pool %s on TokenAdminRegistry : %w", token.Address(), pool.Address(), err) + } + } + err = ccipModule.ChainClient.WaitForEvents() + if err != nil { + return fmt.Errorf("error in waiting for token admin registry set up with tokens and pools %w", err) + } + } else { + ccipModule.TokenAdminRegistry, err = cd.NewTokenAdminRegistry(ccipModule.TokenAdminRegistry.EthAddress) + if err != nil { + return fmt.Errorf("getting new token admin registry contract shouldn't fail %w", err) + } + } + } + ccipModule.Logger.Info().Msg("Finished deploying common contracts") + // approve router to spend fee token + return ccipModule.ApproveTokens() +} + +func (ccipModule *CCIPCommon) AvgBlockTime(ctx context.Context) (time.Duration, error) { + return ccipModule.ChainClient.AvgBlockTime(ctx) +} + +// DynamicPriceGetterConfig specifies the configuration for the price getter in price pipeline. 
+// This should match pricegetter.DynamicPriceGetterConfig in core/services/ocr2/plugins/ccip/internal/pricegetter +type DynamicPriceGetterConfig struct { + AggregatorPrices map[common.Address]AggregatorPriceConfig `json:"aggregatorPrices"` + StaticPrices map[common.Address]StaticPriceConfig `json:"staticPrices"` +} + +func (d *DynamicPriceGetterConfig) AddPriceConfig( + tokenAddr string, + aggregatorMap map[common.Address]*contracts.MockAggregator, + price *big.Int, + chainID uint64, +) error { + aggregatorContract, ok := aggregatorMap[common.HexToAddress(tokenAddr)] + if !ok || aggregatorContract == nil { + return d.AddStaticPriceConfig(tokenAddr, chainID, price) + } + return d.AddAggregatorPriceConfig(tokenAddr, aggregatorMap, price) +} + +func (d *DynamicPriceGetterConfig) AddAggregatorPriceConfig( + tokenAddr string, + aggregatorMap map[common.Address]*contracts.MockAggregator, + price *big.Int, +) error { + aggregatorContract, ok := aggregatorMap[common.HexToAddress(tokenAddr)] + if !ok || aggregatorContract == nil { + return fmt.Errorf("aggregator contract not found for token %s", tokenAddr) + } + // update round Data + err := aggregatorContract.UpdateRoundData(price, nil, nil) + if err != nil { + return fmt.Errorf("error in updating round data %w", err) + } + + d.AggregatorPrices[common.HexToAddress(tokenAddr)] = AggregatorPriceConfig{ + ChainID: aggregatorContract.ChainID(), + AggregatorContractAddress: aggregatorContract.ContractAddress, + } + return nil +} + +func (d *DynamicPriceGetterConfig) AddStaticPriceConfig(tokenAddr string, chainID uint64, price *big.Int) error { + d.StaticPrices[common.HexToAddress(tokenAddr)] = StaticPriceConfig{ + ChainID: chainID, + Price: price, + } + return nil +} + +func (d *DynamicPriceGetterConfig) String() (string, error) { + tokenPricesConfigBytes, err := json.MarshalIndent(d, "", " ") + if err != nil { + return "", fmt.Errorf("error in marshalling token prices config %w", err) + } + return string(tokenPricesConfigBytes), nil +} + +// AggregatorPriceConfig specifies a price retrieved from an aggregator contract. +// This should match pricegetter.AggregatorPriceConfig in core/services/ocr2/plugins/ccip/internal/pricegetter +type AggregatorPriceConfig struct { + ChainID uint64 `json:"chainID,string"` + AggregatorContractAddress common.Address `json:"contractAddress"` +} + +// StaticPriceConfig specifies a price defined statically. 
+// This should match pricegetter.StaticPriceConfig in core/services/ocr2/plugins/ccip/internal/pricegetter +type StaticPriceConfig struct { + ChainID uint64 `json:"chainID,string"` + Price *big.Int `json:"price"` +} + +func NewCCIPCommonFromConfig( + logger *zerolog.Logger, + testGroupConf *testconfig.CCIPTestGroupConfig, + chainClient blockchain.EVMClient, + laneConfig *laneconfig.LaneConfig, +) (*CCIPCommon, error) { + newCCIPModule, err := DefaultCCIPModule(logger, testGroupConf, chainClient) + if err != nil { + return nil, err + } + newCD := newCCIPModule.Deployer + newCCIPModule.LoadContractAddresses(laneConfig, testGroupConf.TokenConfig.NoOfTokensPerChain) + if newCCIPModule.TokenAdminRegistry != nil { + newCCIPModule.TokenAdminRegistry, err = newCD.NewTokenAdminRegistry(common.HexToAddress(newCCIPModule.TokenAdminRegistry.Address())) + if err != nil { + return nil, err + } + } + var arm *contracts.ARM + if newCCIPModule.ARM != nil { + arm, err = newCD.NewARMContract(*newCCIPModule.ARMContract) + if err != nil { + return nil, err + } + newCCIPModule.ARM = arm + } + var pools []*contracts.TokenPool + for i := range newCCIPModule.BridgeTokenPools { + // if there is usdc token, the corresponding pool will always be added as first one in the slice + if newCCIPModule.IsUSDCDeployment() && i == 0 { + pool, err := newCCIPModule.tokenDeployer.NewUSDCTokenPoolContract(common.HexToAddress(newCCIPModule.BridgeTokenPools[i].Address())) + if err != nil { + return nil, err + } + pools = append(pools, pool) + } else { + pool, err := newCCIPModule.tokenDeployer.NewLockReleaseTokenPoolContract(common.HexToAddress(newCCIPModule.BridgeTokenPools[i].Address())) + if err != nil { + return nil, err + } + pools = append(pools, pool) + } + } + newCCIPModule.BridgeTokenPools = pools + var tokens []*contracts.ERC20Token + for i := range newCCIPModule.BridgeTokens { + token, err := newCCIPModule.tokenDeployer.NewERC20TokenContract(common.HexToAddress(newCCIPModule.BridgeTokens[i].Address())) + if err != nil { + return nil, err + } + tokens = append(tokens, token) + } + newCCIPModule.BridgeTokens = tokens + priceAggregators := make(map[common.Address]*contracts.MockAggregator) + for k, v := range newCCIPModule.PriceAggregators { + aggregator, err := newCD.NewMockAggregator(v.ContractAddress) + if err != nil { + return nil, err + } + priceAggregators[k] = aggregator + } + newCCIPModule.PriceAggregators = priceAggregators + newCCIPModule.FeeToken, err = newCCIPModule.Deployer.NewLinkTokenContract(common.HexToAddress(newCCIPModule.FeeToken.Address())) + if err != nil { + return nil, err + } + if newCCIPModule.PriceRegistry != nil { + newCCIPModule.PriceRegistry, err = newCCIPModule.Deployer.NewPriceRegistry(common.HexToAddress(newCCIPModule.PriceRegistry.Address())) + if err != nil { + return nil, err + } + } + newCCIPModule.Router, err = newCCIPModule.Deployer.NewRouter(common.HexToAddress(newCCIPModule.Router.Address())) + if err != nil { + return nil, err + } + if newCCIPModule.TokenTransmitter != nil { + newCCIPModule.TokenTransmitter, err = newCCIPModule.Deployer.NewTokenTransmitter(newCCIPModule.TokenTransmitter.ContractAddress) + if err != nil { + return nil, err + } + } + return newCCIPModule, nil +} + +func DefaultCCIPModule( + logger *zerolog.Logger, + testGroupConf *testconfig.CCIPTestGroupConfig, + chainClient blockchain.EVMClient, +) (*CCIPCommon, error) { + networkCfg := chainClient.GetNetworkConfig() + tokenDeployerChainClient, err := blockchain.ConcurrentEVMClient(*networkCfg, nil, chainClient, 
*logger) + if err != nil { + return nil, errors.WithStack(fmt.Errorf("failed to create token deployment chain client for %s: %w", networkCfg.Name, err)) + } + // If we want to deploy tokens as a non CCIP owner, we need to set the default wallet to something other than the first one. The first wallet is used as default CCIP owner for all other ccip contract deployment. + // This is not needed for existing deployment as the tokens and pools are already deployed. + if contracts.NeedTokenAdminRegistry() && + !pointer.GetBool(testGroupConf.TokenConfig.CCIPOwnerTokens) && + !pointer.GetBool(testGroupConf.ExistingDeployment) && + len(tokenDeployerChainClient.GetWallets()) > 1 { + if err = tokenDeployerChainClient.SetDefaultWallet(1); err != nil { + return nil, errors.WithStack(fmt.Errorf("failed to set default wallet for token deployment client %s: %w", networkCfg.Name, err)) + } + } + cd, err := contracts.NewCCIPContractsDeployer(logger, chainClient) + if err != nil { + return nil, err + } + tokenCD, err := contracts.NewCCIPContractsDeployer(logger, tokenDeployerChainClient) + if err != nil { + return nil, err + } + return &CCIPCommon{ + Logger: logger, + ChainClient: chainClient, + Deployer: cd, + tokenDeployer: tokenCD, + RateLimiterConfig: contracts.RateLimiterConfig{ + Rate: contracts.FiftyCoins, + Capacity: contracts.HundredCoins, + }, + ExistingDeployment: pointer.GetBool(testGroupConf.ExistingDeployment), + MulticallEnabled: pointer.GetBool(testGroupConf.MulticallInOneTx), + USDCMockDeployment: testGroupConf.USDCMockDeployment, + NoOfTokensNeedingDynamicPrice: pointer.GetInt(testGroupConf.TokenConfig.NoOfTokensWithDynamicPrice), + poolFunds: testhelpers.Link(5), + gasUpdateWatcherMu: &sync.Mutex{}, + gasUpdateWatcher: make(map[uint64]*big.Int), + tokenPriceUpdateWatcherMu: &sync.Mutex{}, + tokenPriceUpdateWatcher: make(map[common.Address]*big.Int), + PriceAggregators: make(map[common.Address]*contracts.MockAggregator), + }, nil +} + +type SourceCCIPModule struct { + Common *CCIPCommon + Sender common.Address + TransferAmount []*big.Int + MsgDataLength int64 + DestinationChainId uint64 + DestChainSelector uint64 + DestNetworkName string + OnRamp *contracts.OnRamp + SrcStartBlock uint64 + CCIPSendRequestedWatcher *sync.Map // map[string]*evm_2_evm_onramp.EVM2EVMOnRampCCIPSendRequested + NewFinalizedBlockNum atomic.Uint64 + NewFinalizedBlockTimestamp atomic.Time +} + +func (sourceCCIP *SourceCCIPModule) PayCCIPFeeToOwnerAddress() error { + isNativeFee := sourceCCIP.Common.FeeToken.EthAddress == common.HexToAddress("0x0") + if isNativeFee { + err := sourceCCIP.OnRamp.WithdrawNonLinkFees(sourceCCIP.Common.WrappedNative) + if err != nil { + return err + } + } else { + err := sourceCCIP.OnRamp.SetNops() + if err != nil { + return err + } + err = sourceCCIP.OnRamp.PayNops() + if err != nil { + return err + } + } + return nil +} + +func (sourceCCIP *SourceCCIPModule) LoadContracts(conf *laneconfig.LaneConfig) { + if conf != nil { + cfg, ok := conf.SrcContracts[sourceCCIP.DestNetworkName] + if ok { + if common.IsHexAddress(cfg.OnRamp) { + sourceCCIP.OnRamp = &contracts.OnRamp{ + EthAddress: common.HexToAddress(cfg.OnRamp), + } + } + if cfg.DeployedAt > 0 { + sourceCCIP.SrcStartBlock = cfg.DeployedAt + } + } + } +} + +// SetAllTokenTransferFeeConfigs sets a default transfer fee config for all BridgeTokens on the CCIP source chain. +// enableAggregateRateLimit is used to enable/disable aggregate rate limit for all BridgeTokens. 
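Editor's note on the units used in the config below: `DeciBps` are tenths of a basis point, and the min/max caps are expressed in US cents. The sketch below only illustrates those units with the defaults set here; it is not the OnRamp's actual fee calculation, which also adds network fees and gas/byte overheads:

```go
// Illustration of the default token-transfer fee parameters used below.
const (
	deciBps        = 50          // 5 bps == 0.05% (100_000 deci-bps == 100%)
	minFeeUSDCents = 50          // $0.50 floor
	maxFeeUSDCents = 100_000_000 // $1,000,000 ceiling
)

// approxTokenTransferFeeUSDCents applies the percentage and the caps to a
// transfer value given in US cents.
func approxTokenTransferFeeUSDCents(transferUSDCents uint64) uint64 {
	fee := transferUSDCents * deciBps / 100_000
	if fee < minFeeUSDCents {
		return minFeeUSDCents
	}
	if fee > maxFeeUSDCents {
		return maxFeeUSDCents
	}
	return fee
}
```

With these defaults, a $10,000 transfer (1,000,000 cents) works out to 500 cents, i.e. $5, which matches 5 bps.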
+func (sourceCCIP *SourceCCIPModule) SetAllTokenTransferFeeConfigs(enableAggregateRateLimit bool) error { + var tokenTransferFeeConfig []evm_2_evm_onramp.EVM2EVMOnRampTokenTransferFeeConfigArgs + var tokens, pools []common.Address + if len(sourceCCIP.Common.BridgeTokens) != len(sourceCCIP.Common.BridgeTokenPools) { + return fmt.Errorf("tokens number %d and pools number %d do not match", len(sourceCCIP.Common.BridgeTokens), len(sourceCCIP.Common.BridgeTokenPools)) + } + for i, token := range sourceCCIP.Common.BridgeTokens { + tokens = append(tokens, token.ContractAddress) + pools = append(pools, sourceCCIP.Common.BridgeTokenPools[i].EthAddress) + conf := evm_2_evm_onramp.EVM2EVMOnRampTokenTransferFeeConfigArgs{ + Token: token.ContractAddress, + MinFeeUSDCents: 50, // $0.5 + MaxFeeUSDCents: 1_000_000_00, // $ 1 million + DeciBps: 5_0, // 5 bps + AggregateRateLimitEnabled: enableAggregateRateLimit, + } + if sourceCCIP.Common.BridgeTokenPools[i].IsUSDC() { + conf.DestBytesOverhead = defaultUSDCDestBytesOverhead + conf.DestGasOverhead = defaultUSDCDestGasOverhead + } + tokenTransferFeeConfig = append(tokenTransferFeeConfig, conf) + } + err := sourceCCIP.OnRamp.SetTokenTransferFeeConfig(tokenTransferFeeConfig) + if err != nil { + return fmt.Errorf("setting token transfer fee config shouldn't fail %w", err) + } + // this is required for v1.2.0 ramps + err = sourceCCIP.OnRamp.ApplyPoolUpdates(tokens, pools) + if err != nil { + return fmt.Errorf("applying pool updates shouldn't fail %w", err) + } + return nil +} + +// DeployContracts deploys all CCIP contracts specific to the source chain +func (sourceCCIP *SourceCCIPModule) DeployContracts(lane *laneconfig.LaneConfig) error { + var err error + contractDeployer := sourceCCIP.Common.Deployer + log.Info().Msg("Deploying source chain specific contracts") + + sourceCCIP.LoadContracts(lane) + sourceChainSelector, err := chainselectors.SelectorFromChainId(sourceCCIP.Common.ChainClient.GetChainID().Uint64()) + if err != nil { + return fmt.Errorf("getting chain selector shouldn't fail %w", err) + } + + if sourceCCIP.OnRamp == nil { + if sourceCCIP.Common.ExistingDeployment { + return fmt.Errorf("existing deployment is set to true but no onramp address is provided") + } + var tokensAndPools []evm_2_evm_onramp_1_2_0.InternalPoolUpdate + var tokenTransferFeeConfig []evm_2_evm_onramp.EVM2EVMOnRampTokenTransferFeeConfigArgs + + sourceCCIP.SrcStartBlock, err = sourceCCIP.Common.ChainClient.LatestBlockNumber(context.Background()) + if err != nil { + return fmt.Errorf("getting latest block number shouldn't fail %w", err) + } + var tokenAdminReg common.Address + if contracts.NeedTokenAdminRegistry() { + if sourceCCIP.Common.TokenAdminRegistry == nil { + return fmt.Errorf("token admin registry contract address is not provided in lane config") + } + tokenAdminReg = sourceCCIP.Common.TokenAdminRegistry.EthAddress + } + sourceCCIP.OnRamp, err = contractDeployer.DeployOnRamp( + sourceChainSelector, + sourceCCIP.DestChainSelector, + tokensAndPools, + *sourceCCIP.Common.ARMContract, + sourceCCIP.Common.Router.EthAddress, + sourceCCIP.Common.PriceRegistry.EthAddress, + tokenAdminReg, + sourceCCIP.Common.RateLimiterConfig, + []evm_2_evm_onramp.EVM2EVMOnRampFeeTokenConfigArgs{ + { + Token: common.HexToAddress(sourceCCIP.Common.FeeToken.Address()), + NetworkFeeUSDCents: 1_00, + GasMultiplierWeiPerEth: GasFeeMultiplier, + PremiumMultiplierWeiPerEth: 1e18, + Enabled: true, + }, + { + Token: sourceCCIP.Common.WrappedNative, + NetworkFeeUSDCents: 1_00, + GasMultiplierWeiPerEth: 
GasFeeMultiplier, + PremiumMultiplierWeiPerEth: 1e18, + Enabled: true, + }, + }, tokenTransferFeeConfig, sourceCCIP.Common.FeeToken.EthAddress) + + if err != nil { + return fmt.Errorf("onRamp deployment shouldn't fail %w", err) + } + + err = sourceCCIP.Common.ChainClient.WaitForEvents() + if err != nil { + return fmt.Errorf("waiting for onRamp deployment shouldn't fail %w", err) + } + + // update source Router with OnRamp address + err = sourceCCIP.Common.Router.SetOnRamp(sourceCCIP.DestChainSelector, sourceCCIP.OnRamp.EthAddress) + if err != nil { + return fmt.Errorf("setting onramp on the router shouldn't fail %w", err) + } + // now sync the pools and tokens + err := sourceCCIP.SetAllTokenTransferFeeConfigs(true) + if err != nil { + return err + } + } else { + sourceCCIP.OnRamp, err = contractDeployer.NewOnRamp(sourceCCIP.OnRamp.EthAddress) + if err != nil { + return fmt.Errorf("getting new onramp contractshouldn't fail %w", err) + } + } + return nil +} + +func (sourceCCIP *SourceCCIPModule) CollectBalanceRequirements() []testhelpers.BalanceReq { + var balancesReq []testhelpers.BalanceReq + for _, token := range sourceCCIP.Common.BridgeTokens { + balancesReq = append(balancesReq, testhelpers.BalanceReq{ + Name: fmt.Sprintf("BridgeToken-%s-Address-%s", token.Address(), sourceCCIP.Sender.Hex()), + Addr: sourceCCIP.Sender, + Getter: GetterForLinkToken(token.BalanceOf, sourceCCIP.Sender.Hex()), + }) + } + for i, pool := range sourceCCIP.Common.BridgeTokenPools { + balancesReq = append(balancesReq, testhelpers.BalanceReq{ + Name: fmt.Sprintf("BridgeToken-%s-TokenPool-%s", sourceCCIP.Common.BridgeTokens[i].Address(), pool.Address()), + Addr: pool.EthAddress, + Getter: GetterForLinkToken(sourceCCIP.Common.BridgeTokens[i].BalanceOf, pool.Address()), + }) + } + + if sourceCCIP.Common.FeeToken.Address() != common.HexToAddress("0x0").String() { + balancesReq = append(balancesReq, testhelpers.BalanceReq{ + Name: fmt.Sprintf("FeeToken-%s-Address-%s", sourceCCIP.Common.FeeToken.Address(), sourceCCIP.Sender.Hex()), + Addr: sourceCCIP.Sender, + Getter: GetterForLinkToken(sourceCCIP.Common.FeeToken.BalanceOf, sourceCCIP.Sender.Hex()), + }) + balancesReq = append(balancesReq, testhelpers.BalanceReq{ + Name: fmt.Sprintf("FeeToken-%s-Router-%s", sourceCCIP.Common.FeeToken.Address(), sourceCCIP.Common.Router.Address()), + Addr: sourceCCIP.Common.Router.EthAddress, + Getter: GetterForLinkToken(sourceCCIP.Common.FeeToken.BalanceOf, sourceCCIP.Common.Router.Address()), + }) + balancesReq = append(balancesReq, testhelpers.BalanceReq{ + Name: fmt.Sprintf("FeeToken-%s-OnRamp-%s", sourceCCIP.Common.FeeToken.Address(), sourceCCIP.OnRamp.Address()), + Addr: sourceCCIP.OnRamp.EthAddress, + Getter: GetterForLinkToken(sourceCCIP.Common.FeeToken.BalanceOf, sourceCCIP.OnRamp.Address()), + }) + balancesReq = append(balancesReq, testhelpers.BalanceReq{ + Name: fmt.Sprintf("FeeToken-%s-Prices-%s", sourceCCIP.Common.FeeToken.Address(), sourceCCIP.Common.PriceRegistry.Address()), + Addr: sourceCCIP.Common.PriceRegistry.EthAddress, + Getter: GetterForLinkToken(sourceCCIP.Common.FeeToken.BalanceOf, sourceCCIP.Common.PriceRegistry.Address()), + }) + } + return balancesReq +} + +func (sourceCCIP *SourceCCIPModule) UpdateBalance( + noOfReq int64, + totalFee *big.Int, + balances *BalanceSheet, +) { + if len(sourceCCIP.TransferAmount) > 0 { + for i := range sourceCCIP.TransferAmount { + if sourceCCIP.TransferAmount[i] == nil { // nil transfer amount means no transfer for this token + continue + } + // if length of 
sourceCCIP.TransferAmount is more than available bridge token use first bridge token + token := sourceCCIP.Common.BridgeTokens[0] + if i < len(sourceCCIP.Common.BridgeTokens) { + token = sourceCCIP.Common.BridgeTokens[i] + } + name := fmt.Sprintf("BridgeToken-%s-Address-%s", token.Address(), sourceCCIP.Sender.Hex()) + balances.Update(name, BalanceItem{ + Address: sourceCCIP.Sender, + Getter: GetterForLinkToken(token.BalanceOf, sourceCCIP.Sender.Hex()), + AmtToSub: bigmath.Mul(big.NewInt(noOfReq), sourceCCIP.TransferAmount[i]), + }) + } + for i := range sourceCCIP.TransferAmount { + // if length of sourceCCIP.TransferAmount is more than available bridge token use first bridge token + pool := sourceCCIP.Common.BridgeTokenPools[0] + index := 0 + if i < len(sourceCCIP.Common.BridgeTokenPools) { + pool = sourceCCIP.Common.BridgeTokenPools[i] + index = i + } + + name := fmt.Sprintf("BridgeToken-%s-TokenPool-%s", sourceCCIP.Common.BridgeTokens[index].Address(), pool.Address()) + balances.Update(name, BalanceItem{ + Address: pool.EthAddress, + Getter: GetterForLinkToken(sourceCCIP.Common.BridgeTokens[index].BalanceOf, pool.Address()), + AmtToAdd: bigmath.Mul(big.NewInt(noOfReq), sourceCCIP.TransferAmount[i]), + }) + } + } + if sourceCCIP.Common.FeeToken.Address() != common.HexToAddress("0x0").String() { + name := fmt.Sprintf("FeeToken-%s-Address-%s", sourceCCIP.Common.FeeToken.Address(), sourceCCIP.Sender.Hex()) + balances.Update(name, BalanceItem{ + Address: sourceCCIP.Sender, + Getter: GetterForLinkToken(sourceCCIP.Common.FeeToken.BalanceOf, sourceCCIP.Sender.Hex()), + AmtToSub: totalFee, + }) + name = fmt.Sprintf("FeeToken-%s-Prices-%s", sourceCCIP.Common.FeeToken.Address(), sourceCCIP.Common.PriceRegistry.Address()) + balances.Update(name, BalanceItem{ + Address: sourceCCIP.Common.PriceRegistry.EthAddress, + Getter: GetterForLinkToken(sourceCCIP.Common.FeeToken.BalanceOf, sourceCCIP.Common.PriceRegistry.Address()), + }) + name = fmt.Sprintf("FeeToken-%s-Router-%s", sourceCCIP.Common.FeeToken.Address(), sourceCCIP.Common.Router.Address()) + balances.Update(name, BalanceItem{ + Address: sourceCCIP.Common.Router.EthAddress, + Getter: GetterForLinkToken(sourceCCIP.Common.FeeToken.BalanceOf, sourceCCIP.Common.Router.Address()), + }) + name = fmt.Sprintf("FeeToken-%s-OnRamp-%s", sourceCCIP.Common.FeeToken.Address(), sourceCCIP.OnRamp.Address()) + balances.Update(name, BalanceItem{ + Address: sourceCCIP.OnRamp.EthAddress, + Getter: GetterForLinkToken(sourceCCIP.Common.FeeToken.BalanceOf, sourceCCIP.OnRamp.Address()), + AmtToAdd: totalFee, + }) + } +} + +func (sourceCCIP *SourceCCIPModule) AssertSendRequestedLogFinalized( + lggr *zerolog.Logger, + txHash common.Hash, + sendReqData []*contracts.SendReqEventData, + prevEventAt time.Time, + reqStats []*testreporters.RequestStat, +) (time.Time, uint64, error) { + if len(sendReqData) != len(reqStats) { + return time.Time{}, 0, fmt.Errorf("sendReqData and reqStats length mismatch") + } + var gasUsed uint64 + receipt, err := sourceCCIP.Common.ChainClient.GetTxReceipt(txHash) + if err == nil { + gasUsed = receipt.GasUsed + } + lggr.Info().Msg("Waiting for CCIPSendRequested event log to be finalized") + finalizedBlockNum, finalizedAt, err := sourceCCIP.Common.ChainClient.WaitForFinalizedTx(txHash) + if err != nil || finalizedBlockNum == nil { + for i, stat := range reqStats { + stat.UpdateState(lggr, stat.SeqNum, testreporters.SourceLogFinalized, time.Since(prevEventAt), testreporters.Failure, &testreporters.TransactionStats{ + MsgID: fmt.Sprintf("0x%x", 
sendReqData[i].MessageId[:]), + Fee: sendReqData[i].Fee.String(), + NoOfTokensSent: sendReqData[i].NoOfTokens, + MessageBytesLength: int64(sendReqData[i].DataLength), + TxHash: txHash.Hex(), + }) + } + return time.Time{}, 0, fmt.Errorf("error waiting for CCIPSendRequested event log to be finalized - %w", err) + } + for i, stat := range reqStats { + stat.UpdateState(lggr, stat.SeqNum, testreporters.SourceLogFinalized, finalizedAt.Sub(prevEventAt), testreporters.Success, + &testreporters.TransactionStats{ + MsgID: fmt.Sprintf("0x%x", sendReqData[i].MessageId[:]), + Fee: sendReqData[i].Fee.String(), + GasUsed: gasUsed, + NoOfTokensSent: sendReqData[i].NoOfTokens, + MessageBytesLength: int64(sendReqData[i].DataLength), + TxHash: txHash.Hex(), + FinalizedByBlock: finalizedBlockNum.String(), + FinalizedAt: finalizedAt.String(), + }) + } + return finalizedAt, finalizedBlockNum.Uint64(), nil +} + +func (sourceCCIP *SourceCCIPModule) IsRequestTriggeredWithinTimeframe(timeframe *commonconfig.Duration) *time.Time { + if timeframe == nil { + return nil + } + var foundAt *time.Time + lastSeenTimestamp := time.Now().UTC().Add(-timeframe.Duration()) + sourceCCIP.CCIPSendRequestedWatcher.Range(func(_, value any) bool { + if sendRequestedEvents, exists := value.([]*evm_2_evm_onramp.EVM2EVMOnRampCCIPSendRequested); exists { + for _, sendRequestedEvent := range sendRequestedEvents { + raw := sendRequestedEvent.Raw + hdr, err := sourceCCIP.Common.ChainClient.HeaderByNumber(context.Background(), big.NewInt(int64(raw.BlockNumber))) + if err == nil { + if hdr.Timestamp.After(lastSeenTimestamp) { + foundAt = pointer.ToTime(hdr.Timestamp) + return false + } + } + } + } + return true + }) + return foundAt +} + +func (sourceCCIP *SourceCCIPModule) AssertEventCCIPSendRequested( + lggr *zerolog.Logger, + txHash string, + timeout time.Duration, + prevEventAt time.Time, + reqStat []*testreporters.RequestStat, +) ([]*contracts.SendReqEventData, time.Time, error) { + lggr.Info().Str("Timeout", timeout.String()).Msg("Waiting for CCIPSendRequested event") + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + timer := time.NewTimer(timeout) + defer timer.Stop() + resetTimer := 0 + for { + select { + case <-ticker.C: + value, ok := sourceCCIP.CCIPSendRequestedWatcher.Load(txHash) + if ok { + // if sendrequested events are found, check if the number of events are same as the number of requests + if sendRequestedEvents, exists := value.([]*contracts.SendReqEventData); exists && len(sendRequestedEvents) == len(reqStat) { + // if the value is processed, delete it from the map + sourceCCIP.CCIPSendRequestedWatcher.Delete(txHash) + for i, sendRequestedEvent := range sendRequestedEvents { + seqNum := sendRequestedEvent.SequenceNumber + lggr = ptr.Ptr(lggr.With(). + Uint64("SequenceNumber", seqNum). + Str("MsgID", fmt.Sprintf("0x%x", sendRequestedEvent.MessageId[:])). 
+ Logger()) + // prevEventAt is the time when the message was successful, this should be same as the time when the event was emitted + reqStat[i].UpdateState(lggr, seqNum, testreporters.CCIPSendRe, 0, testreporters.Success, nil) + } + var err error + if len(sendRequestedEvents) == 0 { + err = fmt.Errorf("message logs not found, no CCIPSendRequested event found for tx %s", txHash) + } + return sendRequestedEvents, prevEventAt, err + } + } + case <-timer.C: + // if there is connection issue reset the timer : + if sourceCCIP.Common.IsConnectionRestoredRecently != nil && !sourceCCIP.Common.IsConnectionRestoredRecently.Load() { + if resetTimer > 2 { + for _, stat := range reqStat { + stat.UpdateState(lggr, 0, testreporters.CCIPSendRe, time.Since(prevEventAt), testreporters.Failure, + &testreporters.TransactionStats{ + TxHash: txHash, + }) + } + return nil, time.Now(), fmt.Errorf("possible RPC issue - CCIPSendRequested event is not found for tx %s", txHash) + } + resetTimer++ + timer.Reset(timeout) + lggr.Info().Int("count of reset", resetTimer).Msg("Resetting timer to validate CCIPSendRequested event") + continue + } + for _, stat := range reqStat { + stat.UpdateState(lggr, 0, testreporters.CCIPSendRe, time.Since(prevEventAt), testreporters.Failure, + &testreporters.TransactionStats{ + TxHash: txHash, + }) + } + return nil, time.Now(), fmt.Errorf("CCIPSendRequested event is not found for tx %s", txHash) + } + } +} + +// CCIPMsg constructs the message for a CCIP request +func (sourceCCIP *SourceCCIPModule) CCIPMsg( + receiver common.Address, + gasLimit *big.Int, +) (router.ClientEVM2AnyMessage, error) { + length := sourceCCIP.MsgDataLength + var data string + if length > 0 { + b := make([]byte, length) + _, err := crypto_rand.Read(b) + if err != nil { + return router.ClientEVM2AnyMessage{}, fmt.Errorf("failed generating random string: %w", err) + } + randomString := base64.URLEncoding.EncodeToString(b) + data = randomString[:length] + } + + tokenAndAmounts := []router.ClientEVMTokenAmount{} + for i, amount := range sourceCCIP.TransferAmount { + if amount == nil { // make nil transfer amount 0 to avoid panics + sourceCCIP.TransferAmount[i] = big.NewInt(0) + } + token := sourceCCIP.Common.BridgeTokens[0] + // if length of sourceCCIP.TransferAmount is more than available bridge token use first bridge token + if i < len(sourceCCIP.Common.BridgeTokens) { + token = sourceCCIP.Common.BridgeTokens[i] + } + if amount == nil || amount.Cmp(big.NewInt(0)) == 0 { + log.Warn(). + Str("Token Address", token.Address()). + Int("Token Index", i). 
+ Msg("Not sending a request for token transfer as the amount is 0 or nil") + continue + } + tokenAndAmounts = append(tokenAndAmounts, router.ClientEVMTokenAmount{ + Token: common.HexToAddress(token.Address()), Amount: amount, + }) + } + + receiverAddr, err := utils.ABIEncode(`[{"type":"address"}]`, receiver) + if err != nil { + return router.ClientEVM2AnyMessage{}, fmt.Errorf("failed encoding the receiver address: %w", err) + } + + extraArgsV1, err := testhelpers.GetEVMExtraArgsV1(gasLimit, false) + if err != nil { + return router.ClientEVM2AnyMessage{}, fmt.Errorf("failed encoding the options field: %w", err) + } + // form the message for transfer + return router.ClientEVM2AnyMessage{ + Receiver: receiverAddr, + Data: []byte(data), + TokenAmounts: tokenAndAmounts, + FeeToken: common.HexToAddress(sourceCCIP.Common.FeeToken.Address()), + ExtraArgs: extraArgsV1, + }, nil +} + +// SendRequest sends a CCIP request to the source chain's router contract +func (sourceCCIP *SourceCCIPModule) SendRequest( + receiver common.Address, + gasLimit *big.Int, +) (common.Hash, time.Duration, *big.Int, error) { + var d time.Duration + destChainSelector, err := chainselectors.SelectorFromChainId(sourceCCIP.DestinationChainId) + if err != nil { + return common.Hash{}, d, nil, fmt.Errorf("failed getting the chain selector: %w", err) + } + // form the message for transfer + msg, err := sourceCCIP.CCIPMsg(receiver, gasLimit) + if err != nil { + return common.Hash{}, d, nil, fmt.Errorf("failed forming the ccip msg: %w", err) + } + + fee, err := sourceCCIP.Common.Router.GetFee(destChainSelector, msg) + if err != nil { + log.Info().Interface("Msg", msg).Msg("CCIP msg") + reason, _ := blockchain.RPCErrorFromError(err) + if reason != "" { + return common.Hash{}, d, nil, fmt.Errorf("failed getting the fee: %s", reason) + } + return common.Hash{}, d, nil, fmt.Errorf("failed getting the fee: %w", err) + } + log.Info().Str("Fee", fee.String()).Msg("Calculated fee") + + var sendTx *types.Transaction + timeNow := time.Now() + feeToken := common.HexToAddress(sourceCCIP.Common.FeeToken.Address()) + // initiate the transfer + // if the fee token address is 0x0 it will use Native as fee token and the fee amount should be mentioned in bind.TransactOpts's value + if feeToken != (common.Address{}) { + sendTx, err = sourceCCIP.Common.Router.CCIPSendAndProcessTx(destChainSelector, msg, nil) + if err != nil { + txHash := common.Hash{} + if sendTx != nil { + txHash = sendTx.Hash() + } + return txHash, time.Since(timeNow), nil, fmt.Errorf("failed initiating the transfer ccip-send: %w", err) + } + } else { + sendTx, err = sourceCCIP.Common.Router.CCIPSendAndProcessTx(destChainSelector, msg, fee) + if err != nil { + txHash := common.Hash{} + if sendTx != nil { + txHash = sendTx.Hash() + } + return txHash, time.Since(timeNow), nil, fmt.Errorf("failed initiating the transfer ccip-send: %w", err) + } + } + + log.Info(). + Str("Network", sourceCCIP.Common.ChainClient.GetNetworkName()). + Str("Send token transaction", sendTx.Hash().String()). + Str("lane", fmt.Sprintf("%s-->%s", sourceCCIP.Common.ChainClient.GetNetworkName(), sourceCCIP.DestNetworkName)). 
+ Msg("Sending token") + return sendTx.Hash(), time.Since(timeNow), fee, nil +} + +func DefaultSourceCCIPModule( + logger *zerolog.Logger, + testConf *testconfig.CCIPTestGroupConfig, + chainClient blockchain.EVMClient, + destChainId uint64, + destChain string, + laneConf *laneconfig.LaneConfig, +) (*SourceCCIPModule, error) { + cmn, err := NewCCIPCommonFromConfig( + logger, testConf, chainClient, laneConf, + ) + if err != nil { + return nil, err + } + + destChainSelector, err := chainselectors.SelectorFromChainId(destChainId) + if err != nil { + return nil, fmt.Errorf("failed getting the chain selector: %w", err) + } + source := &SourceCCIPModule{ + Common: cmn, + TransferAmount: testConf.MsgDetails.TransferAmounts(), + MsgDataLength: pointer.GetInt64(testConf.MsgDetails.DataLength), + DestinationChainId: destChainId, + DestChainSelector: destChainSelector, + DestNetworkName: destChain, + Sender: common.HexToAddress(chainClient.GetDefaultWallet().Address()), + CCIPSendRequestedWatcher: &sync.Map{}, + } + + return source, nil +} + +type DestCCIPModule struct { + Common *CCIPCommon + SourceChainId uint64 + SourceChainSelector uint64 + SourceNetworkName string + CommitStore *contracts.CommitStore + ReceiverDapp *contracts.ReceiverDapp + OffRamp *contracts.OffRamp + ReportAcceptedWatcher *sync.Map + ExecStateChangedWatcher *sync.Map + ReportBlessedWatcher *sync.Map + ReportBlessedBySeqNum *sync.Map + NextSeqNumToCommit *atomic.Uint64 + DestStartBlock uint64 +} + +func (destCCIP *DestCCIPModule) LoadContracts(conf *laneconfig.LaneConfig) { + if conf != nil { + cfg, ok := conf.DestContracts[destCCIP.SourceNetworkName] + if ok { + if common.IsHexAddress(cfg.OffRamp) { + destCCIP.OffRamp = &contracts.OffRamp{ + EthAddress: common.HexToAddress(cfg.OffRamp), + } + } + if common.IsHexAddress(cfg.CommitStore) { + destCCIP.CommitStore = &contracts.CommitStore{ + EthAddress: common.HexToAddress(cfg.CommitStore), + } + } + if common.IsHexAddress(cfg.ReceiverDapp) { + destCCIP.ReceiverDapp = &contracts.ReceiverDapp{ + EthAddress: common.HexToAddress(cfg.ReceiverDapp), + } + } + } + } +} + +func (destCCIP *DestCCIPModule) SyncTokensAndPools(srcTokens []*contracts.ERC20Token) error { + if destCCIP.OffRamp.Instance.V1_2_0 == nil { + return nil + } + var sourceTokens, pools []common.Address + + for _, token := range srcTokens { + sourceTokens = append(sourceTokens, common.HexToAddress(token.Address())) + } + + for i := range destCCIP.Common.BridgeTokenPools { + pools = append(pools, destCCIP.Common.BridgeTokenPools[i].EthAddress) + } + if len(sourceTokens) != len(pools) { + return fmt.Errorf("source token and destination pool length mismatch") + } + // if number of tokens are more than 10, then we need to split the tokens in batch of 10 and call sync + // otherwise the tx gets too large and we will get out of gas error + if len(sourceTokens) > 10 { + for i := 0; i < len(sourceTokens); i += 10 { + end := i + 10 + if end > len(sourceTokens) { + end = len(sourceTokens) + } + err := destCCIP.OffRamp.SyncTokensAndPools(sourceTokens[i:end], pools[i:end]) + if err != nil { + return err + } + } + return nil + } + return destCCIP.OffRamp.SyncTokensAndPools(sourceTokens, pools) +} + +// AddRateLimitTokens adds token pairs to the OffRamp's rate limiting +func (destCCIP *DestCCIPModule) AddRateLimitTokens(srcTokens, destTokens []*contracts.ERC20Token) error { + if destCCIP.OffRamp.Instance.Latest == nil { + return nil + } + if srcTokens == nil || destTokens == nil { + return fmt.Errorf("source or destination tokens are 
nil") + } + + if len(srcTokens) != len(destTokens) { + return fmt.Errorf("source and destination token length mismatch") + } + + var sourceTokenAddresses, destTokenAddresses []common.Address + + for i, token := range srcTokens { + sourceTokenAddresses = append(sourceTokenAddresses, common.HexToAddress(token.Address())) + destTokenAddresses = append(destTokenAddresses, common.HexToAddress(destTokens[i].Address())) + } + + // if number of tokens are more than 10, then we need to split the tokens in batch of 10 and update the rate limit + // otherwise the tx gets too large and we will get out of gas error + if len(sourceTokenAddresses) > 10 { + for i := 0; i < len(sourceTokenAddresses); i += 10 { + end := i + 10 + if end > len(sourceTokenAddresses) { + end = len(sourceTokenAddresses) + } + err := destCCIP.OffRamp.AddRateLimitTokens(sourceTokenAddresses[i:end], destTokenAddresses[i:end]) + if err != nil { + return err + } + } + return nil + } + + return destCCIP.OffRamp.AddRateLimitTokens(sourceTokenAddresses, destTokenAddresses) +} + +// RemoveRateLimitTokens removes token pairs from the OffRamp's rate limiting. +// If you ask to remove a token pair that doesn't exist, it will return an error. +func (destCCIP *DestCCIPModule) RemoveRateLimitTokens(ctx context.Context, srcTokens, destTokens []*contracts.ERC20Token) error { + if srcTokens == nil || destTokens == nil { + return fmt.Errorf("source or destination tokens are nil") + } + + if len(srcTokens) != len(destTokens) { + return fmt.Errorf("source and destination token length mismatch") + } + + var sourceTokenAddresses, destTokenAddresses []common.Address + + for i, token := range srcTokens { + sourceTokenAddresses = append(sourceTokenAddresses, common.HexToAddress(token.Address())) + destTokenAddresses = append(destTokenAddresses, common.HexToAddress(destTokens[i].Address())) + } + + return destCCIP.OffRamp.RemoveRateLimitTokens(ctx, sourceTokenAddresses, destTokenAddresses) +} + +// RemoveAllRateLimitTokens removes all token pairs from the OffRamp's rate limiting. 
+func (destCCIP *DestCCIPModule) RemoveAllRateLimitTokens(ctx context.Context) error { + return destCCIP.OffRamp.RemoveAllRateLimitTokens(ctx) +} + +// DeployContracts deploys all CCIP contracts specific to the destination chain +func (destCCIP *DestCCIPModule) DeployContracts( + sourceCCIP SourceCCIPModule, + lane *laneconfig.LaneConfig, +) error { + var err error + contractDeployer := destCCIP.Common.Deployer + log.Info().Msg("Deploying destination chain specific contracts") + destCCIP.LoadContracts(lane) + destChainSelector, err := chainselectors.SelectorFromChainId(destCCIP.Common.ChainClient.GetChainID().Uint64()) + if err != nil { + return fmt.Errorf("failed to get chain selector for destination chain id %d: %w", destCCIP.Common.ChainClient.GetChainID().Uint64(), err) + } + destCCIP.DestStartBlock, err = destCCIP.Common.ChainClient.LatestBlockNumber(context.Background()) + if err != nil { + return fmt.Errorf("getting latest block number shouldn't fail %w", err) + } + if !destCCIP.Common.ExistingDeployment && len(sourceCCIP.Common.BridgeTokenPools) != len(destCCIP.Common.BridgeTokenPools) { + return fmt.Errorf("source and destination token pool number does not match") + } + + if destCCIP.CommitStore == nil { + if destCCIP.Common.ExistingDeployment { + return fmt.Errorf("commit store address not provided in lane config") + } + // commitStore responsible for validating the transfer message + destCCIP.CommitStore, err = contractDeployer.DeployCommitStore( + destCCIP.SourceChainSelector, + destChainSelector, + sourceCCIP.OnRamp.EthAddress, + *destCCIP.Common.ARMContract, + ) + if err != nil { + return fmt.Errorf("deploying commitstore shouldn't fail %w", err) + } + err = destCCIP.Common.ChainClient.WaitForEvents() + if err != nil { + return fmt.Errorf("waiting for commitstore deployment shouldn't fail %w", err) + } + + // CommitStore can update + err = destCCIP.Common.PriceRegistry.AddPriceUpdater(destCCIP.CommitStore.EthAddress) + if err != nil { + return fmt.Errorf("setting commitstore as fee updater shouldn't fail %w", err) + } + err = destCCIP.Common.ChainClient.WaitForEvents() + if err != nil { + return fmt.Errorf("waiting for setting commitstore as fee updater shouldn't fail %w", err) + } + } else { + destCCIP.CommitStore, err = contractDeployer.NewCommitStore(destCCIP.CommitStore.EthAddress) + if err != nil { + return fmt.Errorf("getting new commitstore shouldn't fail %w", err) + } + } + + if destCCIP.OffRamp == nil { + if destCCIP.Common.ExistingDeployment { + return fmt.Errorf("offramp address not provided in lane config") + } + var tokenAdminReg common.Address + if contracts.NeedTokenAdminRegistry() { + if destCCIP.Common.TokenAdminRegistry == nil { + return fmt.Errorf("token admin registry contract address is not provided in lane config") + } + tokenAdminReg = destCCIP.Common.TokenAdminRegistry.EthAddress + } + destCCIP.OffRamp, err = contractDeployer.DeployOffRamp( + destCCIP.SourceChainSelector, + destChainSelector, + destCCIP.CommitStore.EthAddress, + sourceCCIP.OnRamp.EthAddress, + destCCIP.Common.RateLimiterConfig, + []common.Address{}, + []common.Address{}, + *destCCIP.Common.ARMContract, + tokenAdminReg, + ) + if err != nil { + return fmt.Errorf("deploying offramp shouldn't fail %w", err) + } + err = destCCIP.Common.ChainClient.WaitForEvents() + if err != nil { + return fmt.Errorf("waiting for offramp deployment shouldn't fail %w", err) + } + + // apply offramp updates + _, err = destCCIP.Common.Router.AddOffRamp(destCCIP.OffRamp.EthAddress, destCCIP.SourceChainSelector) 
+ if err != nil { + return fmt.Errorf("setting offramp as fee updater shouldn't fail %w", err) + } + + err = destCCIP.AddRateLimitTokens(sourceCCIP.Common.BridgeTokens, destCCIP.Common.BridgeTokens) + if err != nil { + return fmt.Errorf("setting rate limited tokens shouldn't fail %w", err) + } + err = destCCIP.SyncTokensAndPools(sourceCCIP.Common.BridgeTokens) + if err != nil { + return fmt.Errorf("syncing tokens and pools shouldn't fail %w", err) + } + err = destCCIP.Common.ChainClient.WaitForEvents() + if err != nil { + return fmt.Errorf("waiting for events on destination contract shouldn't fail %w", err) + } + } else { + destCCIP.OffRamp, err = contractDeployer.NewOffRamp(destCCIP.OffRamp.EthAddress) + if err != nil { + return fmt.Errorf("getting new offramp shouldn't fail %w", err) + } + } + if destCCIP.ReceiverDapp == nil { + // ReceiverDapp + destCCIP.ReceiverDapp, err = contractDeployer.DeployReceiverDapp(false) + if err != nil { + return fmt.Errorf("receiverDapp contract should be deployed successfully %w", err) + } + err = destCCIP.Common.ChainClient.WaitForEvents() + if err != nil { + return fmt.Errorf("waiting for events on destination contract deployments %w", err) + } + } else { + destCCIP.ReceiverDapp, err = contractDeployer.NewReceiverDapp(destCCIP.ReceiverDapp.EthAddress) + if err != nil { + return fmt.Errorf("getting new receiverDapp shouldn't fail %w", err) + } + } + return nil +} + +func (destCCIP *DestCCIPModule) CollectBalanceRequirements() []testhelpers.BalanceReq { + var destBalancesReq []testhelpers.BalanceReq + for _, token := range destCCIP.Common.BridgeTokens { + destBalancesReq = append(destBalancesReq, testhelpers.BalanceReq{ + Name: fmt.Sprintf("BridgeToken-%s-Address-%s", token.Address(), destCCIP.ReceiverDapp.Address()), + Addr: destCCIP.ReceiverDapp.EthAddress, + Getter: GetterForLinkToken(token.BalanceOf, destCCIP.ReceiverDapp.Address()), + }) + } + for i, pool := range destCCIP.Common.BridgeTokenPools { + destBalancesReq = append(destBalancesReq, testhelpers.BalanceReq{ + Name: fmt.Sprintf("BridgeToken-%s-TokenPool-%s", destCCIP.Common.BridgeTokens[i].Address(), pool.Address()), + Addr: pool.EthAddress, + Getter: GetterForLinkToken(destCCIP.Common.BridgeTokens[i].BalanceOf, pool.Address()), + }) + } + if destCCIP.Common.FeeToken.Address() != common.HexToAddress("0x0").String() { + destBalancesReq = append(destBalancesReq, testhelpers.BalanceReq{ + Name: fmt.Sprintf("FeeToken-%s-Address-%s", destCCIP.Common.FeeToken.Address(), destCCIP.ReceiverDapp.Address()), + Addr: destCCIP.ReceiverDapp.EthAddress, + Getter: GetterForLinkToken(destCCIP.Common.FeeToken.BalanceOf, destCCIP.ReceiverDapp.Address()), + }) + destBalancesReq = append(destBalancesReq, testhelpers.BalanceReq{ + Name: fmt.Sprintf("FeeToken-%s-OffRamp-%s", destCCIP.Common.FeeToken.Address(), destCCIP.OffRamp.Address()), + Addr: destCCIP.OffRamp.EthAddress, + Getter: GetterForLinkToken(destCCIP.Common.FeeToken.BalanceOf, destCCIP.OffRamp.Address()), + }) + } + return destBalancesReq +} + +func (destCCIP *DestCCIPModule) UpdateBalance( + transferAmount []*big.Int, + noOfReq int64, + balance *BalanceSheet, +) { + if len(transferAmount) > 0 { + for i := range transferAmount { + token := destCCIP.Common.BridgeTokens[0] + if i < len(destCCIP.Common.BridgeTokens) { + token = destCCIP.Common.BridgeTokens[i] + } + name := fmt.Sprintf("BridgeToken-%s-Address-%s", token.Address(), destCCIP.ReceiverDapp.Address()) + balance.Update(name, BalanceItem{ + Address: destCCIP.ReceiverDapp.EthAddress, + Getter: 
GetterForLinkToken(token.BalanceOf, destCCIP.ReceiverDapp.Address()), + AmtToAdd: bigmath.Mul(big.NewInt(noOfReq), transferAmount[i]), + }) + } + for i := range transferAmount { + pool := destCCIP.Common.BridgeTokenPools[0] + index := 0 + if i < len(destCCIP.Common.BridgeTokenPools) { + pool = destCCIP.Common.BridgeTokenPools[i] + index = i + } + name := fmt.Sprintf("BridgeToken-%s-TokenPool-%s", destCCIP.Common.BridgeTokens[index].Address(), pool.Address()) + balance.Update(name, BalanceItem{ + Address: pool.EthAddress, + Getter: GetterForLinkToken(destCCIP.Common.BridgeTokens[index].BalanceOf, pool.Address()), + AmtToSub: bigmath.Mul(big.NewInt(noOfReq), transferAmount[i]), + }) + } + } + if destCCIP.Common.FeeToken.Address() != common.HexToAddress("0x0").String() { + name := fmt.Sprintf("FeeToken-%s-OffRamp-%s", destCCIP.Common.FeeToken.Address(), destCCIP.OffRamp.Address()) + balance.Update(name, BalanceItem{ + Address: destCCIP.OffRamp.EthAddress, + Getter: GetterForLinkToken(destCCIP.Common.FeeToken.BalanceOf, destCCIP.OffRamp.Address()), + }) + + name = fmt.Sprintf("FeeToken-%s-Address-%s", destCCIP.Common.FeeToken.Address(), destCCIP.ReceiverDapp.Address()) + balance.Update(name, BalanceItem{ + Address: destCCIP.ReceiverDapp.EthAddress, + Getter: GetterForLinkToken(destCCIP.Common.FeeToken.BalanceOf, destCCIP.ReceiverDapp.Address()), + }) + } +} + +// AssertNoReportAcceptedEventReceived validates that no ExecutionStateChangedEvent is emitted for mentioned timeRange after lastSeenTimestamp +func (destCCIP *DestCCIPModule) AssertNoReportAcceptedEventReceived(lggr *zerolog.Logger, timeRange time.Duration, lastSeenTimestamp time.Time) error { + ctx, cancel := context.WithTimeout(context.Background(), timeRange) + defer cancel() + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + var eventFoundAfterCursing *time.Time + // verify if CommitReportAccepted is received, it's not generated after provided lastSeenTimestamp + destCCIP.ReportAcceptedWatcher.Range(func(_, value any) bool { + e, exists := value.(*evm_2_evm_offramp.EVM2EVMOffRampExecutionStateChanged) + if exists { + vLogs := e.Raw + hdr, err := destCCIP.Common.ChainClient.HeaderByNumber(ctx, big.NewInt(int64(vLogs.BlockNumber))) + if err != nil { + return true + } + if hdr.Timestamp.After(lastSeenTimestamp) { + eventFoundAfterCursing = pointer.ToTime(hdr.Timestamp) + return false + } + } + return true + }) + if eventFoundAfterCursing != nil { + return fmt.Errorf("CommitReportAccepted Event detected at %s after %s", lastSeenTimestamp, eventFoundAfterCursing.String()) + } + case <-ctx.Done(): + lggr.Info().Msgf("successfully validated that no CommitReportAccepted detected after %s for %s", lastSeenTimestamp, timeRange) + return nil + } + } +} + +// AssertNoExecutionStateChangedEventReceived validates that no ExecutionStateChangedEvent is emitted for mentioned timeRange after lastSeenTimestamp +func (destCCIP *DestCCIPModule) AssertNoExecutionStateChangedEventReceived( + lggr *zerolog.Logger, + timeRange time.Duration, + lastSeenTimestamp time.Time, +) error { + ctx, cancel := context.WithTimeout(context.Background(), timeRange) + defer cancel() + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + lggr.Info().Str("Wait Time", timeRange.String()).Time("Since", lastSeenTimestamp).Msg("Waiting to ensure no ExecutionStateChanged event") + for { + select { + case <-ticker.C: + var eventFoundAfterCursing *time.Time + // verify if ExecutionStateChanged is received, it's not 
generated after provided lastSeenTimestamp + destCCIP.ExecStateChangedWatcher.Range(func(_, value any) bool { + e, exists := value.(*contracts.EVM2EVMOffRampExecutionStateChanged) + if exists { + vLogs := e.LogInfo + hdr, err := destCCIP.Common.ChainClient.HeaderByNumber(ctx, big.NewInt(int64(vLogs.BlockNumber))) + if err != nil { + return true + } + if hdr.Timestamp.After(lastSeenTimestamp) { + eventFoundAfterCursing = pointer.ToTime(hdr.Timestamp) + return false + } + } + return true + }) + if eventFoundAfterCursing != nil { + return fmt.Errorf("ExecutionStateChanged Event detected at %s after %s", lastSeenTimestamp, eventFoundAfterCursing.String()) + } + case <-ctx.Done(): + lggr.Info().Msgf("Successfully validated that no ExecutionStateChanged detected after %s for %s", lastSeenTimestamp, timeRange) + return nil + } + } +} + +func (destCCIP *DestCCIPModule) AssertEventExecutionStateChanged( + lggr *zerolog.Logger, + seqNum uint64, + timeout time.Duration, + timeNow time.Time, + reqStat *testreporters.RequestStat, + execState testhelpers.MessageExecutionState, +) (uint8, error) { + lggr.Info().Int64("seqNum", int64(seqNum)).Str("Timeout", timeout.String()).Msg("Waiting for ExecutionStateChanged event") + timer := time.NewTimer(timeout) + defer timer.Stop() + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + resetTimer := 0 + for { + select { + case <-ticker.C: + value, ok := destCCIP.ExecStateChangedWatcher.Load(seqNum) + if ok && value != nil { + e, exists := value.(*contracts.EVM2EVMOffRampExecutionStateChanged) + // find the type of the value + if exists { + // if the value is processed, delete it from the map + destCCIP.ExecStateChangedWatcher.Delete(seqNum) + vLogs := e.LogInfo + receivedAt := time.Now().UTC() + hdr, err := destCCIP.Common.ChainClient.HeaderByNumber(context.Background(), big.NewInt(int64(vLogs.BlockNumber))) + if err == nil { + receivedAt = hdr.Timestamp + } + receipt, err := destCCIP.Common.ChainClient.GetTxReceipt(vLogs.TxHash) + if err != nil { + lggr.Warn().Msg("Failed to get receipt for ExecStateChanged event") + } + var gasUsed uint64 + if receipt != nil { + gasUsed = receipt.GasUsed + } + if testhelpers.MessageExecutionState(e.State) == execState { + lggr.Info().Int64("seqNum", int64(seqNum)).Uint8("ExecutionState", e.State).Msg("ExecutionStateChanged event received") + reqStat.UpdateState(lggr, seqNum, testreporters.ExecStateChanged, receivedAt.Sub(timeNow), + testreporters.Success, + &testreporters.TransactionStats{ + TxHash: vLogs.TxHash.Hex(), + MsgID: fmt.Sprintf("0x%x", e.MessageId[:]), + GasUsed: gasUsed, + }, + ) + return e.State, nil + } + reqStat.UpdateState(lggr, seqNum, testreporters.ExecStateChanged, time.Since(timeNow), testreporters.Failure, nil) + return e.State, fmt.Errorf("ExecutionStateChanged event state - expected %d actual - %d with data %x for seq num %v for lane %d-->%d", + execState, testhelpers.MessageExecutionState(e.State), e.ReturnData, seqNum, destCCIP.SourceChainId, destCCIP.Common.ChainClient.GetChainID()) + } + } + case <-timer.C: + // if there is connection issue reset the context : + if destCCIP.Common.IsConnectionRestoredRecently != nil && !destCCIP.Common.IsConnectionRestoredRecently.Load() { + // if timer already has been reset 2 times we fail with warning + if resetTimer > 2 { + reqStat.UpdateState(lggr, seqNum, testreporters.ExecStateChanged, time.Since(timeNow), testreporters.Failure, nil) + return 0, fmt.Errorf("possible RPC issues - ExecutionStateChanged event not found for seq num %d for lane %d-->%d", 
+ seqNum, destCCIP.SourceChainId, destCCIP.Common.ChainClient.GetChainID()) + } + timer.Reset(timeout) + resetTimer++ + lggr.Info().Int("count of reset", resetTimer).Msg("Resetting timer to validate ExecutionStateChanged event") + continue + } + reqStat.UpdateState(lggr, seqNum, testreporters.ExecStateChanged, time.Since(timeNow), testreporters.Failure, nil) + return 0, fmt.Errorf("ExecutionStateChanged event not found for seq num %d for lane %d-->%d", + seqNum, destCCIP.SourceChainId, destCCIP.Common.ChainClient.GetChainID()) + } + } +} + +func (destCCIP *DestCCIPModule) AssertEventReportAccepted( + lggr *zerolog.Logger, + seqNum uint64, + timeout time.Duration, + prevEventAt time.Time, + reqStat *testreporters.RequestStat, +) (*contracts.CommitStoreReportAccepted, time.Time, error) { + lggr.Info().Int64("seqNum", int64(seqNum)).Str("Timeout", timeout.String()).Msg("Waiting for ReportAccepted event") + timer := time.NewTimer(timeout) + defer timer.Stop() + resetTimerCount := 0 + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + value, ok := destCCIP.ReportAcceptedWatcher.Load(seqNum) + if ok && value != nil { + reportAccepted, exists := value.(*contracts.CommitStoreReportAccepted) + if exists { + // if the value is processed, delete it from the map + destCCIP.ReportAcceptedWatcher.Delete(seqNum) + receivedAt := time.Now().UTC() + hdr, err := destCCIP.Common.ChainClient.HeaderByNumber(context.Background(), big.NewInt(int64(reportAccepted.LogInfo.BlockNumber))) + if err == nil { + receivedAt = hdr.Timestamp + } + + totalTime := receivedAt.Sub(prevEventAt) + // we cannot calculate the exact time at which block was finalized + // as a result sometimes we get a time which is slightly after the block was marked as finalized + // in such cases we get a negative time difference between finalized and report accepted if the commit + // has happened almost immediately after block being finalized + // in such cases we set the time difference to 1 second + if totalTime < 0 { + lggr.Warn(). + Uint64("seqNum", seqNum). + Time("finalized at", prevEventAt). + Time("ReportAccepted at", receivedAt). 
+ Msg("ReportAccepted event received before finalized timestamp") + totalTime = time.Second + } + receipt, err := destCCIP.Common.ChainClient.GetTxReceipt(reportAccepted.LogInfo.TxHash) + if err != nil { + lggr.Warn().Msg("Failed to get receipt for ReportAccepted event") + } + var gasUsed uint64 + if receipt != nil { + gasUsed = receipt.GasUsed + } + reqStat.UpdateState(lggr, seqNum, testreporters.Commit, totalTime, testreporters.Success, + &testreporters.TransactionStats{ + GasUsed: gasUsed, + TxHash: reportAccepted.LogInfo.TxHash.Hex(), + CommitRoot: fmt.Sprintf("%x", reportAccepted.MerkleRoot), + }) + return reportAccepted, receivedAt, nil + } + } + case <-timer.C: + // if there is connection issue reset the context : + if destCCIP.Common.IsConnectionRestoredRecently != nil && !destCCIP.Common.IsConnectionRestoredRecently.Load() { + if resetTimerCount > 2 { + reqStat.UpdateState(lggr, seqNum, testreporters.Commit, time.Since(prevEventAt), testreporters.Failure, nil) + return nil, time.Now().UTC(), fmt.Errorf("possible RPC issue - ReportAccepted is not found for seq num %d lane %d-->%d", + seqNum, destCCIP.SourceChainId, destCCIP.Common.ChainClient.GetChainID()) + } + timer.Reset(timeout) + resetTimerCount++ + lggr.Info().Int("count of reset", resetTimerCount).Msg("Resetting timer to validate ReportAccepted event") + continue + } + reqStat.UpdateState(lggr, seqNum, testreporters.Commit, time.Since(prevEventAt), testreporters.Failure, nil) + return nil, time.Now().UTC(), fmt.Errorf("ReportAccepted is not found for seq num %d lane %d-->%d", + seqNum, destCCIP.SourceChainId, destCCIP.Common.ChainClient.GetChainID()) + } + } +} + +func (destCCIP *DestCCIPModule) AssertReportBlessed( + lggr *zerolog.Logger, + seqNum uint64, + timeout time.Duration, + CommitReport contracts.CommitStoreReportAccepted, + prevEventAt time.Time, + reqStat *testreporters.RequestStat, +) (time.Time, error) { + if destCCIP.Common.ARM == nil { + lggr.Info(). + Uint64("commit store interval Min", CommitReport.Min). + Uint64("commit store interval Max", CommitReport.Max). + Hex("Root", CommitReport.MerkleRoot[:]). + Msg("Skipping ReportBlessed check for mock ARM") + return prevEventAt, nil + } + lggr.Info(). + Str("Timeout", timeout.String()). + Uint64("commit store interval Min", CommitReport.Min). + Uint64("commit store interval Max", CommitReport.Max). 
+ Msg("Waiting for Report To be blessed") + timer := time.NewTimer(timeout) + defer timer.Stop() + resetTimerCount := 0 + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + var value any + var foundAsRoot, ok bool + value, foundAsRoot = destCCIP.ReportBlessedWatcher.Load(CommitReport.MerkleRoot) + receivedAt := time.Now().UTC() + ok = foundAsRoot + if !foundAsRoot { + // if the value is not found as root, check if it is found as sequence number + value, ok = destCCIP.ReportBlessedBySeqNum.Load(seqNum) + } + if ok && value != nil { + vLogs, exists := value.(*contracts.LogInfo) + if exists { + // if the root is found, set the value for all the sequence numbers in the interval and delete the root from the map + if foundAsRoot { + // set the value for all the sequence numbers in the interval + for i := CommitReport.Min; i <= CommitReport.Max; i++ { + destCCIP.ReportBlessedBySeqNum.Store(i, vLogs) + } + // if the value is processed, delete it from the map + destCCIP.ReportBlessedWatcher.Delete(CommitReport.MerkleRoot) + } else { + // if the value is processed, delete it from the map + destCCIP.ReportBlessedBySeqNum.Delete(seqNum) + } + hdr, err := destCCIP.Common.ChainClient.HeaderByNumber(context.Background(), big.NewInt(int64(vLogs.BlockNumber))) + if err == nil { + receivedAt = hdr.Timestamp + } + receipt, err := destCCIP.Common.ChainClient.GetTxReceipt(vLogs.TxHash) + if err != nil { + lggr.Warn().Err(err).Msg("Failed to get receipt for ReportBlessed event") + } + var gasUsed uint64 + if receipt != nil { + gasUsed = receipt.GasUsed + } + reqStat.UpdateState(lggr, seqNum, testreporters.ReportBlessed, receivedAt.Sub(prevEventAt), testreporters.Success, + &testreporters.TransactionStats{ + GasUsed: gasUsed, + TxHash: vLogs.TxHash.String(), + CommitRoot: fmt.Sprintf("%x", CommitReport.MerkleRoot), + }) + return receivedAt, nil + } + } + case <-timer.C: + // if there is connection issue reset the context : + if destCCIP.Common.IsConnectionRestoredRecently != nil && !destCCIP.Common.IsConnectionRestoredRecently.Load() { + if resetTimerCount > 2 { + reqStat.UpdateState(lggr, seqNum, testreporters.ReportBlessed, time.Since(prevEventAt), testreporters.Failure, nil) + return time.Now().UTC(), fmt.Errorf("possible RPC issue - ReportBlessed is not found for interval min - %d max - %d lane %d-->%d", + CommitReport.Min, CommitReport.Max, destCCIP.SourceChainId, destCCIP.Common.ChainClient.GetChainID()) + } + timer.Reset(timeout) + resetTimerCount++ + lggr.Info().Int("count of reset", resetTimerCount).Msg("Resetting timer to validate ReportBlessed event") + continue + } + reqStat.UpdateState(lggr, seqNum, testreporters.ReportBlessed, time.Since(prevEventAt), testreporters.Failure, nil) + return time.Now().UTC(), fmt.Errorf("ReportBlessed is not found for interval min - %d max - %d lane %d-->%d", + CommitReport.Min, CommitReport.Max, destCCIP.SourceChainId, destCCIP.Common.ChainClient.GetChainID()) + } + } +} + +func (destCCIP *DestCCIPModule) AssertSeqNumberExecuted( + lggr *zerolog.Logger, + seqNumberBefore uint64, + timeout time.Duration, + timeNow time.Time, + reqStat *testreporters.RequestStat, +) error { + lggr.Info().Int64("seqNum", int64(seqNumberBefore)).Str("Timeout", timeout.String()).Msg("Waiting to be processed by commit store") + timer := time.NewTimer(timeout) + defer timer.Stop() + resetTimerCount := 0 + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + if destCCIP.NextSeqNumToCommit.Load() > 
seqNumberBefore { + return nil + } + seqNumberAfter, err := destCCIP.CommitStore.Instance.GetExpectedNextSequenceNumber(nil) + if err != nil { + // if we get error instead of returning error we continue, in case it's a temporary RPC failure . + continue + } + if seqNumberAfter > seqNumberBefore { + destCCIP.NextSeqNumToCommit.Store(seqNumberAfter) + return nil + } + case <-timer.C: + // if there is connection issue reset the context : + if destCCIP.Common.IsConnectionRestoredRecently != nil && !destCCIP.Common.IsConnectionRestoredRecently.Load() { + if resetTimerCount > 2 { + reqStat.UpdateState(lggr, seqNumberBefore, testreporters.Commit, time.Since(timeNow), testreporters.Failure, nil) + return fmt.Errorf("possible RPC issue - sequence number is not increased for seq num %d lane %d-->%d", + seqNumberBefore, destCCIP.SourceChainId, destCCIP.Common.ChainClient.GetChainID()) + } + timer.Reset(timeout) + resetTimerCount++ + lggr.Info().Int("count of reset", resetTimerCount).Msg("Resetting timer to validate seqnumber increase in commit store") + continue + } + reqStat.UpdateState(lggr, seqNumberBefore, testreporters.Commit, time.Since(timeNow), testreporters.Failure, nil) + return fmt.Errorf("sequence number is not increased for seq num %d lane %d-->%d", + seqNumberBefore, destCCIP.SourceChainId, destCCIP.Common.ChainClient.GetChainID()) + } + } +} + +func DefaultDestinationCCIPModule( + logger *zerolog.Logger, + testConf *testconfig.CCIPTestGroupConfig, + chainClient blockchain.EVMClient, + sourceChainId uint64, + sourceChain string, + laneConf *laneconfig.LaneConfig, +) (*DestCCIPModule, error) { + cmn, err := NewCCIPCommonFromConfig( + logger, testConf, chainClient, laneConf, + ) + if err != nil { + return nil, err + } + + sourceChainSelector, err := chainselectors.SelectorFromChainId(sourceChainId) + if err != nil { + return nil, fmt.Errorf("failed to get chain selector for source chain id %d: %w", sourceChainId, err) + } + return &DestCCIPModule{ + Common: cmn, + SourceChainId: sourceChainId, + SourceChainSelector: sourceChainSelector, + SourceNetworkName: sourceChain, + NextSeqNumToCommit: atomic.NewUint64(1), + ReportBlessedWatcher: &sync.Map{}, + ReportBlessedBySeqNum: &sync.Map{}, + ExecStateChangedWatcher: &sync.Map{}, + ReportAcceptedWatcher: &sync.Map{}, + }, nil +} + +type CCIPRequest struct { + ReqNo int64 + txHash string + txConfirmationTimestamp time.Time + RequestStat *testreporters.RequestStat +} + +func CCIPRequestFromTxHash(txHash common.Hash, chainClient blockchain.EVMClient) (CCIPRequest, *types.Receipt, error) { + rcpt, err := chainClient.GetTxReceipt(txHash) + if err != nil { + return CCIPRequest{}, nil, err + } + + hdr, err := chainClient.HeaderByNumber(context.Background(), rcpt.BlockNumber) + if err != nil { + return CCIPRequest{}, nil, err + } + txConfirmationTimestamp := hdr.Timestamp + + return CCIPRequest{ + txHash: txHash.Hex(), + txConfirmationTimestamp: txConfirmationTimestamp, + }, rcpt, nil +} + +type CCIPLane struct { + Test *testing.T + Logger *zerolog.Logger + SourceNetworkName string + DestNetworkName string + SourceChain blockchain.EVMClient + DestChain blockchain.EVMClient + Source *SourceCCIPModule + Dest *DestCCIPModule + NumberOfReq int + Reports *testreporters.CCIPLaneStats + Balance *BalanceSheet + SentReqs map[common.Hash][]CCIPRequest + TotalFee *big.Int // total fee for all the requests. Used for balance validation. 
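	// ValidationTimeout bounds how long each per-phase assertion (send, finalization, commit, blessing, execution)
+	// waits for its event before being reported as a failure; ExecuteManually also falls back to it when no
+	// confirmation timeout option is provided.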
+ ValidationTimeout time.Duration + Context context.Context + SrcNetworkLaneCfg *laneconfig.LaneConfig + DstNetworkLaneCfg *laneconfig.LaneConfig + PriceReportingDisabled bool +} + +func (lane *CCIPLane) TokenPricesConfig() (string, error) { + d := &DynamicPriceGetterConfig{ + AggregatorPrices: make(map[common.Address]AggregatorPriceConfig), + StaticPrices: make(map[common.Address]StaticPriceConfig), + } + // for each token if there is a price aggregator, add it to the aggregator prices + // else add it to the static prices + for _, token := range lane.Dest.Common.BridgeTokens { + err := d.AddPriceConfig(token.Address(), lane.Dest.Common.PriceAggregators, LinkToUSD, lane.DestChain.GetChainID().Uint64()) + if err != nil { + return "", fmt.Errorf("error in adding PriceConfig for source bridge token %s: %w", token.Address(), err) + } + } + err := d.AddPriceConfig(lane.Dest.Common.FeeToken.Address(), lane.Dest.Common.PriceAggregators, LinkToUSD, lane.DestChain.GetChainID().Uint64()) + if err != nil { + return "", fmt.Errorf("error adding PriceConfig for dest Fee token %s: %w", lane.Dest.Common.FeeToken.Address(), err) + } + err = d.AddPriceConfig(lane.Dest.Common.WrappedNative.Hex(), lane.Dest.Common.PriceAggregators, WrappedNativeToUSD, lane.DestChain.GetChainID().Uint64()) + if err != nil { + return "", fmt.Errorf("error in adding PriceConfig for dest WrappedNative token %s: %w", lane.Dest.Common.WrappedNative.Hex(), err) + } + err = d.AddPriceConfig(lane.Source.Common.WrappedNative.Hex(), lane.Source.Common.PriceAggregators, WrappedNativeToUSD, lane.SourceChain.GetChainID().Uint64()) + if err != nil { + return "", fmt.Errorf("error in adding PriceConfig for source WrappedNative token %s: %w", lane.Source.Common.WrappedNative.Hex(), err) + } + return d.String() +} + +func (lane *CCIPLane) SetRemoteChainsOnPool() error { + if lane.Source.Common.ExistingDeployment { + return nil + } + if len(lane.Source.Common.BridgeTokenPools) != len(lane.Dest.Common.BridgeTokenPools) { + return fmt.Errorf("source (%d) and dest (%d) bridge token pools length should be same", + len(lane.Source.Common.BridgeTokenPools), len(lane.Dest.Common.BridgeTokenPools), + ) + } + for i, srcPool := range lane.Source.Common.BridgeTokenPools { + sourceToken := lane.Source.Common.BridgeTokens[i] + destToken := lane.Dest.Common.BridgeTokens[i] + dstPool := lane.Dest.Common.BridgeTokenPools[i] + + err := srcPool.SetRemoteChainOnPool(lane.Source.DestChainSelector, dstPool.EthAddress, destToken.ContractAddress) + if err != nil { + return err + } + err = dstPool.SetRemoteChainOnPool(lane.Dest.SourceChainSelector, srcPool.EthAddress, sourceToken.ContractAddress) + if err != nil { + return err + } + } + return nil +} + +// OptimizeStorage sets nil to various elements of CCIPLane which are only used +// during lane set up and not used for rest of the test duration +// this is called mainly by load test to keep the memory usage minimum for high number of lanes +func (lane *CCIPLane) OptimizeStorage() { + lane.Source.Common.FreeUpUnusedSpace() + lane.Dest.Common.FreeUpUnusedSpace() + lane.DstNetworkLaneCfg = nil + lane.SrcNetworkLaneCfg = nil + // close all header subscriptions for dest chains + queuedEvents := lane.Dest.Common.ChainClient.GetHeaderSubscriptions() + for subName := range queuedEvents { + lane.Dest.Common.ChainClient.DeleteHeaderEventSubscription(subName) + } + // close all header subscriptions for source chains except for finalized header + queuedEvents = lane.Source.Common.ChainClient.GetHeaderSubscriptions() + for 
subName := range queuedEvents { + if subName == blockchain.FinalizedHeaderKey { + continue + } + lane.Source.Common.ChainClient.DeleteHeaderEventSubscription(subName) + } +} + +func (lane *CCIPLane) UpdateLaneConfig() { + lane.Source.Common.WriteLaneConfig(lane.SrcNetworkLaneCfg) + lane.SrcNetworkLaneCfg.SrcContractsMu.Lock() + lane.SrcNetworkLaneCfg.SrcContracts[lane.Source.DestNetworkName] = laneconfig.SourceContracts{ + OnRamp: lane.Source.OnRamp.Address(), + DeployedAt: lane.Source.SrcStartBlock, + } + lane.SrcNetworkLaneCfg.SrcContractsMu.Unlock() + lane.Dest.Common.WriteLaneConfig(lane.DstNetworkLaneCfg) + lane.DstNetworkLaneCfg.DestContractsMu.Lock() + lane.DstNetworkLaneCfg.DestContracts[lane.Dest.SourceNetworkName] = laneconfig.DestContracts{ + OffRamp: lane.Dest.OffRamp.Address(), + CommitStore: lane.Dest.CommitStore.Address(), + ReceiverDapp: lane.Dest.ReceiverDapp.Address(), + } + lane.DstNetworkLaneCfg.DestContractsMu.Unlock() +} + +func (lane *CCIPLane) RecordStateBeforeTransfer() { + // collect the balance assert.ment to verify balances after transfer + bal, err := testhelpers.GetBalances(lane.Test, lane.Source.CollectBalanceRequirements()) + require.NoError(lane.Test, err, "fetching source balance") + lane.Balance.RecordBalance(bal) + + bal, err = testhelpers.GetBalances(lane.Test, lane.Dest.CollectBalanceRequirements()) + require.NoError(lane.Test, err, "fetching dest balance") + lane.Balance.RecordBalance(bal) + + // save the current block numbers to use in various filter log requests + lane.TotalFee = big.NewInt(0) + lane.NumberOfReq = 0 + lane.SentReqs = make(map[common.Hash][]CCIPRequest) +} + +func (lane *CCIPLane) AddToSentReqs(txHash common.Hash, reqStats []*testreporters.RequestStat) (*types.Receipt, error) { + request, rcpt, err := CCIPRequestFromTxHash(txHash, lane.Source.Common.ChainClient) + if err != nil { + for _, stat := range reqStats { + stat.UpdateState(lane.Logger, 0, testreporters.TX, 0, testreporters.Failure, nil) + } + return rcpt, fmt.Errorf("could not get request from tx hash %s: %w", txHash.Hex(), err) + } + var allRequests []CCIPRequest + for _, stat := range reqStats { + allRequests = append(allRequests, CCIPRequest{ + ReqNo: stat.ReqNo, + txHash: rcpt.TxHash.Hex(), + txConfirmationTimestamp: request.txConfirmationTimestamp, + RequestStat: stat, + }) + lane.NumberOfReq++ + } + lane.SentReqs[rcpt.TxHash] = allRequests + return rcpt, nil +} + +// Multicall sends multiple ccip-send requests in a single transaction +// It will create one transaction for all the requests and will wait for the confirmation +func (lane *CCIPLane) Multicall(noOfRequests int, multiSendAddr common.Address) error { + var ccipMultipleMsg []contracts.CCIPMsgData + feeToken := common.HexToAddress(lane.Source.Common.FeeToken.Address()) + genericMsg, err := lane.Source.CCIPMsg(lane.Dest.ReceiverDapp.EthAddress, big.NewInt(DefaultDestinationGasLimit)) + if err != nil { + return fmt.Errorf("failed to form the ccip message: %w", err) + } + destChainSelector, err := chainselectors.SelectorFromChainId(lane.Source.DestinationChainId) + if err != nil { + return fmt.Errorf("failed getting the chain selector: %w", err) + } + var reqStats []*testreporters.RequestStat + var txstats []*testreporters.TransactionStats + for i := 1; i <= noOfRequests; i++ { + // form the message for transfer + msg := genericMsg + msg.Data = []byte(fmt.Sprintf("msg %d", i)) + sendData := contracts.CCIPMsgData{ + Msg: msg, + RouterAddr: lane.Source.Common.Router.EthAddress, + ChainSelector: destChainSelector, + 
} + + fee, err := lane.Source.Common.Router.GetFee(destChainSelector, msg) + if err != nil { + reason, _ := blockchain.RPCErrorFromError(err) + if reason != "" { + return fmt.Errorf("failed getting the fee: %s", reason) + } + return fmt.Errorf("failed getting the fee: %w", err) + } + log.Info().Str("fee", fee.String()).Msg("calculated fee") + sendData.Fee = fee + lane.TotalFee = new(big.Int).Add(lane.TotalFee, fee) + ccipMultipleMsg = append(ccipMultipleMsg, sendData) + // if token transfer is required, transfer the token amount to multisend + for j, amount := range lane.Source.TransferAmount { + // if length of sourceCCIP.TransferAmount is more than available bridge token use first bridge token + token := lane.Source.Common.BridgeTokens[0] + if j < len(lane.Source.Common.BridgeTokens) { + token = lane.Source.Common.BridgeTokens[j] + } + err = token.Transfer(lane.SourceChain.GetDefaultWallet(), multiSendAddr.Hex(), amount) + if err != nil { + return err + } + } + stat := testreporters.NewCCIPRequestStats(int64(lane.NumberOfReq+i), lane.SourceNetworkName, lane.DestNetworkName) + txstats = append(txstats, &testreporters.TransactionStats{ + Fee: fee.String(), + NoOfTokensSent: len(msg.TokenAmounts), + MessageBytesLength: int64(len(msg.Data)), + }) + reqStats = append(reqStats, stat) + } + isNative := true + // transfer the fee amount to multisend + if feeToken != (common.Address{}) { + isNative = false + err = lane.Source.Common.FeeToken.Transfer(multiSendAddr.Hex(), lane.TotalFee) + if err != nil { + return err + } + } + + tx, err := contracts.MultiCallCCIP(lane.Source.Common.ChainClient, multiSendAddr.Hex(), ccipMultipleMsg, isNative) + if err != nil { + // update the stats as failure for all the requests in the multicall tx + for _, stat := range reqStats { + stat.UpdateState(lane.Logger, 0, testreporters.TX, 0, testreporters.Failure, nil) + } + return fmt.Errorf("failed to send the multicall: %w", err) + } + rcpt, err := lane.AddToSentReqs(tx.Hash(), reqStats) + if err != nil { + return err + } + var gasUsed uint64 + if rcpt != nil { + gasUsed = rcpt.GasUsed + } + // update the stats for all the requests in the multicall tx + for i, stat := range reqStats { + txstats[i].GasUsed = gasUsed + txstats[i].TxHash = tx.Hash().Hex() + stat.UpdateState(lane.Logger, 0, testreporters.TX, 0, testreporters.Success, txstats[i]) + } + return nil +} + +// SendRequests sends individual ccip-send requests in different transactions +// It will create noOfRequests transactions +func (lane *CCIPLane) SendRequests(noOfRequests int, gasLimit *big.Int) error { + for i := 1; i <= noOfRequests; i++ { + stat := testreporters.NewCCIPRequestStats(int64(lane.NumberOfReq+i), lane.SourceNetworkName, lane.DestNetworkName) + txHash, txConfirmationDur, fee, err := lane.Source.SendRequest( + lane.Dest.ReceiverDapp.EthAddress, + gasLimit, + ) + if err != nil { + stat.UpdateState(lane.Logger, 0, testreporters.TX, txConfirmationDur, testreporters.Failure, nil) + return fmt.Errorf("could not send request: %w", err) + } + err = lane.Source.Common.ChainClient.WaitForEvents() + if err != nil { + stat.UpdateState(lane.Logger, 0, testreporters.TX, txConfirmationDur, testreporters.Failure, nil) + return fmt.Errorf("could not send request: %w", err) + } + + noOfTokens := 0 + for _, tokenAmount := range lane.Source.TransferAmount { // Only count tokens that are actually sent + if tokenAmount != nil && tokenAmount.Cmp(big.NewInt(0)) > 0 { + noOfTokens++ + } + } + _, err = lane.AddToSentReqs(txHash, []*testreporters.RequestStat{stat}) + 
if err != nil { + return err + } + stat.UpdateState(lane.Logger, 0, testreporters.TX, txConfirmationDur, testreporters.Success, nil) + lane.TotalFee = bigmath.Add(lane.TotalFee, fee) + } + + return nil +} + +// manualExecutionOpts modify how ExecuteManually behaves +type manualExecutionOpts struct { + timeout time.Duration +} + +// ManualExecutionOption is a function that modifies ExecuteManually behavior +type ManualExecutionOption func(*manualExecutionOpts) + +// WithConfirmationTimeout sets a custom timeout for waiting for the confirmation of the manual execution +func WithConfirmationTimeout(timeout time.Duration) ManualExecutionOption { + return func(opts *manualExecutionOpts) { + opts.timeout = timeout + } +} + +// ExecuteManually attempts to execute pending CCIP transactions manually. +// This is necessary in situations where Smart Execution window for that message is over and Offchain plugin +// will not attempt to execute the message.In such situation any further message from same sender will not be executed until +// the blocking message is executed by the OffRamp. +// More info: https://docs.chain.link/ccip/concepts/manual-execution#manual-execution +func (lane *CCIPLane) ExecuteManually(options ...ManualExecutionOption) error { + var opts manualExecutionOpts + for _, opt := range options { + if opt != nil { + opt(&opts) + } + } + if opts.timeout == 0 { + opts.timeout = lane.ValidationTimeout + } + + onRampABI, err := abi.JSON(strings.NewReader(evm_2_evm_onramp.EVM2EVMOnRampABI)) + if err != nil { + return err + } + sendReqTopic := onRampABI.Events["CCIPSendRequested"].ID + for txHash, req := range lane.SentReqs { + for _, ccipReq := range req { + lane.Logger.Info().Str("ccip-send", txHash.Hex()).Msg("Executing request manually") + seqNum := ccipReq.RequestStat.SeqNum + sendReqReceipt, err := lane.Source.Common.ChainClient.GetTxReceipt(txHash) + if err != nil { + return err + } + if sendReqReceipt == nil { + return fmt.Errorf("could not find the receipt for tx %s", txHash.Hex()) + } + commitStat, ok := ccipReq.RequestStat.StatusByPhase[testreporters.Commit] + if !ok { + return fmt.Errorf("could not find the commit phase in the request stats, reqNo %d", ccipReq.RequestStat.ReqNo) + } + commitTx := commitStat.SendTransactionStats.TxHash + commitReceipt, err := lane.DestChain.GetTxReceipt(common.HexToHash(commitTx)) + if err != nil { + return err + } + var logIndex uint + // find the send request log index sendReqReceipt + for _, sendReqLog := range sendReqReceipt.Logs { + if sendReqLog.Topics[0] == sendReqTopic { + logSeqNum, err := lane.Source.OnRamp.Instance.ParseCCIPSendRequested(*sendReqLog) + if err != nil { + return err + } + if logSeqNum == seqNum { + logIndex = sendReqLog.Index + } + } + } + destChainSelector, err := chainselectors.SelectorFromChainId(lane.DestChain.GetChainID().Uint64()) + if err != nil { + return err + } + sourceChainSelector, err := chainselectors.SelectorFromChainId(lane.SourceChain.GetChainID().Uint64()) + if err != nil { + return err + } + // Calling `TransactionOpts` will automatically increase the nonce, so if this fails, any other destination transactions will time out + destUser, err := lane.DestChain.TransactionOpts(lane.DestChain.GetDefaultWallet()) + if err != nil { + return err + } + args := testhelpers.ManualExecArgs{ + SourceChainID: sourceChainSelector, + DestChainID: destChainSelector, + DestUser: destUser, + SourceChain: lane.SourceChain.Backend(), + DestChain: lane.DestChain.Backend(), + SourceStartBlock: sendReqReceipt.BlockNumber, + 
DestStartBlock: commitReceipt.BlockNumber.Uint64(), + SendReqTxHash: txHash.Hex(), + CommitStore: lane.Dest.CommitStore.Address(), + OnRamp: lane.Source.OnRamp.Address(), + OffRamp: lane.Dest.OffRamp.Address(), + SendReqLogIndex: logIndex, + GasLimit: big.NewInt(DefaultDestinationGasLimit), + } + timeNow := time.Now().UTC() + tx, err := args.ExecuteManually() + if err != nil { + return fmt.Errorf("could not execute manually: %w seqNum %d", err, seqNum) + } + + ctx, cancel := context.WithTimeout(context.Background(), opts.timeout) + rec, err := bind.WaitMined(ctx, lane.DestChain.DeployBackend(), tx) + if err != nil { + cancel() + return fmt.Errorf("could not get receipt: %w seqNum %d", err, seqNum) + } + cancel() + if rec.Status != 1 { + return fmt.Errorf( + "manual execution failed for seqNum %d with receipt status %d, use the revert-reason script on this transaction hash '%s' and this sender address '%s'", + seqNum, rec.Status, tx.Hash().Hex(), destUser.From.Hex(), + ) + } + lane.Logger.Info().Uint64("seqNum", seqNum).Msg("Manual Execution completed") + _, err = lane.Dest.AssertEventExecutionStateChanged(lane.Logger, seqNum, opts.timeout, + timeNow, ccipReq.RequestStat, testhelpers.ExecutionStateSuccess, + ) + if err != nil { + return fmt.Errorf("could not validate ExecutionStateChanged event: %w", err) + } + } + } + return nil +} + +// validationOptions are used in the ValidateRequests function to specify which phase is expected to fail and how +type validationOptions struct { + phaseExpectedToFail testreporters.Phase // the phase expected to fail + expectedErrorMessage string // if provided, we're looking for a specific error message + timeout time.Duration // timeout for the validation +} + +// ValidationOptionFunc is a function that can be passed to ValidateRequests to specify which phase is expected to fail +type ValidationOptionFunc func(opts *validationOptions) + +// PhaseSpecificValidationOptionFunc can specify how exactly you want a phase to fail +type PhaseSpecificValidationOptionFunc func(*validationOptions) + +// WithErrorMessage specifies the expected error message for the phase that is expected to fail. +func WithErrorMessage(expectedErrorMessage string) PhaseSpecificValidationOptionFunc { + return func(opts *validationOptions) { + opts.expectedErrorMessage = expectedErrorMessage + } +} + +// WithTimeout specifies a custom timeout for validating that the phase failed. +func WithTimeout(timeout time.Duration) PhaseSpecificValidationOptionFunc { + return func(opts *validationOptions) { + opts.timeout = timeout + } +} + +// ExpectPhaseToFail specifies that a specific phase is expected to fail. +// You can optionally provide an expected error message, if you don't have one in mind, just pass an empty string. +// shouldExist is used to specify whether the phase should exist or not, which is only applicable to the `ExecStateChanged` phase. +// If you expect the `ExecStateChanged` events to be there, but in a "failed" state, set this to true. +// It will otherwise be ignored. +func ExpectPhaseToFail(phase testreporters.Phase, phaseSpecificOptions ...PhaseSpecificValidationOptionFunc) ValidationOptionFunc { + return func(opts *validationOptions) { + opts.phaseExpectedToFail = phase + for _, f := range phaseSpecificOptions { + if f != nil { + f(opts) + } + } + } +} + +// ValidateRequests validates all sent request events. +// If you expect a specific phase to fail, you can pass a validationOptionFunc to specify exactly which one. +// If not, just pass in nil. 
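+// For example, to require that the Commit phase fails within one minute:
+//
+//	lane.ValidateRequests(ExpectPhaseToFail(testreporters.Commit, WithTimeout(time.Minute)))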
+func (lane *CCIPLane) ValidateRequests(validationOptionFuncs ...ValidationOptionFunc) { + var opts validationOptions + for _, f := range validationOptionFuncs { + if f != nil { + f(&opts) + } + } + for txHash, ccipReqs := range lane.SentReqs { + require.Greater(lane.Test, len(ccipReqs), 0, "no ccip requests found for tx hash") + require.NoError(lane.Test, lane.ValidateRequestByTxHash(txHash, opts), "validating request events by tx hash") + } + if len(validationOptionFuncs) > 0 { + return + } + // Asserting balances reliably work only for simulated private chains. The testnet contract balances might get updated by other transactions + // verify the fee amount is deducted from sender, added to receiver token balances and + if len(lane.Source.TransferAmount) > 0 && len(lane.Source.Common.BridgeTokens) > 0 { + lane.Source.UpdateBalance(int64(lane.NumberOfReq), lane.TotalFee, lane.Balance) + lane.Dest.UpdateBalance(lane.Source.TransferAmount, int64(lane.NumberOfReq), lane.Balance) + } +} + +// ValidateRequestByTxHash validates the request events by tx hash. +// If a phaseExpectedToFail is provided, it will return no error if that phase fails, but will error if it succeeds. +func (lane *CCIPLane) ValidateRequestByTxHash(txHash common.Hash, opts validationOptions) error { + var ( + reqStats []*testreporters.RequestStat + timeout = lane.ValidationTimeout + ccipRequests = lane.SentReqs[txHash] + txConfirmation = ccipRequests[0].txConfirmationTimestamp + ) + require.Greater(lane.Test, len(ccipRequests), 0, "no ccip requests found for tx hash") + + defer func() { + for _, req := range ccipRequests { + lane.Reports.UpdatePhaseStatsForReq(req.RequestStat) + } + }() + for _, req := range ccipRequests { + reqStats = append(reqStats, req.RequestStat) + } + + if opts.phaseExpectedToFail == testreporters.CCIPSendRe && opts.timeout != 0 { + timeout = opts.timeout + } + msgLogs, ccipSendReqGenAt, err := lane.Source.AssertEventCCIPSendRequested( + lane.Logger, txHash.Hex(), timeout, txConfirmation, reqStats, + ) + if shouldReturn, phaseErr := isPhaseValid(lane.Logger, testreporters.CCIPSendRe, opts, err); shouldReturn { + return phaseErr + } + + sourceLogFinalizedAt, _, err := lane.Source.AssertSendRequestedLogFinalized(lane.Logger, txHash, msgLogs, ccipSendReqGenAt, reqStats) + if shouldReturn, phaseErr := isPhaseValid(lane.Logger, testreporters.SourceLogFinalized, opts, err); shouldReturn { + return phaseErr + } + for _, msgLog := range msgLogs { + seqNumber := msgLog.SequenceNumber + lane.Logger = ptr.Ptr(lane.Logger.With().Str("msgId", fmt.Sprintf("0x%x", msgLog.MessageId[:])).Logger()) + var reqStat *testreporters.RequestStat + for _, stat := range reqStats { + if stat.SeqNum == seqNumber { + reqStat = stat + break + } + } + if reqStat == nil { + return fmt.Errorf("could not find request stat for seq number %d", seqNumber) + } + + if opts.phaseExpectedToFail == testreporters.Commit && opts.timeout != 0 { + timeout = opts.timeout + } + err = lane.Dest.AssertSeqNumberExecuted(lane.Logger, seqNumber, timeout, sourceLogFinalizedAt, reqStat) + if shouldReturn, phaseErr := isPhaseValid(lane.Logger, testreporters.Commit, opts, err); shouldReturn { + return phaseErr + } + + // Verify whether commitStore has accepted the report + commitReport, reportAcceptedAt, err := lane.Dest.AssertEventReportAccepted( + lane.Logger, seqNumber, timeout, sourceLogFinalizedAt, reqStat, + ) + if shouldReturn, phaseErr := isPhaseValid(lane.Logger, testreporters.Commit, opts, err); shouldReturn { + return phaseErr + } + + if 
opts.phaseExpectedToFail == testreporters.ReportBlessed && opts.timeout != 0 { + timeout = opts.timeout + } + reportBlessedAt, err := lane.Dest.AssertReportBlessed(lane.Logger, seqNumber, timeout, *commitReport, reportAcceptedAt, reqStat) + if shouldReturn, phaseErr := isPhaseValid(lane.Logger, testreporters.ReportBlessed, opts, err); shouldReturn { + return phaseErr + } + + if opts.phaseExpectedToFail == testreporters.ExecStateChanged && opts.timeout != 0 { + timeout = opts.timeout + } + // Verify whether the execution state is changed and the transfer is successful + _, err = lane.Dest.AssertEventExecutionStateChanged( + lane.Logger, seqNumber, + timeout, + reportBlessedAt, + reqStat, + testhelpers.ExecutionStateSuccess, + ) + if shouldReturn, phaseErr := isPhaseValid(lane.Logger, testreporters.ExecStateChanged, opts, err); shouldReturn { + return phaseErr + } + } + return nil +} + +// isPhaseValid checks if the phase is in a valid state or not given expectations. +// If `shouldComplete` is true, it means that the phase validation is meant to end and we should return from the calling function. +func isPhaseValid( + logger *zerolog.Logger, + currentPhase testreporters.Phase, + opts validationOptions, + err error, +) (shouldComplete bool, validationError error) { + // If no phase is expected to fail or the current phase is not the one expected to fail, we just return what we were given + if opts.phaseExpectedToFail == "" || currentPhase != opts.phaseExpectedToFail { + return err != nil, err + } + if err == nil && currentPhase == opts.phaseExpectedToFail { + return true, fmt.Errorf("expected phase '%s' to fail, but it passed", opts.phaseExpectedToFail) + } + logmsg := logger.Info().Str("Failed with Error", err.Error()).Str("Phase", string(currentPhase)) + if opts.expectedErrorMessage != "" { + if !strings.Contains(err.Error(), opts.expectedErrorMessage) { + return true, fmt.Errorf("expected phase '%s' to fail with error message '%s' but got error '%s'", currentPhase, opts.expectedErrorMessage, err.Error()) + } + logmsg.Str("Expected Error Message", opts.expectedErrorMessage) + } + logmsg.Msg("Expected phase to fail and it did") + return true, nil +} + +// DisableAllRateLimiting disables all rate limiting for the lane, including ARL and token pool rate limits +func (lane *CCIPLane) DisableAllRateLimiting() error { + src := lane.Source + dest := lane.Dest + + // Tell OnRamp to not include any tokens in ARL + err := src.SetAllTokenTransferFeeConfigs(false) + if err != nil { + return fmt.Errorf("error disabling token transfer fee config for OnRamp: %w", err) + } + err = dest.RemoveAllRateLimitTokens(context.Background()) + if err != nil { + return fmt.Errorf("error removing rate limited tokens for OffRamp: %w", err) + } + // Disable ARL for OnRamp and OffRamp + err = src.OnRamp.SetRateLimit(evm_2_evm_onramp.RateLimiterConfig{ + IsEnabled: false, + Capacity: big.NewInt(0), + Rate: big.NewInt(0), + }) + if err != nil { + return fmt.Errorf("error disabling rate limit for source onramp: %w", err) + } + err = dest.OffRamp.SetRateLimit(contracts.RateLimiterConfig{ + IsEnabled: false, + Capacity: big.NewInt(0), + Rate: big.NewInt(0), + }) + if err != nil { + return fmt.Errorf("error disabling rate limit for destination offramp: %w", err) + } + // Disable individual token pool rate limits + for i, tokenPool := range src.Common.BridgeTokenPools { + err = tokenPool.SetRemoteChainRateLimits(src.DestChainSelector, token_pool.RateLimiterConfig{ + IsEnabled: false, + Capacity: big.NewInt(0), + Rate: 
big.NewInt(0), + }) + if err != nil { + return fmt.Errorf("error disabling rate limit for token pool %d: %w", i, err) + } + } + for i, tokenPool := range dest.Common.BridgeTokenPools { + err = tokenPool.SetRemoteChainRateLimits(dest.SourceChainSelector, token_pool.RateLimiterConfig{ + IsEnabled: false, + Capacity: big.NewInt(0), + Rate: big.NewInt(0), + }) + if err != nil { + return fmt.Errorf("error disabling rate limit for token pool %d: %w", i, err) + } + } + err = src.Common.ChainClient.WaitForEvents() + if err != nil { + return fmt.Errorf("error waiting for source chain events: %w", err) + } + err = dest.Common.ChainClient.WaitForEvents() + if err != nil { + return fmt.Errorf("error waiting for destination chain events: %w", err) + } + lane.Logger.Info().Msg("Disabled all rate limiting") + return nil +} + +func (lane *CCIPLane) StartEventWatchers() error { + lane.Logger.Info().Msg("Starting event watchers") + if lane.Source.Common.ChainClient.GetNetworkConfig().FinalityDepth == 0 { + err := lane.Source.Common.ChainClient.PollFinality() + if err != nil { + return err + } + } + + go lane.Source.Common.PollRPCConnection(lane.Context, lane.Logger) + go lane.Dest.Common.PollRPCConnection(lane.Context, lane.Logger) + + sendReqEventLatest := make(chan *evm_2_evm_onramp.EVM2EVMOnRampCCIPSendRequested) + senReqSub := event.Resubscribe(DefaultResubscriptionTimeout, func(_ context.Context) (event.Subscription, error) { + sub, err := lane.Source.OnRamp.WatchCCIPSendRequested(nil, sendReqEventLatest) + if err != nil { + log.Error().Err(err).Msg("error in subscribing to CCIPSendRequested event") + } + return sub, err + }) + if senReqSub == nil { + return fmt.Errorf("failed to subscribe to CCIPSendRequested event") + } + go func(sub event.Subscription) { + defer sub.Unsubscribe() + for { + select { + case e := <-sendReqEventLatest: + lane.Logger.Info().Msgf("CCIPSendRequested event received for seq number %d", e.Message.SequenceNumber) + eventsForTx, ok := lane.Source.CCIPSendRequestedWatcher.Load(e.Raw.TxHash.Hex()) + if ok { + lane.Source.CCIPSendRequestedWatcher.Store(e.Raw.TxHash.Hex(), append(eventsForTx.([]*contracts.SendReqEventData), + &contracts.SendReqEventData{ + MessageId: e.Message.MessageId, + SequenceNumber: e.Message.SequenceNumber, + DataLength: len(e.Message.Data), + NoOfTokens: len(e.Message.TokenAmounts), + LogInfo: contracts.LogInfo{ + BlockNumber: e.Raw.BlockNumber, + TxHash: e.Raw.TxHash, + }, + Fee: e.Message.FeeTokenAmount, + })) + } else { + lane.Source.CCIPSendRequestedWatcher.Store(e.Raw.TxHash.Hex(), []*contracts.SendReqEventData{ + { + MessageId: e.Message.MessageId, + SequenceNumber: e.Message.SequenceNumber, + DataLength: len(e.Message.Data), + NoOfTokens: len(e.Message.TokenAmounts), + LogInfo: contracts.LogInfo{ + BlockNumber: e.Raw.BlockNumber, + TxHash: e.Raw.TxHash, + }, + Fee: e.Message.FeeTokenAmount, + }, + }) + } + + lane.Source.CCIPSendRequestedWatcher = testutils.DeleteNilEntriesFromMap(lane.Source.CCIPSendRequestedWatcher) + case <-lane.Context.Done(): + return + } + } + }(senReqSub) + + reportAcceptedEvent := make(chan *commit_store.CommitStoreReportAccepted) + reportAccSub := event.Resubscribe(DefaultResubscriptionTimeout, func(_ context.Context) (event.Subscription, error) { + sub, err := lane.Dest.CommitStore.WatchReportAccepted(nil, reportAcceptedEvent) + if err != nil { + log.Error().Err(err).Msg("error in subscribing to ReportAccepted event") + } + return sub, err + }) + if reportAccSub == nil { + return fmt.Errorf("failed to subscribe to 
ReportAccepted event") + } + go func(sub event.Subscription) { + defer sub.Unsubscribe() + for { + select { + case e := <-reportAcceptedEvent: + lane.Logger.Info().Interface("Interval", e.Report.Interval).Msgf("ReportAccepted event received") + for i := e.Report.Interval.Min; i <= e.Report.Interval.Max; i++ { + lane.Dest.ReportAcceptedWatcher.Store(i, &contracts.CommitStoreReportAccepted{ + Min: e.Report.Interval.Min, + Max: e.Report.Interval.Max, + MerkleRoot: e.Report.MerkleRoot, + LogInfo: contracts.LogInfo{ + BlockNumber: e.Raw.BlockNumber, + TxHash: e.Raw.TxHash, + }, + }) + } + lane.Dest.ReportAcceptedWatcher = testutils.DeleteNilEntriesFromMap(lane.Dest.ReportAcceptedWatcher) + case <-lane.Context.Done(): + return + } + } + }(reportAccSub) + + if lane.Dest.Common.ARM != nil { + reportBlessedEvent := make(chan *arm_contract.ARMContractTaggedRootBlessed) + blessedSub := event.Resubscribe(DefaultResubscriptionTimeout, func(_ context.Context) (event.Subscription, error) { + sub, err := lane.Dest.Common.ARM.Instance.WatchTaggedRootBlessed(nil, reportBlessedEvent, nil) + if err != nil { + log.Error().Err(err).Msg("error in subscribing to TaggedRootBlessed event") + } + return sub, err + }) + if blessedSub == nil { + return fmt.Errorf("failed to subscribe to TaggedRootBlessed event") + } + go func(sub event.Subscription) { + defer sub.Unsubscribe() + for { + select { + case e := <-reportBlessedEvent: + lane.Logger.Info().Msgf("TaggedRootBlessed event received for root %x", e.TaggedRoot.Root) + if e.TaggedRoot.CommitStore == lane.Dest.CommitStore.EthAddress { + lane.Dest.ReportBlessedWatcher.Store(e.TaggedRoot.Root, &contracts.LogInfo{ + BlockNumber: e.Raw.BlockNumber, + TxHash: e.Raw.TxHash, + }) + } + lane.Dest.ReportBlessedWatcher = testutils.DeleteNilEntriesFromMap(lane.Dest.ReportBlessedWatcher) + case <-lane.Context.Done(): + return + } + } + }(blessedSub) + } + + execStateChangedEventLatest := make(chan *evm_2_evm_offramp.EVM2EVMOffRampExecutionStateChanged) + execSub := event.Resubscribe(DefaultResubscriptionTimeout, func(_ context.Context) (event.Subscription, error) { + sub, err := lane.Dest.OffRamp.WatchExecutionStateChanged(nil, execStateChangedEventLatest, nil, nil) + if err != nil { + log.Error().Err(err).Msg("error in subscribing to ExecutionStateChanged event") + } + return sub, err + }) + if execSub == nil { + return fmt.Errorf("failed to subscribe to ExecutionStateChanged event") + } + go func(sub event.Subscription) { + defer sub.Unsubscribe() + for { + select { + case e := <-execStateChangedEventLatest: + lane.Logger.Info().Msgf("Execution state changed event received for seq number %d", e.SequenceNumber) + lane.Dest.ExecStateChangedWatcher.Store(e.SequenceNumber, &contracts.EVM2EVMOffRampExecutionStateChanged{ + SequenceNumber: e.SequenceNumber, + MessageId: e.MessageId, + State: e.State, + ReturnData: e.ReturnData, + LogInfo: contracts.LogInfo{ + BlockNumber: e.Raw.BlockNumber, + TxHash: e.Raw.TxHash, + }, + }) + lane.Dest.ExecStateChangedWatcher = testutils.DeleteNilEntriesFromMap(lane.Dest.ExecStateChangedWatcher) + case <-lane.Context.Done(): + return + } + } + }(execSub) + return nil +} + +func (lane *CCIPLane) CleanUp(clearFees bool) error { + lane.Logger.Info().Msg("Cleaning up lane") + if lane.Source.Common.ChainClient.GetNetworkConfig().FinalityDepth == 0 { + lane.Source.Common.ChainClient.CancelFinalityPolling() + } + // recover fees from onRamp contract + if clearFees && !lane.Source.Common.ChainClient.NetworkSimulated() { + err := 
lane.Source.PayCCIPFeeToOwnerAddress() + if err != nil { + return err + } + } + err := lane.Dest.Common.ChainClient.Close() + if err != nil { + return err + } + return lane.Source.Common.ChainClient.Close() +} + +// DeployNewCCIPLane sets up a lane and initiates lane.Source and lane.Destination +// If configureCLNodes is true it sets up jobs and contract config for the lane +func (lane *CCIPLane) DeployNewCCIPLane( + setUpCtx context.Context, + env *CCIPTestEnv, + testConf *testconfig.CCIPTestGroupConfig, + bootstrapAdded *atomic.Bool, + jobErrGroup *errgroup.Group, +) error { + var ( + err error + sourceChainClient = lane.SourceChain + destChainClient = lane.DestChain + srcConf = lane.SrcNetworkLaneCfg + destConf = lane.DstNetworkLaneCfg + commitAndExecOnSameDON = pointer.GetBool(testConf.CommitAndExecuteOnSameDON) + withPipeline = pointer.GetBool(testConf.TokenConfig.WithPipeline) + configureCLNodes = !pointer.GetBool(testConf.ExistingDeployment) + ) + + lane.Source, err = DefaultSourceCCIPModule( + lane.Logger, + testConf, + sourceChainClient, destChainClient.GetChainID().Uint64(), + destChainClient.GetNetworkName(), + srcConf, + ) + if err != nil { + return fmt.Errorf("failed to create source module: %w", err) + } + lane.Dest, err = DefaultDestinationCCIPModule( + lane.Logger, testConf, + destChainClient, sourceChainClient.GetChainID().Uint64(), + sourceChainClient.GetNetworkName(), destConf, + ) + if err != nil { + return fmt.Errorf("failed to create destination module: %w", err) + } + + // deploy all source contracts + err = lane.Source.DeployContracts(srcConf) + if err != nil { + return fmt.Errorf("failed to deploy source contracts: %w", err) + } + // deploy all destination contracts + err = lane.Dest.DeployContracts(*lane.Source, destConf) + if err != nil { + return fmt.Errorf("failed to deploy destination contracts: %w", err) + } + // if it's a new USDC deployment, sync the USDC domain + err = lane.Source.Common.SyncUSDCDomain(lane.Dest.Common.TokenTransmitter, lane.Dest.Common.BridgeTokenPools, lane.Source.DestinationChainId) + if err != nil { + return fmt.Errorf("failed to sync USDC domain: %w", err) + } + + lane.UpdateLaneConfig() + + // if lane is being set up for already configured CL nodes and contracts + // no further action is necessary + if !configureCLNodes { + return nil + } + err = lane.Source.Common.WatchForPriceUpdates(setUpCtx, lane.Logger) + if err != nil { + return fmt.Errorf("error in starting price update watch %w", err) + } + if env == nil { + return fmt.Errorf("test environment not set") + } + // wait for the CL nodes to be ready before moving ahead with job creation + err = env.CLNodeWithKeyReady.Wait() + if err != nil { + return fmt.Errorf("failed to wait for CL nodes to be ready: %w", err) + } + clNodesWithKeys := env.CLNodesWithKeys + // set up ocr2 jobs + clNodes, exists := clNodesWithKeys[lane.Dest.Common.ChainClient.GetChainID().String()] + if !exists { + return fmt.Errorf("could not find CL nodes for %s", lane.Dest.Common.ChainClient.GetChainID().String()) + } + bootstrapCommit := clNodes[0] + var bootstrapExec *client.CLNodesWithKeys + commitNodes := clNodes[env.CommitNodeStartIndex : env.CommitNodeStartIndex+env.NumOfCommitNodes] + execNodes := clNodes[env.ExecNodeStartIndex : env.ExecNodeStartIndex+env.NumOfExecNodes] + if !commitAndExecOnSameDON { + if len(clNodes) < 11 { + return fmt.Errorf("not enough CL nodes for separate commit and execution nodes") + } + bootstrapExec = clNodes[1] // for a set-up of different commit and execution nodes second 
node is the bootstrapper for execution nodes + } + + // save the current block numbers. If there is a delay between job start up and ocr config set up, the jobs will + // replay the log polling from these mentioned block number. The dest block number should ideally be the block number on which + // contract config is set and the source block number should be the one on which the ccip send request is performed. + // Here for simplicity we are just taking the current block number just before the job is created. + currentBlockOnDest, err := destChainClient.LatestBlockNumber(context.Background()) + if err != nil { + return fmt.Errorf("getting current block should be successful in destination chain %w", err) + } + + var killgrave *ctftestenv.Killgrave + if env.LocalCluster != nil { + killgrave = env.LocalCluster.MockAdapter + } + var tokenAddresses []string + for _, token := range lane.Dest.Common.BridgeTokens { + tokenAddresses = append(tokenAddresses, token.Address()) + } + tokenAddresses = append(tokenAddresses, lane.Dest.Common.FeeToken.Address(), lane.Source.Common.WrappedNative.Hex(), lane.Dest.Common.WrappedNative.Hex()) + + // Only one off pipeline or price getter to be set. + tokenPricesUSDPipeline := "" + tokenPricesConfigJson := "" + if withPipeline { + tokensUSDUrl := TokenPricePipelineURLs(tokenAddresses, killgrave, env.MockServer) + tokenPricesUSDPipeline = TokenFeeForMultipleTokenAddr(tokensUSDUrl) + } else { + tokenPricesConfigJson, err = lane.TokenPricesConfig() + if err != nil { + return fmt.Errorf("error getting token prices config %w", err) + } + lane.Logger.Info().Str("tokenPricesConfigJson", tokenPricesConfigJson).Msg("Price getter config") + } + + jobParams := integrationtesthelpers.CCIPJobSpecParams{ + OffRamp: lane.Dest.OffRamp.EthAddress, + CommitStore: lane.Dest.CommitStore.EthAddress, + SourceChainName: sourceChainClient.GetNetworkName(), + DestChainName: destChainClient.GetNetworkName(), + DestEvmChainId: destChainClient.GetChainID().Uint64(), + SourceStartBlock: lane.Source.SrcStartBlock, + TokenPricesUSDPipeline: tokenPricesUSDPipeline, + PriceGetterConfig: tokenPricesConfigJson, + DestStartBlock: currentBlockOnDest, + } + if !lane.Source.Common.ExistingDeployment && lane.Source.Common.IsUSDCDeployment() { + api := "" + if killgrave != nil { + api = killgrave.InternalEndpoint + } + if env.MockServer != nil { + api = env.MockServer.Config.ClusterURL + } + if lane.Source.Common.TokenTransmitter == nil { + return fmt.Errorf("token transmitter address not set") + } + // Only one USDC allowed per chain + jobParams.USDCConfig = &config.USDCConfig{ + SourceTokenAddress: common.HexToAddress(lane.Source.Common.BridgeTokens[0].Address()), + SourceMessageTransmitterAddress: lane.Source.Common.TokenTransmitter.ContractAddress, + AttestationAPI: api, + AttestationAPITimeoutSeconds: 5, + } + } + if !bootstrapAdded.Load() { + bootstrapAdded.Store(true) + err := CreateBootstrapJob(jobParams, bootstrapCommit, bootstrapExec) + if err != nil { + return fmt.Errorf("failed to create bootstrap job: %w", err) + } + } + + bootstrapCommitP2PId := bootstrapCommit.KeysBundle.P2PKeys.Data[0].Attributes.PeerID + var p2pBootstrappersExec, p2pBootstrappersCommit *client.P2PData + if bootstrapExec != nil { + p2pBootstrappersExec = &client.P2PData{ + InternalIP: bootstrapExec.Node.InternalIP(), + PeerID: bootstrapExec.KeysBundle.P2PKeys.Data[0].Attributes.PeerID, + } + } + + p2pBootstrappersCommit = &client.P2PData{ + InternalIP: bootstrapCommit.Node.InternalIP(), + PeerID: bootstrapCommitP2PId, 
+ } + + jobParams.P2PV2Bootstrappers = []string{p2pBootstrappersCommit.P2PV2Bootstrapper()} + + err = SetOCR2Config(lane.Context, lane.Logger, *testConf, commitNodes, execNodes, *lane.Dest, lane.PriceReportingDisabled) + if err != nil { + return fmt.Errorf("failed to set ocr2 config: %w", err) + } + + err = CreateOCR2CCIPCommitJobs(lane.Logger, jobParams, commitNodes, env.nodeMutexes, jobErrGroup) + if err != nil { + return fmt.Errorf("failed to create ocr2 commit jobs: %w", err) + } + if p2pBootstrappersExec != nil { + jobParams.P2PV2Bootstrappers = []string{p2pBootstrappersExec.P2PV2Bootstrapper()} + } + + err = CreateOCR2CCIPExecutionJobs(lane.Logger, jobParams, execNodes, env.nodeMutexes, jobErrGroup) + if err != nil { + return fmt.Errorf("failed to create ocr2 execution jobs: %w", err) + } + + if err := lane.Source.Common.ChainClient.WaitForEvents(); err != nil { + return fmt.Errorf("failed to wait for events: %w", err) + } + if err := lane.Dest.Common.ChainClient.WaitForEvents(); err != nil { + return fmt.Errorf("failed to wait for events: %w", err) + } + lane.Dest.Common.ChainClient.ParallelTransactions(false) + lane.Source.Common.ChainClient.ParallelTransactions(false) + + return nil +} + +// SetOCR2Config sets the oracle config in ocr2 contracts. If execNodes is nil, commit and execution jobs are set up in same DON +func SetOCR2Config( + ctx context.Context, + lggr *zerolog.Logger, + testConf testconfig.CCIPTestGroupConfig, + commitNodes, + execNodes []*client.CLNodesWithKeys, + destCCIP DestCCIPModule, + priceReportingDisabled bool, +) error { + inflightExpiryExec := commonconfig.MustNewDuration(InflightExpiryExec) + inflightExpiryCommit := commonconfig.MustNewDuration(InflightExpiryCommit) + blockTime, err := destCCIP.Common.AvgBlockTime(ctx) + if err != nil { + return fmt.Errorf("failed to get avg block time: %w", err) + } + + OCR2ParamsForCommit := contracts.OCR2ParamsForCommit(blockTime) + OCR2ParamsForExec := contracts.OCR2ParamsForExec(blockTime) + // if test config has custom ocr2 params, merge them with default params to replace default with custom ocr2 params provided in config + // for commit and exec + if testConf.CommitOCRParams != nil { + err := mergo.Merge(&OCR2ParamsForCommit, testConf.CommitOCRParams, mergo.WithOverride) + if err != nil { + return err + } + } + if testConf.ExecOCRParams != nil { + err := mergo.Merge(&OCR2ParamsForExec, testConf.ExecOCRParams, mergo.WithOverride) + if err != nil { + return err + } + } + lggr.Info(). + Dur("AvgBlockTimeOnDest", blockTime). + Interface("OCRParmsForCommit", OCR2ParamsForCommit). + Interface("OCRParmsForExec", OCR2ParamsForExec). 
+ Msg("Setting OCR2 config") + commitOffchainCfg, err := contracts.NewCommitOffchainConfig( + *commonconfig.MustNewDuration(5 * time.Second), + 1e6, + 1e6, + *commonconfig.MustNewDuration(5 * time.Second), + 1e6, + *inflightExpiryCommit, + priceReportingDisabled, + ) + if err != nil { + return fmt.Errorf("failed to create commit offchain config: %w", err) + } + + commitOnchainCfg, err := contracts.NewCommitOnchainConfig( + destCCIP.Common.PriceRegistry.EthAddress, + ) + if err != nil { + return fmt.Errorf("failed to create commit onchain config: %w", err) + } + signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig, err := contracts.NewOffChainAggregatorV2ConfigForCCIPPlugin( + commitNodes, commitOffchainCfg, commitOnchainCfg, OCR2ParamsForCommit, 3*time.Minute) + if err != nil { + return fmt.Errorf("failed to create ocr2 config params for commit: %w", err) + } + + err = destCCIP.CommitStore.SetOCR2Config(signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) + if err != nil { + return fmt.Errorf("failed to set ocr2 config for commit: %w", err) + } + + nodes := commitNodes + // if commit and exec job is set up in different DON + if len(execNodes) > 0 { + nodes = execNodes + } + if destCCIP.OffRamp != nil { + execOffchainCfg, err := contracts.NewExecOffchainConfig( + 1, + BatchGasLimit, + 0.7, + *inflightExpiryExec, + *commonconfig.MustNewDuration(RootSnoozeTime), + ) + if err != nil { + return fmt.Errorf("failed to create exec offchain config: %w", err) + } + execOnchainCfg, err := contracts.NewExecOnchainConfig( + uint32(DefaultPermissionlessExecThreshold.Seconds()), + destCCIP.Common.Router.EthAddress, + destCCIP.Common.PriceRegistry.EthAddress, + DefaultMaxNoOfTokensInMsg, + MaxDataBytes, + 200_000, + ) + if err != nil { + return fmt.Errorf("failed to create exec onchain config: %w", err) + } + signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig, err = contracts.NewOffChainAggregatorV2ConfigForCCIPPlugin( + nodes, + execOffchainCfg, + execOnchainCfg, + OCR2ParamsForExec, + 3*time.Minute, + ) + if err != nil { + return fmt.Errorf("failed to create ocr2 config params for exec: %w", err) + } + err = destCCIP.OffRamp.SetOCR2Config(signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) + if err != nil { + return fmt.Errorf("failed to set ocr2 config for exec: %w", err) + } + } + return destCCIP.Common.ChainClient.WaitForEvents() +} + +func CreateBootstrapJob( + jobParams integrationtesthelpers.CCIPJobSpecParams, + bootstrapCommit *client.CLNodesWithKeys, + bootstrapExec *client.CLNodesWithKeys, +) error { + _, err := bootstrapCommit.Node.MustCreateJob(jobParams.BootstrapJob(jobParams.CommitStore.Hex())) + if err != nil { + return fmt.Errorf("shouldn't fail creating bootstrap job on bootstrap node %w", err) + } + if bootstrapExec != nil { + _, err := bootstrapExec.Node.MustCreateJob(jobParams.BootstrapJob(jobParams.OffRamp.Hex())) + if err != nil { + return fmt.Errorf("shouldn't fail creating bootstrap job on bootstrap node %w", err) + } + } + return nil +} + +func CreateOCR2CCIPCommitJobs( + lggr *zerolog.Logger, + jobParams integrationtesthelpers.CCIPJobSpecParams, + commitNodes []*client.CLNodesWithKeys, + mutexes []*sync.Mutex, + group *errgroup.Group, +) error { + ocr2SpecCommit, err := jobParams.CommitJobSpec() + if err != nil { + return fmt.Errorf("failed to create ocr2 commit job spec: %w", err) + } + createJob := func(index int, node *client.CLNodesWithKeys, ocr2SpecCommit 
client.OCR2TaskJobSpec, mu *sync.Mutex) error { + mu.Lock() + defer mu.Unlock() + ocr2SpecCommit.OCR2OracleSpec.OCRKeyBundleID.SetValid(node.KeysBundle.OCR2Key.Data.ID) + ocr2SpecCommit.OCR2OracleSpec.TransmitterID.SetValid(node.KeysBundle.EthAddress) + lggr.Info().Msgf("Creating CCIP-Commit job on OCR node %d job name %s", index+1, ocr2SpecCommit.Name) + _, err = node.Node.MustCreateJob(&ocr2SpecCommit) + if err != nil { + return fmt.Errorf("shouldn't fail creating CCIP-Commit job on OCR node %d job name %s - %w", index+1, ocr2SpecCommit.Name, err) + } + return nil + } + + testSpec := client.OCR2TaskJobSpec{ + Name: ocr2SpecCommit.Name, + JobType: ocr2SpecCommit.JobType, + OCR2OracleSpec: ocr2SpecCommit.OCR2OracleSpec, + } + for i, node := range commitNodes { + node := node + i := i + group.Go(func() error { + return createJob(i, node, testSpec, mutexes[i]) + }) + } + return nil +} + +func CreateOCR2CCIPExecutionJobs( + lggr *zerolog.Logger, + jobParams integrationtesthelpers.CCIPJobSpecParams, + execNodes []*client.CLNodesWithKeys, + mutexes []*sync.Mutex, + group *errgroup.Group, +) error { + ocr2SpecExec, err := jobParams.ExecutionJobSpec() + if err != nil { + return fmt.Errorf("failed to create ocr2 execution job spec: %w", err) + } + createJob := func(index int, node *client.CLNodesWithKeys, ocr2SpecExec client.OCR2TaskJobSpec, mu *sync.Mutex) error { + mu.Lock() + defer mu.Unlock() + ocr2SpecExec.OCR2OracleSpec.OCRKeyBundleID.SetValid(node.KeysBundle.OCR2Key.Data.ID) + ocr2SpecExec.OCR2OracleSpec.TransmitterID.SetValid(node.KeysBundle.EthAddress) + lggr.Info().Msgf("Creating CCIP-Exec job on OCR node %d job name %s", index+1, ocr2SpecExec.Name) + _, err = node.Node.MustCreateJob(&ocr2SpecExec) + if err != nil { + return fmt.Errorf("shouldn't fail creating CCIP-Exec job on OCR node %d job name %s - %w", index+1, + ocr2SpecExec.Name, err) + } + return nil + } + if ocr2SpecExec != nil { + for i, node := range execNodes { + node := node + i := i + group.Go(func() error { + return createJob(i, node, client.OCR2TaskJobSpec{ + Name: ocr2SpecExec.Name, + JobType: ocr2SpecExec.JobType, + MaxTaskDuration: ocr2SpecExec.MaxTaskDuration, + ForwardingAllowed: ocr2SpecExec.ForwardingAllowed, + OCR2OracleSpec: ocr2SpecExec.OCR2OracleSpec, + ObservationSource: ocr2SpecExec.ObservationSource, + }, mutexes[i]) + }) + } + } + return nil +} + +func TokenFeeForMultipleTokenAddr(tokenAddrToURL map[string]string) string { + source := "" + right := "" + i := 1 + for addr, url := range tokenAddrToURL { + source = source + fmt.Sprintf(` +token%d [type=http method=GET url="%s"]; +token%d_parse [type=jsonparse path="data,result"]; +token%d->token%d_parse;`, i, url, i, i, i) + right = right + fmt.Sprintf(` \\\"%s\\\":$(token%d_parse),`, addr, i) + i++ + } + right = right[:len(right)-1] + source = fmt.Sprintf(`%s +merge [type=merge left="{}" right="{%s}"];`, source, right) + + return source +} + +type CCIPTestEnv struct { + MockServer *ctfClient.MockserverClient + LocalCluster *test_env.CLClusterTestEnv + CLNodesWithKeys map[string][]*client.CLNodesWithKeys // key - network chain-id + CLNodes []*client.ChainlinkK8sClient + nodeMutexes []*sync.Mutex + ExecNodeStartIndex int + CommitNodeStartIndex int + NumOfAllowedFaultyCommit int + NumOfAllowedFaultyExec int + NumOfCommitNodes int + NumOfExecNodes int + K8Env *environment.Environment + CLNodeWithKeyReady *errgroup.Group // denotes if keys are created in chainlink node and ready to be used for job creation +} + +func (c *CCIPTestEnv) ChaosLabelForGeth(t 
*testing.T, srcChain, destChain string) { + err := c.K8Env.Client.LabelChaosGroupByLabels(c.K8Env.Cfg.Namespace, map[string]string{ + "app": GethLabel(srcChain), + }, ChaosGroupNetworkACCIPGeth) + require.NoError(t, err) + + err = c.K8Env.Client.LabelChaosGroupByLabels(c.K8Env.Cfg.Namespace, map[string]string{ + "app": GethLabel(destChain), + }, ChaosGroupNetworkBCCIPGeth) + require.NoError(t, err) + gethNetworksLabels := []string{GethLabel(srcChain), GethLabel(destChain)} + c.ChaosLabelForAllGeth(t, gethNetworksLabels) + +} + +func (c *CCIPTestEnv) ChaosLabelForAllGeth(t *testing.T, gethNetworksLabels []string) { + for _, gethNetworkLabel := range gethNetworksLabels { + err := c.K8Env.Client.AddLabel(c.K8Env.Cfg.Namespace, + fmt.Sprintf("app=%s", gethNetworkLabel), + fmt.Sprintf("geth=%s", ChaosGroupCCIPGeth)) + require.NoError(t, err) + } +} + +func (c *CCIPTestEnv) ChaosLabelForCLNodes(t *testing.T) { + allowedFaulty := c.NumOfAllowedFaultyCommit + commitStartInstance := c.CommitNodeStartIndex + 1 + execStartInstance := c.ExecNodeStartIndex + 1 + for i := commitStartInstance; i < len(c.CLNodes); i++ { + labelSelector := map[string]string{ + "app": "chainlink-0", + "instance": fmt.Sprintf("node-%d", i), + } + if i >= commitStartInstance && i < commitStartInstance+allowedFaulty+1 { + err := c.K8Env.Client.LabelChaosGroupByLabels(c.K8Env.Cfg.Namespace, labelSelector, ChaosGroupCommitAndExecFaultyPlus) + require.NoError(t, err) + } + if i >= commitStartInstance && i < commitStartInstance+allowedFaulty { + err := c.K8Env.Client.LabelChaosGroupByLabels(c.K8Env.Cfg.Namespace, labelSelector, ChaosGroupCommitAndExecFaulty) + require.NoError(t, err) + } + + // commit node starts from index 2 + if i >= commitStartInstance && i < commitStartInstance+c.NumOfCommitNodes { + err := c.K8Env.Client.LabelChaosGroupByLabels(c.K8Env.Cfg.Namespace, labelSelector, ChaosGroupCommit) + require.NoError(t, err) + } + if i >= commitStartInstance && i < commitStartInstance+c.NumOfAllowedFaultyCommit+1 { + err := c.K8Env.Client.LabelChaosGroupByLabels(c.K8Env.Cfg.Namespace, labelSelector, ChaosGroupCommitFaultyPlus) + require.NoError(t, err) + } + if i >= commitStartInstance && i < commitStartInstance+c.NumOfAllowedFaultyCommit { + err := c.K8Env.Client.LabelChaosGroupByLabels(c.K8Env.Cfg.Namespace, labelSelector, ChaosGroupCommitFaulty) + require.NoError(t, err) + } + if i >= execStartInstance && i < execStartInstance+c.NumOfExecNodes { + err := c.K8Env.Client.LabelChaosGroupByLabels(c.K8Env.Cfg.Namespace, labelSelector, ChaosGroupExecution) + require.NoError(t, err) + } + if i >= execStartInstance && i < execStartInstance+c.NumOfAllowedFaultyExec+1 { + err := c.K8Env.Client.LabelChaosGroupByLabels(c.K8Env.Cfg.Namespace, labelSelector, ChaosGroupExecutionFaultyPlus) + require.NoError(t, err) + } + if i >= execStartInstance && i < execStartInstance+c.NumOfAllowedFaultyExec { + err := c.K8Env.Client.LabelChaosGroupByLabels(c.K8Env.Cfg.Namespace, labelSelector, ChaosGroupExecutionFaulty) + require.NoError(t, err) + } + } +} + +func (c *CCIPTestEnv) ConnectToExistingNodes(envConfig *testconfig.Common) error { + if envConfig.ExistingCLCluster == nil { + return fmt.Errorf("existing cluster is nil") + } + noOfNodes := pointer.GetInt(envConfig.ExistingCLCluster.NoOfNodes) + namespace := pointer.GetString(envConfig.ExistingCLCluster.Name) + + for i := 0; i < noOfNodes; i++ { + cfg := envConfig.ExistingCLCluster.NodeConfigs[i] + if cfg == nil { + return fmt.Errorf("node %d config is nil", i+1) + } + clClient, err := 
client.NewChainlinkK8sClient(cfg, cfg.InternalIP, namespace) + if err != nil { + return fmt.Errorf("failed to create chainlink client: %w for node %d config %v", err, i+1, cfg) + } + c.CLNodes = append(c.CLNodes, clClient) + c.nodeMutexes = append(c.nodeMutexes, &sync.Mutex{}) + } + + return nil +} + +func (c *CCIPTestEnv) ConnectToDeployedNodes() error { + if c.LocalCluster != nil { + // for local cluster, fetch the values from the local cluster + for _, chainlinkNode := range c.LocalCluster.ClCluster.Nodes { + c.nodeMutexes = append(c.nodeMutexes, &sync.Mutex{}) + c.CLNodes = append(c.CLNodes, &client.ChainlinkK8sClient{ + ChainlinkClient: chainlinkNode.API, + }) + } + } else { + // in case of k8s, we need to connect to the chainlink nodes + log.Info().Msg("Connecting to launched resources") + chainlinkK8sNodes, err := client.ConnectChainlinkNodes(c.K8Env) + if err != nil { + return fmt.Errorf("failed to connect to chainlink nodes: %w", err) + } + if len(chainlinkK8sNodes) == 0 { + return fmt.Errorf("no CL node found") + } + + for range chainlinkK8sNodes { + c.nodeMutexes = append(c.nodeMutexes, &sync.Mutex{}) + } + c.CLNodes = chainlinkK8sNodes + if _, exists := c.K8Env.URLs[mockserver.InternalURLsKey]; exists { + c.MockServer = ctfClient.ConnectMockServer(c.K8Env) + } + } + return nil +} + +// SetUpNodeKeysAndFund creates node keys and funds the nodes +func (c *CCIPTestEnv) SetUpNodeKeysAndFund( + logger *zerolog.Logger, + nodeFund *big.Float, + chains []blockchain.EVMClient, +) error { + if c.CLNodes == nil || len(c.CLNodes) == 0 { + return fmt.Errorf("no chainlink nodes to setup") + } + var chainlinkNodes []*client.ChainlinkClient + for _, node := range c.CLNodes { + chainlinkNodes = append(chainlinkNodes, node.ChainlinkClient) + } + nodesWithKeys := make(map[string][]*client.CLNodesWithKeys) + + populateKeys := func(chain blockchain.EVMClient) error { + log.Info().Str("chain id", chain.GetChainID().String()).Msg("creating node keys for chain") + _, clNodes, err := client.CreateNodeKeysBundle(chainlinkNodes, "evm", chain.GetChainID().String()) + if err != nil { + return fmt.Errorf("failed to create node keys for chain %s: %w", chain.GetChainID().String(), err) + } + if len(clNodes) == 0 { + return fmt.Errorf("no CL node with keys found for chain %s", chain.GetNetworkName()) + } + + nodesWithKeys[chain.GetChainID().String()] = clNodes + return nil + } + + fund := func(ec blockchain.EVMClient) error { + cfg := ec.GetNetworkConfig() + if cfg == nil { + return fmt.Errorf("blank network config") + } + c1, err := blockchain.ConcurrentEVMClient(*cfg, c.K8Env, ec, *logger) + if err != nil { + return fmt.Errorf("getting concurrent evmclient chain %s %w", ec.GetNetworkName(), err) + } + defer func() { + if c1 != nil { + c1.Close() + } + }() + log.Info().Str("chain id", c1.GetChainID().String()).Msg("Funding Chainlink nodes for chain") + for i := 1; i < len(chainlinkNodes); i++ { + cl := chainlinkNodes[i] + m := c.nodeMutexes[i] + toAddress, err := cl.EthAddressesForChain(c1.GetChainID().String()) + if err != nil { + return err + } + for _, addr := range toAddress { + toAddr := common.HexToAddress(addr) + gasEstimates, err := c1.EstimateGas(ethereum.CallMsg{ + To: &toAddr, + }) + if err != nil { + return err + } + m.Lock() + err = c1.Fund(addr, nodeFund, gasEstimates) + m.Unlock() + if err != nil { + return err + } + } + } + return c1.WaitForEvents() + } + grp, _ := errgroup.WithContext(context.Background()) + for _, chain := range chains { + err := populateKeys(chain) + if err != nil { + 
return err + } + } + for _, chain := range chains { + chain := chain + grp.Go(func() error { + return fund(chain) + }) + } + err := grp.Wait() + if err != nil { + return fmt.Errorf("error funding nodes %w", err) + } + c.CLNodesWithKeys = nodesWithKeys + + return nil +} + +func AssertBalances(t *testing.T, bas []testhelpers.BalanceAssertion) { + logEvent := log.Info() + for _, b := range bas { + actual := b.Getter(t, b.Address) + assert.NotNil(t, actual, "%v getter return nil", b.Name) + if b.Within == "" { + assert.Equal(t, b.Expected, actual.String(), "wrong balance for %s got %s want %s", b.Name, actual, b.Expected) + logEvent.Interface(b.Name, struct { + Exp string + Actual string + }{ + Exp: b.Expected, + Actual: actual.String(), + }) + } else { + bi, _ := big.NewInt(0).SetString(b.Expected, 10) + withinI, _ := big.NewInt(0).SetString(b.Within, 10) + high := big.NewInt(0).Add(bi, withinI) + low := big.NewInt(0).Sub(bi, withinI) + assert.Equal(t, -1, actual.Cmp(high), + "wrong balance for %s got %s outside expected range [%s, %s]", b.Name, actual, low, high) + assert.Equal(t, 1, actual.Cmp(low), + "wrong balance for %s got %s outside expected range [%s, %s]", b.Name, actual, low, high) + logEvent.Interface(b.Name, struct { + ExpRange string + Actual string + }{ + ExpRange: fmt.Sprintf("[%s, %s]", low, high), + Actual: actual.String(), + }) + } + } + logEvent.Msg("balance assertions succeeded") +} + +type BalFunc func(ctx context.Context, addr string) (*big.Int, error) + +func GetterForLinkToken(getBalance BalFunc, addr string) func(t *testing.T, _ common.Address) *big.Int { + return func(t *testing.T, _ common.Address) *big.Int { + balance, err := getBalance(context.Background(), addr) + assert.NoError(t, err) + return balance + } +} + +type BalanceItem struct { + Address common.Address + Getter func(t *testing.T, addr common.Address) *big.Int + PreviousBalance *big.Int + AmtToAdd *big.Int + AmtToSub *big.Int +} + +type BalanceSheet struct { + mu *sync.Mutex + Items map[string]BalanceItem + PrevBalance map[string]*big.Int +} + +func (b *BalanceSheet) Update(key string, item BalanceItem) { + b.mu.Lock() + defer b.mu.Unlock() + prev, ok := b.Items[key] + if !ok { + b.Items[key] = item + return + } + amtToAdd, amtToSub := big.NewInt(0), big.NewInt(0) + if prev.AmtToAdd != nil { + amtToAdd = prev.AmtToAdd + } + if prev.AmtToSub != nil { + amtToSub = prev.AmtToSub + } + if item.AmtToAdd != nil { + amtToAdd = new(big.Int).Add(amtToAdd, item.AmtToAdd) + } + if item.AmtToSub != nil { + amtToSub = new(big.Int).Add(amtToSub, item.AmtToSub) + } + + b.Items[key] = BalanceItem{ + Address: item.Address, + Getter: item.Getter, + AmtToAdd: amtToAdd, + AmtToSub: amtToSub, + } +} + +func (b *BalanceSheet) RecordBalance(bal map[string]*big.Int) { + b.mu.Lock() + defer b.mu.Unlock() + for key, value := range bal { + if _, ok := b.PrevBalance[key]; !ok { + b.PrevBalance[key] = value + } + } +} + +func (b *BalanceSheet) Verify(t *testing.T) { + var balAssertions []testhelpers.BalanceAssertion + for key, item := range b.Items { + prevBalance, ok := b.PrevBalance[key] + require.Truef(t, ok, "previous balance is not captured for %s", key) + exp := prevBalance + if item.AmtToAdd != nil { + exp = new(big.Int).Add(exp, item.AmtToAdd) + } + if item.AmtToSub != nil { + exp = new(big.Int).Sub(exp, item.AmtToSub) + } + balAssertions = append(balAssertions, testhelpers.BalanceAssertion{ + Name: key, + Address: item.Address, + Getter: item.Getter, + Expected: exp.String(), + }) + } + AssertBalances(t, balAssertions) +} 
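+
+// Illustrative usage sketch (not wired into any test in this package): how a test
+// could use BalanceSheet to assert a fee deduction after a CCIP request. The key
+// name, sender address and fee amount below are placeholders, and the getter is
+// assumed to come from GetterForLinkToken or a similar balance function.
+func exampleBalanceSheetUsage(t *testing.T, getter func(t *testing.T, addr common.Address) *big.Int) {
+	sheet := NewBalanceSheet()
+	sender := common.HexToAddress("0x0000000000000000000000000000000000000001")
+
+	// Snapshot the balance before the transfer so Verify has a baseline to compare against.
+	sheet.RecordBalance(map[string]*big.Int{"sender-fee-token": getter(t, sender)})
+
+	// Register the expected delta; repeated Updates for the same key are summed together.
+	sheet.Update("sender-fee-token", BalanceItem{
+		Address:  sender,
+		Getter:   getter,
+		AmtToSub: big.NewInt(1000000000000000), // placeholder fee expected to be deducted from the sender
+	})
+
+	// Verify asserts that the current balance equals the recorded balance minus the registered fee.
+	sheet.Verify(t)
+}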
+ +func NewBalanceSheet() *BalanceSheet { + return &BalanceSheet{ + mu: &sync.Mutex{}, + Items: make(map[string]BalanceItem), + PrevBalance: make(map[string]*big.Int), + } +} + +// SetMockServerWithUSDCAttestation responds with a mock attestation for any msgHash +// The path is set with regex to match any path that starts with /v1/attestations +func SetMockServerWithUSDCAttestation( + killGrave *ctftestenv.Killgrave, + mockserver *ctfClient.MockserverClient, +) error { + path := "/v1/attestations" + response := struct { + Status string `json:"status"` + Attestation string `json:"attestation"` + Error string `json:"error"` + }{ + Status: "complete", + Attestation: "0x9049623e91719ef2aa63c55f357be2529b0e7122ae552c18aff8db58b4633c4d3920ff03d3a6d1ddf11f06bf64d7fd60d45447ac81f527ba628877dc5ca759651b08ffae25a6d3b1411749765244f0a1c131cbfe04430d687a2e12fd9d2e6dc08e118ad95d94ad832332cf3c4f7a4f3da0baa803b7be024b02db81951c0f0714de1b", + } + if killGrave == nil && mockserver == nil { + return fmt.Errorf("both killgrave and mockserver are nil") + } + log.Info().Str("path", path).Msg("setting attestation-api response for any msgHash") + if killGrave != nil { + err := killGrave.SetAnyValueResponse(fmt.Sprintf("%s/{_hash:.*}", path), []string{http.MethodGet}, response) + if err != nil { + return fmt.Errorf("failed to set killgrave server value: %w", err) + } + } + if mockserver != nil { + err := mockserver.SetAnyValueResponse(fmt.Sprintf("%s/.*", path), response) + if err != nil { + return fmt.Errorf("failed to set mockserver value: %w URL = %s", err, fmt.Sprintf("%s/%s/.*", mockserver.LocalURL(), path)) + } + } + return nil +} + +// SetMockserverWithTokenPriceValue sets the mock responses in mockserver that are read by chainlink nodes +// to simulate different price feed value. 
+// it keeps updating the response every 15 seconds to simulate price feed updates +func SetMockserverWithTokenPriceValue( + killGrave *ctftestenv.Killgrave, + mockserver *ctfClient.MockserverClient, +) { + wg := &sync.WaitGroup{} + path := "token_contract_" + wg.Add(1) + go func() { + set := true + // keep updating token value every 15 second + for { + if killGrave == nil && mockserver == nil { + log.Fatal().Msg("both killgrave and mockserver are nil") + return + } + tokenValue := big.NewInt(time.Now().UnixNano()).String() + if killGrave != nil { + err := killGrave.SetAdapterBasedAnyValuePath(fmt.Sprintf("%s{.*}", path), []string{http.MethodGet}, tokenValue) + if err != nil { + log.Fatal().Err(err).Msg("failed to set killgrave server value") + return + } + } + if mockserver != nil { + err := mockserver.SetAnyValuePath(fmt.Sprintf("/%s.*", path), tokenValue) + if err != nil { + log.Fatal().Err(err).Str("URL", fmt.Sprintf("%s/%s/.*", mockserver.LocalURL(), path)).Msg("failed to set mockserver value") + return + } + } + if set { + set = false + wg.Done() + } + time.Sleep(15 * time.Second) + } + }() + // wait for the first value to be set + wg.Wait() +} + +// TokenPricePipelineURLs returns the mockserver urls for the token price pipeline +func TokenPricePipelineURLs( + tokenAddresses []string, + killGrave *ctftestenv.Killgrave, + mockserver *ctfClient.MockserverClient, +) map[string]string { + mapTokenURL := make(map[string]string) + + for _, tokenAddr := range tokenAddresses { + path := fmt.Sprintf("token_contract_%s", tokenAddr[2:12]) + if mockserver != nil { + mapTokenURL[tokenAddr] = fmt.Sprintf("%s/%s", mockserver.Config.ClusterURL, path) + } + if killGrave != nil { + mapTokenURL[tokenAddr] = fmt.Sprintf("%s/%s", killGrave.InternalEndpoint, path) + } + } + + return mapTokenURL +} diff --git a/integration-tests/ccip-tests/actions/ccip_helpers_test.go b/integration-tests/ccip-tests/actions/ccip_helpers_test.go new file mode 100644 index 00000000000..4ca1061d6f2 --- /dev/null +++ b/integration-tests/ccip-tests/actions/ccip_helpers_test.go @@ -0,0 +1,105 @@ +package actions + +import ( + "errors" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testreporters" +) + +func TestIsPhaseValid(t *testing.T) { + // isPhaseValid has some complex logic that could lead to false negatives + t.Parallel() + logger := zerolog.New(zerolog.Nop()) + + testCases := []struct { + name string + currentPhase testreporters.Phase + opts validationOptions + phaseErr error + + expectedShouldReturn bool + expectedErr error + }{ + { + name: "should return error immediately if phase error is present and no phase is expected to fail", + currentPhase: testreporters.CCIPSendRe, + opts: validationOptions{}, + phaseErr: errors.New("some error"), + + expectedShouldReturn: true, + expectedErr: errors.New("some error"), + }, + { + name: "should return with no error if phase is expected to fail and phase error present", + currentPhase: testreporters.CCIPSendRe, + opts: validationOptions{ + phaseExpectedToFail: testreporters.CCIPSendRe, + }, + phaseErr: errors.New("some error"), + + expectedShouldReturn: true, + expectedErr: nil, + }, + { + name: "should return with error if phase is expected to fail and no phase error present", + currentPhase: testreporters.CCIPSendRe, + opts: validationOptions{ + phaseExpectedToFail: testreporters.CCIPSendRe, + }, + phaseErr: nil, + + expectedShouldReturn: true, + expectedErr: 
errors.New("expected phase 'CCIPSendRequested' to fail, but it passed"), + }, + { + name: "should not return if phase is not expected to fail and no phase error present", + currentPhase: testreporters.CCIPSendRe, + opts: validationOptions{ + phaseExpectedToFail: testreporters.ExecStateChanged, + }, + phaseErr: nil, + + expectedShouldReturn: false, + expectedErr: nil, + }, + { + name: "should return with no error if phase is expected to fail with specific error message and that error message is present", + currentPhase: testreporters.CCIPSendRe, + opts: validationOptions{ + phaseExpectedToFail: testreporters.CCIPSendRe, + expectedErrorMessage: "some error", + }, + phaseErr: errors.New("some error"), + + expectedShouldReturn: true, + expectedErr: nil, + }, + { + name: "should return with error if phase is expected to fail with specific error message and that error message is not present", + currentPhase: testreporters.CCIPSendRe, + opts: validationOptions{ + phaseExpectedToFail: testreporters.CCIPSendRe, + expectedErrorMessage: "some error", + }, + phaseErr: errors.New("some other error"), + + expectedShouldReturn: true, + expectedErr: errors.New("expected phase 'CCIPSendRequested' to fail with error message 'some error' but got error 'some other error'"), + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + shouldReturn, err := isPhaseValid(&logger, tc.currentPhase, tc.opts, tc.phaseErr) + require.Equal(t, tc.expectedShouldReturn, shouldReturn, "shouldReturn not as expected") + require.Equal(t, tc.expectedErr, err, "err not as expected") + }) + } +} diff --git a/integration-tests/ccip-tests/chaos/ccip_test.go b/integration-tests/ccip-tests/chaos/ccip_test.go new file mode 100644 index 00000000000..4b1dda7a91e --- /dev/null +++ b/integration-tests/ccip-tests/chaos/ccip_test.go @@ -0,0 +1,153 @@ +package chaos_test + +import ( + "math/big" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-testing-framework/k8s/chaos" + "github.com/smartcontractkit/chainlink-testing-framework/logging" + "github.com/smartcontractkit/chainlink-testing-framework/utils/ptr" + + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/actions" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testconfig" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testsetups" +) + +/* @network-chaos and @pod-chaos are split intentionally into 2 parallel groups +we can't use chaos.NewNetworkPartition and chaos.NewFailPods in parallel +because of jsii runtime bug, see Makefile and please use those targets to run tests +In .github/workflows/ccip-chaos-tests.yml we use these tags to run these tests separately +*/ + +func TestChaosCCIP(t *testing.T) { + inputs := []struct { + testName string + chaosFunc chaos.ManifestFunc + chaosProps *chaos.Props + waitForChaosRecovery bool + }{ + { + testName: "CCIP works after rpc is down for NetworkA @network-chaos", + chaosFunc: chaos.NewNetworkPartition, + chaosProps: &chaos.Props{ + FromLabels: &map[string]*string{actions.ChaosGroupNetworkACCIPGeth: ptr.Ptr("1")}, + // chainlink-0 is default label set for all cll nodes + ToLabels: &map[string]*string{"app": ptr.Ptr("chainlink-0")}, + DurationStr: "1m", + }, + waitForChaosRecovery: true, + }, + { + testName: "CCIP works after rpc is down for NetworkB @network-chaos", + chaosFunc: chaos.NewNetworkPartition, + chaosProps: &chaos.Props{ + FromLabels: 
&map[string]*string{actions.ChaosGroupNetworkBCCIPGeth: ptr.Ptr("1")}, + ToLabels: &map[string]*string{"app": ptr.Ptr("chainlink-0")}, + DurationStr: "1m", + }, + waitForChaosRecovery: true, + }, + { + testName: "CCIP works after 2 rpc's are down for all cll nodes @network-chaos", + chaosFunc: chaos.NewNetworkPartition, + chaosProps: &chaos.Props{ + FromLabels: &map[string]*string{"geth": ptr.Ptr(actions.ChaosGroupCCIPGeth)}, + ToLabels: &map[string]*string{"app": ptr.Ptr("chainlink-0")}, + DurationStr: "1m", + }, + waitForChaosRecovery: true, + }, + { + testName: "CCIP Commit works after majority of CL nodes are recovered from pod failure @pod-chaos", + chaosFunc: chaos.NewFailPods, + chaosProps: &chaos.Props{ + LabelsSelector: &map[string]*string{actions.ChaosGroupCommitFaultyPlus: ptr.Ptr("1")}, + DurationStr: "1m", + }, + waitForChaosRecovery: true, + }, + { + testName: "CCIP Execution works after majority of CL nodes are recovered from pod failure @pod-chaos", + chaosFunc: chaos.NewFailPods, + chaosProps: &chaos.Props{ + LabelsSelector: &map[string]*string{actions.ChaosGroupExecutionFaultyPlus: ptr.Ptr("1")}, + DurationStr: "1m", + }, + waitForChaosRecovery: true, + }, + { + testName: "CCIP Commit works while minority of CL nodes are in failed state for pod failure @pod-chaos", + chaosFunc: chaos.NewFailPods, + chaosProps: &chaos.Props{ + LabelsSelector: &map[string]*string{actions.ChaosGroupCommitFaulty: ptr.Ptr("1")}, + DurationStr: "90s", + }, + waitForChaosRecovery: false, + }, + { + testName: "CCIP Execution works while minority of CL nodes are in failed state for pod failure @pod-chaos", + chaosFunc: chaos.NewFailPods, + chaosProps: &chaos.Props{ + LabelsSelector: &map[string]*string{actions.ChaosGroupExecutionFaulty: ptr.Ptr("1")}, + DurationStr: "90s", + }, + waitForChaosRecovery: false, + }, + } + + for _, in := range inputs { + in := in + t.Run(in.testName, func(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + testCfg := testsetups.NewCCIPTestConfig(t, l, testconfig.Chaos) + var numOfRequests = 3 + + setUpArgs := testsetups.CCIPDefaultTestSetUp( + t, &l, "chaos-ccip", nil, testCfg) + + if len(setUpArgs.Lanes) == 0 { + return + } + + lane := setUpArgs.Lanes[0].ForwardLane + + tearDown := setUpArgs.TearDown + testEnvironment := setUpArgs.Env.K8Env + testSetup := setUpArgs.Env + + testSetup.ChaosLabelForGeth(t, lane.SourceChain.GetNetworkName(), lane.DestChain.GetNetworkName()) + testSetup.ChaosLabelForCLNodes(t) + + lane.RecordStateBeforeTransfer() + // Send the ccip-request and verify ocr2 is running + err := lane.SendRequests(1, big.NewInt(actions.DefaultDestinationGasLimit)) + require.NoError(t, err) + lane.ValidateRequests(nil) + + // apply chaos + chaosId, err := testEnvironment.Chaos.Run(in.chaosFunc(testEnvironment.Cfg.Namespace, in.chaosProps)) + require.NoError(t, err) + t.Cleanup(func() { + if chaosId != "" { + require.NoError(t, testEnvironment.Chaos.Stop(chaosId)) + } + require.NoError(t, tearDown()) + }) + lane.RecordStateBeforeTransfer() + // Now send the ccip-request while the chaos is at play + err = lane.SendRequests(numOfRequests, big.NewInt(actions.DefaultDestinationGasLimit)) + require.NoError(t, err) + if in.waitForChaosRecovery { + // wait for chaos to be recovered before further validation + require.NoError(t, testEnvironment.Chaos.WaitForAllRecovered(chaosId, 1*time.Minute)) + } else { + l.Info().Msg("proceeding without waiting for chaos recovery") + } + lane.ValidateRequests(nil) + }) + } +} diff --git 
a/integration-tests/ccip-tests/contracts/contract_deployer.go b/integration-tests/ccip-tests/contracts/contract_deployer.go new file mode 100644 index 00000000000..8656656e0b2 --- /dev/null +++ b/integration-tests/ccip-tests/contracts/contract_deployer.go @@ -0,0 +1,1586 @@ +package contracts + +import ( + "context" + "crypto/ed25519" + "encoding/hex" + "fmt" + "math/big" + "strings" + "time" + + "github.com/Masterminds/semver/v3" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "golang.org/x/crypto/curve25519" + + ocrconfighelper2 "github.com/smartcontractkit/libocr/offchainreporting2/confighelper" + ocrtypes2 "github.com/smartcontractkit/libocr/offchainreporting2/types" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + + "github.com/smartcontractkit/chainlink/integration-tests/client" + "github.com/smartcontractkit/chainlink/integration-tests/contracts" + "github.com/smartcontractkit/chainlink/integration-tests/wrappers" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/arm_contract" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/lock_release_token_pool" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/lock_release_token_pool_1_4_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/maybe_revert_message_receiver" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/mock_arm_contract" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/mock_usdc_token_messenger" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/mock_usdc_token_transmitter" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/mock_v3_aggregator_contract" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/token_admin_registry" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/token_pool" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/token_pool_1_4_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/usdc_token_pool" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/usdc_token_pool_1_4_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/weth9" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/link_token_interface" + type_and_version 
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/type_and_version_interface_wrapper" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/shared/generated/burn_mint_erc677" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/shared/generated/erc20" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0" + "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm" +) + +// MatchContractVersionsOrAbove checks if the current contract versions for the test match or exceed the provided contract versions +func MatchContractVersionsOrAbove(requiredContractVersions map[Name]Version) error { + for contractName, r := range requiredContractVersions { + required := r + if contractVersion, ok := VersionMap[contractName]; !ok { + return fmt.Errorf("contract %s not found in version map", contractName) + } else if contractVersion.Compare(&required.Version) < 0 { + return fmt.Errorf("contract %s version %s is less than required version %s", contractName, contractVersion, required.Version) + } + } + return nil +} + +// NeedTokenAdminRegistry checks if token admin registry is needed for the current version of ccip +// if the version is less than 1.5.0-dev, then token admin registry is not needed +func NeedTokenAdminRegistry() bool { + return MatchContractVersionsOrAbove(map[Name]Version{ + TokenPoolContract: V1_5_0_dev, + }) == nil +} + +// CCIPContractsDeployer provides the implementations for deploying CCIP ETH contracts +type CCIPContractsDeployer struct { + evmClient blockchain.EVMClient + logger *zerolog.Logger +} + +// NewCCIPContractsDeployer returns an instance of a contract deployer for CCIP +func NewCCIPContractsDeployer(logger *zerolog.Logger, bcClient blockchain.EVMClient) (*CCIPContractsDeployer, error) { + return &CCIPContractsDeployer{ + evmClient: bcClient, + logger: logger, + }, nil +} + +func (e *CCIPContractsDeployer) Client() blockchain.EVMClient { + return e.evmClient +} + +func (e *CCIPContractsDeployer) DeployMultiCallContract() (common.Address, error) { + multiCallABI, err := abi.JSON(strings.NewReader(MultiCallABI)) + if err != nil { + return common.Address{}, err + } + address, tx, _, err := e.evmClient.DeployContract("MultiCall Contract", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + address, tx, contract, err := bind.DeployContract(auth, multiCallABI, common.FromHex(MultiCallBIN), wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, contract, err + }) + if err != nil { + return common.Address{}, err + } + r, err := bind.WaitMined(context.Background(), e.evmClient.DeployBackend(), tx) + if err != nil { + return common.Address{}, err + } + if r.Status != types.ReceiptStatusSuccessful { + return common.Address{}, fmt.Errorf("deploy multicall failed") + } + return *address, nil +} + +func (e *CCIPContractsDeployer) DeployTokenMessenger(tokenTransmitter common.Address) (*common.Address, error) { + address, _, _, err := e.evmClient.DeployContract("Mock Token Messenger", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, 
*types.Transaction, interface{}, error) { + address, tx, contract, err := mock_usdc_token_messenger.DeployMockE2EUSDCTokenMessenger(auth, wrappers.MustNewWrappedContractBackend(e.evmClient, nil), 0, tokenTransmitter) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, contract, err + }) + + return address, err +} + +func (e *CCIPContractsDeployer) NewTokenTransmitter(addr common.Address) (*TokenTransmitter, error) { + transmitter, err := mock_usdc_token_transmitter.NewMockE2EUSDCTransmitter(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + + if err != nil { + return nil, err + } + e.logger.Info(). + Str("Contract Address", addr.Hex()). + Str("Contract Name", "Mock USDC Token Transmitter"). + Str("From", e.evmClient.GetDefaultWallet().Address()). + Str("Network Name", e.evmClient.GetNetworkConfig().Name). + Msg("New contract") + return &TokenTransmitter{ + client: e.evmClient, + instance: transmitter, + ContractAddress: addr, + }, err +} + +func (e *CCIPContractsDeployer) DeployTokenTransmitter(domain uint32, usdcToken common.Address) (*TokenTransmitter, error) { + address, _, instance, err := e.evmClient.DeployContract("Mock Token Transmitter", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + address, tx, contract, err := mock_usdc_token_transmitter.DeployMockE2EUSDCTransmitter(auth, wrappers.MustNewWrappedContractBackend(e.evmClient, nil), 0, domain, usdcToken) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, contract, err + }) + + if err != nil { + return nil, fmt.Errorf("error in deploying usdc token transmitter: %w", err) + } + + return &TokenTransmitter{ + client: e.evmClient, + instance: instance.(*mock_usdc_token_transmitter.MockE2EUSDCTransmitter), + ContractAddress: *address, + }, err +} + +func (e *CCIPContractsDeployer) DeployLinkTokenContract() (*LinkToken, error) { + address, _, instance, err := e.evmClient.DeployContract("Link Token", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return link_token_interface.DeployLinkToken(auth, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + }) + + if err != nil { + return nil, err + } + return &LinkToken{ + client: e.evmClient, + logger: e.logger, + instance: instance.(*link_token_interface.LinkToken), + EthAddress: *address, + }, err +} + +// DeployBurnMintERC677 deploys a BurnMintERC677 contract, mints given amount ( if provided) to the owner address and returns the ERC20Token wrapper instance +func (e *CCIPContractsDeployer) DeployBurnMintERC677(ownerMintingAmount *big.Int) (*ERC677Token, error) { + address, _, instance, err := e.evmClient.DeployContract("Burn Mint ERC 677", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return burn_mint_erc677.DeployBurnMintERC677(auth, wrappers.MustNewWrappedContractBackend(e.evmClient, nil), "Test Token ERC677", "TERC677", 6, new(big.Int).Mul(big.NewInt(1e18), big.NewInt(1e9))) + }) + if err != nil { + return nil, err + } + + token := &ERC677Token{ + client: e.evmClient, + logger: e.logger, + ContractAddress: *address, + instance: instance.(*burn_mint_erc677.BurnMintERC677), + OwnerAddress: common.HexToAddress(e.evmClient.GetDefaultWallet().Address()), + OwnerWallet: e.evmClient.GetDefaultWallet(), + } + if ownerMintingAmount != nil { + // grant minter role to owner and mint 
tokens + err = token.GrantMintRole(common.HexToAddress(e.evmClient.GetDefaultWallet().Address())) + if err != nil { + return token, fmt.Errorf("granting minter role to owner shouldn't fail %w", err) + } + err = e.evmClient.WaitForEvents() + if err != nil { + return token, fmt.Errorf("error in waiting for granting mint role %w", err) + } + err = token.Mint(common.HexToAddress(e.evmClient.GetDefaultWallet().Address()), ownerMintingAmount) + if err != nil { + return token, fmt.Errorf("minting tokens shouldn't fail %w", err) + } + } + return token, err +} + +func (e *CCIPContractsDeployer) DeployERC20TokenContract(deployerFn blockchain.ContractDeployer) (*ERC20Token, error) { + address, _, _, err := e.evmClient.DeployContract("Custom ERC20 Token", deployerFn) + if err != nil { + return nil, err + } + err = e.evmClient.WaitForEvents() + if err != nil { + return nil, err + } + return e.NewERC20TokenContract(*address) +} + +func (e *CCIPContractsDeployer) NewLinkTokenContract(addr common.Address) (*LinkToken, error) { + token, err := link_token_interface.NewLinkToken(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + + if err != nil { + return nil, err + } + e.logger.Info(). + Str("Contract Address", addr.Hex()). + Str("Contract Name", "Link Token"). + Str("From", e.evmClient.GetDefaultWallet().Address()). + Str("Network Name", e.evmClient.GetNetworkConfig().Name). + Msg("New contract") + return &LinkToken{ + client: e.evmClient, + logger: e.logger, + instance: token, + EthAddress: addr, + }, err +} + +func (e *CCIPContractsDeployer) NewERC20TokenContract(addr common.Address) (*ERC20Token, error) { + token, err := erc20.NewERC20(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + + if err != nil { + return nil, err + } + e.logger.Info(). + Str("Contract Address", addr.Hex()). + Str("Contract Name", "ERC20 Token"). + Str("From", e.evmClient.GetDefaultWallet().Address()). + Str("Network Name", e.evmClient.GetNetworkConfig().Name). + Msg("New contract") + return &ERC20Token{ + client: e.evmClient, + logger: e.logger, + instance: token, + ContractAddress: addr, + OwnerAddress: common.HexToAddress(e.evmClient.GetDefaultWallet().Address()), + OwnerWallet: e.evmClient.GetDefaultWallet(), + }, err +} + +func (e *CCIPContractsDeployer) NewLockReleaseTokenPoolContract(addr common.Address) ( + *TokenPool, + error, +) { + version := VersionMap[TokenPoolContract] + e.logger.Info().Str("Version", version.String()).Msg("New LockRelease Token Pool") + switch version { + case Latest: + pool, err := lock_release_token_pool.NewLockReleaseTokenPool(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + + if err != nil { + return nil, err + } + e.logger.Info(). + Str("Contract Address", addr.Hex()). + Str("Contract Name", "Native Token Pool"). + Str("From", e.evmClient.GetDefaultWallet().Address()). + Str("Network Name", e.evmClient.GetNetworkConfig().Name). 
+ Msg("New contract") + poolInstance, err := token_pool.NewTokenPool(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + if err != nil { + return nil, err + } + return &TokenPool{ + client: e.evmClient, + logger: e.logger, + Instance: &TokenPoolWrapper{ + Latest: &LatestPool{ + PoolInterface: poolInstance, + LockReleasePool: pool, + }, + }, + EthAddress: addr, + OwnerAddress: common.HexToAddress(e.evmClient.GetDefaultWallet().Address()), + OwnerWallet: e.evmClient.GetDefaultWallet(), + }, err + case V1_4_0: + pool, err := lock_release_token_pool_1_4_0.NewLockReleaseTokenPool(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + if err != nil { + return nil, err + } + e.logger.Info(). + Str("Contract Address", addr.Hex()). + Str("Contract Name", "Native Token Pool"). + Str("From", e.evmClient.GetDefaultWallet().Address()). + Str("Network Name", e.evmClient.GetNetworkConfig().Name). + Msg("New contract") + poolInstance, err := token_pool_1_4_0.NewTokenPool(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + if err != nil { + return nil, err + } + return &TokenPool{ + client: e.evmClient, + logger: e.logger, + Instance: &TokenPoolWrapper{ + V1_4_0: &V1_4_0Pool{ + PoolInterface: poolInstance, + LockReleasePool: pool, + }, + }, + EthAddress: addr, + OwnerAddress: common.HexToAddress(e.evmClient.GetDefaultWallet().Address()), + OwnerWallet: e.evmClient.GetDefaultWallet(), + }, err + default: + return nil, fmt.Errorf("version not supported: %s", version) + } +} + +func (e *CCIPContractsDeployer) NewUSDCTokenPoolContract(addr common.Address) ( + *TokenPool, + error, +) { + version := VersionMap[TokenPoolContract] + e.logger.Info().Str("Version", version.String()).Msg("New USDC Token Pool") + switch version { + case Latest: + pool, err := usdc_token_pool.NewUSDCTokenPool(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + + if err != nil { + return nil, err + } + e.logger.Info(). + Str("Contract Address", addr.Hex()). + Str("Contract Name", "USDC Token Pool"). + Str("From", e.evmClient.GetDefaultWallet().Address()). + Str("Network Name", e.evmClient.GetNetworkConfig().Name). + Msg("New contract") + poolInterface, err := token_pool.NewTokenPool(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + if err != nil { + return nil, err + } + return &TokenPool{ + client: e.evmClient, + logger: e.logger, + Instance: &TokenPoolWrapper{ + Latest: &LatestPool{ + PoolInterface: poolInterface, + USDCPool: pool, + }, + }, + EthAddress: addr, + OwnerAddress: common.HexToAddress(e.evmClient.GetDefaultWallet().Address()), + OwnerWallet: e.evmClient.GetDefaultWallet(), + }, err + case V1_4_0: + pool, err := usdc_token_pool_1_4_0.NewUSDCTokenPool(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + + if err != nil { + return nil, err + } + e.logger.Info(). + Str("Contract Address", addr.Hex()). + Str("Contract Name", "USDC Token Pool"). + Str("From", e.evmClient.GetDefaultWallet().Address()). + Str("Network Name", e.evmClient.GetNetworkConfig().Name). 
+ Msg("New contract") + poolInterface, err := token_pool_1_4_0.NewTokenPool(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + if err != nil { + return nil, err + } + return &TokenPool{ + client: e.evmClient, + logger: e.logger, + Instance: &TokenPoolWrapper{ + V1_4_0: &V1_4_0Pool{ + PoolInterface: poolInterface, + USDCPool: pool, + }, + }, + EthAddress: addr, + OwnerAddress: common.HexToAddress(e.evmClient.GetDefaultWallet().Address()), + OwnerWallet: e.evmClient.GetDefaultWallet(), + }, err + default: + return nil, fmt.Errorf("version not supported: %s", version) + } + +} + +func (e *CCIPContractsDeployer) DeployUSDCTokenPoolContract(tokenAddr string, tokenMessenger, rmnProxy common.Address, router common.Address) ( + *TokenPool, + error, +) { + version := VersionMap[TokenPoolContract] + e.logger.Debug().Str("Token", tokenAddr).Msg("Deploying USDC token pool") + token := common.HexToAddress(tokenAddr) + switch version { + case Latest: + address, _, _, err := e.evmClient.DeployContract("USDC Token Pool", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return usdc_token_pool.DeployUSDCTokenPool( + auth, + wrappers.MustNewWrappedContractBackend(e.evmClient, nil), + tokenMessenger, + token, + []common.Address{}, + rmnProxy, + router, + ) + }) + + if err != nil { + return nil, err + } + return e.NewUSDCTokenPoolContract(*address) + case V1_4_0: + address, _, _, err := e.evmClient.DeployContract("USDC Token Pool", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return usdc_token_pool_1_4_0.DeployUSDCTokenPool( + auth, + wrappers.MustNewWrappedContractBackend(e.evmClient, nil), + tokenMessenger, + token, + []common.Address{}, + rmnProxy, + router, + ) + }) + + if err != nil { + return nil, err + } + return e.NewUSDCTokenPoolContract(*address) + default: + return nil, fmt.Errorf("version not supported: %s", version) + } +} + +func (e *CCIPContractsDeployer) DeployLockReleaseTokenPoolContract(tokenAddr string, rmnProxy common.Address, router common.Address) ( + *TokenPool, + error, +) { + version := VersionMap[TokenPoolContract] + e.logger.Info().Str("Version", version.String()).Msg("Deploying LockRelease Token Pool") + token := common.HexToAddress(tokenAddr) + switch version { + case Latest: + address, _, _, err := e.evmClient.DeployContract("LockRelease Token Pool", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return lock_release_token_pool.DeployLockReleaseTokenPool( + auth, + wrappers.MustNewWrappedContractBackend(e.evmClient, nil), + token, + []common.Address{}, + rmnProxy, + true, + router, + ) + }) + + if err != nil { + return nil, err + } + return e.NewLockReleaseTokenPoolContract(*address) + case V1_4_0: + address, _, _, err := e.evmClient.DeployContract("LockRelease Token Pool", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return lock_release_token_pool_1_4_0.DeployLockReleaseTokenPool( + auth, + wrappers.MustNewWrappedContractBackend(e.evmClient, nil), + token, + []common.Address{}, + rmnProxy, + true, + router, + ) + }) + + if err != nil { + return nil, err + } + return e.NewLockReleaseTokenPoolContract(*address) + default: + return nil, fmt.Errorf("version not supported: %s", version) + } +} + +func (e *CCIPContractsDeployer) DeployMockARMContract() 
(*common.Address, error) { + address, _, _, err := e.evmClient.DeployContract("Mock ARM Contract", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return mock_arm_contract.DeployMockARMContract(auth, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + }) + return address, err +} + +func (e *CCIPContractsDeployer) NewARMContract(addr common.Address) (*ARM, error) { + arm, err := arm_contract.NewARMContract(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + if err != nil { + return nil, err + } + e.logger.Info(). + Str("Contract Address", addr.Hex()). + Str("Contract Name", "Mock ARM Contract"). + Str("From", e.evmClient.GetDefaultWallet().Address()). + Str("Network Name", e.evmClient.GetNetworkConfig().Name). + Msg("New contract") + + return &ARM{ + client: e.evmClient, + Instance: arm, + EthAddress: addr, + }, err +} + +func (e *CCIPContractsDeployer) NewCommitStore(addr common.Address) ( + *CommitStore, + error, +) { + version := VersionMap[CommitStoreContract] + e.logger.Info().Str("Version", version.String()).Msg("New CommitStore") + switch version { + case Latest: + ins, err := commit_store.NewCommitStore(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + e.logger.Info(). + Str("Contract Address", addr.Hex()). + Str("Contract Name", "CommitStore"). + Str("From", e.evmClient.GetDefaultWallet().Address()). + Str("Network Name", e.evmClient.GetNetworkConfig().Name). + Msg("New contract") + return &CommitStore{ + client: e.evmClient, + logger: e.logger, + Instance: &CommitStoreWrapper{ + Latest: ins, + }, + EthAddress: addr, + }, err + case V1_2_0: + ins, err := commit_store_1_2_0.NewCommitStore(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + e.logger.Info(). + Str("Contract Address", addr.Hex()). + Str("Contract Name", "CommitStore"). + Str("From", e.evmClient.GetDefaultWallet().Address()). + Str("Network Name", e.evmClient.GetNetworkConfig().Name). 
+ Msg("New contract") + return &CommitStore{ + client: e.evmClient, + logger: e.logger, + Instance: &CommitStoreWrapper{ + V1_2_0: ins, + }, + EthAddress: addr, + }, err + default: + return nil, fmt.Errorf("version not supported: %s", version) + } +} + +func (e *CCIPContractsDeployer) DeployCommitStore(sourceChainSelector, destChainSelector uint64, onRamp common.Address, armProxy common.Address) (*CommitStore, error) { + version, ok := VersionMap[CommitStoreContract] + if !ok { + return nil, fmt.Errorf("versioning not supported: %s", version) + } + e.logger.Info().Str("Version", version.String()).Msg("Deploying CommitStore") + switch version { + case Latest: + address, _, instance, err := e.evmClient.DeployContract("CommitStore Contract", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return commit_store.DeployCommitStore( + auth, + wrappers.MustNewWrappedContractBackend(e.evmClient, nil), + commit_store.CommitStoreStaticConfig{ + ChainSelector: destChainSelector, + SourceChainSelector: sourceChainSelector, + OnRamp: onRamp, + RmnProxy: armProxy, + }, + ) + }) + if err != nil { + return nil, err + } + return &CommitStore{ + client: e.evmClient, + logger: e.logger, + Instance: &CommitStoreWrapper{ + Latest: instance.(*commit_store.CommitStore), + }, + EthAddress: *address, + }, err + case V1_2_0: + address, _, instance, err := e.evmClient.DeployContract("CommitStore Contract", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return commit_store_1_2_0.DeployCommitStore( + auth, + wrappers.MustNewWrappedContractBackend(e.evmClient, nil), + commit_store_1_2_0.CommitStoreStaticConfig{ + ChainSelector: destChainSelector, + SourceChainSelector: sourceChainSelector, + OnRamp: onRamp, + ArmProxy: armProxy, + }, + ) + }) + if err != nil { + return nil, err + } + return &CommitStore{ + client: e.evmClient, + logger: e.logger, + Instance: &CommitStoreWrapper{ + V1_2_0: instance.(*commit_store_1_2_0.CommitStore), + }, + EthAddress: *address, + }, err + default: + return nil, fmt.Errorf("version not supported: %s", version) + } +} + +func (e *CCIPContractsDeployer) DeployReceiverDapp(revert bool) ( + *ReceiverDapp, + error, +) { + address, _, instance, err := e.evmClient.DeployContract("ReceiverDapp", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return maybe_revert_message_receiver.DeployMaybeRevertMessageReceiver(auth, wrappers.MustNewWrappedContractBackend(e.evmClient, nil), revert) + }) + if err != nil { + return nil, err + } + return &ReceiverDapp{ + client: e.evmClient, + logger: e.logger, + instance: instance.(*maybe_revert_message_receiver.MaybeRevertMessageReceiver), + EthAddress: *address, + }, err +} + +func (e *CCIPContractsDeployer) NewReceiverDapp(addr common.Address) ( + *ReceiverDapp, + error, +) { + ins, err := maybe_revert_message_receiver.NewMaybeRevertMessageReceiver(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + e.logger.Info(). + Str("Contract Address", addr.Hex()). + Str("Contract Name", "ReceiverDapp"). + Str("From", e.evmClient.GetDefaultWallet().Address()). + Str("Network Name", e.evmClient.GetNetworkConfig().Name). 
+ Msg("New contract") + return &ReceiverDapp{ + client: e.evmClient, + logger: e.logger, + instance: ins, + EthAddress: addr, + }, err +} + +func (e *CCIPContractsDeployer) DeployRouter(wrappedNative common.Address, armAddress common.Address) ( + *Router, + error, +) { + address, _, instance, err := e.evmClient.DeployContract("Router", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return router.DeployRouter(auth, wrappers.MustNewWrappedContractBackend(e.evmClient, nil), wrappedNative, armAddress) + }) + if err != nil { + return nil, err + } + return &Router{ + client: e.evmClient, + logger: e.logger, + Instance: instance.(*router.Router), + EthAddress: *address, + }, err +} + +func (e *CCIPContractsDeployer) NewRouter(addr common.Address) ( + *Router, + error, +) { + r, err := router.NewRouter(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + e.logger.Info(). + Str("Contract Address", addr.Hex()). + Str("Contract Name", "Router"). + Str("From", e.evmClient.GetDefaultWallet().Address()). + Str("Network Name", e.evmClient.GetNetworkConfig().Name). + Msg("New contract") + if err != nil { + return nil, err + } + return &Router{ + client: e.evmClient, + logger: e.logger, + Instance: r, + EthAddress: addr, + }, err +} + +func (e *CCIPContractsDeployer) NewPriceRegistry(addr common.Address) ( + *PriceRegistry, + error, +) { + var wrapper *PriceRegistryWrapper + version := VersionMap[PriceRegistryContract] + e.logger.Info().Str("Version", version.String()).Msg("New PriceRegistry") + switch version { + case Latest: + ins, err := price_registry_1_2_0.NewPriceRegistry(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + if err != nil { + return nil, fmt.Errorf("error in creating price registry instance: %w", err) + } + wrapper = &PriceRegistryWrapper{ + V1_2_0: ins, + } + case V1_2_0: + ins, err := price_registry_1_2_0.NewPriceRegistry(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + if err != nil { + return nil, fmt.Errorf("error in creating price registry instance: %w", err) + } + wrapper = &PriceRegistryWrapper{ + V1_2_0: ins, + } + default: + return nil, fmt.Errorf("version not supported: %s", version) + } + e.logger.Info(). + Str("Contract Address", addr.Hex()). + Str("Contract Name", "PriceRegistry"). + Str("From", e.evmClient.GetDefaultWallet().Address()). + Str("Network Name", e.evmClient.GetNetworkConfig().Name). 
+ Msg("New contract") + return &PriceRegistry{ + client: e.evmClient, + logger: e.logger, + Instance: wrapper, + EthAddress: addr, + }, nil +} + +func (e *CCIPContractsDeployer) DeployPriceRegistry(tokens []common.Address) (*PriceRegistry, error) { + var address *common.Address + var wrapper *PriceRegistryWrapper + var err error + var instance interface{} + version := VersionMap[PriceRegistryContract] + e.logger.Info().Str("Version", version.String()).Msg("Deploying PriceRegistry") + switch version { + case Latest: + address, _, instance, err = e.evmClient.DeployContract("PriceRegistry", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return price_registry_1_2_0.DeployPriceRegistry(auth, wrappers.MustNewWrappedContractBackend(e.evmClient, nil), nil, tokens, 60*60*24*14) + }) + if err != nil { + return nil, err + } + wrapper = &PriceRegistryWrapper{ + V1_2_0: instance.(*price_registry_1_2_0.PriceRegistry), + } + case V1_2_0: + address, _, instance, err = e.evmClient.DeployContract("PriceRegistry", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return price_registry_1_2_0.DeployPriceRegistry(auth, wrappers.MustNewWrappedContractBackend(e.evmClient, nil), nil, tokens, 60*60*24*14) + }) + if err != nil { + return nil, err + } + wrapper = &PriceRegistryWrapper{ + V1_2_0: instance.(*price_registry_1_2_0.PriceRegistry), + } + default: + return nil, fmt.Errorf("version not supported: %s", version) + } + reg := &PriceRegistry{ + client: e.evmClient, + logger: e.logger, + EthAddress: *address, + Instance: wrapper, + } + return reg, err +} + +func (e *CCIPContractsDeployer) DeployTokenAdminRegistry() (*TokenAdminRegistry, error) { + address, _, instance, err := e.evmClient.DeployContract("TokenAdminRegistry", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return token_admin_registry.DeployTokenAdminRegistry(auth, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + }) + if err != nil { + return nil, err + } + return &TokenAdminRegistry{ + client: e.evmClient, + logger: e.logger, + Instance: instance.(*token_admin_registry.TokenAdminRegistry), + EthAddress: *address, + }, err +} + +func (e *CCIPContractsDeployer) NewTokenAdminRegistry(addr common.Address) ( + *TokenAdminRegistry, + error, +) { + ins, err := token_admin_registry.NewTokenAdminRegistry(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + e.logger.Info(). + Str("Contract Address", addr.Hex()). + Str("Contract Name", "TokenAdminRegistry"). + Str("From", e.evmClient.GetDefaultWallet().Address()). + Str("Network Name", e.evmClient.GetNetworkConfig().Name). + Msg("New contract") + return &TokenAdminRegistry{ + client: e.evmClient, + logger: e.logger, + Instance: ins, + EthAddress: addr, + }, err +} + +func (e *CCIPContractsDeployer) NewOnRamp(addr common.Address) ( + *OnRamp, + error, +) { + version := VersionMap[OnRampContract] + e.logger.Info().Str("Version", version.String()).Msg("New OnRamp") + e.logger.Info(). + Str("Contract Address", addr.Hex()). + Str("Contract Name", "OnRamp"). + Str("From", e.evmClient.GetDefaultWallet().Address()). + Str("Network Name", e.evmClient.GetNetworkConfig().Name). 
+ Msg("New contract") + switch version { + case V1_2_0: + ins, err := evm_2_evm_onramp_1_2_0.NewEVM2EVMOnRamp(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + if err != nil { + return nil, err + } + return &OnRamp{ + client: e.evmClient, + logger: e.logger, + Instance: &OnRampWrapper{V1_2_0: ins}, + EthAddress: addr, + }, err + case Latest: + ins, err := evm_2_evm_onramp.NewEVM2EVMOnRamp(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + if err != nil { + return nil, err + } + return &OnRamp{ + client: e.evmClient, + logger: e.logger, + Instance: &OnRampWrapper{Latest: ins}, + EthAddress: addr, + }, nil + default: + return nil, fmt.Errorf("version not supported: %s", version) + } +} + +func (e *CCIPContractsDeployer) DeployOnRamp( + sourceChainSelector, destChainSelector uint64, + tokensAndPools []evm_2_evm_onramp_1_2_0.InternalPoolUpdate, + rmn, + router, + priceRegistry, + tokenAdminRegistry common.Address, + opts RateLimiterConfig, + feeTokenConfig []evm_2_evm_onramp.EVM2EVMOnRampFeeTokenConfigArgs, + tokenTransferFeeConfig []evm_2_evm_onramp.EVM2EVMOnRampTokenTransferFeeConfigArgs, + linkTokenAddress common.Address, +) (*OnRamp, error) { + version := VersionMap[OnRampContract] + e.logger.Info().Str("Version", version.String()).Msg("Deploying OnRamp") + switch version { + case V1_2_0: + feeTokenConfigV1_2_0 := make([]evm_2_evm_onramp_1_2_0.EVM2EVMOnRampFeeTokenConfigArgs, len(feeTokenConfig)) + for i, f := range feeTokenConfig { + feeTokenConfigV1_2_0[i] = evm_2_evm_onramp_1_2_0.EVM2EVMOnRampFeeTokenConfigArgs{ + Token: f.Token, + NetworkFeeUSDCents: f.NetworkFeeUSDCents, + GasMultiplierWeiPerEth: f.GasMultiplierWeiPerEth, + PremiumMultiplierWeiPerEth: f.PremiumMultiplierWeiPerEth, + Enabled: f.Enabled, + } + } + tokenTransferFeeConfigV1_2_0 := make([]evm_2_evm_onramp_1_2_0.EVM2EVMOnRampTokenTransferFeeConfigArgs, len(tokenTransferFeeConfig)) + for i, f := range tokenTransferFeeConfig { + tokenTransferFeeConfigV1_2_0[i] = evm_2_evm_onramp_1_2_0.EVM2EVMOnRampTokenTransferFeeConfigArgs{ + Token: f.Token, + MinFeeUSDCents: f.MinFeeUSDCents, + MaxFeeUSDCents: f.MaxFeeUSDCents, + DeciBps: f.DeciBps, + DestGasOverhead: f.DestGasOverhead, + DestBytesOverhead: f.DestBytesOverhead, + } + } + address, _, instance, err := e.evmClient.DeployContract("OnRamp", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return evm_2_evm_onramp_1_2_0.DeployEVM2EVMOnRamp( + auth, + wrappers.MustNewWrappedContractBackend(e.evmClient, nil), + evm_2_evm_onramp_1_2_0.EVM2EVMOnRampStaticConfig{ + LinkToken: linkTokenAddress, + ChainSelector: sourceChainSelector, // source chain id + DestChainSelector: destChainSelector, // destinationChainSelector + DefaultTxGasLimit: 200_000, + MaxNopFeesJuels: big.NewInt(0).Mul(big.NewInt(100_000_000), big.NewInt(1e18)), + PrevOnRamp: common.HexToAddress(""), + ArmProxy: rmn, + }, + evm_2_evm_onramp_1_2_0.EVM2EVMOnRampDynamicConfig{ + Router: router, + MaxNumberOfTokensPerMsg: 50, + DestGasOverhead: 350_000, + DestGasPerPayloadByte: 16, + DestDataAvailabilityOverheadGas: 33_596, + DestGasPerDataAvailabilityByte: 16, + DestDataAvailabilityMultiplierBps: 6840, // 0.684 + PriceRegistry: priceRegistry, + MaxDataBytes: 50000, + MaxPerMsgGasLimit: 4_000_000, + }, + tokensAndPools, + evm_2_evm_onramp_1_2_0.RateLimiterConfig{ + Capacity: opts.Capacity, + Rate: opts.Rate, + }, + feeTokenConfigV1_2_0, + tokenTransferFeeConfigV1_2_0, + []evm_2_evm_onramp_1_2_0.EVM2EVMOnRampNopAndWeight{}, 
+ ) + }) + if err != nil { + return nil, err + } + return &OnRamp{ + client: e.evmClient, + logger: e.logger, + Instance: &OnRampWrapper{ + V1_2_0: instance.(*evm_2_evm_onramp_1_2_0.EVM2EVMOnRamp), + }, + EthAddress: *address, + }, nil + case Latest: + address, _, instance, err := e.evmClient.DeployContract("OnRamp", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return evm_2_evm_onramp.DeployEVM2EVMOnRamp( + auth, + wrappers.MustNewWrappedContractBackend(e.evmClient, nil), + evm_2_evm_onramp.EVM2EVMOnRampStaticConfig{ + LinkToken: linkTokenAddress, + ChainSelector: sourceChainSelector, // source chain id + DestChainSelector: destChainSelector, // destinationChainSelector + DefaultTxGasLimit: 200_000, + MaxNopFeesJuels: big.NewInt(0).Mul(big.NewInt(100_000_000), big.NewInt(1e18)), + PrevOnRamp: common.HexToAddress(""), + RmnProxy: rmn, + TokenAdminRegistry: tokenAdminRegistry, + }, + evm_2_evm_onramp.EVM2EVMOnRampDynamicConfig{ + Router: router, + MaxNumberOfTokensPerMsg: 50, + DestGasOverhead: 350_000, + DestGasPerPayloadByte: 16, + DestDataAvailabilityOverheadGas: 33_596, + DestGasPerDataAvailabilityByte: 16, + DestDataAvailabilityMultiplierBps: 6840, // 0.684 + PriceRegistry: priceRegistry, + MaxDataBytes: 50000, + MaxPerMsgGasLimit: 4_000_000, + DefaultTokenFeeUSDCents: 50, + DefaultTokenDestGasOverhead: 125_000, + DefaultTokenDestBytesOverhead: 500, + }, + evm_2_evm_onramp.RateLimiterConfig{ + Capacity: opts.Capacity, + Rate: opts.Rate, + }, + feeTokenConfig, + tokenTransferFeeConfig, + []evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight{}, + ) + }) + if err != nil { + return nil, err + } + return &OnRamp{ + client: e.evmClient, + logger: e.logger, + Instance: &OnRampWrapper{ + Latest: instance.(*evm_2_evm_onramp.EVM2EVMOnRamp), + }, + EthAddress: *address, + }, err + default: + return nil, fmt.Errorf("version not supported: %s", version) + } +} + +func (e *CCIPContractsDeployer) NewOffRamp(addr common.Address) ( + *OffRamp, + error, +) { + version := VersionMap[OffRampContract] + e.logger.Info().Str("Version", version.String()).Msg("New OffRamp") + switch version { + case V1_2_0: + ins, err := evm_2_evm_offramp_1_2_0.NewEVM2EVMOffRamp(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + if err != nil { + return nil, err + } + e.logger.Info(). + Str("Contract Address", addr.Hex()). + Str("Contract Name", "OffRamp"). + Str("From", e.evmClient.GetDefaultWallet().Address()). + Str("Network Name", e.evmClient.GetNetworkConfig().Name). + Msg("New contract") + return &OffRamp{ + client: e.evmClient, + logger: e.logger, + Instance: &OffRampWrapper{V1_2_0: ins}, + EthAddress: addr, + }, err + case Latest: + ins, err := evm_2_evm_offramp.NewEVM2EVMOffRamp(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + if err != nil { + return nil, err + } + e.logger.Info(). + Str("Contract Address", addr.Hex()). + Str("Contract Name", "OffRamp"). + Str("From", e.evmClient.GetDefaultWallet().Address()). + Str("Network Name", e.evmClient.GetNetworkConfig().Name). 
+ Msg("New contract") + return &OffRamp{ + client: e.evmClient, + logger: e.logger, + Instance: &OffRampWrapper{Latest: ins}, + EthAddress: addr, + }, err + default: + return nil, fmt.Errorf("version not supported: %s", version) + } +} + +func (e *CCIPContractsDeployer) DeployOffRamp( + sourceChainSelector, destChainSelector uint64, + commitStore, onRamp common.Address, + opts RateLimiterConfig, + sourceTokens, pools []common.Address, + rmnProxy common.Address, + tokenAdminRegistry common.Address, +) (*OffRamp, error) { + version := VersionMap[OffRampContract] + e.logger.Info().Str("Version", version.String()).Msg("Deploying OffRamp") + switch version { + case V1_2_0: + address, _, instance, err := e.evmClient.DeployContract("OffRamp Contract", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return evm_2_evm_offramp_1_2_0.DeployEVM2EVMOffRamp( + auth, + wrappers.MustNewWrappedContractBackend(e.evmClient, nil), + evm_2_evm_offramp_1_2_0.EVM2EVMOffRampStaticConfig{ + CommitStore: commitStore, + ChainSelector: destChainSelector, + SourceChainSelector: sourceChainSelector, + OnRamp: onRamp, + PrevOffRamp: common.Address{}, + ArmProxy: rmnProxy, + }, + sourceTokens, + pools, + evm_2_evm_offramp_1_2_0.RateLimiterConfig{ + IsEnabled: true, + Capacity: opts.Capacity, + Rate: opts.Rate, + }, + ) + }) + if err != nil { + return nil, err + } + return &OffRamp{ + client: e.evmClient, + logger: e.logger, + Instance: &OffRampWrapper{ + V1_2_0: instance.(*evm_2_evm_offramp_1_2_0.EVM2EVMOffRamp), + }, + EthAddress: *address, + }, err + case Latest: + staticConfig := evm_2_evm_offramp.EVM2EVMOffRampStaticConfig{ + CommitStore: commitStore, + ChainSelector: destChainSelector, + SourceChainSelector: sourceChainSelector, + OnRamp: onRamp, + PrevOffRamp: common.Address{}, + RmnProxy: rmnProxy, + TokenAdminRegistry: tokenAdminRegistry, + } + address, _, instance, err := e.evmClient.DeployContract("OffRamp Contract", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return evm_2_evm_offramp.DeployEVM2EVMOffRamp( + auth, + wrappers.MustNewWrappedContractBackend(e.evmClient, nil), + staticConfig, + evm_2_evm_offramp.RateLimiterConfig{ + IsEnabled: true, + Capacity: opts.Capacity, + Rate: opts.Rate, + }, + ) + }) + e.logger.Info().Msg(fmt.Sprintf("deploying offramp with static config: %+v", staticConfig)) + + if err != nil { + return nil, err + } + return &OffRamp{ + client: e.evmClient, + logger: e.logger, + Instance: &OffRampWrapper{ + Latest: instance.(*evm_2_evm_offramp.EVM2EVMOffRamp), + }, + EthAddress: *address, + }, err + default: + return nil, fmt.Errorf("version not supported: %s", version) + } +} + +func (e *CCIPContractsDeployer) DeployWrappedNative() (*common.Address, error) { + address, _, _, err := e.evmClient.DeployContract("WrappedNative", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return weth9.DeployWETH9(auth, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + }) + if err != nil { + return nil, err + } + return address, err +} + +func (e *CCIPContractsDeployer) DeployMockAggregator(decimals uint8, initialAns *big.Int) (*MockAggregator, error) { + address, _, instance, err := e.evmClient.DeployContract("MockAggregator", func( + auth *bind.TransactOpts, + _ bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return 
mock_v3_aggregator_contract.DeployMockV3Aggregator(auth, wrappers.MustNewWrappedContractBackend(e.evmClient, nil), decimals, initialAns) + }) + if err != nil { + return nil, fmt.Errorf("deploying mock aggregator: %w", err) + } + e.logger.Info(). + Str("Contract Address", address.Hex()). + Str("Contract Name", "MockAggregator"). + Str("From", e.evmClient.GetDefaultWallet().Address()). + Str("Network Name", e.evmClient.GetNetworkConfig().Name). + Msg("New contract") + return &MockAggregator{ + client: e.evmClient, + logger: e.logger, + Instance: instance.(*mock_v3_aggregator_contract.MockV3Aggregator), + ContractAddress: *address, + }, nil +} + +func (e *CCIPContractsDeployer) NewMockAggregator(addr common.Address) (*MockAggregator, error) { + ins, err := mock_v3_aggregator_contract.NewMockV3Aggregator(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + if err != nil { + return nil, fmt.Errorf("creating mock aggregator: %w", err) + } + return &MockAggregator{ + client: e.evmClient, + logger: e.logger, + Instance: ins, + ContractAddress: addr, + }, nil +} + +func (e *CCIPContractsDeployer) TypeAndVersion(addr common.Address) (string, error) { + tv, err := type_and_version.NewTypeAndVersionInterface(addr, wrappers.MustNewWrappedContractBackend(e.evmClient, nil)) + if err != nil { + return "", err + } + tvStr, err := tv.TypeAndVersion(nil) + if err != nil { + return "", fmt.Errorf("error calling typeAndVersion on addr: %s %w", addr.Hex(), err) + } + e.logger.Info(). + Str("TypeAndVersion", tvStr). + Str("Contract Address", addr.Hex()). + Msg("TypeAndVersion") + + _, versionStr, err := ccipconfig.ParseTypeAndVersion(tvStr) + if err != nil { + return versionStr, err + } + v, err := semver.NewVersion(versionStr) + if err != nil { + return "", fmt.Errorf("failed parsing version %s: %w", versionStr, err) + } + return v.String(), nil +} + +// OCR2ParamsForCommit and OCR2ParamsForExec - +// These functions return the default OCR2 parameters for Commit and Exec respectively. +// Refer to CommitOCRParams and ExecOCRParams in CCIPTestConfig located in testconfig/ccip.go to override these values with custom param values. 
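+// The block time argument selects between two built-in profiles: chains with a
+// block time of 10 seconds or more get the slower-cadence parameters, while
+// faster chains get the tighter-cadence parameters.
+//
+// Illustrative usage only (the 12s and 2s block times and variable names below
+// are assumptions for the example, not values defined in this file):
+//
+//	commitOCRParams := OCR2ParamsForCommit(12 * time.Second) // slow chains, e.g. Ethereum
+//	execOCRParams := OCR2ParamsForExec(2 * time.Second)      // fast chains, e.g. Avalanche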
+func OCR2ParamsForCommit(blockTime time.Duration) contracts.OffChainAggregatorV2Config { + // slow blocktime chains like Ethereum + if blockTime >= 10*time.Second { + return contracts.OffChainAggregatorV2Config{ + DeltaProgress: 2 * time.Minute, + DeltaResend: 5 * time.Second, + DeltaRound: 90 * time.Second, + DeltaGrace: 5 * time.Second, + DeltaStage: 60 * time.Second, + MaxDurationQuery: 100 * time.Millisecond, + MaxDurationObservation: 35 * time.Second, + MaxDurationReport: 10 * time.Second, + MaxDurationShouldAcceptFinalizedReport: 5 * time.Second, + MaxDurationShouldTransmitAcceptedReport: 10 * time.Second, + } + } + // fast blocktime chains like Avalanche + return contracts.OffChainAggregatorV2Config{ + DeltaProgress: 2 * time.Minute, + DeltaResend: 5 * time.Second, + DeltaRound: 60 * time.Second, + DeltaGrace: 5 * time.Second, + DeltaStage: 25 * time.Second, + MaxDurationQuery: 100 * time.Millisecond, + MaxDurationObservation: 35 * time.Second, + MaxDurationReport: 10 * time.Second, + MaxDurationShouldAcceptFinalizedReport: 5 * time.Second, + MaxDurationShouldTransmitAcceptedReport: 10 * time.Second, + } +} + +func OCR2ParamsForExec(blockTime time.Duration) contracts.OffChainAggregatorV2Config { + // slow blocktime chains like Ethereum + if blockTime >= 10*time.Second { + return contracts.OffChainAggregatorV2Config{ + DeltaProgress: 2 * time.Minute, + DeltaResend: 5 * time.Second, + DeltaRound: 90 * time.Second, + DeltaGrace: 5 * time.Second, + DeltaStage: 60 * time.Second, + MaxDurationQuery: 100 * time.Millisecond, + MaxDurationObservation: 35 * time.Second, + MaxDurationReport: 10 * time.Second, + MaxDurationShouldAcceptFinalizedReport: 5 * time.Second, + MaxDurationShouldTransmitAcceptedReport: 10 * time.Second, + } + } + // fast blocktime chains like Avalanche + return contracts.OffChainAggregatorV2Config{ + DeltaProgress: 120 * time.Second, + DeltaResend: 5 * time.Second, + DeltaRound: 30 * time.Second, + DeltaGrace: 5 * time.Second, + DeltaStage: 10 * time.Second, + MaxDurationQuery: 100 * time.Millisecond, + MaxDurationObservation: 35 * time.Second, + MaxDurationReport: 10 * time.Second, + MaxDurationShouldAcceptFinalizedReport: 5 * time.Second, + MaxDurationShouldTransmitAcceptedReport: 10 * time.Second, + } +} + +func OffChainAggregatorV2ConfigWithNodes(numberNodes int, inflightExpiry time.Duration, cfg contracts.OffChainAggregatorV2Config) contracts.OffChainAggregatorV2Config { + if numberNodes <= 4 { + log.Err(fmt.Errorf("insufficient number of nodes (%d) supplied for OCR, need at least 5", numberNodes)). + Int("Number Chainlink Nodes", numberNodes). 
+ Msg("You likely need more chainlink nodes to properly configure OCR, try 5 or more.") + } + s := make([]int, 0) + for i := 0; i < numberNodes; i++ { + s = append(s, 1) + } + faultyNodes := 0 + if numberNodes > 1 { + faultyNodes = (numberNodes - 1) / 3 + } + if faultyNodes == 0 { + faultyNodes = 1 + } + if cfg.DeltaStage == 0 { + cfg.DeltaStage = inflightExpiry + } + return contracts.OffChainAggregatorV2Config{ + DeltaProgress: cfg.DeltaProgress, + DeltaResend: cfg.DeltaResend, + DeltaRound: cfg.DeltaRound, + DeltaGrace: cfg.DeltaGrace, + DeltaStage: cfg.DeltaStage, + RMax: 3, + S: s, + F: faultyNodes, + Oracles: []ocrconfighelper2.OracleIdentityExtra{}, + MaxDurationQuery: cfg.MaxDurationQuery, + MaxDurationObservation: cfg.MaxDurationObservation, + MaxDurationReport: cfg.MaxDurationReport, + MaxDurationShouldAcceptFinalizedReport: cfg.MaxDurationShouldAcceptFinalizedReport, + MaxDurationShouldTransmitAcceptedReport: cfg.MaxDurationShouldTransmitAcceptedReport, + OnchainConfig: []byte{}, + } +} + +func stripKeyPrefix(key string) string { + chunks := strings.Split(key, "_") + if len(chunks) == 3 { + return chunks[2] + } + return key +} + +func NewCommitOffchainConfig( + GasPriceHeartBeat config.Duration, + DAGasPriceDeviationPPB uint32, + ExecGasPriceDeviationPPB uint32, + TokenPriceHeartBeat config.Duration, + TokenPriceDeviationPPB uint32, + InflightCacheExpiry config.Duration, + _ bool, // TODO: priceReportingDisabled added after this merge +) (ccipconfig.OffchainConfig, error) { + switch VersionMap[CommitStoreContract] { + case Latest: + return testhelpers.NewCommitOffchainConfig( + GasPriceHeartBeat, + DAGasPriceDeviationPPB, + ExecGasPriceDeviationPPB, + TokenPriceHeartBeat, + TokenPriceDeviationPPB, + InflightCacheExpiry, + ), nil + case V1_2_0: + return testhelpers_1_4_0.NewCommitOffchainConfig( + GasPriceHeartBeat, + DAGasPriceDeviationPPB, + ExecGasPriceDeviationPPB, + TokenPriceHeartBeat, + TokenPriceDeviationPPB, + InflightCacheExpiry, + ), nil + default: + return nil, fmt.Errorf("version not supported: %s", VersionMap[CommitStoreContract]) + } +} + +func NewCommitOnchainConfig( + PriceRegistry common.Address, +) (abihelpers.AbiDefined, error) { + switch VersionMap[CommitStoreContract] { + case Latest: + return testhelpers.NewCommitOnchainConfig(PriceRegistry), nil + case V1_2_0: + return testhelpers_1_4_0.NewCommitOnchainConfig(PriceRegistry), nil + default: + return nil, fmt.Errorf("version not supported: %s", VersionMap[CommitStoreContract]) + } +} + +func NewExecOnchainConfig( + PermissionLessExecutionThresholdSeconds uint32, + Router common.Address, + PriceRegistry common.Address, + MaxNumberOfTokensPerMsg uint16, + MaxDataBytes uint32, + MaxPoolReleaseOrMintGas uint32, +) (abihelpers.AbiDefined, error) { + switch VersionMap[OffRampContract] { + case Latest: + return testhelpers.NewExecOnchainConfig( + PermissionLessExecutionThresholdSeconds, + Router, + PriceRegistry, + MaxNumberOfTokensPerMsg, + MaxDataBytes, + MaxPoolReleaseOrMintGas, // TODO: obsolete soon after this merge + 50_000, // TODO: MaxTokenTransferGas, obsolete soon after this merge + ), nil + case V1_2_0: + return testhelpers_1_4_0.NewExecOnchainConfig( + PermissionLessExecutionThresholdSeconds, + Router, + PriceRegistry, + MaxNumberOfTokensPerMsg, + MaxDataBytes, + MaxPoolReleaseOrMintGas, + ), nil + default: + return nil, fmt.Errorf("version not supported: %s", VersionMap[OffRampContract]) + } +} + +func NewExecOffchainConfig( + destOptimisticConfirmations uint32, + batchGasLimit uint32, + 
relativeBoostPerWaitHour float64, + inflightCacheExpiry config.Duration, + rootSnoozeTime config.Duration, +) (ccipconfig.OffchainConfig, error) { + switch VersionMap[OffRampContract] { + case Latest: + return testhelpers.NewExecOffchainConfig( + destOptimisticConfirmations, + batchGasLimit, + relativeBoostPerWaitHour, + inflightCacheExpiry, + rootSnoozeTime, + ), nil + case V1_2_0: + return testhelpers_1_4_0.NewExecOffchainConfig( + destOptimisticConfirmations, + batchGasLimit, + relativeBoostPerWaitHour, + inflightCacheExpiry, + rootSnoozeTime, + ), nil + default: + return nil, fmt.Errorf("version not supported: %s", VersionMap[OffRampContract]) + } +} + +func NewOffChainAggregatorV2ConfigForCCIPPlugin[T ccipconfig.OffchainConfig]( + nodes []*client.CLNodesWithKeys, + offchainCfg T, + onchainCfg abihelpers.AbiDefined, + ocr2Params contracts.OffChainAggregatorV2Config, + inflightExpiry time.Duration, +) ( + signers []common.Address, + transmitters []common.Address, + f_ uint8, + onchainConfig_ []byte, + offchainConfigVersion uint64, + offchainConfig []byte, + err error, +) { + oracleIdentities := make([]ocrconfighelper2.OracleIdentityExtra, 0) + ocrConfig := OffChainAggregatorV2ConfigWithNodes(len(nodes), inflightExpiry, ocr2Params) + var onChainKeys []ocrtypes2.OnchainPublicKey + for i, nodeWithKeys := range nodes { + ocr2Key := nodeWithKeys.KeysBundle.OCR2Key.Data + offChainPubKeyTemp, err := hex.DecodeString(stripKeyPrefix(ocr2Key.Attributes.OffChainPublicKey)) + if err != nil { + return nil, nil, 0, nil, 0, nil, err + } + formattedOnChainPubKey := stripKeyPrefix(ocr2Key.Attributes.OnChainPublicKey) + cfgPubKeyTemp, err := hex.DecodeString(stripKeyPrefix(ocr2Key.Attributes.ConfigPublicKey)) + if err != nil { + return nil, nil, 0, nil, 0, nil, err + } + cfgPubKeyBytes := [ed25519.PublicKeySize]byte{} + copy(cfgPubKeyBytes[:], cfgPubKeyTemp) + offChainPubKey := [curve25519.PointSize]byte{} + copy(offChainPubKey[:], offChainPubKeyTemp) + ethAddress := nodeWithKeys.KeysBundle.EthAddress + p2pKeys := nodeWithKeys.KeysBundle.P2PKeys + peerID := p2pKeys.Data[0].Attributes.PeerID + oracleIdentities = append(oracleIdentities, ocrconfighelper2.OracleIdentityExtra{ + OracleIdentity: ocrconfighelper2.OracleIdentity{ + OffchainPublicKey: offChainPubKey, + OnchainPublicKey: common.HexToAddress(formattedOnChainPubKey).Bytes(), + PeerID: peerID, + TransmitAccount: ocrtypes2.Account(ethAddress), + }, + ConfigEncryptionPublicKey: cfgPubKeyBytes, + }) + onChainKeys = append(onChainKeys, oracleIdentities[i].OnchainPublicKey) + transmitters = append(transmitters, common.HexToAddress(ethAddress)) + } + signers, err = evm.OnchainPublicKeyToAddress(onChainKeys) + if err != nil { + return nil, nil, 0, nil, 0, nil, err + } + ocrConfig.Oracles = oracleIdentities + ocrConfig.ReportingPluginConfig, err = ccipconfig.EncodeOffchainConfig(offchainCfg) + if err != nil { + return nil, nil, 0, nil, 0, nil, err + } + ocrConfig.OnchainConfig, err = abihelpers.EncodeAbiStruct(onchainCfg) + if err != nil { + return nil, nil, 0, nil, 0, nil, err + } + + _, _, f_, onchainConfig_, offchainConfigVersion, offchainConfig, err = ocrconfighelper2.ContractSetConfigArgsForTests( + ocrConfig.DeltaProgress, + ocrConfig.DeltaResend, + ocrConfig.DeltaRound, + ocrConfig.DeltaGrace, + ocrConfig.DeltaStage, + ocrConfig.RMax, + ocrConfig.S, + ocrConfig.Oracles, + ocrConfig.ReportingPluginConfig, + ocrConfig.MaxDurationQuery, + ocrConfig.MaxDurationObservation, + ocrConfig.MaxDurationReport, + ocrConfig.MaxDurationShouldAcceptFinalizedReport, 
+ ocrConfig.MaxDurationShouldTransmitAcceptedReport, + ocrConfig.F, + ocrConfig.OnchainConfig, + ) + return +} diff --git a/integration-tests/ccip-tests/contracts/contract_models.go b/integration-tests/ccip-tests/contracts/contract_models.go new file mode 100644 index 00000000000..7008d51b622 --- /dev/null +++ b/integration-tests/ccip-tests/contracts/contract_models.go @@ -0,0 +1,2247 @@ +package contracts + +import ( + "context" + "fmt" + "math/big" + "strconv" + "strings" + "time" + + "github.com/AlekSi/pointer" + "github.com/Masterminds/semver/v3" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/rs/zerolog" + "golang.org/x/exp/rand" + + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + + "github.com/smartcontractkit/chainlink/integration-tests/wrappers" + + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/arm_contract" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/lock_release_token_pool" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/lock_release_token_pool_1_4_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/maybe_revert_message_receiver" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/mock_arm_contract" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/mock_usdc_token_transmitter" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/mock_v3_aggregator_contract" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/token_admin_registry" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/token_pool" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/token_pool_1_4_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/usdc_token_pool" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/usdc_token_pool_1_4_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/link_token_interface" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/shared/generated/burn_mint_erc677" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/shared/generated/erc20" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" +) + +type LogInfo struct { + BlockNumber uint64 + TxHash common.Hash +} + +// Name denotes a contract name +type Name string + +// Version wraps a semver.Version object to provide some custom 
unmarshalling +type Version struct { + semver.Version +} + +// GasUpdateEvent holds the event details of Gas price update +type GasUpdateEvent struct { + Sender string + Tx string + Value *big.Int + DestChain uint64 + Source string +} + +// MustVersion creates a new Version object from a semver string and panics if it fails +func MustVersion(version string) Version { + v := semver.MustParse(version) + return Version{Version: *v} +} + +// UnmarshalTOML unmarshals TOML data into a Version object +func (v *Version) UnmarshalText(data []byte) error { + str := strings.Trim(string(data), `"`) + str = strings.Trim(str, `'`) + if strings.ToLower(str) == "latest" { + *v = Latest + return nil + } + ver, err := semver.NewVersion(str) + if err != nil { + return fmt.Errorf("failed to parse version from '%s': %w", str, err) + } + v.Version = *ver + return nil +} + +// Latest returns true if the version is the latest version +func (v *Version) Latest() bool { + return v.Version.Equal(&Latest.Version) +} + +const ( + Network = "Network Name" + PriceRegistryContract Name = "PriceRegistry" + OffRampContract Name = "OffRamp" + OnRampContract Name = "OnRamp" + TokenPoolContract Name = "TokenPool" + CommitStoreContract Name = "CommitStore" + + defaultDestByteOverhead = uint32(32) + defaultDestGasOverhead = uint32(125_000) +) + +var ( + V1_2_0 = MustVersion("1.2.0") + V1_4_0 = MustVersion("1.4.0") + V1_5_0_dev = MustVersion("1.5.0-dev") + LatestPoolVersion = V1_5_0_dev + Latest = V1_5_0_dev + VersionMap = map[Name]Version{ + PriceRegistryContract: V1_2_0, + OffRampContract: Latest, + OnRampContract: Latest, + CommitStoreContract: Latest, + TokenPoolContract: Latest, + } + SupportedContracts = map[Name]map[string]bool{ + PriceRegistryContract: { + Latest.String(): true, + V1_2_0.String(): true, + }, + OffRampContract: { + Latest.String(): true, + V1_2_0.String(): true, + }, + OnRampContract: { + Latest.String(): true, + V1_2_0.String(): true, + }, + CommitStoreContract: { + Latest.String(): true, + V1_2_0.String(): true, + }, + TokenPoolContract: { + Latest.String(): true, + V1_4_0.String(): true, + }, + } + + FiftyCoins = new(big.Int).Mul(big.NewInt(1e18), big.NewInt(50)) + HundredCoins = new(big.Int).Mul(big.NewInt(1e18), big.NewInt(100)) +) + +// CheckVersionSupported checks if a given version is supported for a given contract +func CheckVersionSupported(name Name, version Version) error { + if contract, ok := SupportedContracts[name]; ok { + if isSupported, ok := contract[version.String()]; ok { + if isSupported { + return nil + } + return fmt.Errorf("version %s is not supported for contract %s", version.String(), name) + } + return fmt.Errorf("version %s is not supported for contract %s", version.String(), name) + } + return fmt.Errorf("contract %s is not supported", name) +} + +type RateLimiterConfig struct { + IsEnabled bool + Rate *big.Int + Capacity *big.Int + Tokens *big.Int +} + +type ARMConfig struct { + ARMWeightsByParticipants map[string]*big.Int // mapping : ARM participant address => weight + ThresholdForBlessing *big.Int + ThresholdForBadSignal *big.Int +} + +type TokenTransmitter struct { + client blockchain.EVMClient + instance *mock_usdc_token_transmitter.MockE2EUSDCTransmitter + ContractAddress common.Address +} + +type ERC677Token struct { + client blockchain.EVMClient + logger *zerolog.Logger + instance *burn_mint_erc677.BurnMintERC677 + ContractAddress common.Address + OwnerAddress common.Address + OwnerWallet *blockchain.EthereumWallet +} + +func (token *ERC677Token) 
GrantMintAndBurn(burnAndMinter common.Address) error { + opts, err := token.client.TransactionOpts(token.OwnerWallet) + if err != nil { + return fmt.Errorf("failed to get transaction opts: %w", err) + } + token.logger.Info(). + Str(Network, token.client.GetNetworkName()). + Str("BurnAndMinter", burnAndMinter.Hex()). + Str("Token", token.ContractAddress.Hex()). + Msg("Granting mint and burn roles") + tx, err := token.instance.GrantMintAndBurnRoles(opts, burnAndMinter) + if err != nil { + return fmt.Errorf("failed to grant mint and burn roles: %w", err) + } + return token.client.ProcessTransaction(tx) +} + +func (token *ERC677Token) GrantMintRole(minter common.Address) error { + opts, err := token.client.TransactionOpts(token.OwnerWallet) + if err != nil { + return err + } + token.logger.Info(). + Str(Network, token.client.GetNetworkName()). + Str("Minter", minter.Hex()). + Str("Token", token.ContractAddress.Hex()). + Msg("Granting mint roles") + tx, err := token.instance.GrantMintRole(opts, minter) + if err != nil { + return fmt.Errorf("failed to grant mint role: %w", err) + } + return token.client.ProcessTransaction(tx) +} + +func (token *ERC677Token) Mint(to common.Address, amount *big.Int) error { + opts, err := token.client.TransactionOpts(token.OwnerWallet) + if err != nil { + return err + } + token.logger.Info(). + Str(Network, token.client.GetNetworkName()). + Str("To", to.Hex()). + Str("Token", token.ContractAddress.Hex()). + Str("Amount", amount.String()). + Msg("Minting tokens") + tx, err := token.instance.Mint(opts, to, amount) + if err != nil { + return fmt.Errorf("failed to mint tokens: %w", err) + } + return token.client.ProcessTransaction(tx) +} + +type ERC20Token struct { + client blockchain.EVMClient + logger *zerolog.Logger + instance *erc20.ERC20 + ContractAddress common.Address + OwnerAddress common.Address + OwnerWallet *blockchain.EthereumWallet +} + +func (token *ERC20Token) Address() string { + return token.ContractAddress.Hex() +} + +func (token *ERC20Token) BalanceOf(ctx context.Context, addr string) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(token.client.GetDefaultWallet().Address()), + Context: ctx, + } + balance, err := token.instance.BalanceOf(opts, common.HexToAddress(addr)) + if err != nil { + return nil, fmt.Errorf("failed to get balance: %w", err) + } + return balance, nil +} + +// Allowance returns the amount which spender is still allowed to withdraw from owner +// https://docs.openzeppelin.com/contracts/2.x/api/token/erc20#IERC20-allowance-address-address- +func (token *ERC20Token) Allowance(owner, spender string) (*big.Int, error) { + allowance, err := token.instance.Allowance(nil, common.HexToAddress(owner), common.HexToAddress(spender)) + if err != nil { + return nil, err + } + return allowance, nil +} + +// Approve approves the spender to spend the given amount of tokens on behalf of another account +// https://docs.openzeppelin.com/contracts/2.x/api/token/erc20#IERC20-approve-address-uint256- +func (token *ERC20Token) Approve(onBehalf *blockchain.EthereumWallet, spender string, amount *big.Int) error { + onBehalfBalance, err := token.BalanceOf(context.Background(), onBehalf.Address()) + if err != nil { + return fmt.Errorf("failed to get balance of onBehalf: %w", err) + } + currentAllowance, err := token.Allowance(onBehalf.Address(), spender) + if err != nil { + return fmt.Errorf("failed to get current allowance for '%s' on behalf of '%s': %w", spender, onBehalf.Address(), err) + } + opts, err := 
token.client.TransactionOpts(onBehalf) + if err != nil { + return fmt.Errorf("failed to get transaction options: %w", err) + } + log := token.logger.Info(). + Str("On Behalf Of", onBehalf.Address()). + Str("On Behalf Of Balance", onBehalfBalance.String()). + Str("Spender", spender). + Str("Spender Current Allowance", currentAllowance.String()). + Str("Token", token.Address()). + Str("Amount", amount.String()). + Uint64("Nonce", opts.Nonce.Uint64()). + Str(Network, token.client.GetNetworkConfig().Name) + tx, err := token.instance.Approve(opts, common.HexToAddress(spender), amount) + if err != nil { + log.Err(err).Msg("Error Approving ERC20 Transfer") + return fmt.Errorf("failed to approve ERC20: %w", err) + } + log.Str("Hash", tx.Hash().Hex()).Msg("Approving ERC20 Transfer") + return token.client.ProcessTransaction(tx) +} + +func (token *ERC20Token) Transfer(from *blockchain.EthereumWallet, to string, amount *big.Int) error { + opts, err := token.client.TransactionOpts(from) + if err != nil { + return fmt.Errorf("failed to get transaction options: %w", err) + } + token.logger.Info(). + Str("From", from.Address()). + Str("To", to). + Str("Amount", amount.String()). + Uint64("Nonce", opts.Nonce.Uint64()). + Str(Network, token.client.GetNetworkConfig().Name). + Msg("Transferring ERC20") + tx, err := token.instance.Transfer(opts, common.HexToAddress(to), amount) + if err != nil { + return fmt.Errorf("failed to transfer ERC20: %w", err) + } + return token.client.ProcessTransaction(tx) +} + +type LinkToken struct { + client blockchain.EVMClient + logger *zerolog.Logger + instance *link_token_interface.LinkToken + EthAddress common.Address +} + +func (l *LinkToken) Address() string { + return l.EthAddress.Hex() +} + +func (l *LinkToken) BalanceOf(ctx context.Context, addr string) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(l.client.GetDefaultWallet().Address()), + Context: ctx, + } + balance, err := l.instance.BalanceOf(opts, common.HexToAddress(addr)) + if err != nil { + return nil, fmt.Errorf("failed to get LINK balance: %w", err) + } + return balance, nil +} + +func (l *LinkToken) Allowance(owner, spender string) (*big.Int, error) { + allowance, err := l.instance.Allowance(nil, common.HexToAddress(owner), common.HexToAddress(spender)) + if err != nil { + return nil, err + } + return allowance, nil +} + +func (l *LinkToken) Approve(to string, amount *big.Int) error { + opts, err := l.client.TransactionOpts(l.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("failed to get transaction opts: %w", err) + } + l.logger.Info(). + Str("From", l.client.GetDefaultWallet().Address()). + Str("To", to). + Str("Token", l.Address()). + Str("Amount", amount.String()). + Uint64("Nonce", opts.Nonce.Uint64()). + Str(Network, l.client.GetNetworkConfig().Name). + Msg("Approving LINK Transfer") + tx, err := l.instance.Approve(opts, common.HexToAddress(to), amount) + if err != nil { + return fmt.Errorf("failed to approve LINK transfer: %w", err) + } + return l.client.ProcessTransaction(tx) +} + +func (l *LinkToken) Transfer(to string, amount *big.Int) error { + opts, err := l.client.TransactionOpts(l.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("failed to get transaction opts: %w", err) + } + l.logger.Info(). + Str("From", l.client.GetDefaultWallet().Address()). + Str("To", to). + Str("Amount", amount.String()). + Uint64("Nonce", opts.Nonce.Uint64()). + Str(Network, l.client.GetNetworkConfig().Name). 
+ Msg("Transferring LINK") + tx, err := l.instance.Transfer(opts, common.HexToAddress(to), amount) + if err != nil { + return fmt.Errorf("failed to transfer LINK: %w", err) + } + return l.client.ProcessTransaction(tx) +} + +type LatestPool struct { + PoolInterface *token_pool.TokenPool + LockReleasePool *lock_release_token_pool.LockReleaseTokenPool + USDCPool *usdc_token_pool.USDCTokenPool +} + +type V1_4_0Pool struct { + PoolInterface *token_pool_1_4_0.TokenPool + LockReleasePool *lock_release_token_pool_1_4_0.LockReleaseTokenPool + USDCPool *usdc_token_pool_1_4_0.USDCTokenPool +} + +type TokenPoolWrapper struct { + Latest *LatestPool + V1_4_0 *V1_4_0Pool +} + +func (w TokenPoolWrapper) SetRebalancer(opts *bind.TransactOpts, from common.Address) (*types.Transaction, error) { + if w.Latest != nil && w.Latest.LockReleasePool != nil { + return w.Latest.LockReleasePool.SetRebalancer(opts, from) + } + if w.V1_4_0 != nil && w.V1_4_0.LockReleasePool != nil { + return w.V1_4_0.LockReleasePool.SetRebalancer(opts, from) + } + return nil, fmt.Errorf("no pool found to set rebalancer") +} + +func (w TokenPoolWrapper) SetUSDCDomains(opts *bind.TransactOpts, updates []usdc_token_pool.USDCTokenPoolDomainUpdate) (*types.Transaction, error) { + if w.Latest != nil && w.Latest.USDCPool != nil { + return w.Latest.USDCPool.SetDomains(opts, updates) + } + if w.V1_4_0 != nil && w.V1_4_0.USDCPool != nil { + V1_4_0Updates := make([]usdc_token_pool_1_4_0.USDCTokenPoolDomainUpdate, len(updates)) + for i, update := range updates { + V1_4_0Updates[i] = usdc_token_pool_1_4_0.USDCTokenPoolDomainUpdate{ + AllowedCaller: update.AllowedCaller, + DomainIdentifier: update.DomainIdentifier, + DestChainSelector: update.DestChainSelector, + Enabled: update.Enabled, + } + } + return w.V1_4_0.USDCPool.SetDomains(opts, V1_4_0Updates) + } + return nil, fmt.Errorf("no pool found to set USDC domains") +} + +func (w TokenPoolWrapper) WithdrawLiquidity(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + if w.Latest != nil && w.Latest.LockReleasePool != nil { + return w.Latest.LockReleasePool.WithdrawLiquidity(opts, amount) + } + if w.V1_4_0 != nil && w.V1_4_0.LockReleasePool != nil { + return w.V1_4_0.LockReleasePool.WithdrawLiquidity(opts, amount) + } + return nil, fmt.Errorf("no pool found to withdraw liquidity") +} + +func (w TokenPoolWrapper) ProvideLiquidity(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + if w.Latest != nil && w.Latest.LockReleasePool != nil { + return w.Latest.LockReleasePool.ProvideLiquidity(opts, amount) + } + if w.V1_4_0 != nil && w.V1_4_0.LockReleasePool != nil { + return w.V1_4_0.LockReleasePool.ProvideLiquidity(opts, amount) + } + return nil, fmt.Errorf("no pool found to provide liquidity") +} + +func (w TokenPoolWrapper) IsSupportedChain(opts *bind.CallOpts, remoteChainSelector uint64) (bool, error) { + if w.Latest != nil && w.Latest.PoolInterface != nil { + return w.Latest.PoolInterface.IsSupportedChain(opts, remoteChainSelector) + } + if w.V1_4_0 != nil && w.V1_4_0.PoolInterface != nil { + return w.V1_4_0.PoolInterface.IsSupportedChain(opts, remoteChainSelector) + } + return false, fmt.Errorf("no pool found to check if chain is supported") +} + +func (w TokenPoolWrapper) ApplyChainUpdates(opts *bind.TransactOpts, update []token_pool.TokenPoolChainUpdate) (*types.Transaction, error) { + if w.Latest != nil && w.Latest.PoolInterface != nil { + return w.Latest.PoolInterface.ApplyChainUpdates(opts, update) + } + if w.V1_4_0 != nil && w.V1_4_0.PoolInterface != 
nil { + V1_4_0Updates := make([]token_pool_1_4_0.TokenPoolChainUpdate, len(update)) + for i, u := range update { + V1_4_0Updates[i] = token_pool_1_4_0.TokenPoolChainUpdate{ + RemoteChainSelector: u.RemoteChainSelector, + Allowed: u.Allowed, + InboundRateLimiterConfig: token_pool_1_4_0.RateLimiterConfig{ + IsEnabled: u.InboundRateLimiterConfig.IsEnabled, + Capacity: u.InboundRateLimiterConfig.Capacity, + Rate: u.InboundRateLimiterConfig.Rate, + }, + OutboundRateLimiterConfig: token_pool_1_4_0.RateLimiterConfig{ + IsEnabled: u.OutboundRateLimiterConfig.IsEnabled, + Capacity: u.OutboundRateLimiterConfig.Capacity, + Rate: u.OutboundRateLimiterConfig.Rate, + }, + } + } + return w.V1_4_0.PoolInterface.ApplyChainUpdates(opts, V1_4_0Updates) + } + return nil, fmt.Errorf("no pool found to apply chain updates") +} + +func (w TokenPoolWrapper) SetChainRateLimiterConfig(opts *bind.TransactOpts, selector uint64, out token_pool.RateLimiterConfig, in token_pool.RateLimiterConfig) (*types.Transaction, error) { + if w.Latest != nil && w.Latest.PoolInterface != nil { + return w.Latest.PoolInterface.SetChainRateLimiterConfig(opts, selector, out, in) + } + if w.V1_4_0 != nil && w.V1_4_0.PoolInterface != nil { + return w.V1_4_0.PoolInterface.SetChainRateLimiterConfig(opts, selector, + token_pool_1_4_0.RateLimiterConfig{ + IsEnabled: out.IsEnabled, + Capacity: out.Capacity, + Rate: out.Rate, + }, token_pool_1_4_0.RateLimiterConfig{ + IsEnabled: in.IsEnabled, + Capacity: in.Capacity, + Rate: in.Rate, + }) + } + return nil, fmt.Errorf("no pool found to set chain rate limiter config") +} + +func (w TokenPoolWrapper) GetCurrentOutboundRateLimiterState(opts *bind.CallOpts, selector uint64) (*RateLimiterConfig, error) { + if w.Latest != nil && w.Latest.PoolInterface != nil { + rl, err := w.Latest.PoolInterface.GetCurrentOutboundRateLimiterState(opts, selector) + if err != nil { + return nil, err + } + return &RateLimiterConfig{ + IsEnabled: rl.IsEnabled, + Capacity: rl.Capacity, + Rate: rl.Rate, + Tokens: rl.Tokens, + }, nil + } + if w.V1_4_0 != nil && w.V1_4_0.PoolInterface != nil { + rl, err := w.V1_4_0.PoolInterface.GetCurrentOutboundRateLimiterState(opts, selector) + if err != nil { + return nil, err + } + return &RateLimiterConfig{ + IsEnabled: rl.IsEnabled, + Capacity: rl.Capacity, + Rate: rl.Rate, + Tokens: rl.Tokens, + }, nil + } + return nil, fmt.Errorf("no pool found to get current outbound rate limiter state") +} + +func (w TokenPoolWrapper) GetCurrentInboundRateLimiterState(opts *bind.CallOpts, selector uint64) (*RateLimiterConfig, error) { + if w.Latest != nil && w.Latest.PoolInterface != nil { + rl, err := w.Latest.PoolInterface.GetCurrentInboundRateLimiterState(opts, selector) + if err != nil { + return nil, err + } + return &RateLimiterConfig{ + IsEnabled: rl.IsEnabled, + Capacity: rl.Capacity, + Rate: rl.Rate, + Tokens: rl.Tokens, + }, nil + } + if w.V1_4_0 != nil && w.V1_4_0.PoolInterface != nil { + rl, err := w.V1_4_0.PoolInterface.GetCurrentInboundRateLimiterState(opts, selector) + if err != nil { + return nil, err + } + return &RateLimiterConfig{ + IsEnabled: rl.IsEnabled, + Capacity: rl.Capacity, + Rate: rl.Rate, + Tokens: rl.Tokens, + }, nil + } + return nil, fmt.Errorf("no pool found to get current outbound rate limiter state") +} + +func (w TokenPoolWrapper) SetRouter(opts *bind.TransactOpts, routerAddr common.Address) (*types.Transaction, error) { + if w.Latest != nil && w.Latest.PoolInterface != nil { + return w.Latest.PoolInterface.SetRouter(opts, routerAddr) + } + if w.V1_4_0 != nil && 
w.V1_4_0.PoolInterface != nil { + return w.V1_4_0.PoolInterface.SetRouter(opts, routerAddr) + } + return nil, fmt.Errorf("no pool found to set router") +} + +func (w TokenPoolWrapper) GetRouter(opts *bind.CallOpts) (common.Address, error) { + if w.Latest != nil && w.Latest.PoolInterface != nil { + addr, err := w.Latest.PoolInterface.GetRouter(opts) + if err != nil { + return common.Address{}, err + } + return addr, nil + } + if w.V1_4_0 != nil && w.V1_4_0.PoolInterface != nil { + addr, err := w.V1_4_0.PoolInterface.GetRouter(opts) + if err != nil { + return common.Address{}, err + } + return addr, nil + } + return common.Address{}, fmt.Errorf("no pool found to get router") +} + +func (w TokenPoolWrapper) GetRebalancer(opts *bind.CallOpts) (common.Address, error) { + if w.Latest != nil && w.Latest.LockReleasePool != nil { + addr, err := w.Latest.LockReleasePool.GetRebalancer(opts) + if err != nil { + return common.Address{}, err + } + return addr, nil + } + if w.V1_4_0 != nil && w.V1_4_0.LockReleasePool != nil { + addr, err := w.V1_4_0.LockReleasePool.GetRebalancer(opts) + if err != nil { + return common.Address{}, err + } + return addr, nil + } + return common.Address{}, fmt.Errorf("no pool found to get rebalancer") +} + +// TokenPool represents a TokenPool address +type TokenPool struct { + client blockchain.EVMClient + logger *zerolog.Logger + Instance *TokenPoolWrapper + EthAddress common.Address + OwnerAddress common.Address + OwnerWallet *blockchain.EthereumWallet +} + +func (pool *TokenPool) Address() string { + return pool.EthAddress.Hex() +} + +func (pool *TokenPool) IsUSDC() bool { + if pool.Instance.Latest != nil && pool.Instance.Latest.USDCPool != nil { + return true + } + if pool.Instance.V1_4_0 != nil && pool.Instance.V1_4_0.USDCPool != nil { + return true + } + return false +} + +func (pool *TokenPool) IsLockRelease() bool { + if pool.Instance.Latest != nil && pool.Instance.Latest.LockReleasePool != nil { + return true + } + if pool.Instance.V1_4_0 != nil && pool.Instance.V1_4_0.LockReleasePool != nil { + return true + } + return false +} + +func (pool *TokenPool) SyncUSDCDomain(destTokenTransmitter *TokenTransmitter, destPoolAddr common.Address, destChainSelector uint64) error { + if !pool.IsUSDC() { + return fmt.Errorf("pool is not a USDC pool, cannot sync domain") + } + + var allowedCallerBytes [32]byte + copy(allowedCallerBytes[12:], destPoolAddr.Bytes()) + destTokenTransmitterIns, err := mock_usdc_token_transmitter.NewMockE2EUSDCTransmitter( + destTokenTransmitter.ContractAddress, destTokenTransmitter.client.Backend(), + ) + if err != nil { + return fmt.Errorf("failed to create mock USDC token transmitter: %w", err) + } + domain, err := destTokenTransmitterIns.LocalDomain(nil) + if err != nil { + return fmt.Errorf("failed to get local domain: %w", err) + } + opts, err := pool.client.TransactionOpts(pool.OwnerWallet) + if err != nil { + return fmt.Errorf("failed to get transaction opts: %w", err) + } + pool.logger.Info(). + Str("Token Pool", pool.Address()). + Str("From", pool.OwnerAddress.Hex()). + Str(Network, pool.client.GetNetworkName()). + Uint32("Domain", domain). + Str("Allowed Caller", destPoolAddr.Hex()). + Str("Dest Chain Selector", fmt.Sprintf("%d", destChainSelector)). 
+ Msg("Syncing USDC Domain") + tx, err := pool.Instance.SetUSDCDomains(opts, []usdc_token_pool.USDCTokenPoolDomainUpdate{ + { + AllowedCaller: allowedCallerBytes, + DomainIdentifier: domain, + DestChainSelector: destChainSelector, + Enabled: true, + }, + }) + if err != nil { + return fmt.Errorf("failed to set domain: %w", err) + } + return pool.client.ProcessTransaction(tx) +} + +// MintUSDCToUSDCPool mints 100 USDC tokens to the pool if it is a USDC pool. +// This helps provide liquidity to the pool which is necessary for USDC tests to function properly. +func (pool *TokenPool) MintUSDCToUSDCPool() error { + if !pool.IsUSDC() { + return fmt.Errorf("pool is not a USDC pool, cannot send USDC") + } + usdcToken, err := pool.GetToken() + if err != nil { + return fmt.Errorf("failed to get dest usdc token: %w", err) + } + usdcInstance, err := burn_mint_erc677.NewBurnMintERC677(usdcToken, pool.client.Backend()) + if err != nil { + return fmt.Errorf("failed to get dest usdc token instance: %w", err) + } + + opts, err := pool.client.TransactionOpts(pool.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("failed to get transaction opts: %w", err) + } + + tx, err := usdcInstance.Mint(opts, pool.EthAddress, HundredCoins) + if err != nil { + return fmt.Errorf("failed to mint usdc tokens to destPool: %w", err) + } + return pool.client.ProcessTransaction(tx) +} + +func (pool *TokenPool) RemoveLiquidity(amount *big.Int) error { + if !pool.IsLockRelease() { + return fmt.Errorf("pool is not a lock release pool, cannot remove liquidity") + } + opts, err := pool.client.TransactionOpts(pool.OwnerWallet) + if err != nil { + return fmt.Errorf("failed to get transaction opts: %w", err) + } + pool.logger.Info(). + Str("Token Pool", pool.Address()). + Str("Amount", amount.String()). + Msg("Initiating removing funds from pool") + tx, err := pool.Instance.WithdrawLiquidity(opts, amount) + if err != nil { + return fmt.Errorf("failed to withdraw liquidity: %w", err) + } + pool.logger.Info(). + Str("Token Pool", pool.Address()). + Str("Amount", amount.String()). + Str(Network, pool.client.GetNetworkConfig().Name). + Msg("Liquidity removed") + return pool.client.ProcessTransaction(tx) +} + +// AddLiquidity approves the token pool to spend the given amount of tokens from the given wallet +func (pool *TokenPool) AddLiquidity(token *ERC20Token, fromWallet *blockchain.EthereumWallet, amount *big.Int) error { + if !pool.IsLockRelease() { + return fmt.Errorf("pool is not a lock release pool, cannot add liquidity") + } + pool.logger.Info(). + Str("Token", token.Address()). + Str("Token Pool", pool.Address()). + Msg("Initiating adding liquidity to token pool") + err := token.Approve(fromWallet, pool.Address(), amount) + if err != nil { + return fmt.Errorf("failed to approve token transfer: %w", err) + } + err = pool.client.WaitForEvents() + if err != nil { + return fmt.Errorf("failed to wait for events: %w", err) + } + opts, err := pool.client.TransactionOpts(pool.OwnerWallet) + if err != nil { + return fmt.Errorf("failed to get transaction opts: %w", err) + } + _, err = pool.Instance.SetRebalancer(opts, opts.From) + if err != nil { + return fmt.Errorf("failed to set rebalancer: %w", err) + } + opts, err = pool.client.TransactionOpts(pool.OwnerWallet) + if err != nil { + return fmt.Errorf("failed to get transaction opts: %w", err) + } + pool.logger.Info(). + Str("Token Pool", pool.Address()). 
+ Msg("Initiating adding Tokens in pool") + tx, err := pool.Instance.ProvideLiquidity(opts, amount) + if err != nil { + return fmt.Errorf("failed to provide liquidity: %w", err) + } + pool.logger.Info(). + Str("Token Pool", pool.Address()). + Str("Token", token.Address()). + Str(Network, pool.client.GetNetworkConfig().Name). + Msg("Liquidity added") + return pool.client.ProcessTransaction(tx) +} + +func (pool *TokenPool) SetRemoteChainOnPool(remoteChainSelector uint64, remotePoolAddresses common.Address, remoteTokenAddress common.Address) error { + pool.logger.Info(). + Str("Token Pool", pool.Address()). + Msg("Setting remote chain on pool") + var selectorsToUpdate []token_pool.TokenPoolChainUpdate + + isSupported, err := pool.Instance.IsSupportedChain(nil, remoteChainSelector) + if err != nil { + return fmt.Errorf("failed to get if chain is supported: %w", err) + } + // Check if remote chain is already supported, if yes return + if isSupported { + pool.logger.Info(). + Str("Token Pool", pool.Address()). + Str(Network, pool.client.GetNetworkName()). + Uint64("Remote Chain Selector", remoteChainSelector). + Msg("Remote chain is already supported") + return nil + } + // if not, add it + encodedPoolAddress, err := abihelpers.EncodeAddress(remotePoolAddresses) + if err != nil { + return fmt.Errorf("failed to encode address: %w", err) + } + + encodedTokenAddress, err := abihelpers.EncodeAddress(remoteTokenAddress) + if err != nil { + return fmt.Errorf("failed to encode token address: %w", err) + } + + selectorsToUpdate = append(selectorsToUpdate, token_pool.TokenPoolChainUpdate{ + RemoteChainSelector: remoteChainSelector, + RemotePoolAddress: encodedPoolAddress, + RemoteTokenAddress: encodedTokenAddress, + Allowed: true, + InboundRateLimiterConfig: token_pool.RateLimiterConfig{ + IsEnabled: true, + Capacity: new(big.Int).Mul(big.NewInt(1e18), big.NewInt(1e9)), + Rate: new(big.Int).Mul(big.NewInt(1e18), big.NewInt(1e5)), + }, + OutboundRateLimiterConfig: token_pool.RateLimiterConfig{ + IsEnabled: true, + Capacity: new(big.Int).Mul(big.NewInt(1e18), big.NewInt(1e9)), + Rate: new(big.Int).Mul(big.NewInt(1e18), big.NewInt(1e5)), + }, + }) + opts, err := pool.client.TransactionOpts(pool.OwnerWallet) + if err != nil { + return fmt.Errorf("failed to get transaction opts: %w", err) + } + tx, err := pool.Instance.ApplyChainUpdates(opts, selectorsToUpdate) + if err != nil { + return fmt.Errorf("failed to set chain updates on token pool: %w", err) + } + + pool.logger.Info(). + Str("Token Pool", pool.Address()). + Uint64("Chain selector", remoteChainSelector). + Str(Network, pool.client.GetNetworkConfig().Name). + Msg("Remote chains set on token pool") + return pool.client.ProcessTransaction(tx) +} + +// SetRemoteChainRateLimits sets the rate limits for the token pool on the remote chain +func (pool *TokenPool) SetRemoteChainRateLimits(remoteChainSelector uint64, rl token_pool.RateLimiterConfig) error { + opts, err := pool.client.TransactionOpts(pool.OwnerWallet) + if err != nil { + return fmt.Errorf("error getting transaction opts: %w", err) + } + pool.logger.Info(). + Str("Token Pool", pool.Address()). + Str("Remote chain selector", strconv.FormatUint(remoteChainSelector, 10)). + Interface("RateLimiterConfig", rl). + Msg("Setting Rate Limit on token pool") + tx, err := pool.Instance.SetChainRateLimiterConfig(opts, remoteChainSelector, rl, rl) + + if err != nil { + return fmt.Errorf("error setting rate limit token pool: %w", err) + } + + pool.logger.Info(). + Str("Token Pool", pool.Address()). 
+ Str("Remote chain selector", strconv.FormatUint(remoteChainSelector, 10)). + Interface("RateLimiterConfig", rl). + Msg("Rate Limit on token pool is set") + return pool.client.ProcessTransaction(tx) +} + +func (pool *TokenPool) SetRouter(routerAddr common.Address) error { + pool.logger.Info(). + Str("Token Pool", pool.Address()). + Msg("Setting router on pool") + opts, err := pool.client.TransactionOpts(pool.OwnerWallet) + if err != nil { + return fmt.Errorf("failed to get transaction opts: %w", err) + } + tx, err := pool.Instance.SetRouter(opts, routerAddr) + if err != nil { + return fmt.Errorf("failed to set router: %w", err) + + } + pool.logger.Info(). + Str("Token Pool", pool.Address()). + Str("Router", routerAddr.String()). + Msg("Router set on pool") + return pool.client.ProcessTransaction(tx) +} + +func (pool *TokenPool) GetRouter() (common.Address, error) { + return pool.Instance.GetRouter(nil) +} + +func (pool *TokenPool) GetToken() (common.Address, error) { + if pool.Instance.V1_4_0 != nil && pool.Instance.V1_4_0.PoolInterface != nil { + return pool.Instance.V1_4_0.PoolInterface.GetToken(nil) + } + if pool.Instance.Latest != nil && pool.Instance.Latest.PoolInterface != nil { + return pool.Instance.Latest.PoolInterface.GetToken(nil) + } + return common.Address{}, fmt.Errorf("no pool found to get token") +} + +func (pool *TokenPool) SetRebalancer(rebalancerAddress common.Address) error { + pool.logger.Info(). + Str("Token Pool", pool.Address()). + Msg("Setting rebalancer on pool") + opts, err := pool.client.TransactionOpts(pool.OwnerWallet) + if err != nil { + return fmt.Errorf("failed to get transaction opts: %w", err) + } + tx, err := pool.Instance.SetRebalancer(opts, rebalancerAddress) + if err != nil { + return fmt.Errorf("failed to set router: %w", err) + + } + pool.logger.Info(). + Str("Token Pool", pool.Address()). + Str("Rebalancer", rebalancerAddress.String()). 
+ Msg("Rebalancer set on pool") + return pool.client.ProcessTransaction(tx) +} + +func (pool *TokenPool) GetRebalancer() (common.Address, error) { + return pool.Instance.GetRebalancer(nil) +} + +type ARM struct { + client blockchain.EVMClient + Instance *arm_contract.ARMContract + EthAddress common.Address +} + +func (arm *ARM) Address() string { + return arm.EthAddress.Hex() +} + +type MockARM struct { + client blockchain.EVMClient + Instance *mock_arm_contract.MockARMContract + EthAddress common.Address +} + +func (arm *MockARM) SetClient(client blockchain.EVMClient) { + arm.client = client +} +func (arm *MockARM) Address() string { + return arm.EthAddress.Hex() +} + +type CommitStoreReportAccepted struct { + Min uint64 + Max uint64 + MerkleRoot [32]byte + LogInfo LogInfo +} + +type CommitStoreWrapper struct { + Latest *commit_store.CommitStore + V1_2_0 *commit_store_1_2_0.CommitStore +} + +func (w CommitStoreWrapper) SetOCR2Config(opts *bind.TransactOpts, + signers []common.Address, + transmitters []common.Address, + f uint8, + onchainConfig []byte, + offchainConfigVersion uint64, + offchainConfig []byte, +) (*types.Transaction, error) { + if w.Latest != nil { + return w.Latest.SetOCR2Config(opts, signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) + } + if w.V1_2_0 != nil { + return w.V1_2_0.SetOCR2Config(opts, signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) + } + return nil, fmt.Errorf("no instance found to set OCR2 config") +} + +func (w CommitStoreWrapper) GetExpectedNextSequenceNumber(opts *bind.CallOpts) (uint64, error) { + if w.Latest != nil { + return w.Latest.GetExpectedNextSequenceNumber(opts) + } + if w.V1_2_0 != nil { + return w.V1_2_0.GetExpectedNextSequenceNumber(opts) + } + return 0, fmt.Errorf("no instance found to get expected next sequence number") +} + +type CommitStore struct { + client blockchain.EVMClient + logger *zerolog.Logger + Instance *CommitStoreWrapper + EthAddress common.Address +} + +func (b *CommitStore) Address() string { + return b.EthAddress.Hex() +} + +// SetOCR2Config sets the offchain reporting protocol configuration +func (b *CommitStore) SetOCR2Config( + signers []common.Address, + transmitters []common.Address, + f uint8, + onchainConfig []byte, + offchainConfigVersion uint64, + offchainConfig []byte, +) error { + b.logger.Info().Str("Contract Address", b.Address()).Msg("Configuring OCR config for CommitStore Contract") + // Set Config + opts, err := b.client.TransactionOpts(b.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("error getting transaction opts: %w", err) + } + tx, err := b.Instance.SetOCR2Config( + opts, + signers, + transmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig, + ) + b.logger.Debug(). + Interface("signerAddresses", signers). + Interface("transmitterAddresses", transmitters). + Str(Network, b.client.GetNetworkConfig().Name). + Str("Tx", tx.Hash().Hex()). 
+ Msg("Configuring CommitStore") + + if err != nil { + return fmt.Errorf("error setting OCR2 config: %w", err) + } + return b.client.ProcessTransaction(tx) +} + +// WatchReportAccepted watches for report accepted events +// There is no need to differentiate between the two versions of the contract as the event signature is the same +// we can cast the contract to the latest version +func (b *CommitStore) WatchReportAccepted(opts *bind.WatchOpts, acceptedEvent chan *commit_store.CommitStoreReportAccepted) (event.Subscription, error) { + if b.Instance.Latest != nil { + return b.Instance.Latest.WatchReportAccepted(opts, acceptedEvent) + } + if b.Instance.V1_2_0 != nil { + newCommitStore, err := commit_store.NewCommitStore(b.EthAddress, wrappers.MustNewWrappedContractBackend(b.client, nil)) + if err != nil { + return nil, fmt.Errorf("failed to create new CommitStore contract: %w", err) + } + return newCommitStore.WatchReportAccepted(opts, acceptedEvent) + } + return nil, fmt.Errorf("no instance found to watch for report accepted") +} + +type ReceiverDapp struct { + client blockchain.EVMClient + logger *zerolog.Logger + instance *maybe_revert_message_receiver.MaybeRevertMessageReceiver + EthAddress common.Address +} + +func (rDapp *ReceiverDapp) Address() string { + return rDapp.EthAddress.Hex() +} + +func (rDapp *ReceiverDapp) ToggleRevert(revert bool) error { + opts, err := rDapp.client.TransactionOpts(rDapp.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("error getting transaction opts: %w", err) + } + tx, err := rDapp.instance.SetRevert(opts, revert) + if err != nil { + return fmt.Errorf("error setting revert: %w", err) + } + rDapp.logger.Info(). + Bool("revert", revert). + Str("tx", tx.Hash().String()). + Str("ReceiverDapp", rDapp.Address()). + Str(Network, rDapp.client.GetNetworkConfig().Name). 
+ Msg("ReceiverDapp revert set") + return rDapp.client.ProcessTransaction(tx) +} + +type InternalTimestampedPackedUint224 struct { + Value *big.Int + Timestamp uint32 +} + +type PriceRegistryUsdPerUnitGasUpdated struct { + DestChain uint64 + Value *big.Int + Timestamp *big.Int + Raw types.Log +} + +type PriceRegistryWrapper struct { + Latest *price_registry.PriceRegistry + V1_2_0 *price_registry_1_2_0.PriceRegistry +} + +func (p *PriceRegistryWrapper) GetTokenPrice(opts *bind.CallOpts, token common.Address) (*big.Int, error) { + if p.Latest != nil { + price, err := p.Latest.GetTokenPrice(opts, token) + if err != nil { + return nil, err + } + return price.Value, nil + } + if p.V1_2_0 != nil { + p, err := p.V1_2_0.GetTokenPrice(opts, token) + if err != nil { + return nil, err + } + return p.Value, nil + } + return nil, fmt.Errorf("no instance found to get token price") +} + +func (p *PriceRegistryWrapper) AddPriceUpdater(opts *bind.TransactOpts, addr common.Address) (*types.Transaction, error) { + if p.Latest != nil { + return p.Latest.ApplyAuthorizedCallerUpdates( + opts, + price_registry.AuthorizedCallersAuthorizedCallerArgs{ + AddedCallers: []common.Address{addr}, + RemovedCallers: []common.Address{}, + }, + ) + } + if p.V1_2_0 != nil { + return p.V1_2_0.ApplyPriceUpdatersUpdates(opts, []common.Address{addr}, []common.Address{}) + } + return nil, fmt.Errorf("no instance found to add price updater") +} + +func (p *PriceRegistryWrapper) AddFeeToken(opts *bind.TransactOpts, addr common.Address) (*types.Transaction, error) { + if p.Latest != nil { + return p.Latest.ApplyFeeTokensUpdates(opts, []common.Address{addr}, []common.Address{}) + } + if p.V1_2_0 != nil { + return p.V1_2_0.ApplyFeeTokensUpdates(opts, []common.Address{addr}, []common.Address{}) + } + return nil, fmt.Errorf("no instance found to add fee token") +} + +func (p *PriceRegistryWrapper) GetDestinationChainGasPrice(opts *bind.CallOpts, chainselector uint64) (InternalTimestampedPackedUint224, error) { + if p.Latest != nil { + price, err := p.Latest.GetDestinationChainGasPrice(opts, chainselector) + if err != nil { + return InternalTimestampedPackedUint224{}, err + } + return InternalTimestampedPackedUint224{ + Value: price.Value, + Timestamp: price.Timestamp, + }, nil + } + if p.V1_2_0 != nil { + price, err := p.V1_2_0.GetDestinationChainGasPrice(opts, chainselector) + if err != nil { + return InternalTimestampedPackedUint224{}, err + } + return InternalTimestampedPackedUint224{ + Value: price.Value, + Timestamp: price.Timestamp, + }, nil + } + return InternalTimestampedPackedUint224{}, fmt.Errorf("no instance found to add fee token") +} + +type InternalGasPriceUpdate struct { + DestChainSelector uint64 + UsdPerUnitGas *big.Int +} + +type InternalTokenPriceUpdate struct { + SourceToken common.Address + UsdPerToken *big.Int +} + +type PriceRegistry struct { + client blockchain.EVMClient + Instance *PriceRegistryWrapper + logger *zerolog.Logger + EthAddress common.Address +} + +func (c *PriceRegistry) Address() string { + return c.EthAddress.Hex() +} + +func (c *PriceRegistry) AddPriceUpdater(addr common.Address) error { + opts, err := c.client.TransactionOpts(c.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("error getting transaction opts: %w", err) + } + tx, err := c.Instance.AddPriceUpdater(opts, addr) + if err != nil { + return fmt.Errorf("error adding price updater: %w", err) + } + c.logger.Info(). + Str("updaters", addr.Hex()). + Str(Network, c.client.GetNetworkConfig().Name). 
+ Msg("PriceRegistry updater added") + return c.client.ProcessTransaction(tx) +} + +func (c *PriceRegistry) AddFeeToken(addr common.Address) error { + opts, err := c.client.TransactionOpts(c.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("error getting transaction opts: %w", err) + } + tx, err := c.Instance.AddFeeToken(opts, addr) + if err != nil { + return fmt.Errorf("error adding fee token: %w", err) + } + c.logger.Info(). + Str("feeTokens", addr.Hex()). + Str(Network, c.client.GetNetworkConfig().Name). + Msg("PriceRegistry feeToken set") + return c.client.ProcessTransaction(tx) +} + +func (c *PriceRegistry) UpdatePrices(tokenUpdates []InternalTokenPriceUpdate, gasUpdates []InternalGasPriceUpdate) error { + opts, err := c.client.TransactionOpts(c.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("error getting transaction opts: %w", err) + } + var tx *types.Transaction + if c.Instance.Latest != nil { + var tokenUpdatesLatest []price_registry.InternalTokenPriceUpdate + var gasUpdatesLatest []price_registry.InternalGasPriceUpdate + for _, update := range tokenUpdates { + tokenUpdatesLatest = append(tokenUpdatesLatest, price_registry.InternalTokenPriceUpdate{ + SourceToken: update.SourceToken, + UsdPerToken: update.UsdPerToken, + }) + } + for _, update := range gasUpdates { + gasUpdatesLatest = append(gasUpdatesLatest, price_registry.InternalGasPriceUpdate{ + DestChainSelector: update.DestChainSelector, + UsdPerUnitGas: update.UsdPerUnitGas, + }) + } + tx, err = c.Instance.Latest.UpdatePrices(opts, price_registry.InternalPriceUpdates{ + TokenPriceUpdates: tokenUpdatesLatest, + GasPriceUpdates: gasUpdatesLatest, + }) + if err != nil { + return fmt.Errorf("error updating prices: %w", err) + } + } + if c.Instance.V1_2_0 != nil { + var tokenUpdates_1_2_0 []price_registry_1_2_0.InternalTokenPriceUpdate + var gasUpdates_1_2_0 []price_registry_1_2_0.InternalGasPriceUpdate + for _, update := range tokenUpdates { + tokenUpdates_1_2_0 = append(tokenUpdates_1_2_0, price_registry_1_2_0.InternalTokenPriceUpdate{ + SourceToken: update.SourceToken, + UsdPerToken: update.UsdPerToken, + }) + } + for _, update := range gasUpdates { + gasUpdates_1_2_0 = append(gasUpdates_1_2_0, price_registry_1_2_0.InternalGasPriceUpdate{ + DestChainSelector: update.DestChainSelector, + UsdPerUnitGas: update.UsdPerUnitGas, + }) + } + tx, err = c.Instance.V1_2_0.UpdatePrices(opts, price_registry_1_2_0.InternalPriceUpdates{ + TokenPriceUpdates: tokenUpdates_1_2_0, + GasPriceUpdates: gasUpdates_1_2_0, + }) + if err != nil { + return fmt.Errorf("error updating prices: %w", err) + } + } + if tx == nil { + return fmt.Errorf("no instance found to update prices") + } + c.logger.Info(). + Str(Network, c.client.GetNetworkConfig().Name). + Interface("tokenUpdates", tokenUpdates). + Interface("gasUpdates", gasUpdates). 
+ Msg("Prices updated") + return c.client.ProcessTransaction(tx) +} + +func (c *PriceRegistry) WatchUsdPerUnitGasUpdated(opts *bind.WatchOpts, latest chan *price_registry.PriceRegistryUsdPerUnitGasUpdated, destChain []uint64) (event.Subscription, error) { + if c.Instance.Latest != nil { + return c.Instance.Latest.WatchUsdPerUnitGasUpdated(opts, latest, destChain) + } + if c.Instance.V1_2_0 != nil { + newP, err := price_registry.NewPriceRegistry(c.Instance.V1_2_0.Address(), wrappers.MustNewWrappedContractBackend(c.client, nil)) + if err != nil { + return nil, fmt.Errorf("failed to create new PriceRegistry contract: %w", err) + } + return newP.WatchUsdPerUnitGasUpdated(opts, latest, destChain) + } + return nil, fmt.Errorf("no instance found to watch for price updates for gas") +} + +func (c *PriceRegistry) WatchUsdPerTokenUpdated(opts *bind.WatchOpts, latest chan *price_registry.PriceRegistryUsdPerTokenUpdated) (event.Subscription, error) { + if c.Instance.Latest != nil { + return c.Instance.Latest.WatchUsdPerTokenUpdated(opts, latest, nil) + } + if c.Instance.V1_2_0 != nil { + newP, err := price_registry.NewPriceRegistry(c.Instance.V1_2_0.Address(), wrappers.MustNewWrappedContractBackend(c.client, nil)) + if err != nil { + return nil, fmt.Errorf("failed to create new PriceRegistry contract: %w", err) + } + return newP.WatchUsdPerTokenUpdated(opts, latest, nil) + } + return nil, fmt.Errorf("no instance found to watch for price updates for tokens") +} + +type TokenAdminRegistry struct { + client blockchain.EVMClient + logger *zerolog.Logger + Instance *token_admin_registry.TokenAdminRegistry + EthAddress common.Address +} + +func (r *TokenAdminRegistry) Address() string { + return r.EthAddress.Hex() +} + +func (r *TokenAdminRegistry) SetAdminAndRegisterPool(tokenAddr, poolAddr common.Address) error { + opts, err := r.client.TransactionOpts(r.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("error getting transaction opts: %w", err) + } + tx, err := r.Instance.ProposeAdministrator(opts, tokenAddr, opts.From) + if err != nil { + return fmt.Errorf("error setting admin for token %s : %w", tokenAddr.Hex(), err) + } + err = r.client.ProcessTransaction(tx) + if err != nil { + return fmt.Errorf("error processing tx for setting admin on token %w", err) + } + r.logger.Info(). + Str("Admin", opts.From.Hex()). + Str("Token", tokenAddr.Hex()). + Str("TokenAdminRegistry", r.Address()). + Msg("Admin is set for token on TokenAdminRegistry") + err = r.client.WaitForEvents() + if err != nil { + return fmt.Errorf("error waiting for tx for setting admin on pool %w", err) + } + opts, err = r.client.TransactionOpts(r.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("error getting transaction opts: %w", err) + } + tx, err = r.Instance.AcceptAdminRole(opts, tokenAddr) + if err != nil { + return fmt.Errorf("error accepting admin role for token %s : %w", tokenAddr.Hex(), err) + } + err = r.client.ProcessTransaction(tx) + if err != nil { + return fmt.Errorf("error processing tx for accepting admin role for token %w", err) + } + r.logger.Info(). + Str("Token", tokenAddr.Hex()). + Str("TokenAdminRegistry", r.Address()). 
+ Msg("Admin role is accepted for token on TokenAdminRegistry") + err = r.client.WaitForEvents() + if err != nil { + return fmt.Errorf("error waiting for tx for accepting admin role for token %w", err) + } + opts, err = r.client.TransactionOpts(r.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("error getting transaction opts: %w", err) + } + tx, err = r.Instance.SetPool(opts, tokenAddr, poolAddr) + if err != nil { + return fmt.Errorf("error setting token %s and pool %s : %w", tokenAddr.Hex(), poolAddr.Hex(), err) + } + r.logger.Info(). + Str("Token", tokenAddr.Hex()). + Str("Pool", poolAddr.Hex()). + Str("TokenAdminRegistry", r.Address()). + Msg("token and pool are set on TokenAdminRegistry") + err = r.client.ProcessTransaction(tx) + if err != nil { + return fmt.Errorf("error processing tx for setting token %s and pool %s : %w", tokenAddr.Hex(), poolAddr.Hex(), err) + } + return nil +} + +type Router struct { + client blockchain.EVMClient + logger *zerolog.Logger + Instance *router.Router + EthAddress common.Address +} + +func (r *Router) Address() string { + return r.EthAddress.Hex() +} + +func (r *Router) SetOnRamp(chainSelector uint64, onRamp common.Address) error { + opts, err := r.client.TransactionOpts(r.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("error getting transaction opts: %w", err) + } + r.logger.Info(). + Str("Router", r.Address()). + Str("OnRamp", onRamp.Hex()). + Str(Network, r.client.GetNetworkName()). + Str("ChainSelector", strconv.FormatUint(chainSelector, 10)). + Msg("Setting on ramp for r") + + tx, err := r.Instance.ApplyRampUpdates(opts, []router.RouterOnRamp{{DestChainSelector: chainSelector, OnRamp: onRamp}}, nil, nil) + if err != nil { + return fmt.Errorf("error applying ramp updates: %w", err) + } + r.logger.Info(). + Str("onRamp", onRamp.Hex()). + Str("Network Name", r.client.GetNetworkConfig().Name). + Msg("Router is configured") + return r.client.ProcessTransaction(tx) +} + +func (r *Router) CCIPSend(destChainSelector uint64, msg router.ClientEVM2AnyMessage, valueForNative *big.Int) (*types.Transaction, error) { + opts, err := r.client.TransactionOpts(r.client.GetDefaultWallet()) + if err != nil { + return nil, fmt.Errorf("error getting transaction opts: %w", err) + } + if valueForNative != nil { + opts.Value = valueForNative + } + + r.logger.Info(). + Str(Network, r.client.GetNetworkName()). + Str("Router", r.Address()). + Interface("TokensAndAmounts", msg.TokenAmounts). + Str("FeeToken", msg.FeeToken.Hex()). + Str("ExtraArgs", fmt.Sprintf("0x%x", msg.ExtraArgs[:])). + Str("Receiver", fmt.Sprintf("0x%x", msg.Receiver[:])). + Msg("Sending msg") + return r.Instance.CcipSend(opts, destChainSelector, msg) +} + +func (r *Router) CCIPSendAndProcessTx(destChainSelector uint64, msg router.ClientEVM2AnyMessage, valueForNative *big.Int) (*types.Transaction, error) { + tx, err := r.CCIPSend(destChainSelector, msg, valueForNative) + if err != nil { + return nil, fmt.Errorf("failed to send msg: %w", err) + } + r.logger.Info(). + Str("Router", r.Address()). + Str("txHash", tx.Hash().Hex()). + Str(Network, r.client.GetNetworkConfig().Name). + Str("Chain Selector", strconv.FormatUint(destChainSelector, 10)). 
+ Msg("Message Sent") + return tx, r.client.ProcessTransaction(tx) +} + +func (r *Router) AddOffRamp(offRamp common.Address, sourceChainId uint64) (*types.Transaction, error) { + opts, err := r.client.TransactionOpts(r.client.GetDefaultWallet()) + if err != nil { + return nil, fmt.Errorf("failed to get transaction opts: %w", err) + } + tx, err := r.Instance.ApplyRampUpdates(opts, nil, nil, []router.RouterOffRamp{{SourceChainSelector: sourceChainId, OffRamp: offRamp}}) + if err != nil { + return nil, fmt.Errorf("failed to add offRamp: %w", err) + } + r.logger.Info(). + Str("offRamp", offRamp.Hex()). + Str(Network, r.client.GetNetworkConfig().Name). + Msg("offRamp is added to Router") + return tx, r.client.ProcessTransaction(tx) +} + +func (r *Router) SetWrappedNative(wNative common.Address) (*types.Transaction, error) { + opts, err := r.client.TransactionOpts(r.client.GetDefaultWallet()) + if err != nil { + return nil, fmt.Errorf("failed to get transaction opts: %w", err) + } + tx, err := r.Instance.SetWrappedNative(opts, wNative) + if err != nil { + return nil, fmt.Errorf("failed to set wrapped native: %w", err) + } + r.logger.Info(). + Str("wrapped native", wNative.Hex()). + Str("router", r.Address()). + Str(Network, r.client.GetNetworkConfig().Name). + Msg("wrapped native is added for Router") + return tx, r.client.ProcessTransaction(tx) +} + +func (r *Router) GetFee(destChainSelector uint64, message router.ClientEVM2AnyMessage) (*big.Int, error) { + return r.Instance.GetFee(nil, destChainSelector, message) +} + +type SendReqEventData struct { + MessageId [32]byte + SequenceNumber uint64 + DataLength int + NoOfTokens int + LogInfo LogInfo + Fee *big.Int +} + +type OnRampWrapper struct { + Latest *evm_2_evm_onramp.EVM2EVMOnRamp + V1_2_0 *evm_2_evm_onramp_1_2_0.EVM2EVMOnRamp +} + +func (w OnRampWrapper) SetNops(opts *bind.TransactOpts, owner common.Address) (*types.Transaction, error) { + if w.Latest != nil { + return w.Latest.SetNops(opts, []evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight{ + { + Nop: owner, + Weight: 1, + }, + }) + } + if w.V1_2_0 != nil { + return w.V1_2_0.SetNops(opts, []evm_2_evm_onramp_1_2_0.EVM2EVMOnRampNopAndWeight{ + { + Nop: owner, + Weight: 1, + }, + }) + } + return nil, fmt.Errorf("no instance found to set nops") +} + +func (w OnRampWrapper) SetTokenTransferFeeConfig( + opts *bind.TransactOpts, + config []evm_2_evm_onramp.EVM2EVMOnRampTokenTransferFeeConfigArgs, + addresses []common.Address, +) (*types.Transaction, error) { + if w.Latest != nil { + return w.Latest.SetTokenTransferFeeConfig(opts, config, addresses) + } + if w.V1_2_0 != nil { + var configV12 []evm_2_evm_onramp_1_2_0.EVM2EVMOnRampTokenTransferFeeConfigArgs + for _, c := range config { + configV12 = append(configV12, evm_2_evm_onramp_1_2_0.EVM2EVMOnRampTokenTransferFeeConfigArgs{ + Token: c.Token, + MinFeeUSDCents: c.MinFeeUSDCents, + MaxFeeUSDCents: c.MaxFeeUSDCents, + DeciBps: c.DeciBps, + DestGasOverhead: c.DestGasOverhead, + DestBytesOverhead: c.DestBytesOverhead, + }) + } + return w.V1_2_0.SetTokenTransferFeeConfig(opts, configV12) + } + return nil, fmt.Errorf("no instance found to set token transfer fee config") +} + +func (w OnRampWrapper) PayNops(opts *bind.TransactOpts) (*types.Transaction, error) { + if w.Latest != nil { + return w.Latest.PayNops(opts) + } + if w.V1_2_0 != nil { + return w.V1_2_0.PayNops(opts) + } + return nil, fmt.Errorf("no instance found to pay nops") +} + +func (w OnRampWrapper) WithdrawNonLinkFees(opts *bind.TransactOpts, native common.Address, owner common.Address) 
(*types.Transaction, error) { + if w.Latest != nil { + return w.Latest.WithdrawNonLinkFees(opts, native, owner) + } + if w.V1_2_0 != nil { + return w.V1_2_0.WithdrawNonLinkFees(opts, native, owner) + } + return nil, fmt.Errorf("no instance found to withdraw non link fees") +} + +func (w OnRampWrapper) SetRateLimiterConfig(opts *bind.TransactOpts, config evm_2_evm_onramp.RateLimiterConfig) (*types.Transaction, error) { + if w.Latest != nil { + return w.Latest.SetRateLimiterConfig(opts, config) + } + if w.V1_2_0 != nil { + return w.V1_2_0.SetRateLimiterConfig(opts, evm_2_evm_onramp_1_2_0.RateLimiterConfig{ + IsEnabled: config.IsEnabled, + Capacity: config.Capacity, + Rate: config.Rate, + }) + } + return nil, fmt.Errorf("no instance found to set rate limiter config") +} + +func (w OnRampWrapper) ParseCCIPSendRequested(l types.Log) (uint64, error) { + if w.Latest != nil { + sendReq, err := w.Latest.ParseCCIPSendRequested(l) + if err != nil { + return 0, err + } + return sendReq.Message.SequenceNumber, nil + } + if w.V1_2_0 != nil { + sendReq, err := w.V1_2_0.ParseCCIPSendRequested(l) + if err != nil { + return 0, err + } + return sendReq.Message.SequenceNumber, nil + } + return 0, fmt.Errorf("no instance found to parse CCIPSendRequested") +} + +func (w OnRampWrapper) GetDynamicConfig(opts *bind.CallOpts) (uint32, error) { + if w.Latest != nil { + cfg, err := w.Latest.GetDynamicConfig(opts) + if err != nil { + return 0, err + } + return cfg.MaxDataBytes, nil + } + if w.V1_2_0 != nil { + cfg, err := w.V1_2_0.GetDynamicConfig(opts) + if err != nil { + return 0, err + } + return cfg.MaxDataBytes, nil + } + return 0, fmt.Errorf("no instance found to get dynamic config") +} + +func (w OnRampWrapper) ApplyPoolUpdates(opts *bind.TransactOpts, tokens []common.Address, pools []common.Address) (*types.Transaction, error) { + if w.Latest != nil { + return nil, fmt.Errorf("latest version does not support ApplyPoolUpdates") + } + if w.V1_2_0 != nil { + var poolUpdates []evm_2_evm_onramp_1_2_0.InternalPoolUpdate + if len(tokens) != len(pools) { + return nil, fmt.Errorf("tokens and pools length mismatch") + } + for i, token := range tokens { + poolUpdates = append(poolUpdates, evm_2_evm_onramp_1_2_0.InternalPoolUpdate{ + Token: token, + Pool: pools[i], + }) + } + return w.V1_2_0.ApplyPoolUpdates(opts, []evm_2_evm_onramp_1_2_0.InternalPoolUpdate{}, poolUpdates) + } + return nil, fmt.Errorf("no instance found to apply pool updates") +} + +// CurrentRateLimiterState returns the current state of the rate limiter +func (w OnRampWrapper) CurrentRateLimiterState(opts *bind.CallOpts) (*RateLimiterConfig, error) { + if w.Latest != nil { + rlConfig, err := w.Latest.CurrentRateLimiterState(opts) + if err != nil { + return nil, err + } + return &RateLimiterConfig{ + IsEnabled: rlConfig.IsEnabled, + Rate: rlConfig.Rate, + Capacity: rlConfig.Capacity, + Tokens: rlConfig.Tokens, + }, err + } + if w.V1_2_0 != nil { + rlConfig, err := w.V1_2_0.CurrentRateLimiterState(opts) + if err != nil { + return nil, err + } + return &RateLimiterConfig{ + IsEnabled: rlConfig.IsEnabled, + Rate: rlConfig.Rate, + Capacity: rlConfig.Capacity, + Tokens: rlConfig.Tokens, + }, err + } + return nil, fmt.Errorf("no instance found to get current rate limiter state") +} + +type OnRamp struct { + client blockchain.EVMClient + logger *zerolog.Logger + Instance *OnRampWrapper + EthAddress common.Address +} + +// WatchCCIPSendRequested returns a subscription to watch for CCIPSendRequested events +// there is no difference in the event between the two 
versions +// so we can use the latest version to watch for events +func (onRamp *OnRamp) WatchCCIPSendRequested(opts *bind.WatchOpts, sendReqEvent chan *evm_2_evm_onramp.EVM2EVMOnRampCCIPSendRequested) (event.Subscription, error) { + if onRamp.Instance.Latest != nil { + return onRamp.Instance.Latest.WatchCCIPSendRequested(opts, sendReqEvent) + } + // cast the contract to the latest version so that we can watch for events with latest wrapper + if onRamp.Instance.V1_2_0 != nil { + newRamp, err := evm_2_evm_onramp.NewEVM2EVMOnRamp(onRamp.EthAddress, wrappers.MustNewWrappedContractBackend(onRamp.client, nil)) + if err != nil { + return nil, fmt.Errorf("failed to cast to latest version: %w", err) + } + return newRamp.WatchCCIPSendRequested(opts, sendReqEvent) + } + // should never reach here + return nil, fmt.Errorf("no instance found to watch for CCIPSendRequested") +} + +func (onRamp *OnRamp) Address() string { + return onRamp.EthAddress.Hex() +} + +func (onRamp *OnRamp) SetNops() error { + opts, err := onRamp.client.TransactionOpts(onRamp.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("failed to get transaction opts: %w", err) + } + owner := common.HexToAddress(onRamp.client.GetDefaultWallet().Address()) + // set the payee to the default wallet + tx, err := onRamp.Instance.SetNops(opts, owner) + if err != nil { + return fmt.Errorf("failed to set nops: %w", err) + } + return onRamp.client.ProcessTransaction(tx) +} + +// SetTokenTransferFeeConfig sets the token transfer fee configuration for the OnRamp +func (onRamp *OnRamp) SetTokenTransferFeeConfig(tokenTransferFeeConfig []evm_2_evm_onramp.EVM2EVMOnRampTokenTransferFeeConfigArgs) error { + opts, err := onRamp.client.TransactionOpts(onRamp.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("failed to get transaction opts: %w", err) + } + for i := range tokenTransferFeeConfig { + if tokenTransferFeeConfig[i].DestBytesOverhead == 0 { + tokenTransferFeeConfig[i].DestBytesOverhead = defaultDestByteOverhead + } + if tokenTransferFeeConfig[i].DestGasOverhead == 0 { + tokenTransferFeeConfig[i].DestGasOverhead = defaultDestGasOverhead + } + } + tx, err := onRamp.Instance.SetTokenTransferFeeConfig(opts, tokenTransferFeeConfig, []common.Address{}) + if err != nil { + return fmt.Errorf("failed to set token transfer fee config: %w", err) + } + onRamp.logger.Info(). + Interface("tokenTransferFeeConfig", tokenTransferFeeConfig). + Str("onRamp", onRamp.Address()). + Str(Network, onRamp.client.GetNetworkConfig().Name). 
+ Msg("TokenTransferFeeConfig set in OnRamp") + return onRamp.client.ProcessTransaction(tx) +} + +func (onRamp *OnRamp) PayNops() error { + opts, err := onRamp.client.TransactionOpts(onRamp.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("failed to get transaction opts: %w", err) + } + tx, err := onRamp.Instance.PayNops(opts) + if err != nil { + return fmt.Errorf("failed to pay nops: %w", err) + } + return onRamp.client.ProcessTransaction(tx) +} + +func (onRamp *OnRamp) WithdrawNonLinkFees(wrappedNative common.Address) error { + opts, err := onRamp.client.TransactionOpts(onRamp.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("failed to get transaction opts: %w", err) + } + owner := common.HexToAddress(onRamp.client.GetDefaultWallet().Address()) + tx, err := onRamp.Instance.WithdrawNonLinkFees(opts, wrappedNative, owner) + if err != nil { + return fmt.Errorf("failed to withdraw non link fees: %w", err) + } + return onRamp.client.ProcessTransaction(tx) +} + +// SetRateLimit sets the Aggregate Rate Limit (ARL) values for the OnRamp +func (onRamp *OnRamp) SetRateLimit(rlConfig evm_2_evm_onramp.RateLimiterConfig) error { + opts, err := onRamp.client.TransactionOpts(onRamp.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := onRamp.Instance.SetRateLimiterConfig(opts, rlConfig) + if err != nil { + return fmt.Errorf("failed to set rate limit: %w", err) + } + onRamp.logger.Info(). + Bool("Enabled", rlConfig.IsEnabled). + Str("capacity", rlConfig.Capacity.String()). + Str("rate", rlConfig.Rate.String()). + Str("onRamp", onRamp.Address()). + Str(Network, onRamp.client.GetNetworkConfig().Name). + Msg("Setting Rate limit in OnRamp") + return onRamp.client.ProcessTransaction(tx) +} + +func (onRamp *OnRamp) ApplyPoolUpdates(tokens []common.Address, pools []common.Address) error { + // if the latest version is used, no need to apply pool updates + if onRamp.Instance.Latest != nil { + return nil + } + opts, err := onRamp.client.TransactionOpts(onRamp.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("failed to get transaction opts: %w", err) + } + tx, err := onRamp.Instance.ApplyPoolUpdates(opts, tokens, pools) + if err != nil { + return fmt.Errorf("failed to apply pool updates: %w", err) + } + onRamp.logger.Info(). + Interface("tokens", tokens). + Interface("pools", pools). + Str("onRamp", onRamp.Address()). + Str(Network, onRamp.client.GetNetworkConfig().Name). 
+ Msg("poolUpdates set in OnRamp") + return onRamp.client.ProcessTransaction(tx) +} + +// OffRamp represents the OffRamp CCIP contract on the destination chain +type OffRamp struct { + client blockchain.EVMClient + logger *zerolog.Logger + Instance *OffRampWrapper + EthAddress common.Address +} + +func (offRamp *OffRamp) Address() string { + return offRamp.EthAddress.Hex() +} + +// WatchExecutionStateChanged returns a subscription to watch for ExecutionStateChanged events +// there is no difference in the event between the two versions +// so we can use the latest version to watch for events +func (offRamp *OffRamp) WatchExecutionStateChanged( + opts *bind.WatchOpts, + execEvent chan *evm_2_evm_offramp.EVM2EVMOffRampExecutionStateChanged, + sequenceNumber []uint64, + messageId [][32]byte, +) (event.Subscription, error) { + if offRamp.Instance.Latest != nil { + return offRamp.Instance.Latest.WatchExecutionStateChanged(opts, execEvent, sequenceNumber, messageId) + } + if offRamp.Instance.V1_2_0 != nil { + newOffRamp, err := evm_2_evm_offramp.NewEVM2EVMOffRamp(offRamp.EthAddress, wrappers.MustNewWrappedContractBackend(offRamp.client, nil)) + if err != nil { + return nil, fmt.Errorf("failed to cast to latest version of OffRamp from v1_2_0: %w", err) + } + return newOffRamp.WatchExecutionStateChanged(opts, execEvent, sequenceNumber, messageId) + } + return nil, fmt.Errorf("no instance found to watch for ExecutionStateChanged") +} + +// SetOCR2Config sets the offchain reporting protocol configuration +func (offRamp *OffRamp) SetOCR2Config( + signers []common.Address, + transmitters []common.Address, + f uint8, + onchainConfig []byte, + offchainConfigVersion uint64, + offchainConfig []byte, +) error { + offRamp.logger.Info().Str("Contract Address", offRamp.Address()).Msg("Configuring OffRamp Contract") + // Set Config + opts, err := offRamp.client.TransactionOpts(offRamp.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("failed to get transaction options: %w", err) + } + offRamp.logger.Debug(). + Interface("SignerAddresses", signers). + Interface("TransmitterAddresses", transmitters). + Str(Network, offRamp.client.GetNetworkConfig().Name). 
+ Msg("Configuring OffRamp") + if offRamp.Instance.Latest != nil { + tx, err := offRamp.Instance.Latest.SetOCR2Config( + opts, + signers, + transmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig, + ) + if err != nil { + return fmt.Errorf("failed to set latest OCR2 config: %w", err) + } + return offRamp.client.ProcessTransaction(tx) + } + if offRamp.Instance.V1_2_0 != nil { + tx, err := offRamp.Instance.V1_2_0.SetOCR2Config( + opts, + signers, + transmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig, + ) + if err != nil { + return fmt.Errorf("failed to set 1.2 OCR2 config: %w", err) + } + return offRamp.client.ProcessTransaction(tx) + } + return fmt.Errorf("no instance found to set OCR2 config") +} + +// AddRateLimitTokens adds token pairs to the OffRamp's rate limit +func (offRamp *OffRamp) AddRateLimitTokens(sourceTokens, destTokens []common.Address) error { + if offRamp.Instance.V1_2_0 != nil { + return nil + } + + if len(sourceTokens) != len(destTokens) { + return fmt.Errorf("source and dest tokens must be of the same length") + } + opts, err := offRamp.client.TransactionOpts(offRamp.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("failed to get transaction opts: %w", err) + } + + if offRamp.Instance.Latest != nil { + rateLimitTokens := make([]evm_2_evm_offramp.EVM2EVMOffRampRateLimitToken, len(sourceTokens)) + for i, sourceToken := range sourceTokens { + rateLimitTokens[i] = evm_2_evm_offramp.EVM2EVMOffRampRateLimitToken{ + SourceToken: sourceToken, + DestToken: destTokens[i], + } + } + + tx, err := offRamp.Instance.Latest.UpdateRateLimitTokens(opts, []evm_2_evm_offramp.EVM2EVMOffRampRateLimitToken{}, rateLimitTokens) + if err != nil { + return fmt.Errorf("failed to apply rate limit tokens updates: %w", err) + } + offRamp.logger.Info(). + Interface("rateLimitToken adds", rateLimitTokens). + Str("offRamp", offRamp.Address()). + Str(Network, offRamp.client.GetNetworkConfig().Name). + Msg("rateLimitTokens set in OffRamp") + return offRamp.client.ProcessTransaction(tx) + } + return fmt.Errorf("no supported OffRamp version instance found") +} + +// RemoveRateLimitTokens removes token pairs to the OffRamp's rate limit. +// If you ask to remove a token pair that doesn't exist, it will return an error. 
+func (offRamp *OffRamp) RemoveRateLimitTokens(ctx context.Context, sourceTokens, destTokens []common.Address) error { + callOpts := &bind.CallOpts{ + From: common.HexToAddress(offRamp.client.GetDefaultWallet().Address()), + Context: ctx, + } + + switch { + case offRamp.Instance.Latest != nil: + existingRateLimitTokens, err := offRamp.Instance.Latest.GetAllRateLimitTokens(callOpts) + if err != nil { + return fmt.Errorf("failed to get all rate limit tokens: %w", err) + } + + rateLimitTokens := make([]evm_2_evm_offramp.EVM2EVMOffRampRateLimitToken, len(sourceTokens)) + for i, sourceToken := range sourceTokens { + destToken := destTokens[i] + // Check if the source rate limit token exists + foundIndex := -1 + for j, existingSourceToken := range existingRateLimitTokens.SourceTokens { + if existingSourceToken == sourceToken { + foundIndex = j + break + } + } + if foundIndex == -1 { + return fmt.Errorf("source rate limit token not found for pair: %s -> %s", sourceTokens[i].Hex(), destTokens[i].Hex()) + } + // Check if the matching dest rate limit token exists + if existingRateLimitTokens.DestTokens[foundIndex] != destToken { + return fmt.Errorf("dest rate limit token not found for pair: %s -> %s", sourceTokens[i].Hex(), destTokens[i].Hex()) + } + // Update the existing rate limit tokens to remove the pair for visibility + existingRateLimitTokens.SourceTokens = append(existingRateLimitTokens.SourceTokens[:foundIndex], existingRateLimitTokens.SourceTokens[foundIndex+1:]...) + existingRateLimitTokens.DestTokens = append(existingRateLimitTokens.DestTokens[:foundIndex], existingRateLimitTokens.DestTokens[foundIndex+1:]...) + + rateLimitTokens[i] = evm_2_evm_offramp.EVM2EVMOffRampRateLimitToken{ + SourceToken: sourceToken, + DestToken: destToken, + } + } + + opts, err := offRamp.client.TransactionOpts(offRamp.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("failed to get transaction opts: %w", err) + } + tx, err := offRamp.Instance.Latest.UpdateRateLimitTokens(opts, rateLimitTokens, []evm_2_evm_offramp.EVM2EVMOffRampRateLimitToken{}) + if err != nil { + return fmt.Errorf("failed to remove rate limit tokens: %w", err) + } + offRamp.logger.Info(). + Interface("RateLimitTokens Remaining", existingRateLimitTokens). + Interface("RateLimitTokens Removed", rateLimitTokens). + Str("OffRamp", offRamp.Address()). + Str(Network, offRamp.client.GetNetworkConfig().Name). + Msg("RateLimitTokens Removed from OffRamp") + return offRamp.client.ProcessTransaction(tx) + case offRamp.Instance.V1_2_0 != nil: + return nil + } + return fmt.Errorf("no supported OffRamp version instance found") +} + +// RemoveAllRateLimitTokens removes all token pairs from the OffRamp's rate limit. 
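+//
+// A minimal usage sketch, typically run during teardown (ctx, offRamp and the testing.T
+// value t are assumed to come from the calling test):
+//
+//	if err := offRamp.RemoveAllRateLimitTokens(ctx); err != nil {
+//		t.Error(err)
+//	}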
+func (offRamp *OffRamp) RemoveAllRateLimitTokens(ctx context.Context) error { + callOpts := &bind.CallOpts{ + From: common.HexToAddress(offRamp.client.GetDefaultWallet().Address()), + Context: ctx, + } + + switch { + case offRamp.Instance.Latest != nil: + allRateLimitTokens, err := offRamp.Instance.Latest.GetAllRateLimitTokens(callOpts) + if err != nil { + return fmt.Errorf("failed to get all rate limit tokens: %w", err) + } + + rateLimitTokens := make([]evm_2_evm_offramp.EVM2EVMOffRampRateLimitToken, len(allRateLimitTokens.SourceTokens)) + for i, sourceToken := range allRateLimitTokens.SourceTokens { + rateLimitTokens[i] = evm_2_evm_offramp.EVM2EVMOffRampRateLimitToken{ + SourceToken: sourceToken, + DestToken: allRateLimitTokens.DestTokens[i], + } + } + + opts, err := offRamp.client.TransactionOpts(offRamp.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("failed to get transaction opts: %w", err) + } + tx, err := offRamp.Instance.Latest.UpdateRateLimitTokens(opts, rateLimitTokens, []evm_2_evm_offramp.EVM2EVMOffRampRateLimitToken{}) + if err != nil { + return fmt.Errorf("failed to remove rate limit tokens: %w", err) + } + offRamp.logger.Info(). + Interface("RateLimitTokens Removed", rateLimitTokens). + Str("OffRamp", offRamp.Address()). + Str(Network, offRamp.client.GetNetworkConfig().Name). + Msg("Removed all RateLimitTokens from OffRamp") + return offRamp.client.ProcessTransaction(tx) + case offRamp.Instance.V1_2_0 != nil: + return nil + } + return fmt.Errorf("no supported OffRamp version instance found") +} + +// SetRateLimit sets the Aggregate Rate Limit (ARL) values for the OffRamp +func (offRamp *OffRamp) SetRateLimit(rlConfig RateLimiterConfig) error { + opts, err := offRamp.client.TransactionOpts(offRamp.client.GetDefaultWallet()) + if err != nil { + return err + } + offRamp.logger.Info(). + Bool("Enabled", rlConfig.IsEnabled). + Str("Capacity", rlConfig.Capacity.String()). + Str("Rate", rlConfig.Rate.String()). + Str("OffRamp", offRamp.Address()). + Str(Network, offRamp.client.GetNetworkConfig().Name). 
+ Msg("Setting Rate limit on OffRamp") + + switch { + case offRamp.Instance.Latest != nil: + tx, err := offRamp.Instance.Latest.SetRateLimiterConfig(opts, evm_2_evm_offramp.RateLimiterConfig{ + IsEnabled: rlConfig.IsEnabled, + Capacity: rlConfig.Capacity, + Rate: rlConfig.Rate, + }) + if err != nil { + return fmt.Errorf("failed to set rate limit: %w", err) + } + return offRamp.client.ProcessTransaction(tx) + case offRamp.Instance.V1_2_0 != nil: + tx, err := offRamp.Instance.V1_2_0.SetRateLimiterConfig(opts, evm_2_evm_offramp_1_2_0.RateLimiterConfig{ + IsEnabled: rlConfig.IsEnabled, + Capacity: rlConfig.Capacity, + Rate: rlConfig.Rate, + }) + if err != nil { + return fmt.Errorf("failed to set rate limit: %w", err) + } + return offRamp.client.ProcessTransaction(tx) + } + return fmt.Errorf("no supported OffRamp version instance found") +} + +func (offRamp *OffRamp) SyncTokensAndPools(sourceTokens, pools []common.Address) error { + if offRamp.Instance.Latest != nil { + return nil + } + opts, err := offRamp.client.TransactionOpts(offRamp.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("failed to get transaction opts: %w", err) + } + if offRamp.Instance.V1_2_0 != nil { + var tokenUpdates []evm_2_evm_offramp_1_2_0.InternalPoolUpdate + for i, srcToken := range sourceTokens { + tokenUpdates = append(tokenUpdates, evm_2_evm_offramp_1_2_0.InternalPoolUpdate{ + Token: srcToken, + Pool: pools[i], + }) + } + tx, err := offRamp.Instance.V1_2_0.ApplyPoolUpdates(opts, []evm_2_evm_offramp_1_2_0.InternalPoolUpdate{}, tokenUpdates) + if err != nil { + return fmt.Errorf("failed to apply pool updates: %w", err) + } + offRamp.logger.Info(). + Interface("tokenUpdates", tokenUpdates). + Str("offRamp", offRamp.Address()). + Str(Network, offRamp.client.GetNetworkConfig().Name). + Msg("tokenUpdates set in OffRamp") + return offRamp.client.ProcessTransaction(tx) + } + return fmt.Errorf("no instance found to sync tokens and pools") +} + +// OffRampWrapper wraps multiple versions of the OffRamp contract as we support multiple at once. +// If you are using any of the functions in this struct, be sure to follow best practices: +// 1. If the function does not make sense for a specific version, +// (e.g. crucial functionality that changes state, but doesn't exist yet) return an error. +// 2. If the function does not make sense for a specific version, but calling it doesn't change how execution would work +// (e.g. functionality that wouldn't change state), you can return a nil or default value, treating it as a no-op. +// 3. If no valid versions are available, return an error. +// +// See CurrentRateLimiterState, WatchExecutionStateChanged, and AddRateLimitTokens for examples. 
+type OffRampWrapper struct { + Latest *evm_2_evm_offramp.EVM2EVMOffRamp + V1_2_0 *evm_2_evm_offramp_1_2_0.EVM2EVMOffRamp +} + +// CurrentRateLimiterState retrieves the current rate limiter state for the OffRamp contract +func (offRamp *OffRampWrapper) CurrentRateLimiterState(opts *bind.CallOpts) (RateLimiterConfig, error) { + if offRamp.Latest != nil { + rlConfig, err := offRamp.Latest.CurrentRateLimiterState(opts) + if err != nil { + return RateLimiterConfig{}, err + } + return RateLimiterConfig{ + IsEnabled: rlConfig.IsEnabled, + Capacity: rlConfig.Capacity, + Rate: rlConfig.Rate, + }, nil + } + if offRamp.V1_2_0 != nil { + rlConfig, err := offRamp.V1_2_0.CurrentRateLimiterState(opts) + if err != nil { + return RateLimiterConfig{}, err + } + return RateLimiterConfig{ + IsEnabled: rlConfig.IsEnabled, + Capacity: rlConfig.Capacity, + Rate: rlConfig.Rate, + }, nil + } + return RateLimiterConfig{}, fmt.Errorf("no instance found to get rate limiter state") +} + +type EVM2EVMOffRampExecutionStateChanged struct { + SequenceNumber uint64 + MessageId [32]byte + State uint8 + ReturnData []byte + LogInfo LogInfo +} + +type MockAggregator struct { + client blockchain.EVMClient + logger *zerolog.Logger + Instance *mock_v3_aggregator_contract.MockV3Aggregator + ContractAddress common.Address + RoundId *big.Int + Answer *big.Int +} + +func (a *MockAggregator) ChainID() uint64 { + return a.client.GetChainID().Uint64() +} + +// UpdateRoundData updates the round data in the aggregator contract +// if answer is nil, it will set next round data by adding random percentage( within provided range) to the previous round data +func (a *MockAggregator) UpdateRoundData(answer *big.Int, minP, maxP *int) error { + if answer == nil && (minP == nil || maxP == nil) { + return fmt.Errorf("minP and maxP are required to update round data with random percentage if answer is nil") + } + // if round id is nil, set it to 1 + if a.RoundId == nil { + a.RoundId = big.NewInt(1) + } + // if there is no answer provided and last saved answer is nil + // we fetch the last round data from chain + // and set the answer to the aggregator's latest answer and round id to the aggregator's latest round id + if answer == nil && a.Answer == nil { + roundData, err := a.Instance.LatestRoundData(nil) + if err != nil || roundData.RoundId == nil || roundData.Answer == nil { + return fmt.Errorf("unable to get latest round data: %w", err) + } + a.Answer = roundData.Answer + a.RoundId = roundData.RoundId + } + + // if answer is nil, we calculate the answer with random percentage (within the provided range) of latest answer + if answer == nil { + rand.Seed(uint64(time.Now().UnixNano())) + randomNumber := rand.Intn(pointer.GetInt(maxP)-pointer.GetInt(minP)+1) + pointer.GetInt(minP) + // answer = previous round answer + (previous round answer * random percentage) + answer = new(big.Int).Add(a.Answer, new(big.Int).Div(new(big.Int).Mul(a.Answer, big.NewInt(int64(randomNumber))), big.NewInt(100))) + } + // increment the round id + round := new(big.Int).Add(a.RoundId, big.NewInt(1)) + // save the round data as the latest round data + a.RoundId = round + a.Answer = answer + opts, err := a.client.TransactionOpts(a.client.GetDefaultWallet()) + if err != nil { + return fmt.Errorf("unable to get transaction opts: %w", err) + } + a.logger.Info(). + Str("Contract Address", a.ContractAddress.Hex()). + Str("Network Name", a.client.GetNetworkConfig().Name). 
+ Msg("Updating Round Data") + tx, err := a.Instance.UpdateRoundData(opts, round, answer, big.NewInt(time.Now().UTC().UnixNano()), big.NewInt(time.Now().UTC().UnixNano())) + if err != nil { + return fmt.Errorf("unable to update round data: %w", err) + } + a.logger.Info(). + Str("Contract Address", a.ContractAddress.Hex()). + Str("Network Name", a.client.GetNetworkConfig().Name). + Str("Round", round.String()). + Str("Answer", answer.String()). + Msg("Updated Round Data") + ctx, cancel := context.WithTimeout(context.Background(), a.client.GetNetworkConfig().Timeout.Duration) + defer cancel() + rec, err := bind.WaitMined(ctx, a.client.DeployBackend(), tx) + if err != nil { + return fmt.Errorf("error waiting for tx %s to be mined", tx.Hash().Hex()) + } + if rec.Status != types.ReceiptStatusSuccessful { + return fmt.Errorf("tx %s failed while updating round data", tx.Hash().Hex()) + } + + return a.client.MarkTxAsSentOnL2(tx) +} diff --git a/integration-tests/ccip-tests/contracts/laneconfig/contracts-1.2.json b/integration-tests/ccip-tests/contracts/laneconfig/contracts-1.2.json new file mode 100644 index 00000000000..4de4d1e504d --- /dev/null +++ b/integration-tests/ccip-tests/contracts/laneconfig/contracts-1.2.json @@ -0,0 +1,634 @@ +{ + "lane_configs": { + "Arbitrum Mainnet": { + "is_native_fee_token": true, + "fee_token": "0xf97f4df75117a78c1A5a0DBb814Af92458539FB4", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0xe06b0e8c4bd455153e8794ad7Ea8Ff5A14B64E4b", + "router": "0x141fa059441E0ca23ce184B6A78bafD2A517DdE8", + "price_registry": "0x13015e4E6f839E1Aa1016DF521ea458ecA20438c", + "wrapped_native": "0x82aF49447D8a07e3bd95BD0d56f35241523fBab1", + "src_contracts": { + "Avalanche Mainnet": { + "on_ramp": "0x05B723f3db92430FbE4395fD03E40Cc7e9D17988", + "deployed_at": 11111111 + }, + "Base Mainnet": { + "on_ramp": "0x77b60F85b25fD501E3ddED6C1fe7bF565C08A22A", + "deployed_at": 11111111 + }, + "BSC Mainnet": { + "on_ramp": "0x79f3ABeCe5A3AFFf32D47F4CFe45e7b65c9a2D91", + "deployed_at": 11111111 + }, + "Ethereum Mainnet": { + "on_ramp": "0xCe11020D56e5FDbfE46D9FC3021641FfbBB5AdEE", + "deployed_at": 11111111 + }, + "Optimism Mainnet": { + "on_ramp": "0xC09b72E8128620C40D89649019d995Cc79f030C3", + "deployed_at": 11111111 + }, + "Polygon Mainnet": { + "on_ramp": "0x122F05F49e90508F089eE8D0d868d1a4f3E5a809", + "deployed_at": 11111111 + }, + "WeMix Mainnet": { + "on_ramp": "0x66a0046ac9FA104eB38B04cfF391CcD0122E6FbC", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Avalanche Mainnet": { + "off_ramp": "0xe0109912157d5B75ea8b3181123Cf32c73bc9920", + "commit_store": "0xDaa61b8Cd85977820f92d1e749E1D9F55Da6CCEA", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Base Mainnet": { + "off_ramp": "0xdB19F77F87661f9be0F557cf9a1ebeCf7D8F206c", + "commit_store": "0x6e37f4c82d9A31cc42B445874dd3c3De97AB553f", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "BSC Mainnet": { + "off_ramp": "0xB1b705c2315fced1B38baE463BE7DDef531e47fA", + "commit_store": "0x310cECbFf14Ad0307EfF762F461a487C1abb90bf", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Ethereum Mainnet": { + "off_ramp": "0x542ba1902044069330e8c5b36A84EC503863722f", + "commit_store": "0x060331fEdA35691e54876D957B4F9e3b8Cb47d20", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Optimism Mainnet": { + "off_ramp": "0xeeed4D86F3E0e6d32A6Ad29d8De6A0Dc91963A5f", + "commit_store": "0xbbB563c4d98020b9c0f3Cc34c2C0Ef9676806E35", + "receiver_dapp": 
"0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Polygon Mainnet": { + "off_ramp": "0x9bDA7c8DCda4E39aFeB483cc0B7E3C1f6E0D5AB1", + "commit_store": "0x63a0AeaadAe851b990bBD9dc41f5C1B08b32026d", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "WeMix Mainnet": { + "off_ramp": "0xEEf5Fb4c4953F9cA9ab1f25cE590776AfFc2c455", + "commit_store": "0xD268286A277095a9C3C90205110831a84505881c", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "Avalanche Mainnet": { + "is_native_fee_token": true, + "fee_token": "0x5947BB275c521040051D82396192181b413227A3", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0xdFD6C0dc67666DE3bB36b31eec5c7B1542A82C1E", + "router": "0xF4c7E640EdA248ef95972845a62bdC74237805dB", + "price_registry": "0xfA4edD04eaAcDB07c8D73621bc1790eC50D8c489", + "wrapped_native": "0xB31f66AA3C1e785363F0875A1B74E27b85FD66c7", + "src_contracts": { + "Arbitrum Mainnet": { + "on_ramp": "0x98f51B041e493fc4d72B8BD33218480bA0c66DDF", + "deployed_at": 11111111 + }, + "Base Mainnet": { + "on_ramp": "0x268fb4311D2c6CB2bbA01CCA9AC073Fb3bfd1C7c", + "deployed_at": 11111111 + }, + "BSC Mainnet": { + "on_ramp": "0x8eaae6462816CB4957184c48B86afA7642D8Bf2B", + "deployed_at": 11111111 + }, + "Ethereum Mainnet": { + "on_ramp": "0xD0701FcC7818c31935331B02Eb21e91eC71a1704", + "deployed_at": 11111111 + }, + "Optimism Mainnet": { + "on_ramp": "0x8629008887E073260c5434D6CaCFc83C3001d211", + "deployed_at": 11111111 + }, + "Polygon Mainnet": { + "on_ramp": "0x97500490d9126f34cf9aA0126d64623E170319Ef", + "deployed_at": 11111111 + }, + "WeMix Mainnet": { + "on_ramp": "0x9b1ed9De069Be4d50957464b359f98eD0Bf34dd5", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Arbitrum Mainnet": { + "off_ramp": "0x770b1375F86E7a9bf30DBe3F97bea67193dC9135", + "commit_store": "0x23E2b34Ce8e12c53f8a39AD4b3FFCa845f8E617C", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Base Mainnet": { + "off_ramp": "0x4d6A796Bc85dcDF41ce9AaEB50B094C6b589748f", + "commit_store": "0xc4C4358FA01a04D6c6FE3b96a351946d4c2715C2", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "BSC Mainnet": { + "off_ramp": "0x83F53Fc798FEbfFbdF84830AD403b9989187a06C", + "commit_store": "0xD8ceCE2D7794385E00Ce3EF94550E732b0A0B959", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Ethereum Mainnet": { + "off_ramp": "0x5B833BD6456c604Eb396C0fBa477aD49e82B1A2a", + "commit_store": "0x23E23958D220B774680f91c2c91a6f2B2f610d7e", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Optimism Mainnet": { + "off_ramp": "0xb68A3EE8bD0A09eE221cf1859Dd5a4d5765188Fe", + "commit_store": "0x83DCeeCf822981F9F8552925eEfd88CAc1905dEA", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Polygon Mainnet": { + "off_ramp": "0x19250aBE66B88F214d02B6f3BF80F4118290C619", + "commit_store": "0x87A0935cE6254dB1252bBac90d1D07D04846aDCA", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "WeMix Mainnet": { + "off_ramp": "0x317dE8bc5c3292E494b6496586696d4966A922B0", + "commit_store": "0x97Fbf3d6DEac16adC721aE9187CeEa1e610aC7Af", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + } + } + }, + "Base Mainnet": { + "is_native_fee_token": true, + "fee_token": "0x88Fb150BDc53A65fe94Dea0c9BA0a6dAf8C6e196", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0x38660c8CC222c0192b635c2ac09687B4F25cCE5F", + "router": "0x881e3A65B4d4a04dD529061dd0071cf975F58bCD", + "price_registry": 
"0x6337a58D4BD7Ba691B66341779e8f87d4679923a", + "wrapped_native": "0x4200000000000000000000000000000000000006", + "src_contracts": { + "Arbitrum Mainnet": { + "on_ramp": "0x1E5Ca70d1e7A1B26061125738a880BBeA42FeB21", + "deployed_at": 11111111 + }, + "Avalanche Mainnet": { + "on_ramp": "0xBE5a9E336D9614024B4Fa10D8112671fc9A42d96", + "deployed_at": 11111111 + }, + "BSC Mainnet": { + "on_ramp": "0xdd4Fb402d41Beb0eEeF6CfB1bf445f50bDC8c981", + "deployed_at": 11111111 + }, + "Ethereum Mainnet": { + "on_ramp": "0xDEA286dc0E01Cb4755650A6CF8d1076b454eA1cb", + "deployed_at": 11111111 + }, + "Optimism Mainnet": { + "on_ramp": "0xd952FEAcDd5919Cc5E9454b53bF45d4E73dD6457", + "deployed_at": 11111111 + }, + "Polygon Mainnet": { + "on_ramp": "0x3DB8Bea142e41cA3633890d0e5640F99a895D6A5", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Arbitrum Mainnet": { + "off_ramp": "0x8531E63aE9279a1f0D09eba566CD1b092b95f3D5", + "commit_store": "0x327E13f54c7871a2416006B33B4822eAAD357916", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Avalanche Mainnet": { + "off_ramp": "0x8345F2fF67e5A65e85dc955DE1414832608E00aD", + "commit_store": "0xd0b13be4c53A6262b47C5DDd36F0257aa714F562", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "BSC Mainnet": { + "off_ramp": "0x48a51f5D38BE630Ddd6417Ea2D9052B8efc91a18", + "commit_store": "0xF97127e77252284EC9D4bc13C247c9D1A99F72B0", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Ethereum Mainnet": { + "off_ramp": "0xEC0cFe335a4d53dBA70CB650Ab56eEc32788F0BB", + "commit_store": "0x0ae3c2c7FB789bd05A450CD3075D11f6c2Ca4F77", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Optimism Mainnet": { + "off_ramp": "0xf50c0d2a8B6Db60f1D93E60f03d0413D56153E4F", + "commit_store": "0x16f72C15165f7C9d74c12fDF188E399d4d3724e4", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Polygon Mainnet": { + "off_ramp": "0x75F29f058b31106F99caFdc17c9b26ADfcC7b5D7", + "commit_store": "0xb719616E732581B570232DfB13Ca49D27667Af9f", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + } + } + }, + "BSC Mainnet": { + "is_native_fee_token": true, + "fee_token": "0x404460C6A5EdE2D891e8297795264fDe62ADBB75", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0x3DB43b96B2625F4232e9Df900d464dd2c64C0021", + "router": "0x34B03Cb9086d7D758AC55af71584F81A598759FE", + "price_registry": "0xd64aAbD70A71d9f0A00B99F6EFc1626aA2dD43C7", + "wrapped_native": "0xbb4CdB9CBd36B01bD1cBaEBF2De08d9173bc095c", + "src_contracts": { + "Avalanche Mainnet": { + "on_ramp": "0x6aa72a998859eF93356c6521B72155D355D0Cfd2", + "deployed_at": 11111111 + }, + "Arbitrum Mainnet": { + "on_ramp": "0x2788b46BAcFF49BD89562e6bA5c5FBbbE5Fa92F7", + "deployed_at": 11111111 + }, + "Base Mainnet": { + "on_ramp": "0x70bC7f7a6D936b289bBF5c0E19ECE35B437E2e36", + "deployed_at": 11111111 + }, + "Ethereum Mainnet": { + "on_ramp": "0x0Bf40b034872D0b364f3DCec04C7434a4Da1C8d9", + "deployed_at": 11111111 + }, + "Optimism Mainnet": { + "on_ramp": "0x4FEB11A454C9E8038A8d0aDF599Fe7612ce114bA", + "deployed_at": 11111111 + }, + "Polygon Mainnet": { + "on_ramp": "0x6bD4754D86fc87FE5b463D368f26a3587a08347c", + "deployed_at": 11111111 + }, + "WeMix Mainnet": { + "on_ramp": "0x1467fF8f249f5bc604119Af26a47035886f856BE", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Avalanche Mainnet": { + "off_ramp": "0x37a6fa55fe61061Ae97bF7314Ae270eCF71c5ED3", + "commit_store": "0x1f558F6dcf0224Ef1F78A24814FED548B9602c80", + 
"receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Arbitrum Mainnet": { + "off_ramp": "0x3DA330fd8Ef10d93cFB7D4f8ecE7BC1F10811feC", + "commit_store": "0x86D55Ff492cfBBAf0c0D42D4EE615144E78b3D02", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Base Mainnet": { + "off_ramp": "0x574c697deab06B805D8780898B3F136a1F4892Dc", + "commit_store": "0x002B164b1dcf4E92F352DC625A01Be0E890EdEea", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Ethereum Mainnet": { + "off_ramp": "0x181Bb1E97b0bDD1D85E741ad0943552D3682cc35", + "commit_store": "0x3fF27A34fF0FA77921C3438e67f58da1a83e9Ce1", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Optimism Mainnet": { + "off_ramp": "0xE7E080C8d62d595a223C577C7C8d1f75d9A5E664", + "commit_store": "0xF4d53346bDb6d393C74B0B72Aa7D6689a3eAad79", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Polygon Mainnet": { + "off_ramp": "0x26af2046Da85d7f6712D5edCa81B9E3b2e7A60Ab", + "commit_store": "0x4C1dA405a789AC2853A69D8290B8B9b47a0374F8", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "WeMix Mainnet": { + "off_ramp": "0xC027C5AEb230008c243Be463A73571e581F94c13", + "commit_store": "0x2EB426C8C54D740d1FC856eB3Ff96feA03957978", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + } + } + }, + "Ethereum Mainnet": { + "is_native_fee_token": true, + "fee_token": "0x514910771AF9Ca656af840dff83E8264EcF986CA", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0x8B63b3DE93431C0f756A493644d128134291fA1b", + "router": "0x80226fc0Ee2b096224EeAc085Bb9a8cba1146f7D", + "price_registry": "0x8c9b2Efb7c64C394119270bfecE7f54763b958Ad", + "wrapped_native": "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2", + "src_contracts": { + "Arbitrum Mainnet": { + "on_ramp": "0x925228D7B82d883Dde340A55Fe8e6dA56244A22C", + "deployed_at": 11111111 + }, + "Avalanche Mainnet": { + "on_ramp": "0x3df8dAe2d123081c4D5E946E655F7c109B9Dd630", + "deployed_at": 11111111 + }, + "Base Mainnet": { + "on_ramp": "0xe2c2AB221AA0b957805f229d2AA57fBE2f4dADf7", + "deployed_at": 11111111 + }, + "BSC Mainnet": { + "on_ramp": "0x91D25A56Db77aD5147437d8B83Eb563D46eBFa69", + "deployed_at": 11111111 + }, + "Optimism Mainnet": { + "on_ramp": "0x86B47d8411006874eEf8E4584BdFD7be8e5549d1", + "deployed_at": 11111111 + }, + "Polygon Mainnet": { + "on_ramp": "0x35F0ca9Be776E4B38659944c257bDd0ba75F1B8B", + "deployed_at": 11111111 + }, + "WeMix Mainnet": { + "on_ramp": "0xCbE7e5DA76dC99Ac317adF6d99137005FDA4E2C4", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Arbitrum Mainnet": { + "off_ramp": "0xeFC4a18af59398FF23bfe7325F2401aD44286F4d", + "commit_store": "0x9B2EEd6A1e16cB50Ed4c876D2dD69468B21b7749", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Avalanche Mainnet": { + "off_ramp": "0x569940e02D4425eac61A7601632eC00d69f75c17", + "commit_store": "0x2aa101BF99CaeF7fc1355D4c493a1fe187A007cE", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Base Mainnet": { + "off_ramp": "0xdf85c8381954694E74abD07488f452b4c2Cddfb3", + "commit_store": "0x8DC27D621c41a32140e22E2a4dAf1259639BAe04", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "BSC Mainnet": { + "off_ramp": "0x7Afe7088aff57173565F4b034167643AA8b9171c", + "commit_store": "0x87c55D48DF6EF7B08153Ab079e76bFEcbb793D75", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Optimism Mainnet": { + "off_ramp": 
"0xB095900fB91db00E6abD247A5A5AD1cee3F20BF7", + "commit_store": "0x4af4B497c998007eF83ad130318eB2b925a79dc8", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Polygon Mainnet": { + "off_ramp": "0x0af338F0E314c7551bcE0EF516d46d855b0Ee395", + "commit_store": "0xD37a60E8C36E802D2E1a6321832Ee85556Beeb76", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "WeMix Mainnet": { + "off_ramp": "0x3a129e6C18b23d18BA9E6Aa14Dc2e79d1f91c6c5", + "commit_store": "0x31f6ab382DDeb9A316Ab61C3945a5292a50a89AB", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + } + } + }, + "Kroma Mainnet": { + "is_native_fee_token": true, + "fee_token": "0xC1F6f7622ad37C3f46cDF6F8AA0344ADE80BF450", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0xB59779d3364BC6d71168245f9ebb96469E5a5a98", + "router": "0xE93E8B0d1b1CEB44350C8758ed1E2799CCee31aB", + "price_registry": "0x8155B4710e7bbC90924E957104F94Afd4f95Eca2", + "wrapped_native": "0x4200000000000000000000000000000000000001", + "src_contracts": { + "WeMix Mainnet": { + "on_ramp": "0x3C5Ab46fA1dB1dECD854224654313a69bf9fcAD3", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "WeMix Mainnet": { + "off_ramp": "0x2B555774B3D1dcbcd76efb7751F3c5FbCFABC5C4", + "commit_store": "0x213124614aAf31eBCE7c612A12aac5f8aAD77DE4", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + } + } + }, + "Optimism Mainnet": { + "is_native_fee_token": true, + "fee_token": "0x350a791Bfc2C21F9Ed5d10980Dad2e2638ffa7f6", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0x8C7C2C3362a42308BB5c368677Ad321D11693b81", + "router": "0x3206695CaE29952f4b0c22a169725a865bc8Ce0f", + "price_registry": "0xb52545aECE8C73A97E52a146757EC15b90Ed8488", + "wrapped_native": "0x4200000000000000000000000000000000000006", + "src_contracts": { + "Arbitrum Mainnet": { + "on_ramp": "0x0C9BE7Cfd12c735E5aaE047C1dCB845d54E518C3", + "deployed_at": 11111111 + }, + "Avalanche Mainnet": { + "on_ramp": "0xD0D3E757bFBce7ae1881DDD7F6d798DDcE588445", + "deployed_at": 11111111 + }, + "Base Mainnet": { + "on_ramp": "0x0b1760A8112183303c5526C6b24569fd3A274f3B", + "deployed_at": 11111111 + }, + "BSC Mainnet": { + "on_ramp": "0xa3c9544B82846C45BE37593d5d9ACffbE61BF3A6", + "deployed_at": 11111111 + }, + "Ethereum Mainnet": { + "on_ramp": "0x55183Db1d2aE0b63e4c92A64bEF2CBfc2032B127", + "deployed_at": 11111111 + }, + "Polygon Mainnet": { + "on_ramp": "0x6B57145e322c877E7D91Ed8E31266eB5c02F7EfC", + "deployed_at": 11111111 + }, + "WeMix Mainnet": { + "on_ramp": "0x82e9f4C5ec4a84E310d60D462a12042E5cbA0954", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Arbitrum Mainnet": { + "off_ramp": "0x0C9BE7Cfd12c735E5aaE047C1dCB845d54E518C3", + "commit_store": "0x55028780918330FD00a34a61D9a7Efd3f43ca845", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Avalanche Mainnet": { + "off_ramp": "0x8dc6490A6204dF846BaBE809cB695ba17Df1F9B1", + "commit_store": "0xA190660787B6B183Dd82B243eA10e609327c7308", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Base Mainnet": { + "off_ramp": "0xBAE6560eCa9B77Cb047158C783e36F7735C86037", + "commit_store": "0x6168aDF58e1Ad446BaD45c6275Bef60Ef4FFBAb8", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "BSC Mainnet": { + "off_ramp": "0xE14501F2838F2fA1Ceb52E78ABdA289EcE1705EA", + "commit_store": "0xa8DD25B29787527Df283211C24Ac72B17150A696", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Ethereum Mainnet": { + "off_ramp": 
"0xd2D98Be6a1C241e86C807e51cED6ABb51d044203", + "commit_store": "0x4d75A5cE454b264b187BeE9e189aF1564a68408D", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Polygon Mainnet": { + "off_ramp": "0x7c6221880A1D62506b1A08Dab3Bf695A49AcDD22", + "commit_store": "0x0684076EE3595221861C50cDb9Cb66402Ec11Cb9", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "WeMix Mainnet": { + "off_ramp": "0x3e5B3b7559D39563a74434157b31781322dA712D", + "commit_store": "0x7954372FF6f80908e5A2dC2a19d796A1005f91D2", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + } + } + }, + "Polygon Mainnet": { + "is_native_fee_token": true, + "fee_token": "0xb0897686c545045aFc77CF20eC7A532E3120E0F1", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0xD7AcF65dA1E1f34b663aB199a474F209bF2b0523", + "router": "0x849c5ED5a80F5B408Dd4969b78c2C8fdf0565Bfe", + "price_registry": "0x30D873664Ba766C983984C7AF9A921ccE36D34e1", + "wrapped_native": "0x0d500B1d8E8eF31E21C99d1Db9A6444d3ADf1270", + "src_contracts": { + "Arbitrum Mainnet": { + "on_ramp": "0xD16D025330Edb91259EEA8ed499daCd39087c295", + "deployed_at": 11111111 + }, + "Avalanche Mainnet": { + "on_ramp": "0x5FA30697e90eB30954895c45b028F7C0dDD39b12", + "deployed_at": 11111111 + }, + "Base Mainnet": { + "on_ramp": "0x20B028A2e0F6CCe3A11f3CE5F2B8986F932e89b4", + "deployed_at": 11111111 + }, + "BSC Mainnet": { + "on_ramp": "0xF5b5A2fC11BF46B1669C3B19d98B19C79109Dca9", + "deployed_at": 11111111 + }, + "Ethereum Mainnet": { + "on_ramp": "0xFd77c53AA4eF0E3C01f5Ac012BF7Cc7A3ECf5168", + "deployed_at": 11111111 + }, + "Optimism Mainnet": { + "on_ramp": "0x3111cfbF5e84B5D9BD952dd8e957f4Ca75f728Cf", + "deployed_at": 11111111 + }, + "WeMix Mainnet": { + "on_ramp": "0x5060eF647a1F66BE6eE27FAe3046faf8D53CeB2d", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Arbitrum Mainnet": { + "off_ramp": "0xa8a9eDa2867c2E0CE0d5ECe273961F1EcC3CC25B", + "commit_store": "0xbD4480658dca8496a65046dfD1BDD44EF897Bdb5", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Avalanche Mainnet": { + "off_ramp": "0xB9e3680639c9F0C4e0b02FD81C445094426244Ae", + "commit_store": "0x8c63d4e67f7c4af6FEd2f56A34fB4e01CB807CFF", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Base Mainnet": { + "off_ramp": "0xD0FA7DE2D18A0c59D3fD7dfC7aB4e913C6Aa7b68", + "commit_store": "0xF88053B9DAC8Dd3039a4eFa8639159aaa3F2D4Cb", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "BSC Mainnet": { + "off_ramp": "0x592773924741F0Da889a0dfdab71171Dd11E054C", + "commit_store": "0xEC4d35E1A85f770f4D93BA43a462c9d87Ef7017e", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Ethereum Mainnet": { + "off_ramp": "0x45320085fF051361D301eC1044318213A5387A15", + "commit_store": "0x4Dc771B5ef21ef60c33e2987E092345f2b63aE08", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Optimism Mainnet": { + "off_ramp": "0xBa754ecd3CFA7E9093F688EAc3860cf9D07Fc0AC", + "commit_store": "0x04C0D5302E3D8Ca0A0019141a52a23B59cdb70e4", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "WeMix Mainnet": { + "off_ramp": "0xd7c877ea02310Cce9278D9A048Aa1Bb9aF72F00d", + "commit_store": "0x92A1C927E8E10Ab6A40E5A5154e2300D278d1a67", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + } + } + }, + "WeMix Mainnet": { + "is_native_fee_token": true, + "fee_token": "0x80f1FcdC96B55e459BF52b998aBBE2c364935d69", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": 
"0x07aaC8B69A62dB5bd3d244091916EbF2fac17b76", + "router": "0x7798b795Fde864f4Cd1b124a38Ba9619B7F8A442", + "price_registry": "0x252863688762aD86868D3d3076233Eacd80c7055", + "wrapped_native": "0x7D72b22a74A216Af4a002a1095C8C707d6eC1C5f", + "src_contracts": { + "Arbitrum Mainnet": { + "on_ramp": "0x9aBfd6f4C865610692AB6fb1Be862575809fFabf", + "deployed_at": 11111111 + }, + "Avalanche Mainnet": { + "on_ramp": "0xbE0Cfae74677F8dd16a246a3a5c8cbB1973118f4", + "deployed_at": 11111111 + }, + "BSC Mainnet": { + "on_ramp": "0x56657ec4D15C71f7F3C17ba2b21C853A24Dc5381", + "deployed_at": 11111111 + }, + "Optimism Mainnet": { + "on_ramp": "0x70f3b0FD7e6a4B9B623e9AB859604A9EE03e48BD", + "deployed_at": 11111111 + }, + "Polygon Mainnet": { + "on_ramp": "0x777058C1e1dcE4eB8001F38631a1cd9450816e5a", + "deployed_at": 11111111 + }, + "Ethereum Mainnet": { + "on_ramp": "0x190bcE84CF2d500B878966F4Cf98a50d78f2675E", + "deployed_at": 11111111 + }, + "Kroma Mainnet": { + "on_ramp": "0x47E9AE0A815C94836202E696748A5d5476aD8735", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Arbitrum Mainnet": { + "off_ramp": "0x2ba68a395B72a6E3498D312efeD755ed2f3CF223", + "commit_store": "0xdAeC234DA83F68707Bb8AcB2ee6a01a5FD4c2391", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Avalanche Mainnet": { + "off_ramp": "0xFac907F9a1087B846Faa75A14C5d34A8639233d8", + "commit_store": "0xF2812063446c7deD2CA306c67A68364BdDcbEfc5", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "BSC Mainnet": { + "off_ramp": "0x6ec9ca4Cba62cA17c55F05ad2000B46192f02035", + "commit_store": "0x84534BE763366a69710E119c100832955795B34B", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Optimism Mainnet": { + "off_ramp": "0x87220D01DF0fF27149B47227897074653788fd23", + "commit_store": "0xF8dD2be2C6FA43e48A17146380CbEBBB4291807b", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Polygon Mainnet": { + "off_ramp": "0x8f0229804513A9Bc00c1308414AB279Dbc718ae1", + "commit_store": "0x3A85D1b8641d83a87957C6ECF1b62151213e0842", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Ethereum Mainnet": { + "off_ramp": "0xF92Fa796F5307b029c65CA26f322a6D86f211194", + "commit_store": "0xbeC110FF43D52be2066B06525304A9924E16b73b", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Kroma Mainnet": { + "off_ramp": "0xF886d8DC64E544af4835cbf91e5678A54D95B80e", + "commit_store": "0x8794C9534658fdCC44f2FF6645Bf31cf9F6d2d5D", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + } + } + } + } +} \ No newline at end of file diff --git a/integration-tests/ccip-tests/contracts/laneconfig/contracts.json b/integration-tests/ccip-tests/contracts/laneconfig/contracts.json new file mode 100644 index 00000000000..4d20e2a4d57 --- /dev/null +++ b/integration-tests/ccip-tests/contracts/laneconfig/contracts.json @@ -0,0 +1,243 @@ +{ + "lane_configs": { + "Arbitrum Mainnet": { + "fee_token": "0xf97f4df75117a78c1A5a0DBb814Af92458539FB4", + "bridge_tokens": [ + "0xf97f4df75117a78c1A5a0DBb814Af92458539FB4" + ], + "bridge_tokens_pools": [ + "" + ], + "arm": "0xe06b0e8c4bd455153e8794ad7Ea8Ff5A14B64E4b", + "router": "0xE92634289A1841A979C11C2f618B33D376e4Ba85", + "price_registry": "0xeBec5Cb8651FCD0Fd86Bd1BBb8562f5028D5102E", + "wrapped_native": "0x82aF49447D8a07e3bd95BD0d56f35241523fBab1", + "src_contracts": { + "Ethereum Mainnet": { + "on_ramp": "0x98dd9E9b8AE458225119Ab5B8c947A9d1cd0B648", + "deployed_at": 126471491 + } + }, + "dest_contracts": { + 
"Ethereum Mainnet": { + "off_ramp": "0x7b1f908ceBf41d5829D0134c7dfD6aa0f163C97d", + "commit_store": "0x8E2adA223f8514C2E6E6Fb0877a19018B67256fF", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + } + } + }, + "Avalanche Mainnet": { + "fee_token": "0x5947BB275c521040051D82396192181b413227A3", + "bridge_tokens": [ + "0x5947BB275c521040051D82396192181b413227A3" + ], + "bridge_tokens_pools": [ + "0x8A3e8D8614189d7ad0CF3f1a0D787Da79eBCEc17" + ], + "arm": "0xdFD6C0dc67666DE3bB36b31eec5c7B1542A82C1E", + "router": "0x27F39D0af3303703750D4001fCc1844c6491563c", + "price_registry": "0x2d3b38E0a4DFFDad2A613f7760bE1683F272eA18", + "wrapped_native": "0xb31f66aa3c1e785363f0875a1b74e27b85fd66c7", + "src_contracts": { + "Ethereum Mainnet": { + "on_ramp": "0x3D3817270db2b89e9F68bA27297fb4672082f942", + "deployed_at": 32263102 + }, + "Polygon Mainnet": { + "on_ramp": "0x2d306510FE83Cdb33Ff1658c71C181e9567F0009", + "deployed_at": 32562460 + } + }, + "dest_contracts": { + "Ethereum Mainnet": { + "off_ramp": "0x2BF2611a07e2cA880b814d53325e9b2ee0BbfD2f", + "commit_store": "0x5eBE880c4d340892dA1b0F32798a7A28e17e6E65", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Polygon Mainnet": { + "off_ramp": "0xC65F15b8178c2Fd653183130C6E003d196C39eC2", + "commit_store": "0xa9DC27fAc318fdDCa08E215ca157Fa5C7A832d80", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + } + } + }, + "Base Mainnet": { + "fee_token": "0x88Fb150BDc53A65fe94Dea0c9BA0a6dAf8C6e196", + "arm": "0x38660c8CC222c0192b635c2ac09687B4F25cCE5F", + "router": "0x673AA85efd75080031d44fcA061575d1dA427A28", + "price_registry": "0x1bA15c57c8b74cD32443D7583E7f6d7c638aCf46", + "wrapped_native": "0x4200000000000000000000000000000000000006", + "src_contracts": { + "Ethereum Mainnet": { + "on_ramp": "0xD44371bFDe87f2db3eA6Df242091351A06c2e181", + "deployed_at": 3316617 + } + }, + "dest_contracts": { + "Ethereum Mainnet": { + "off_ramp": "0x391B9B016C3bBA61F02e7ddd345130415908B9c7", + "commit_store": "0x398d2164a3F61353B4619814A31cC74A7741612E", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + } + } + }, + "BSC Mainnet": { + "fee_token": "0x404460C6A5EdE2D891e8297795264fDe62ADBB75", + "bridge_tokens": [ + "0x404460C6A5EdE2D891e8297795264fDe62ADBB75" + ], + "bridge_tokens_pools": [ + "" + ], + "arm": "0x3DB43b96B2625F4232e9Df900d464dd2c64C0021", + "router": "0x536d7E53D0aDeB1F20E7c81fea45d02eC9dBD698", + "price_registry": "0x18C3D917D55Bc1784a3d4729AA3e2C1ecd662fFd", + "wrapped_native": "0xbb4CdB9CBd36B01bD1cBaEBF2De08d9173bc095c", + "src_contracts": { + "Ethereum Mainnet": { + "on_ramp": "0x1f17D464652f5Bd74a03446FeA20590CCfB3332D", + "deployed_at": 31312405 + } + }, + "dest_contracts": { + "Ethereum Mainnet": { + "off_ramp": "0xEcaa7473b57956647C8Cff5a909520e7A0A4a5f6", + "commit_store": "0x9C68a868db2C27E9A7Ce43b73272A5d7ecFB5865", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + } + } + }, + "Ethereum Mainnet": { + "fee_token": "0x514910771AF9Ca656af840dff83E8264EcF986CA", + "bridge_tokens": [ + "0x514910771AF9Ca656af840dff83E8264EcF986CA" + ], + "bridge_tokens_pools": [ + "0xC2291992A08eBFDfedfE248F2CCD34Da63570DF4" + ], + "arm": "0x8B63b3DE93431C0f756A493644d128134291fA1b", + "router": "0xE561d5E02207fb5eB32cca20a699E0d8919a1476", + "price_registry": "0x020082A7a9c2510e1921116001152DEE4da81985", + "wrapped_native": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2", + "src_contracts": { + "Arbitrum Mainnet": { + "on_ramp": "0x333f976915195ba9044fD0cd603cEcE936f6264e", + 
"deployed_at": 18029393 + }, + "Avalanche Mainnet": { + "on_ramp": "0xd0B5Fc9790a6085b048b8Aa1ED26ca2b3b282CF2", + "deployed_at": 17636709 + }, + "BSC Mainnet": { + "on_ramp": "0xdF1d7FD22aC3aB5171E275796f123224039f3b24", + "deployed_at": 18029385 + }, + "Base Mainnet": { + "on_ramp": "0xe2Eb229e88F56691e96bb98256707Bc62160FE73", + "deployed_at": 18029431 + }, + "Optimism Mainnet": { + "on_ramp": "0xCC19bC4D43d17eB6859F0d22BA300967C97780b0", + "deployed_at": 17636647 + }, + "Polygon Mainnet": { + "on_ramp": "0x0f27c8532457b66D6037141DEB0ed479Dad04B3c", + "deployed_at": 17636734 + } + }, + "dest_contracts": { + "Arbitrum Mainnet": { + "off_ramp": "0x61135E701a2214C170c5F596D0067798FEfbaaE4", + "commit_store": "0x3d3467e1036Ee25F6F4aa15e3Abf77443A23144C", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Avalanche Mainnet": { + "off_ramp": "0x1C207dabc46902dF9028b27D6d301c3849b2D12c", + "commit_store": "0x40c558575093eC1099CC21B020d9b8D13c74417F", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "BSC Mainnet": { + "off_ramp": "0xC7176620daf49A39a17FF9A6C2DE1eAA6033EE94", + "commit_store": "0x7986C9892389854cAAbAC785ff18123B0070a5Fd", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Base Mainnet": { + "off_ramp": "0xfF51C00546AA3d9051a4B96Ae81346E14709CD24", + "commit_store": "0x2D1708ff2a15adbE313eA8C6035aA24d0FBA1c77", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Optimism Mainnet": { + "off_ramp": "0x41627a90f2c6238f2BADAB72D5aB050B857fdAb5", + "commit_store": "0x8bEFCa744c6f2b567b1863dcF055C593afdC11A0", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Polygon Mainnet": { + "off_ramp": "0xBDd822f3bC2EAB6818CfA3053107831D4E93fE72", + "commit_store": "0x20718EfbC25Dba60FD51c2c81362b83f7C411A6D", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + } + } + }, + "Optimism Mainnet": { + "fee_token": "0x350a791Bfc2C21F9Ed5d10980Dad2e2638ffa7f6", + "bridge_tokens": [ + "0x350a791Bfc2C21F9Ed5d10980Dad2e2638ffa7f6" + ], + "bridge_tokens_pools": [ + "0x841b32B5309ba30cFbf4534667fC3D99EdF05B7A" + ], + "arm": "0x8C7C2C3362a42308BB5c368677Ad321D11693b81", + "router": "0x261c05167db67B2b619f9d312e0753f3721ad6E8", + "price_registry": "0x9270AAA75F4B9038f4c25fEc665B02a150a90361", + "wrapped_native": "0x4200000000000000000000000000000000000006", + "src_contracts": { + "Ethereum Mainnet": { + "on_ramp": "0xad1b1F2A6DD55627e3893B771A00Cd43F69DcE35", + "deployed_at": 106535110 + } + }, + "dest_contracts": { + "Ethereum Mainnet": { + "off_ramp": "0x032F957BfbB8C535a1b2048f8b4FA27E1F2018Fd", + "commit_store": "0xa4D34ca38244F6c8AB640315d7257221408B6596", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + } + } + }, + "Polygon Mainnet": { + "fee_token": "0xb0897686c545045aFc77CF20eC7A532E3120E0F1", + "bridge_tokens": [ + "0xb0897686c545045aFc77CF20eC7A532E3120E0F1" + ], + "bridge_tokens_pools": [ + "0x086892015567fb8764d02c6845C85C25C8FcA389" + ], + "arm": "0xD7AcF65dA1E1f34b663aB199a474F209bF2b0523", + "router": "0x3C3D92629A02a8D95D5CB9650fe49C3544f69B43", + "price_registry": "0x68590799942eed65f9f1fB2277B9F6584A5957B8", + "wrapped_native": "0x0d500b1d8e8ef31e21c99d1db9a6444d3adf1270", + "src_contracts": { + "Avalanche Mainnet": { + "on_ramp": "0x47D945f7bbb814B65775a89c71F5D2229BE96CE9", + "deployed_at": 45041759 + }, + "Ethereum Mainnet": { + "on_ramp": "0xAE0e486Fa6577188d586A8e4c12360FB82E2a386", + "deployed_at": 44762064 + } + }, + "dest_contracts": { + 
"Avalanche Mainnet": { + "off_ramp": "0xd59A3770c3e05479152b8581Ae0839f51b315E6A", + "commit_store": "0xC2870bF94E24657f7f5E75cF458e391D23CD84B5", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + }, + "Ethereum Mainnet": { + "off_ramp": "0xa73bf37F78CD1629ff11Fa2B397CED39F49F6efe", + "commit_store": "0x779cA414cAC21c76AbE9213861b1bE9187d495F9", + "receiver_dapp": "0xAFa2c441a83bBCEDc2E8c5c6f66248aFD8b9af3d" + } + } + } + } +} diff --git a/integration-tests/ccip-tests/contracts/laneconfig/parse_contracts.go b/integration-tests/ccip-tests/contracts/laneconfig/parse_contracts.go new file mode 100644 index 00000000000..332bd48ab31 --- /dev/null +++ b/integration-tests/ccip-tests/contracts/laneconfig/parse_contracts.go @@ -0,0 +1,227 @@ +package laneconfig + +import ( + _ "embed" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "sync" + + "github.com/ethereum/go-ethereum/common" + "go.uber.org/multierr" +) + +var ( + //go:embed contracts.json + ExistingContracts []byte + laneMu = &sync.Mutex{} +) + +type CommonContracts struct { + IsNativeFeeToken bool `json:"is_native_fee_token,omitempty"` + IsMockARM bool `json:"is_mock_arm,omitempty"` + FeeToken string `json:"fee_token"` + BridgeTokens []string `json:"bridge_tokens,omitempty"` + BridgeTokenPools []string `json:"bridge_tokens_pools,omitempty"` + PriceAggregators map[string]string `json:"price_aggregators,omitempty"` + ARM string `json:"arm"` + Router string `json:"router"` + PriceRegistry string `json:"price_registry,omitempty"` + WrappedNative string `json:"wrapped_native"` + Multicall string `json:"multicall,omitempty"` + TokenTransmitter string `json:"token_transmitter,omitempty"` + TokenMessenger string `json:"token_messenger,omitempty"` + TokenAdminRegistry string `json:"token_admin_registry,omitempty"` +} + +type SourceContracts struct { + OnRamp string `json:"on_ramp"` + DeployedAt uint64 `json:"deployed_at"` +} + +type DestContracts struct { + OffRamp string `json:"off_ramp"` + CommitStore string `json:"commit_store"` + ReceiverDapp string `json:"receiver_dapp"` +} + +type LaneConfig struct { + CommonContracts + SrcContractsMu *sync.Mutex `json:"-"` + SrcContracts map[string]SourceContracts `json:"src_contracts"` // key destination chain id + DestContractsMu *sync.Mutex `json:"-"` + DestContracts map[string]DestContracts `json:"dest_contracts"` // key source chain id +} + +func (l *LaneConfig) Validate() error { + var laneConfigError error + + if l.ARM == "" || !common.IsHexAddress(l.ARM) { + laneConfigError = multierr.Append(laneConfigError, errors.New("must set proper address for arm")) + } + + if l.FeeToken != "" && !common.IsHexAddress(l.FeeToken) { + laneConfigError = multierr.Append(laneConfigError, errors.New("must set proper address for fee_token")) + } + + for _, token := range l.BridgeTokens { + if token != "" && !common.IsHexAddress(token) { + laneConfigError = multierr.Append(laneConfigError, errors.New("must set proper address for bridge_tokens")) + } + } + + for _, pool := range l.BridgeTokenPools { + if pool != "" && !common.IsHexAddress(pool) { + laneConfigError = multierr.Append(laneConfigError, errors.New("must set proper address for bridge_tokens_pools")) + } + } + if l.Router == "" || !common.IsHexAddress(l.Router) { + laneConfigError = multierr.Append(laneConfigError, errors.New("must set proper address for router")) + } + if l.PriceRegistry == "" || !common.IsHexAddress(l.PriceRegistry) { + laneConfigError = multierr.Append(laneConfigError, errors.New("must set proper address for 
price_registry")) + } + if l.WrappedNative == "" || !common.IsHexAddress(l.WrappedNative) { + laneConfigError = multierr.Append(laneConfigError, errors.New("must set proper address for wrapped_native")) + } + if l.Multicall == "" || !common.IsHexAddress(l.Multicall) { + laneConfigError = multierr.Append(laneConfigError, errors.New("must set proper address for multicall")) + } + return laneConfigError +} + +type Lanes struct { + LaneConfigs map[string]*LaneConfig `json:"lane_configs"` +} + +func (l *Lanes) ReadLaneConfig(networkA string) *LaneConfig { + laneMu.Lock() + defer laneMu.Unlock() + cfg, ok := l.LaneConfigs[networkA] + if !ok { + l.LaneConfigs[networkA] = &LaneConfig{ + SrcContracts: make(map[string]SourceContracts), + DestContracts: make(map[string]DestContracts), + SrcContractsMu: &sync.Mutex{}, + DestContractsMu: &sync.Mutex{}, + } + return l.LaneConfigs[networkA] + } + if cfg.SrcContractsMu == nil { + l.LaneConfigs[networkA].SrcContractsMu = &sync.Mutex{} + } + if cfg.DestContractsMu == nil { + l.LaneConfigs[networkA].DestContractsMu = &sync.Mutex{} + } + return l.LaneConfigs[networkA] +} + +// CopyCommonContracts copies network config for common contracts from fromNetwork to toNetwork +// if the toNetwork already exists, it does nothing +// If reuse is set to false, it only retains the token contracts +func (l *Lanes) CopyCommonContracts(fromNetwork, toNetwork string, reuse, isTokenTransfer bool) { + laneMu.Lock() + defer laneMu.Unlock() + // if the toNetwork already exists, return + if _, ok := l.LaneConfigs[toNetwork]; ok { + return + } + existing, ok := l.LaneConfigs[fromNetwork] + if !ok { + l.LaneConfigs[toNetwork] = &LaneConfig{ + SrcContracts: make(map[string]SourceContracts), + DestContracts: make(map[string]DestContracts), + SrcContractsMu: &sync.Mutex{}, + DestContractsMu: &sync.Mutex{}, + } + return + } + cfg := &LaneConfig{ + SrcContracts: make(map[string]SourceContracts), + SrcContractsMu: &sync.Mutex{}, + DestContractsMu: &sync.Mutex{}, + DestContracts: make(map[string]DestContracts), + CommonContracts: CommonContracts{ + WrappedNative: existing.WrappedNative, + Multicall: existing.Multicall, + }, + } + // if reuse is set to true, it copies all the common contracts except the router + if reuse { + cfg.CommonContracts.FeeToken = existing.FeeToken + cfg.CommonContracts.PriceRegistry = existing.PriceRegistry + cfg.CommonContracts.TokenAdminRegistry = existing.TokenAdminRegistry + cfg.CommonContracts.PriceAggregators = existing.PriceAggregators + cfg.CommonContracts.ARM = existing.ARM + cfg.CommonContracts.IsMockARM = existing.IsMockARM + cfg.CommonContracts.Multicall = existing.Multicall + } + // if it is a token transfer, it copies the bridge token contracts + if isTokenTransfer { + cfg.CommonContracts.BridgeTokens = existing.BridgeTokens + if reuse { + cfg.CommonContracts.BridgeTokenPools = existing.BridgeTokenPools + } + } + l.LaneConfigs[toNetwork] = cfg +} + +func (l *Lanes) WriteLaneConfig(networkA string, cfg *LaneConfig) error { + laneMu.Lock() + defer laneMu.Unlock() + if l.LaneConfigs == nil { + l.LaneConfigs = make(map[string]*LaneConfig) + } + err := cfg.Validate() + if err != nil { + return err + } + l.LaneConfigs[networkA] = cfg + return nil +} + +func ReadLanesFromExistingDeployment(contracts []byte) (*Lanes, error) { + // if contracts is empty, use the existing contracts from contracts.json + if len(contracts) == 0 { + contracts = ExistingContracts + } + var lanes Lanes + if err := json.Unmarshal(contracts, &lanes); err != nil { + return nil, err + 
} + return &lanes, nil +} + +func CreateDeploymentJSON(path string) (*Lanes, error) { + existingLanes := Lanes{ + LaneConfigs: make(map[string]*LaneConfig), + } + err := WriteLanesToJSON(path, &existingLanes) + return &existingLanes, err +} + +func WriteLanesToJSON(path string, lanes *Lanes) error { + b, err := json.MarshalIndent(lanes, "", " ") + if err != nil { + return err + } + // Get the directory part of the file path. + dir := filepath.Dir(path) + // Check if the directory exists. + if _, err := os.Stat(dir); os.IsNotExist(err) { + // The directory does not exist, create it. + if err := os.MkdirAll(dir, 0755); err != nil { + return fmt.Errorf("failed to create directory: %w", err) + } + } + + f, err := os.Create(path) + if err != nil { + return err + } + defer f.Close() + _, err = f.Write(b) + return err +} diff --git a/integration-tests/ccip-tests/contracts/multicall.go b/integration-tests/ccip-tests/contracts/multicall.go new file mode 100644 index 00000000000..7db7f37519b --- /dev/null +++ b/integration-tests/ccip-tests/contracts/multicall.go @@ -0,0 +1,280 @@ +package contracts + +import ( + "context" + "fmt" + "math/big" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + "github.com/rs/zerolog/log" + + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/shared/generated/erc20" +) + +const ( + MultiCallABI = "[{\"inputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"callData\",\"type\":\"bytes\"}],\"internalType\":\"struct Multicall3.Call[]\",\"name\":\"calls\",\"type\":\"tuple[]\"}],\"name\":\"aggregate\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"},{\"internalType\":\"bytes[]\",\"name\":\"returnData\",\"type\":\"bytes[]\"}],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"allowFailure\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"callData\",\"type\":\"bytes\"}],\"internalType\":\"struct Multicall3.Call3[]\",\"name\":\"calls\",\"type\":\"tuple[]\"}],\"name\":\"aggregate3\",\"outputs\":[{\"components\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"returnData\",\"type\":\"bytes\"}],\"internalType\":\"struct Multicall3.Result[]\",\"name\":\"returnData\",\"type\":\"tuple[]\"}],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"allowFailure\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"callData\",\"type\":\"bytes\"}],\"internalType\":\"struct Multicall3.Call3Value[]\",\"name\":\"calls\",\"type\":\"tuple[]\"}],\"name\":\"aggregate3Value\",\"outputs\":[{\"components\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"returnData\",\"type\":\"bytes\"}],\"internalType\":\"struct 
Multicall3.Result[]\",\"name\":\"returnData\",\"type\":\"tuple[]\"}],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"callData\",\"type\":\"bytes\"}],\"internalType\":\"struct Multicall3.Call[]\",\"name\":\"calls\",\"type\":\"tuple[]\"}],\"name\":\"blockAndAggregate\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"components\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"returnData\",\"type\":\"bytes\"}],\"internalType\":\"struct Multicall3.Result[]\",\"name\":\"returnData\",\"type\":\"tuple[]\"}],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getBasefee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"basefee\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"}],\"name\":\"getBlockHash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getBlockNumber\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getChainId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"chainid\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCurrentBlockCoinbase\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"coinbase\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCurrentBlockDifficulty\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"difficulty\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCurrentBlockGasLimit\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"gaslimit\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCurrentBlockTimestamp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"timestamp\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"}],\"name\":\"getEthBalance\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLastBlockHash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bool\",\"name\":\"requireSuccess\",\"type\":\"bool\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"callData\",\"type\":\"bytes\"}],\"internalType\":\"struct Multicall3.Call[]\",\"name\":\"calls\",\"type\":\"tuple[]\"}],\"name\":\"tryAggregate\",\"outputs\":[{\"components\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"returnData\",\"type\":\"bytes\"}],\"internalType\":\"struct 
Multicall3.Result[]\",\"name\":\"returnData\",\"type\":\"tuple[]\"}],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bool\",\"name\":\"requireSuccess\",\"type\":\"bool\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"callData\",\"type\":\"bytes\"}],\"internalType\":\"struct Multicall3.Call[]\",\"name\":\"calls\",\"type\":\"tuple[]\"}],\"name\":\"tryBlockAndAggregate\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"components\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"returnData\",\"type\":\"bytes\"}],\"internalType\":\"struct Multicall3.Result[]\",\"name\":\"returnData\",\"type\":\"tuple[]\"}],\"stateMutability\":\"payable\",\"type\":\"function\"}]" + MultiCallBIN = "0x608060405234801561001057600080fd5b50610ee0806100206000396000f3fe6080604052600436106100f35760003560e01c80634d2301cc1161008a578063a8b0574e11610059578063a8b0574e1461025a578063bce38bd714610275578063c3077fa914610288578063ee82ac5e1461029b57600080fd5b80634d2301cc146101ec57806372425d9d1461022157806382ad56cb1461023457806386d516e81461024757600080fd5b80633408e470116100c65780633408e47014610191578063399542e9146101a45780633e64a696146101c657806342cbb15c146101d957600080fd5b80630f28c97d146100f8578063174dea711461011a578063252dba421461013a57806327e86d6e1461015b575b600080fd5b34801561010457600080fd5b50425b6040519081526020015b60405180910390f35b61012d610128366004610a85565b6102ba565b6040516101119190610bbe565b61014d610148366004610a85565b6104ef565b604051610111929190610bd8565b34801561016757600080fd5b50437fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0140610107565b34801561019d57600080fd5b5046610107565b6101b76101b2366004610c60565b610690565b60405161011193929190610cba565b3480156101d257600080fd5b5048610107565b3480156101e557600080fd5b5043610107565b3480156101f857600080fd5b50610107610207366004610ce2565b73ffffffffffffffffffffffffffffffffffffffff163190565b34801561022d57600080fd5b5044610107565b61012d610242366004610a85565b6106ab565b34801561025357600080fd5b5045610107565b34801561026657600080fd5b50604051418152602001610111565b61012d610283366004610c60565b61085a565b6101b7610296366004610a85565b610a1a565b3480156102a757600080fd5b506101076102b6366004610d18565b4090565b60606000828067ffffffffffffffff8111156102d8576102d8610d31565b60405190808252806020026020018201604052801561031e57816020015b6040805180820190915260008152606060208201528152602001906001900390816102f65790505b5092503660005b8281101561047757600085828151811061034157610341610d60565b6020026020010151905087878381811061035d5761035d610d60565b905060200281019061036f9190610d8f565b6040810135958601959093506103886020850185610ce2565b73ffffffffffffffffffffffffffffffffffffffff16816103ac6060870187610dcd565b6040516103ba929190610e32565b60006040518083038185875af1925050503d80600081146103f7576040519150601f19603f3d011682016040523d82523d6000602084013e6103fc565b606091505b50602080850191909152901515808452908501351761046d577f08c379a000000000000000000000000000000000000000000000000000000000600052602060045260176024527f4d756c746963616c6c333a2063616c6c206661696c656400000000000000000060445260846000fd5b5050600101610325565b508234146104e6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f4d756c746963616c6c333a2076616c7565206d69736d6174636800000000000060448201526064015b604051809103
90fd5b50505092915050565b436060828067ffffffffffffffff81111561050c5761050c610d31565b60405190808252806020026020018201604052801561053f57816020015b606081526020019060019003908161052a5790505b5091503660005b8281101561068657600087878381811061056257610562610d60565b90506020028101906105749190610e42565b92506105836020840184610ce2565b73ffffffffffffffffffffffffffffffffffffffff166105a66020850185610dcd565b6040516105b4929190610e32565b6000604051808303816000865af19150503d80600081146105f1576040519150601f19603f3d011682016040523d82523d6000602084013e6105f6565b606091505b5086848151811061060957610609610d60565b602090810291909101015290508061067d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f4d756c746963616c6c333a2063616c6c206661696c656400000000000000000060448201526064016104dd565b50600101610546565b5050509250929050565b43804060606106a086868661085a565b905093509350939050565b6060818067ffffffffffffffff8111156106c7576106c7610d31565b60405190808252806020026020018201604052801561070d57816020015b6040805180820190915260008152606060208201528152602001906001900390816106e55790505b5091503660005b828110156104e657600084828151811061073057610730610d60565b6020026020010151905086868381811061074c5761074c610d60565b905060200281019061075e9190610e76565b925061076d6020840184610ce2565b73ffffffffffffffffffffffffffffffffffffffff166107906040850185610dcd565b60405161079e929190610e32565b6000604051808303816000865af19150503d80600081146107db576040519150601f19603f3d011682016040523d82523d6000602084013e6107e0565b606091505b506020808401919091529015158083529084013517610851577f08c379a000000000000000000000000000000000000000000000000000000000600052602060045260176024527f4d756c746963616c6c333a2063616c6c206661696c656400000000000000000060445260646000fd5b50600101610714565b6060818067ffffffffffffffff81111561087657610876610d31565b6040519080825280602002602001820160405280156108bc57816020015b6040805180820190915260008152606060208201528152602001906001900390816108945790505b5091503660005b82811015610a105760008482815181106108df576108df610d60565b602002602001015190508686838181106108fb576108fb610d60565b905060200281019061090d9190610e42565b925061091c6020840184610ce2565b73ffffffffffffffffffffffffffffffffffffffff1661093f6020850185610dcd565b60405161094d929190610e32565b6000604051808303816000865af19150503d806000811461098a576040519150601f19603f3d011682016040523d82523d6000602084013e61098f565b606091505b506020830152151581528715610a07578051610a07576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f4d756c746963616c6c333a2063616c6c206661696c656400000000000000000060448201526064016104dd565b506001016108c3565b5050509392505050565b6000806060610a2b60018686610690565b919790965090945092505050565b60008083601f840112610a4b57600080fd5b50813567ffffffffffffffff811115610a6357600080fd5b6020830191508360208260051b8501011115610a7e57600080fd5b9250929050565b60008060208385031215610a9857600080fd5b823567ffffffffffffffff811115610aaf57600080fd5b610abb85828601610a39565b90969095509350505050565b6000815180845260005b81811015610aed57602081850181015186830182015201610ad1565b81811115610aff576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b600082825180855260208086019550808260051b84010181860160005b84811015610bb1578583037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe001895281518051151584528401516040858501819052610b9d81860183610ac7565b9a86019a9450505090830190600101610b4f565b5090979650505050505050565b602081526000610bd16020830184610b32565b9392505
050565b600060408201848352602060408185015281855180845260608601915060608160051b870101935082870160005b82811015610c52577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa0888703018452610c40868351610ac7565b95509284019290840190600101610c06565b509398975050505050505050565b600080600060408486031215610c7557600080fd5b83358015158114610c8557600080fd5b9250602084013567ffffffffffffffff811115610ca157600080fd5b610cad86828701610a39565b9497909650939450505050565b838152826020820152606060408201526000610cd96060830184610b32565b95945050505050565b600060208284031215610cf457600080fd5b813573ffffffffffffffffffffffffffffffffffffffff81168114610bd157600080fd5b600060208284031215610d2a57600080fd5b5035919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600082357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff81833603018112610dc357600080fd5b9190910192915050565b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe1843603018112610e0257600080fd5b83018035915067ffffffffffffffff821115610e1d57600080fd5b602001915036819003821315610a7e57600080fd5b8183823760009101908152919050565b600082357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc1833603018112610dc357600080fd5b600082357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa1833603018112610dc357600080fdfea2646970667358221220bb2b5c71a328032f97c676ae39a1ec2148d3e5d6f73d95e9b17910152d61f16264736f6c634300080c0033" +) + +type CallWithValue struct { + Target common.Address + AllowFailure bool + Value *big.Int + CallData []byte +} + +type Call struct { + Target common.Address + AllowFailure bool + CallData []byte +} + +type Result struct { + Success bool + ReturnData []byte +} +type CCIPMsgData struct { + RouterAddr common.Address + ChainSelector uint64 + Msg router.ClientEVM2AnyMessage + Fee *big.Int +} + +func TransferTokenCallData(to common.Address, amount *big.Int) ([]byte, error) { + erc20ABI, err := abi.JSON(strings.NewReader(erc20.ERC20ABI)) + if err != nil { + return nil, err + } + transferToken := erc20ABI.Methods["transfer"] + inputs, err := transferToken.Inputs.Pack(to, amount) + if err != nil { + return nil, err + } + inputs = append(transferToken.ID[:], inputs...) + return inputs, nil +} + +// ApproveTokenCallData returns the call data for approving a token with approve function of erc20 contract +func ApproveTokenCallData(to common.Address, amount *big.Int) ([]byte, error) { + erc20ABI, err := abi.JSON(strings.NewReader(erc20.ERC20ABI)) + if err != nil { + return nil, err + } + approveToken := erc20ABI.Methods["approve"] + inputs, err := approveToken.Inputs.Pack(to, amount) + if err != nil { + return nil, err + } + inputs = append(approveToken.ID[:], inputs...) + return inputs, nil +} + +// CCIPSendCallData returns the call data for sending a CCIP message with ccipSend function of router contract +func CCIPSendCallData(msg CCIPMsgData) ([]byte, error) { + routerABI, err := abi.JSON(strings.NewReader(router.RouterABI)) + if err != nil { + return nil, err + } + ccipSend := routerABI.Methods["ccipSend"] + sendID := ccipSend.ID + inputs, err := ccipSend.Inputs.Pack( + msg.ChainSelector, + msg.Msg, + ) + if err != nil { + return nil, err + } + inputs = append(sendID[:], inputs...) 
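+	// At this point inputs holds the 4-byte ccipSend selector followed by the
+	// ABI-encoded (destinationChainSelector, message) arguments, i.e. the raw
+	// call data the Router's ccipSend function expects.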
+ return inputs, nil +} + +func WaitForSuccessfulTxMined(evmClient blockchain.EVMClient, tx *types.Transaction) error { + log.Info().Str("tx", tx.Hash().Hex()).Msg("waiting for tx to be mined") + receipt, err := bind.WaitMined(context.Background(), evmClient.DeployBackend(), tx) + if err != nil { + return err + } + if receipt.Status != types.ReceiptStatusSuccessful { + // TODO: Add error reason from receipt/tx + return fmt.Errorf("tx failed %s", tx.Hash().Hex()) + } + log.Info().Str("tx", tx.Hash().Hex()).Str("Network", evmClient.GetNetworkName()).Msg("tx mined successfully") + return nil +} + +// MultiCallCCIP sends multiple CCIP messages in a single transaction +// if native is true, it will send msg with native as fee. In this case the msg should be sent with a +// msg.value equivalent to the total fee with the help of aggregate3Value +// +// if native is false, it will send msg with fee in specific feetoken. In this case the msg should be sent without value with the help of aggregate3. +// In both cases, if there are any bridge tokens included in ccip transfer, the amount for corresponding token should be approved to the router contract as spender. +// The approval should be done by calling approval function as part of the call data of aggregate3 or aggregate3Value +// If feetoken is used as fee, the amount for feetoken should be approved to the router contract as spender and should be done as part of the call data of aggregate3 +// In case of native as fee, there is no need for fee amount approval +func MultiCallCCIP( + evmClient blockchain.EVMClient, + address string, + msgData []CCIPMsgData, + native bool, +) (*types.Transaction, error) { + contractAddress := common.HexToAddress(address) + multiCallABI, err := abi.JSON(strings.NewReader(MultiCallABI)) + if err != nil { + return nil, err + } + boundContract := bind.NewBoundContract(contractAddress, multiCallABI, evmClient.Backend(), evmClient.Backend(), evmClient.Backend()) + + // if native, use aggregate3Value to send msg with value + if native { + var callData []CallWithValue + allValue := big.NewInt(0) + // create call data for each msg + for _, msg := range msgData { + if msg.Msg.FeeToken != (common.Address{}) { + return nil, fmt.Errorf("fee token should be %s for native as fee", common.HexToAddress("0x0").Hex()) + } + // approve bridge token + for _, tokenAndAmount := range msg.Msg.TokenAmounts { + inputs, err := ApproveTokenCallData(msg.RouterAddr, tokenAndAmount.Amount) + if err != nil { + return nil, err + } + data := CallWithValue{Target: tokenAndAmount.Token, AllowFailure: false, Value: big.NewInt(0), CallData: inputs} + callData = append(callData, data) + } + inputs, err := CCIPSendCallData(msg) + if err != nil { + return nil, err + } + data := CallWithValue{Target: msg.RouterAddr, AllowFailure: false, Value: msg.Fee, CallData: inputs} + callData = append(callData, data) + allValue.Add(allValue, msg.Fee) + } + + opts, err := evmClient.TransactionOpts(evmClient.GetDefaultWallet()) + if err != nil { + return nil, err + } + // the value of transactionOpts is the sum of the value of all msg, which is the total fee of all ccip-sends + opts.Value = allValue + + // call aggregate3Value to group all msg call data and send them in a single transaction + tx, err := boundContract.Transact(opts, "aggregate3Value", callData) + if err != nil { + return nil, err + } + err = evmClient.MarkTxAsSentOnL2(tx) + if err != nil { + return nil, err + } + err = WaitForSuccessfulTxMined(evmClient, tx) + if err != nil { + return nil, 
errors.Wrapf(err, "multicall failed for ccip-send; multicall %s", contractAddress.Hex()) + } + return tx, nil + } + // if with feetoken, use aggregate3 to send msg without value + var callData []Call + // create call data for each msg + for _, msg := range msgData { + isFeeTokenAndBridgeTokenSame := false + // approve bridge token + for _, tokenAndAmount := range msg.Msg.TokenAmounts { + var inputs []byte + // if feetoken is same as bridge token, approve total amount including transfer amount + fee amount + if tokenAndAmount.Token == msg.Msg.FeeToken { + isFeeTokenAndBridgeTokenSame = true + inputs, err = ApproveTokenCallData(msg.RouterAddr, new(big.Int).Add(msg.Fee, tokenAndAmount.Amount)) + if err != nil { + return nil, err + } + } else { + inputs, err = ApproveTokenCallData(msg.RouterAddr, tokenAndAmount.Amount) + if err != nil { + return nil, err + } + } + + callData = append(callData, Call{Target: tokenAndAmount.Token, AllowFailure: false, CallData: inputs}) + } + // approve fee token if not already approved + if msg.Fee != nil && msg.Fee.Cmp(big.NewInt(0)) > 0 && !isFeeTokenAndBridgeTokenSame { + inputs, err := ApproveTokenCallData(msg.RouterAddr, msg.Fee) + if err != nil { + return nil, err + } + callData = append(callData, Call{Target: msg.Msg.FeeToken, AllowFailure: false, CallData: inputs}) + } + + inputs, err := CCIPSendCallData(msg) + if err != nil { + return nil, err + } + callData = append(callData, Call{Target: msg.RouterAddr, AllowFailure: false, CallData: inputs}) + } + opts, err := evmClient.TransactionOpts(evmClient.GetDefaultWallet()) + if err != nil { + return nil, err + } + + // call aggregate3 to group all msg call data and send them in a single transaction + tx, err := boundContract.Transact(opts, "aggregate3", callData) + if err != nil { + return nil, err + } + err = WaitForSuccessfulTxMined(evmClient, tx) + if err != nil { + return tx, errors.Wrapf(err, "multicall failed for ccip-send; router %s", contractAddress.Hex()) + } + return tx, nil +} + +func TransferTokens( + evmClient blockchain.EVMClient, + contractAddress common.Address, + tokens []*ERC20Token, +) error { + multiCallABI, err := abi.JSON(strings.NewReader(MultiCallABI)) + if err != nil { + return err + } + var callData []Call + boundContract := bind.NewBoundContract(contractAddress, multiCallABI, evmClient.Backend(), evmClient.Backend(), evmClient.Backend()) + for _, token := range tokens { + var inputs []byte + balance, err := token.BalanceOf(context.Background(), contractAddress.Hex()) + if err != nil { + return err + } + inputs, err = TransferTokenCallData(common.HexToAddress(evmClient.GetDefaultWallet().Address()), balance) + if err != nil { + return err + } + data := Call{Target: token.ContractAddress, AllowFailure: false, CallData: inputs} + callData = append(callData, data) + } + + opts, err := evmClient.TransactionOpts(evmClient.GetDefaultWallet()) + if err != nil { + return err + } + + // call aggregate3 to group all msg call data and send them in a single transaction + tx, err := boundContract.Transact(opts, "aggregate3", callData) + if err != nil { + return err + } + err = WaitForSuccessfulTxMined(evmClient, tx) + if err != nil { + return errors.Wrapf(err, "token transfer failed for token; router %s", contractAddress.Hex()) + } + return nil +} diff --git a/integration-tests/ccip-tests/load/ccip_loadgen.go b/integration-tests/ccip-tests/load/ccip_loadgen.go new file mode 100644 index 00000000000..4ed54a45fdb --- /dev/null +++ b/integration-tests/ccip-tests/load/ccip_loadgen.go @@ -0,0 +1,363 
@@ +package load + +import ( + "context" + crypto_rand "crypto/rand" + "encoding/base64" + "fmt" + "math/big" + "strconv" + "testing" + "time" + + "github.com/AlekSi/pointer" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/rs/zerolog" + chain_selectors "github.com/smartcontractkit/chain-selectors" + "github.com/smartcontractkit/wasp" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testconfig" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers" + + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/actions" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testreporters" +) + +// CCIPLaneOptimized is a light-weight version of CCIPLane, It only contains elements which are used during load triggering and validation +type CCIPLaneOptimized struct { + Logger *zerolog.Logger + SourceNetworkName string + DestNetworkName string + Source *actions.SourceCCIPModule + Dest *actions.DestCCIPModule + Reports *testreporters.CCIPLaneStats +} + +type CCIPE2ELoad struct { + t *testing.T + Lane *CCIPLaneOptimized + NoOfReq int64 // approx no of Request fired + CurrentMsgSerialNo *atomic.Int64 // current msg serial number in the load sequence + CallTimeOut time.Duration // max time to wait for various on-chain events + msg router.ClientEVM2AnyMessage + MaxDataBytes uint32 + SendMaxDataIntermittentlyInMsgCount int64 + SkipRequestIfAnotherRequestTriggeredWithin *config.Duration + LastFinalizedTxBlock atomic.Uint64 + LastFinalizedTimestamp atomic.Time + MsgProfiles *testconfig.MsgProfile + EOAReceiver []byte +} + +func NewCCIPLoad( + t *testing.T, + lane *actions.CCIPLane, + timeout time.Duration, + noOfReq int64, + m *testconfig.MsgProfile, + sendMaxDataIntermittentlyInEveryMsgCount int64, + SkipRequestIfAnotherRequestTriggeredWithin *config.Duration, +) *CCIPE2ELoad { + // to avoid holding extra data + loadLane := &CCIPLaneOptimized{ + Logger: lane.Logger, + SourceNetworkName: lane.SourceNetworkName, + DestNetworkName: lane.DestNetworkName, + Source: lane.Source, + Dest: lane.Dest, + Reports: lane.Reports, + } + + return &CCIPE2ELoad{ + t: t, + Lane: loadLane, + CurrentMsgSerialNo: atomic.NewInt64(1), + CallTimeOut: timeout, + NoOfReq: noOfReq, + SendMaxDataIntermittentlyInMsgCount: sendMaxDataIntermittentlyInEveryMsgCount, + SkipRequestIfAnotherRequestTriggeredWithin: SkipRequestIfAnotherRequestTriggeredWithin, + MsgProfiles: m, + } +} + +// BeforeAllCall funds subscription, approves the token transfer amount. +// Needs to be called before load sequence is started. +// Needs to approve and fund for the entire sequence. 
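+// When multicall is enabled it also tops up the multicall contract with enough
+// bridge tokens for NoOfReq requests, and it caches an EOA receiver so messages
+// with a zero gas limit can be redirected away from the receiver contract.
+// Minimal usage sketch (argument values are illustrative only):
+//
+//	load := NewCCIPLoad(t, lane, 5*time.Minute, 1000, msgProfile, 0, nil)
+//	load.BeforeAllCall()
+//	resp := load.Call(nil) // fires one ccip-send and validates it end to end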
+func (c *CCIPE2ELoad) BeforeAllCall() { + sourceCCIP := c.Lane.Source + destCCIP := c.Lane.Dest + + receiver, err := utils.ABIEncode(`[{"type":"address"}]`, destCCIP.ReceiverDapp.EthAddress) + require.NoError(c.t, err, "Failed encoding the receiver address") + c.msg = router.ClientEVM2AnyMessage{ + Receiver: receiver, + FeeToken: common.HexToAddress(sourceCCIP.Common.FeeToken.Address()), + Data: []byte("message with Id 1"), + } + var tokenAndAmounts []router.ClientEVMTokenAmount + if len(c.Lane.Source.Common.BridgeTokens) > 0 { + for i := range c.Lane.Source.TransferAmount { + // if length of sourceCCIP.TransferAmount is more than available bridge token use first bridge token + token := sourceCCIP.Common.BridgeTokens[0] + if i < len(sourceCCIP.Common.BridgeTokens) { + token = sourceCCIP.Common.BridgeTokens[i] + } + tokenAndAmounts = append(tokenAndAmounts, router.ClientEVMTokenAmount{ + Token: common.HexToAddress(token.Address()), Amount: c.Lane.Source.TransferAmount[i], + }) + } + c.msg.TokenAmounts = tokenAndAmounts + } + // we might need to change the receiver to the default wallet of destination based on the gaslimit of msg + // Get the receiver's bytecode to check if it's a contract or EOA + bytecode, err := c.Lane.Dest.Common.ChainClient.Backend().CodeAt(context.Background(), c.Lane.Dest.ReceiverDapp.EthAddress, nil) + require.NoError(c.t, err, "Failed to get bytecode of the receiver contract") + // if the bytecode is empty, it's an EOA, + // In that case save the receiver address as EOA to be used in the message + // Otherwise save destination's default wallet address as EOA + // so that it can be used later for msgs with gaslimit 0 + if len(bytecode) > 0 { + receiver, err := utils.ABIEncode(`[{"type":"address"}]`, common.HexToAddress(c.Lane.Dest.Common.ChainClient.GetDefaultWallet().Address())) + require.NoError(c.t, err, "Failed encoding the receiver address") + c.EOAReceiver = receiver + } else { + c.EOAReceiver = c.msg.Receiver + } + if c.SendMaxDataIntermittentlyInMsgCount > 0 { + c.MaxDataBytes, err = sourceCCIP.OnRamp.Instance.GetDynamicConfig(nil) + require.NoError(c.t, err, "failed to fetch dynamic config") + } + // if the msg is sent via multicall, transfer the token transfer amount to multicall contract + if sourceCCIP.Common.MulticallEnabled && + sourceCCIP.Common.MulticallContract != (common.Address{}) && + len(c.Lane.Source.Common.BridgeTokens) > 0 { + for i, amount := range sourceCCIP.TransferAmount { + // if length of sourceCCIP.TransferAmount is more than available bridge token use first bridge token + token := sourceCCIP.Common.BridgeTokens[0] + if i < len(sourceCCIP.Common.BridgeTokens) { + token = sourceCCIP.Common.BridgeTokens[i] + } + amountToApprove := new(big.Int).Mul(amount, big.NewInt(c.NoOfReq)) + bal, err := token.BalanceOf(context.Background(), sourceCCIP.Common.MulticallContract.Hex()) + require.NoError(c.t, err, "Failed to get token balance") + if bal.Cmp(amountToApprove) < 0 { + err := token.Transfer(token.OwnerWallet, sourceCCIP.Common.MulticallContract.Hex(), amountToApprove) + require.NoError(c.t, err, "Failed to approve token transfer amount") + } + } + } + + c.LastFinalizedTxBlock.Store(c.Lane.Source.NewFinalizedBlockNum.Load()) + c.LastFinalizedTimestamp.Store(c.Lane.Source.NewFinalizedBlockTimestamp.Load()) + + sourceCCIP.Common.ChainClient.ParallelTransactions(false) + destCCIP.Common.ChainClient.ParallelTransactions(false) +} + +func (c *CCIPE2ELoad) CCIPMsg() (router.ClientEVM2AnyMessage, *testreporters.RequestStat, error) { + msgSerialNo 
:= c.CurrentMsgSerialNo.Load() + c.CurrentMsgSerialNo.Inc() + msgDetails := c.MsgProfiles.MsgDetailsForIteration(msgSerialNo) + stats := testreporters.NewCCIPRequestStats(msgSerialNo, c.Lane.SourceNetworkName, c.Lane.DestNetworkName) + // form the message for transfer + msgLength := pointer.GetInt64(msgDetails.DataLength) + gasLimit := pointer.GetInt64(msgDetails.DestGasLimit) + msg := c.msg + if msgLength > 0 && msgDetails.IsDataTransfer() { + if c.SendMaxDataIntermittentlyInMsgCount > 0 { + // every SendMaxDataIntermittentlyInMsgCount message will have extra data with almost MaxDataBytes + if msgSerialNo%c.SendMaxDataIntermittentlyInMsgCount == 0 { + msgLength = int64(c.MaxDataBytes - 1) + } + } + b := make([]byte, msgLength) + _, err := crypto_rand.Read(b) + if err != nil { + return router.ClientEVM2AnyMessage{}, stats, fmt.Errorf("failed to generate random string %w", err) + } + randomString := base64.URLEncoding.EncodeToString(b) + msg.Data = []byte(randomString[:msgLength]) + } + if !msgDetails.IsTokenTransfer() { + msg.TokenAmounts = []router.ClientEVMTokenAmount{} + } + extraArgsV1, err := testhelpers.GetEVMExtraArgsV1(big.NewInt(gasLimit), false) + if err != nil { + return router.ClientEVM2AnyMessage{}, stats, err + } + msg.ExtraArgs = extraArgsV1 + // if gaslimit is 0, set the receiver to EOA + if gasLimit == 0 { + msg.Receiver = c.EOAReceiver + } + return msg, stats, nil +} + +func (c *CCIPE2ELoad) Call(_ *wasp.Generator) *wasp.Response { + res := &wasp.Response{} + sourceCCIP := c.Lane.Source + recentRequestFoundAt := sourceCCIP.IsRequestTriggeredWithinTimeframe(c.SkipRequestIfAnotherRequestTriggeredWithin) + if recentRequestFoundAt != nil { + c.Lane.Logger. + Info(). + Str("Found At=", recentRequestFoundAt.String()). + Msgf("Skipping ...Another Request found within given timeframe %s", c.SkipRequestIfAnotherRequestTriggeredWithin.String()) + return res + } + // if there is an connection error , we will skip sending the request + // this is to avoid sending the request when the connection is not restored yet + if sourceCCIP.Common.IsConnectionRestoredRecently != nil { + if !sourceCCIP.Common.IsConnectionRestoredRecently.Load() { + c.Lane.Logger.Info().Msg("RPC Connection Error.. skipping this request") + res.Failed = true + res.Error = "RPC Connection error .. 
this request was skipped" + return res + } + c.Lane.Logger.Info().Msg("Connection is restored, Resuming load") + } + msg, stats, err := c.CCIPMsg() + if err != nil { + res.Error = err.Error() + res.Failed = true + return res + } + msgSerialNo := stats.ReqNo + // create a sub-logger for the request + lggr := c.Lane.Logger.With().Int64("msg Number", stats.ReqNo).Logger() + + feeToken := sourceCCIP.Common.FeeToken.EthAddress + // initiate the transfer + lggr.Debug().Str("triggeredAt", time.Now().GoString()).Msg("triggering transfer") + var sendTx *types.Transaction + + destChainSelector, err := chain_selectors.SelectorFromChainId(sourceCCIP.DestinationChainId) + if err != nil { + res.Error = fmt.Sprintf("reqNo %d err %s - while getting selector from chainid", msgSerialNo, err.Error()) + res.Failed = true + return res + } + + // initiate the transfer + // if the token address is 0x0 it will use Native as fee token and the fee amount should be mentioned in bind.TransactOpts's value + fee, err := sourceCCIP.Common.Router.GetFee(destChainSelector, msg) + if err != nil { + res.Error = fmt.Sprintf("reqNo %d err %s - while getting fee from router", msgSerialNo, err.Error()) + res.Failed = true + return res + } + startTime := time.Now().UTC() + if feeToken != common.HexToAddress("0x0") { + sendTx, err = sourceCCIP.Common.Router.CCIPSendAndProcessTx(destChainSelector, msg, nil) + } else { + // add a bit buffer to fee + sendTx, err = sourceCCIP.Common.Router.CCIPSendAndProcessTx(destChainSelector, msg, new(big.Int).Add(big.NewInt(1e5), fee)) + } + if err != nil { + stats.UpdateState(&lggr, 0, testreporters.TX, time.Since(startTime), testreporters.Failure, nil) + res.Error = fmt.Sprintf("ccip-send tx error %s for reqNo %d", err.Error(), msgSerialNo) + res.Data = stats.StatusByPhase + res.Failed = true + return res + } + + // the msg is no longer needed, so we can clear it to avoid holding extra data during load + // nolint:ineffassign,staticcheck + msg = router.ClientEVM2AnyMessage{} + + txConfirmationTime := time.Now().UTC() + lggr = lggr.With().Str("Msg Tx", sendTx.Hash().String()).Logger() + + stats.UpdateState(&lggr, 0, testreporters.TX, txConfirmationTime.Sub(startTime), testreporters.Success, nil) + err = c.Validate(lggr, sendTx, txConfirmationTime, []*testreporters.RequestStat{stats}) + if err != nil { + res.Error = err.Error() + res.Failed = true + res.Data = stats.StatusByPhase + return res + } + res.Data = stats.StatusByPhase + return res +} + +func (c *CCIPE2ELoad) Validate(lggr zerolog.Logger, sendTx *types.Transaction, txConfirmationTime time.Time, stats []*testreporters.RequestStat) error { + // wait for + // - CCIPSendRequested Event log to be generated, + msgLogs, sourceLogTime, err := c.Lane.Source.AssertEventCCIPSendRequested(&lggr, sendTx.Hash().Hex(), c.CallTimeOut, txConfirmationTime, stats) + if err != nil { + return err + } + + lstFinalizedBlock := c.LastFinalizedTxBlock.Load() + var sourceLogFinalizedAt time.Time + // if the finality tag is enabled and the last finalized block is greater than the block number of the message + // consider the message finalized + if c.Lane.Source.Common.ChainClient.GetNetworkConfig().FinalityDepth == 0 && + lstFinalizedBlock != 0 && lstFinalizedBlock > msgLogs[0].LogInfo.BlockNumber { + sourceLogFinalizedAt = c.LastFinalizedTimestamp.Load() + for i, stat := range stats { + stat.UpdateState(&lggr, stat.SeqNum, testreporters.SourceLogFinalized, + sourceLogFinalizedAt.Sub(sourceLogTime), testreporters.Success, + &testreporters.TransactionStats{ + 
TxHash: msgLogs[i].LogInfo.TxHash.Hex(), + FinalizedByBlock: strconv.FormatUint(lstFinalizedBlock, 10), + FinalizedAt: sourceLogFinalizedAt.String(), + Fee: msgLogs[i].Fee.String(), + NoOfTokensSent: msgLogs[i].NoOfTokens, + MessageBytesLength: int64(msgLogs[i].DataLength), + MsgID: fmt.Sprintf("0x%x", msgLogs[i].MessageId[:]), + }) + } + } else { + var finalizingBlock uint64 + sourceLogFinalizedAt, finalizingBlock, err = c.Lane.Source.AssertSendRequestedLogFinalized( + &lggr, msgLogs[0].LogInfo.TxHash, msgLogs, sourceLogTime, stats) + if err != nil { + return err + } + c.LastFinalizedTxBlock.Store(finalizingBlock) + c.LastFinalizedTimestamp.Store(sourceLogFinalizedAt) + } + + for _, msgLog := range msgLogs { + seqNum := msgLog.SequenceNumber + var reqStat *testreporters.RequestStat + lggr = lggr.With().Str("MsgID", fmt.Sprintf("0x%x", msgLog.MessageId[:])).Logger() + for _, stat := range stats { + if stat.SeqNum == seqNum { + reqStat = stat + break + } + } + if reqStat == nil { + return fmt.Errorf("could not find request stat for seq number %d", seqNum) + } + // wait for + // - CommitStore to increase the seq number, + err = c.Lane.Dest.AssertSeqNumberExecuted(&lggr, seqNum, c.CallTimeOut, sourceLogFinalizedAt, reqStat) + if err != nil { + return err + } + // wait for ReportAccepted event + commitReport, reportAcceptedAt, err := c.Lane.Dest.AssertEventReportAccepted(&lggr, seqNum, c.CallTimeOut, sourceLogFinalizedAt, reqStat) + if err != nil || commitReport == nil { + return err + } + blessedAt, err := c.Lane.Dest.AssertReportBlessed(&lggr, seqNum, c.CallTimeOut, *commitReport, reportAcceptedAt, reqStat) + if err != nil { + return err + } + _, err = c.Lane.Dest.AssertEventExecutionStateChanged(&lggr, seqNum, c.CallTimeOut, blessedAt, reqStat, testhelpers.ExecutionStateSuccess) + if err != nil { + return err + } + } + + return nil +} diff --git a/integration-tests/ccip-tests/load/ccip_multicall_loadgen.go b/integration-tests/ccip-tests/load/ccip_multicall_loadgen.go new file mode 100644 index 00000000000..ad3960dee2e --- /dev/null +++ b/integration-tests/ccip-tests/load/ccip_multicall_loadgen.go @@ -0,0 +1,271 @@ +package load + +import ( + "fmt" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/prometheus/common/model" + "github.com/rs/zerolog" + "golang.org/x/sync/errgroup" + + chain_selectors "github.com/smartcontractkit/chain-selectors" + "github.com/smartcontractkit/wasp" + + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + "github.com/smartcontractkit/chainlink-testing-framework/logging" + + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/actions" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/contracts" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testreporters" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testsetups" +) + +// CCIPMultiCallLoadGenerator represents a load generator for the CCIP lanes originating from same network +// The purpose of this load generator is to group ccip-send calls for the CCIP lanes originating from same network +// This is to avoid the scenario of hitting rpc rate limit for the same network if the load generator is sending +// too many ccip-send calls to the same network hitting the rpc rate limit +type CCIPMultiCallLoadGenerator struct { + t *testing.T + logger zerolog.Logger + client blockchain.EVMClient + E2ELoads map[string]*CCIPE2ELoad + MultiCall string + NoOfRequestsPerUnitTime int64 + 
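+	// labels is the base Loki label set for this generator; HandleLokiLogs merges
+	// per-destination labels into it before pushing request stats to Loki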
labels model.LabelSet + loki *wasp.LokiClient + responses chan map[string]MultiCallReturnValues + Done chan struct{} +} + +type MultiCallReturnValues struct { + Msgs []contracts.CCIPMsgData + Stats []*testreporters.RequestStat +} + +func NewMultiCallLoadGenerator(testCfg *testsetups.CCIPTestConfig, lanes []*actions.CCIPLane, noOfRequestsPerUnitTime int64, labels map[string]string) (*CCIPMultiCallLoadGenerator, error) { + // check if all lanes are from same network + source := lanes[0].Source.Common.ChainClient.GetChainID() + multiCall := lanes[0].Source.Common.MulticallContract.Hex() + if multiCall == "" { + return nil, fmt.Errorf("multicall address cannot be empty") + } + for i := 1; i < len(lanes); i++ { + if source.String() != lanes[i].Source.Common.ChainClient.GetChainID().String() { + return nil, fmt.Errorf("all lanes should be from same network; expected %s, got %s", source, lanes[i].Source.Common.ChainClient.GetChainID()) + } + if lanes[i].Source.Common.MulticallContract.Hex() != multiCall { + return nil, fmt.Errorf("multicall address should be same for all lanes") + } + } + client := lanes[0].Source.Common.ChainClient + lggr := logging.GetTestLogger(testCfg.Test).With().Str("Source Network", client.GetNetworkName()).Logger() + ls := wasp.LabelsMapToModel(labels) + if err := ls.Validate(); err != nil { + return nil, err + } + lokiConfig := testCfg.EnvInput.Logging.Loki + loki, err := wasp.NewLokiClient(wasp.NewLokiConfig(lokiConfig.Endpoint, lokiConfig.TenantId, nil, nil)) + if err != nil { + return nil, err + } + m := &CCIPMultiCallLoadGenerator{ + t: testCfg.Test, + client: client, + MultiCall: multiCall, + logger: lggr, + NoOfRequestsPerUnitTime: noOfRequestsPerUnitTime, + E2ELoads: make(map[string]*CCIPE2ELoad), + labels: ls, + loki: loki, + responses: make(chan map[string]MultiCallReturnValues), + Done: make(chan struct{}), + } + for _, lane := range lanes { + // for multicall load generator, we don't want to send max data intermittently, it might + // cause oversized data for multicall + ccipLoad := NewCCIPLoad( + testCfg.Test, lane, testCfg.TestGroupInput.PhaseTimeout.Duration(), + 100000, + testCfg.TestGroupInput.LoadProfile.MsgProfile, 0, + testCfg.TestGroupInput.LoadProfile.SkipRequestIfAnotherRequestTriggeredWithin, + ) + ccipLoad.BeforeAllCall() + m.E2ELoads[fmt.Sprintf("%s-%s", lane.SourceNetworkName, lane.DestNetworkName)] = ccipLoad + } + + m.StartLokiStream() + return m, nil +} + +func (m *CCIPMultiCallLoadGenerator) Stop() error { + m.Done <- struct{}{} + tokenMap := make(map[string]struct{}) + var tokens []*contracts.ERC20Token + for _, e2eLoad := range m.E2ELoads { + for i := range e2eLoad.Lane.Source.TransferAmount { + // if length of sourceCCIP.TransferAmount is more than available bridge token use first bridge token + token := e2eLoad.Lane.Source.Common.BridgeTokens[0] + if i < len(e2eLoad.Lane.Source.Common.BridgeTokens) { + token = e2eLoad.Lane.Source.Common.BridgeTokens[i] + } + if _, ok := tokenMap[token.Address()]; !ok { + tokens = append(tokens, e2eLoad.Lane.Source.Common.BridgeTokens[i]) + } + } + } + if len(tokens) > 0 { + return contracts.TransferTokens(m.client, common.HexToAddress(m.MultiCall), tokens) + } + return nil +} + +func (m *CCIPMultiCallLoadGenerator) StartLokiStream() { + go func() { + for { + select { + case <-m.Done: + m.logger.Info().Msg("stopping loki client from multi call load generator") + m.loki.Stop() + return + case rValues := <-m.responses: + m.HandleLokiLogs(rValues) + } + } + }() +} + +func (m *CCIPMultiCallLoadGenerator) 
HandleLokiLogs(rValues map[string]MultiCallReturnValues) { + for dest, rValue := range rValues { + labels := m.labels.Merge(model.LabelSet{ + "dest_chain": model.LabelValue(dest), + "test_data_type": "responses", + "go_test_name": model.LabelValue(m.t.Name()), + }) + for _, stat := range rValue.Stats { + err := m.loki.HandleStruct(labels, time.Now().UTC(), stat.StatusByPhase) + if err != nil { + m.logger.Error().Err(err).Msg("error while handling loki logs") + } + } + } +} + +func (m *CCIPMultiCallLoadGenerator) Call(_ *wasp.Generator) *wasp.Response { + res := &wasp.Response{} + msgs, returnValuesByDest, err := m.MergeCalls() + if err != nil { + res.Error = err.Error() + res.Failed = true + return res + } + defer func() { + m.responses <- returnValuesByDest + }() + m.logger.Info().Interface("msgs", msgs).Msgf("Sending %d ccip-send calls", len(msgs)) + startTime := time.Now().UTC() + // for now we are using all ccip-sends with native + sendTx, err := contracts.MultiCallCCIP(m.client, m.MultiCall, msgs, true) + if err != nil { + res.Error = err.Error() + res.Failed = true + return res + } + + lggr := m.logger.With().Str("Msg Tx", sendTx.Hash().String()).Logger() + txConfirmationTime := time.Now().UTC() + for _, rValues := range returnValuesByDest { + if len(rValues.Stats) != len(rValues.Msgs) { + res.Error = fmt.Sprintf("number of stats %d and msgs %d should be same", len(rValues.Stats), len(rValues.Msgs)) + res.Failed = true + return res + } + for _, stat := range rValues.Stats { + stat.UpdateState(&lggr, 0, testreporters.TX, startTime.Sub(txConfirmationTime), testreporters.Success, nil) + } + } + + validateGrp := errgroup.Group{} + // wait for + // - CCIPSendRequested Event log to be generated, + for _, rValues := range returnValuesByDest { + key := fmt.Sprintf("%s-%s", rValues.Stats[0].SourceNetwork, rValues.Stats[0].DestNetwork) + c, ok := m.E2ELoads[key] + if !ok { + res.Error = fmt.Sprintf("load for %s not found", key) + res.Failed = true + return res + } + + lggr = lggr.With().Str("Source Network", c.Lane.Source.Common.ChainClient.GetNetworkName()).Str("Dest Network", c.Lane.Dest.Common.ChainClient.GetNetworkName()).Logger() + stats := rValues.Stats + txConfirmationTime := txConfirmationTime + sendTx := sendTx + lggr := lggr + validateGrp.Go(func() error { + return c.Validate(lggr, sendTx, txConfirmationTime, stats) + }) + } + err = validateGrp.Wait() + if err != nil { + res.Error = err.Error() + res.Failed = true + return res + } + + return res +} + +func (m *CCIPMultiCallLoadGenerator) MergeCalls() ([]contracts.CCIPMsgData, map[string]MultiCallReturnValues, error) { + var ccipMsgs []contracts.CCIPMsgData + statDetails := make(map[string]MultiCallReturnValues) + + for _, e2eLoad := range m.E2ELoads { + destChainSelector, err := chain_selectors.SelectorFromChainId(e2eLoad.Lane.Source.DestinationChainId) + if err != nil { + return ccipMsgs, statDetails, err + } + + allFee := big.NewInt(0) + var allStatsForDest []*testreporters.RequestStat + var allMsgsForDest []contracts.CCIPMsgData + for i := int64(0); i < m.NoOfRequestsPerUnitTime; i++ { + msg, stats, err := e2eLoad.CCIPMsg() + if err != nil { + return ccipMsgs, statDetails, err + } + msg.FeeToken = common.Address{} + fee, err := e2eLoad.Lane.Source.Common.Router.GetFee(destChainSelector, msg) + if err != nil { + return ccipMsgs, statDetails, err + } + // transfer fee to the multicall address + if msg.FeeToken != (common.Address{}) { + allFee = new(big.Int).Add(allFee, fee) + } + msgData := contracts.CCIPMsgData{ + RouterAddr: 
e2eLoad.Lane.Source.Common.Router.EthAddress, + ChainSelector: destChainSelector, + Msg: msg, + Fee: fee, + } + ccipMsgs = append(ccipMsgs, msgData) + + allStatsForDest = append(allStatsForDest, stats) + allMsgsForDest = append(allMsgsForDest, msgData) + } + statDetails[e2eLoad.Lane.DestNetworkName] = MultiCallReturnValues{ + Stats: allStatsForDest, + Msgs: allMsgsForDest, + } + // transfer fee to the multicall address + if allFee.Cmp(big.NewInt(0)) > 0 { + if err := e2eLoad.Lane.Source.Common.FeeToken.Transfer(e2eLoad.Lane.Source.Common.MulticallContract.Hex(), allFee); err != nil { + return ccipMsgs, statDetails, err + } + } + } + return ccipMsgs, statDetails, nil +} diff --git a/integration-tests/ccip-tests/load/ccip_test.go b/integration-tests/ccip-tests/load/ccip_test.go new file mode 100644 index 00000000000..0d14549ec96 --- /dev/null +++ b/integration-tests/ccip-tests/load/ccip_test.go @@ -0,0 +1,331 @@ +package load + +import ( + "testing" + "time" + + "github.com/rs/zerolog/log" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/chaos" + "github.com/smartcontractkit/chainlink-testing-framework/logging" + "github.com/smartcontractkit/chainlink-testing-framework/utils/ptr" + + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/actions" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testsetups" +) + +func TestLoadCCIPStableRPS(t *testing.T) { + t.Parallel() + lggr := logging.GetTestLogger(t) + testArgs := NewLoadArgs(t, lggr) + testArgs.Setup() + // if the test runs on remote runner + if len(testArgs.TestSetupArgs.Lanes) == 0 { + return + } + t.Cleanup(func() { + log.Info().Msg("Tearing down the environment") + require.NoError(t, testArgs.TestSetupArgs.TearDown()) + }) + testArgs.TriggerLoadByLane() + testArgs.Wait() +} + +// TestLoadCCIPWithUpgradeNodeVersion starts all nodes with a specific version, triggers load and then upgrades the node version as the load is running +func TestLoadCCIPWithUpgradeNodeVersion(t *testing.T) { + t.Parallel() + lggr := logging.GetTestLogger(t) + testArgs := NewLoadArgs(t, lggr) + testArgs.Setup() + // if the test runs on remote runner + if len(testArgs.TestSetupArgs.Lanes) == 0 { + return + } + t.Cleanup(func() { + log.Info().Msg("Tearing down the environment") + require.NoError(t, testArgs.TestSetupArgs.TearDown()) + }) + testArgs.TriggerLoadByLane() + testArgs.lggr.Info().Msg("Waiting for load to start on all lanes") + // wait for load runner to start + testArgs.LoadStarterWg.Wait() + // sleep for 30s to let load run for a while + time.Sleep(30 * time.Second) + // upgrade node version for few nodes + err := testsetups.UpgradeNodes(testArgs.t, testArgs.lggr, testArgs.TestCfg, testArgs.TestSetupArgs.Env) + require.NoError(t, err) + // after upgrade send a request to all lanes as a sanity check + testArgs.SanityCheck() + // now wait for the load to finish + testArgs.Wait() +} + +func TestLoadCCIPStableRPSTriggerBySource(t *testing.T) { + t.Parallel() + lggr := logging.GetTestLogger(t) + testArgs := NewLoadArgs(t, lggr) + testArgs.TestCfg.TestGroupInput.MulticallInOneTx = ptr.Ptr(true) + testArgs.Setup() + // if the test runs on remote runner + if len(testArgs.TestSetupArgs.Lanes) == 0 { + return + } + t.Cleanup(func() { + log.Info().Msg("Tearing down the environment") + testArgs.TearDown() + }) + testArgs.TriggerLoadBySource() + testArgs.Wait() +} + +func 
TestLoadCCIPStableRequestTriggeringWithNetworkChaos(t *testing.T) { + t.Parallel() + lggr := logging.GetTestLogger(t) + testArgs := NewLoadArgs(t, lggr) + testArgs.Setup() + // if the test runs on remote runner + if len(testArgs.TestSetupArgs.Lanes) == 0 { + return + } + t.Cleanup(func() { + log.Info().Msg("Tearing down the environment") + require.NoError(t, testArgs.TestSetupArgs.TearDown()) + }) + testEnv := testArgs.TestSetupArgs.Env + require.NotNil(t, testEnv) + require.NotNil(t, testEnv.K8Env) + + // apply network chaos so that chainlink's RPC calls are affected by some network delay for the duration of the test + var gethNetworksLabels []string + for _, net := range testArgs.TestCfg.SelectedNetworks { + gethNetworksLabels = append(gethNetworksLabels, actions.GethLabel(net.Name)) + } + testEnv.ChaosLabelForAllGeth(t, gethNetworksLabels) + if testArgs.TestCfg.TestGroupInput.LoadProfile.NetworkChaosDelay == nil { + testArgs.TestCfg.TestGroupInput.LoadProfile.NetworkChaosDelay = config.MustNewDuration(200 * time.Millisecond) + } + chaosId, err := testEnv.K8Env.Chaos.Run( + chaos.NewNetworkLatency( + testEnv.K8Env.Cfg.Namespace, &chaos.Props{ + FromLabels: &map[string]*string{"geth": ptr.Ptr(actions.ChaosGroupCCIPGeth)}, + ToLabels: &map[string]*string{"app": ptr.Ptr("chainlink-0")}, + DurationStr: testArgs.TestCfg.TestGroupInput.LoadProfile.TestDuration.String(), + Delay: testArgs.TestCfg.TestGroupInput.LoadProfile.NetworkChaosDelay.Duration().String(), + })) + require.NoError(t, err) + + t.Cleanup(func() { + if chaosId != "" { + require.NoError(t, testEnv.K8Env.Chaos.Stop(chaosId)) + } + }) + + // now trigger the load + testArgs.TriggerLoadByLane() + testArgs.Wait() +} + +// This test applies pod chaos to the CL nodes asynchronously and sequentially while the load is running +// the pod chaos is applied at a regular interval throughout the test duration +// this test needs to be run for a longer duration to see the effects of pod chaos +// in this test commit and execution are set up to be on the same node +func TestLoadCCIPStableWithMajorityNodeFailure(t *testing.T) { + t.Parallel() + + inputs := []ChaosConfig{ + { + ChaosName: "CCIP works after majority of CL nodes are recovered from pod failure @pod-chaos", + ChaosFunc: chaos.NewFailPods, + ChaosProps: &chaos.Props{ + LabelsSelector: &map[string]*string{actions.ChaosGroupCommitFaultyPlus: ptr.Ptr("1")}, + DurationStr: "2m", + }, + }, + } + + lggr := logging.GetTestLogger(t) + testArgs := NewLoadArgs(t, lggr, inputs...) 
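+	// allChaosDur adds up each chaos duration plus the wait between experiments,
+	// so the check below can verify the load outlasts the combined chaos window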
+ + var allChaosDur time.Duration + // to override the default duration of chaos with test input + for i := range inputs { + inputs[i].ChaosProps.DurationStr = testArgs.TestCfg.TestGroupInput.ChaosDuration.String() + allChaosDur += testArgs.TestCfg.TestGroupInput.ChaosDuration.Duration() + inputs[i].WaitBetweenChaos = testArgs.TestCfg.TestGroupInput.LoadProfile.WaitBetweenChaosDuringLoad.Duration() + allChaosDur += inputs[i].WaitBetweenChaos + } + + // the duration of load test should be greater than the duration of chaos + if testArgs.TestCfg.TestGroupInput.LoadProfile.TestDuration.Duration() < allChaosDur+2*time.Minute { + t.Fatalf("Skipping the test as the test duration is less than the chaos duration") + } + + testArgs.Setup() + // if the test runs on remote runner + if len(testArgs.TestSetupArgs.Lanes) == 0 { + return + } + t.Cleanup(func() { + log.Info().Msg("Tearing down the environment") + require.NoError(t, testArgs.TestSetupArgs.TearDown()) + }) + + testEnv := testArgs.TestSetupArgs.Env + require.NotNil(t, testEnv) + require.NotNil(t, testEnv.K8Env) + + testArgs.TriggerLoadByLane() + testArgs.ApplyChaos() + testArgs.Wait() +} + +// This test applies pod chaos to the CL nodes asynchronously and sequentially while the load is running +// the pod chaos is applied at a regular interval throughout the test duration +// this test needs to be run for a longer duration to see the effects of pod chaos +// in this test commit and execution are set up to be on the same node +func TestLoadCCIPStableWithMinorityNodeFailure(t *testing.T) { + t.Parallel() + + inputs := []ChaosConfig{ + { + ChaosName: "CCIP works while minority of CL nodes are in failed state for pod failure @pod-chaos", + ChaosFunc: chaos.NewFailPods, + ChaosProps: &chaos.Props{ + LabelsSelector: &map[string]*string{actions.ChaosGroupCommitFaulty: ptr.Ptr("1")}, + DurationStr: "4m", + }, + }, + } + + lggr := logging.GetTestLogger(t) + testArgs := NewLoadArgs(t, lggr, inputs...) 
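+	// the ChaosConfig inputs passed to NewLoadArgs are stored on LoadArgs and are
+	// executed later by testArgs.ApplyChaos() while the load is running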
+ + var allChaosDur time.Duration + // to override the default duration of chaos with test input + for i := range inputs { + inputs[i].ChaosProps.DurationStr = testArgs.TestCfg.TestGroupInput.ChaosDuration.String() + allChaosDur += testArgs.TestCfg.TestGroupInput.ChaosDuration.Duration() + inputs[i].WaitBetweenChaos = testArgs.TestCfg.TestGroupInput.LoadProfile.WaitBetweenChaosDuringLoad.Duration() + allChaosDur += inputs[i].WaitBetweenChaos + } + + // the duration of load test should be greater than the duration of chaos + if testArgs.TestCfg.TestGroupInput.LoadProfile.TestDuration.Duration() < allChaosDur+2*time.Minute { + t.Fatalf("Skipping the test as the test duration is less than the chaos duration") + } + + testArgs.Setup() + // if the test runs on remote runner + if len(testArgs.TestSetupArgs.Lanes) == 0 { + return + } + t.Cleanup(func() { + log.Info().Msg("Tearing down the environment") + require.NoError(t, testArgs.TestSetupArgs.TearDown()) + }) + + testEnv := testArgs.TestSetupArgs.Env + require.NotNil(t, testEnv) + require.NotNil(t, testEnv.K8Env) + + testArgs.TriggerLoadByLane() + testArgs.ApplyChaos() + testArgs.Wait() +} + +// This test applies pod chaos to the CL nodes asynchronously and sequentially while the load is running +// the pod chaos is applied at a regular interval throughout the test duration +// in this test commit and execution are set up to be on different node +func TestLoadCCIPStableWithPodChaosDiffCommitAndExec(t *testing.T) { + t.Parallel() + inputs := []ChaosConfig{ + { + ChaosName: "CCIP Commit works after majority of CL nodes are recovered from pod failure @pod-chaos", + ChaosFunc: chaos.NewFailPods, + ChaosProps: &chaos.Props{ + LabelsSelector: &map[string]*string{actions.ChaosGroupCommitFaultyPlus: ptr.Ptr("1")}, + DurationStr: "2m", + }, + }, + { + ChaosName: "CCIP Execution works after majority of CL nodes are recovered from pod failure @pod-chaos", + ChaosFunc: chaos.NewFailPods, + ChaosProps: &chaos.Props{ + LabelsSelector: &map[string]*string{actions.ChaosGroupExecutionFaultyPlus: ptr.Ptr("1")}, + DurationStr: "2m", + }, + }, + { + ChaosName: "CCIP Commit works while minority of CL nodes are in failed state for pod failure @pod-chaos", + ChaosFunc: chaos.NewFailPods, + ChaosProps: &chaos.Props{ + LabelsSelector: &map[string]*string{actions.ChaosGroupCommitFaulty: ptr.Ptr("1")}, + DurationStr: "4m", + }, + }, + { + ChaosName: "CCIP Execution works while minority of CL nodes are in failed state for pod failure @pod-chaos", + ChaosFunc: chaos.NewFailPods, + ChaosProps: &chaos.Props{ + LabelsSelector: &map[string]*string{actions.ChaosGroupExecutionFaulty: ptr.Ptr("1")}, + DurationStr: "4m", + }, + }, + } + for _, in := range inputs { + in := in + t.Run(in.ChaosName, func(t *testing.T) { + t.Parallel() + lggr := logging.GetTestLogger(t) + testArgs := NewLoadArgs(t, lggr, in) + testArgs.TestCfg.TestGroupInput.LoadProfile.TestDuration = config.MustNewDuration(5 * time.Minute) + testArgs.TestCfg.TestGroupInput.LoadProfile.TimeUnit = config.MustNewDuration(1 * time.Second) + testArgs.TestCfg.TestGroupInput.LoadProfile.RequestPerUnitTime = []int64{2} + testArgs.TestCfg.TestGroupInput.PhaseTimeout = config.MustNewDuration(15 * time.Minute) + + testArgs.Setup() + // if the test runs on remote runner + if len(testArgs.TestSetupArgs.Lanes) == 0 { + return + } + t.Cleanup(func() { + log.Info().Msg("Tearing down the environment") + require.NoError(t, testArgs.TestSetupArgs.TearDown()) + }) + testArgs.SanityCheck() + testArgs.TriggerLoadByLane() + 
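+	// ApplyChaos runs each configured experiment sequentially against the CL node
+	// pods and waits for the affected pods to recover before moving to the next one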
testArgs.ApplyChaos() + testArgs.Wait() + }) + } +} + +// TestLoadCCIPStableRPSAfterARMCurseAndUncurse validates that after ARM curse is lifted +// all pending requests get delivered. +// The test pauses loadgen while ARM is cursed and resumes it when curse is lifted. +// There is a known limitation of this test - if the test is run on remote-runner with high frequency +// the remote-runner pod gets evicted after the loadgen is resumed. +// The recommended frequency for this test 2req/min +func TestLoadCCIPStableRPSAfterARMCurseAndUncurse(t *testing.T) { + t.Skipf("need to be enabled as part of CCIP-2277") + t.Parallel() + lggr := logging.GetTestLogger(t) + testArgs := NewLoadArgs(t, lggr) + testArgs.Setup() + // if the test runs on remote runner + if len(testArgs.TestSetupArgs.Lanes) == 0 { + return + } + t.Cleanup(func() { + log.Info().Msg("Tearing down the environment") + require.NoError(t, testArgs.TestSetupArgs.TearDown()) + }) + testArgs.TriggerLoadByLane() + // wait for certain time so that few messages are sent + time.Sleep(2 * time.Minute) + // now validate the curse + testArgs.ValidateCurseFollowedByUncurse() + testArgs.Wait() +} diff --git a/integration-tests/ccip-tests/load/helper.go b/integration-tests/ccip-tests/load/helper.go new file mode 100644 index 00000000000..9522a6c346b --- /dev/null +++ b/integration-tests/ccip-tests/load/helper.go @@ -0,0 +1,483 @@ +package load + +import ( + "context" + "fmt" + "math" + "math/big" + "strings" + "sync" + "testing" + "time" + + "github.com/AlekSi/pointer" + "github.com/rs/zerolog" + "github.com/smartcontractkit/wasp" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" + "golang.org/x/sync/errgroup" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + + "github.com/smartcontractkit/chainlink-testing-framework/k8s/chaos" + "github.com/smartcontractkit/chainlink-testing-framework/utils/testcontext" + + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/actions" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testconfig" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testsetups" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router" +) + +type ChaosConfig struct { + ChaosName string + ChaosFunc chaos.ManifestFunc + ChaosProps *chaos.Props + WaitBetweenChaos time.Duration +} + +// WaspSchedule calculates the load schedule based on the provided request per unit time and duration +// if multiple step durations are provided, it will calculate the schedule based on the step duration and +// corresponding request per unit time by matching the index of the request per unit time and step duration slice +func WaspSchedule(rps []int64, duration *config.Duration, steps []*config.Duration) []*wasp.Segment { + var segments []*wasp.Segment + var segmentDuration time.Duration + + if len(rps) > 1 { + for i, req := range rps { + duration := steps[i].Duration() + segmentDuration += duration + segments = append(segments, wasp.Plain(req, duration)...) 
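+			// e.g. rps=[5,10] with steps=[1m,2m] builds one 3m pass; a 9m test
+			// duration repeats that pass ~3 times via CombineAndRepeat below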
+ } + totalDuration := duration.Duration() + repeatTimes := totalDuration.Seconds() / segmentDuration.Seconds() + return wasp.CombineAndRepeat(int(math.Round(repeatTimes)), segments) + } + return wasp.Plain(rps[0], duration.Duration()) +} + +type LoadArgs struct { + t *testing.T + Ctx context.Context + lggr *zerolog.Logger + RunnerWg *errgroup.Group // to wait on individual load generators run + LoadStarterWg *sync.WaitGroup // waits for all the runners to start + TestCfg *testsetups.CCIPTestConfig + TestSetupArgs *testsetups.CCIPTestSetUpOutputs + ChaosExps []ChaosConfig + LoadgenTearDowns []func() + Labels map[string]string + pauseLoad *atomic.Bool +} + +func (l *LoadArgs) SetReportParams() { + var qParams []string + for k, v := range l.Labels { + qParams = append(qParams, fmt.Sprintf("var-%s=%s", k, v)) + } + // add one of the source and destination network to the grafana query params + if len(l.TestSetupArgs.Lanes) > 0 { + qParams = append(qParams, fmt.Sprintf("var-source_chain=%s", l.TestSetupArgs.Lanes[0].ForwardLane.SourceNetworkName)) + qParams = append(qParams, fmt.Sprintf("var-dest_chain=%s", l.TestSetupArgs.Lanes[0].ForwardLane.DestNetworkName)) + } + err := l.TestSetupArgs.Reporter.AddToGrafanaDashboardQueryParams(qParams...) + require.NoError(l.t, err, "failed to set grafana query params") +} + +func (l *LoadArgs) Setup() { + lggr := l.lggr + existing := pointer.GetBool(l.TestCfg.TestGroupInput.ExistingDeployment) + envName := "load-ccip" + if existing { + envName = "ccip-runner" + } + l.TestSetupArgs = testsetups.CCIPDefaultTestSetUp(l.TestCfg.Test, lggr, envName, nil, l.TestCfg) + namespace := l.TestCfg.TestGroupInput.LoadProfile.TestRunName + if l.TestSetupArgs.Env != nil && l.TestSetupArgs.Env.K8Env != nil && l.TestSetupArgs.Env.K8Env.Cfg != nil { + namespace = l.TestSetupArgs.Env.K8Env.Cfg.Namespace + } + l.Labels = map[string]string{ + "test_group": "load", + "test_id": "ccip", + "namespace": namespace, + } + l.TestSetupArgs.Reporter.SetGrafanaURLProvider(l.TestCfg.EnvInput) + l.SetReportParams() +} + +func (l *LoadArgs) scheduleForDest(destNetworkName string) []*wasp.Segment { + require.Greater(l.t, len(l.TestCfg.TestGroupInput.LoadProfile.RequestPerUnitTime), 0, "RequestPerUnitTime must be set") + // try to locate if there is a frequency provided for the destination network + // to locate the frequency, we check if the destination network name contains the network name in the frequency map + // if found, use that frequency for the destination network + // otherwise, use the default frequency + if l.TestCfg.TestGroupInput.LoadProfile.FrequencyByDestination != nil { + for networkName, freq := range l.TestCfg.TestGroupInput.LoadProfile.FrequencyByDestination { + if strings.Contains(destNetworkName, networkName) { + return WaspSchedule( + freq.RequestPerUnitTime, + l.TestCfg.TestGroupInput.LoadProfile.TestDuration, + freq.StepDuration) + } + } + } + + return WaspSchedule( + l.TestCfg.TestGroupInput.LoadProfile.RequestPerUnitTime, + l.TestCfg.TestGroupInput.LoadProfile.TestDuration, + l.TestCfg.TestGroupInput.LoadProfile.StepDuration) +} + +func (l *LoadArgs) SanityCheck() { + var allLanes []*actions.CCIPLane + for _, lane := range l.TestSetupArgs.Lanes { + allLanes = append(allLanes, lane.ForwardLane) + if lane.ReverseLane != nil { + allLanes = append(allLanes, lane.ReverseLane) + } + } + for _, lane := range allLanes { + ccipLoad := NewCCIPLoad( + l.TestCfg.Test, lane, + l.TestCfg.TestGroupInput.PhaseTimeout.Duration(), + 1, 
l.TestCfg.TestGroupInput.LoadProfile.MsgProfile, + 0, nil, + ) + ccipLoad.BeforeAllCall() + resp := ccipLoad.Call(nil) + require.False(l.t, resp.Failed, "request failed in sanity check") + } +} + +// ValidateCurseFollowedByUncurse assumes the lanes under test are bi-directional. +// It assumes requests in both direction are in flight when this is called. +// It assumes the ARM is not already cursed, it will fail the test if it is in cursed state. +// It curses source ARM for forward lanes so that destination curse is also validated for reverse lanes. +// It waits for 2 minutes for curse to be seen by ccip plugins and contracts. +// It captures the curse timestamp to verify no execution state changed event is emitted after the cure is applied. +// It uncurses the source ARM at the end so that it can be verified that rest of the requests are processed as expected. +// Validates that even after uncursing the lane should not function for 30 more minutes. +func (l *LoadArgs) ValidateCurseFollowedByUncurse() { + var lanes []*actions.CCIPLane + for _, lane := range l.TestSetupArgs.Lanes { + lanes = append(lanes, lane.ForwardLane) + } + // check if source is already cursed + for _, lane := range lanes { + cursed, err := lane.Source.Common.IsCursed() + require.NoError(l.t, err, "cannot get cursed state") + if cursed { + require.Fail(l.t, "test will not work if ARM is already cursed") + } + } + // before cursing set pause + l.pauseLoad.Store(true) + // wait for some time for pause to be active in wasp + l.lggr.Info().Msg("Waiting for 1 minute after applying pause on load") + time.Sleep(1 * time.Minute) + curseTimeStamps := make(map[string]time.Time) + for _, lane := range lanes { + if _, exists := curseTimeStamps[lane.SourceNetworkName]; exists { + continue + } + curseTx, err := lane.Source.Common.CurseARM() + require.NoError(l.t, err, "error in cursing arm") + require.NotNil(l.t, curseTx, "invalid cursetx") + receipt, err := lane.Source.Common.ChainClient.GetTxReceipt(curseTx.Hash()) + require.NoError(l.t, err) + hdr, err := lane.Source.Common.ChainClient.HeaderByNumber(context.Background(), receipt.BlockNumber) + require.NoError(l.t, err) + curseTimeStamps[lane.SourceNetworkName] = hdr.Timestamp + l.lggr.Info().Str("Source", lane.SourceNetworkName).Msg("Curse is applied on source") + l.lggr.Info().Str("Destination", lane.SourceNetworkName).Msg("Curse is applied on destination") + } + + l.lggr.Info().Msg("Curse is applied on all lanes. Waiting for 2 minutes") + time.Sleep(2 * time.Minute) + + for _, lane := range lanes { + // try to send requests on lanes on which curse is applied on source RMN and the request should revert + // data-only transfer is sufficient + lane.Source.TransferAmount = []*big.Int{} + failedTx, _, _, err := lane.Source.SendRequest( + lane.Dest.ReceiverDapp.EthAddress, + big.NewInt(actions.DefaultDestinationGasLimit), // gas limit + ) + if lane.Source.Common.ChainClient.GetNetworkConfig().MinimumConfirmations > 0 { + require.Error(l.t, err) + } else { + require.NoError(l.t, err) + } + errReason, v, err := lane.Source.Common.ChainClient.RevertReasonFromTx(failedTx, router.RouterABI) + require.NoError(l.t, err) + require.Equal(l.t, "BadARMSignal", errReason) + lane.Logger.Info(). + Str("Revert Reason", errReason). + Interface("Args", v). + Str("FailedTx", failedTx.Hex()). 
+ Msg("Msg sent while source ARM is cursed") + } + + // now uncurse all + for _, lane := range lanes { + require.NoError(l.t, lane.Source.Common.UnvoteToCurseARM(), "error to unvote in cursing arm") + } + l.lggr.Info().Msg("Curse is lifted on all lanes") + // lift the pause on load test + l.pauseLoad.Store(false) + + // now add the reverse lanes so that destination curse is also verified + // we add the reverse lanes now to verify absence of commit and execution for the reverse lanes + for _, lane := range l.TestSetupArgs.Lanes { + lanes = append(lanes, lane.ReverseLane) + } + + // verify that even after uncursing the lane should not function for 30 more minutes, + // i.e no execution state changed or commit report accepted event is generated + errGrp := &errgroup.Group{} + for _, lane := range lanes { + lane := lane + curseTimeStamp, exists := curseTimeStamps[lane.SourceNetworkName] + // if curse timestamp does not exist for source, it will exist for destination + if !exists { + curseTimeStamp, exists = curseTimeStamps[lane.DestNetworkName] + require.Truef(l.t, exists, "did not find curse time stamp for lane %s->%s", lane.SourceNetworkName, lane.DestNetworkName) + } + errGrp.Go(func() error { + lane.Logger.Info().Msg("Validating no CommitReportAccepted event is received for 29 minutes") + // we allow additional 1 minute after curse timestamp for curse to be visible by plugin + return lane.Dest.AssertNoReportAcceptedEventReceived(lane.Logger, 25*time.Minute, curseTimeStamp.Add(1*time.Minute)) + }) + errGrp.Go(func() error { + lane.Logger.Info().Msg("Validating no ExecutionStateChanged event is received for 25 minutes") + // we allow additional 1 minute after curse timestamp for curse to be visible by plugin + return lane.Dest.AssertNoExecutionStateChangedEventReceived(lane.Logger, 25*time.Minute, curseTimeStamp.Add(1*time.Minute)) + }) + } + l.lggr.Info().Msg("waiting for no commit/execution validation") + err := errGrp.Wait() + require.NoError(l.t, err, "error received to validate no commit/execution is generated after lane is cursed") +} + +func (l *LoadArgs) TriggerLoadByLane() { + l.TestSetupArgs.Reporter.SetDuration(l.TestCfg.TestGroupInput.LoadProfile.TestDuration.Duration()) + + // start load for a lane + startLoad := func(lane *actions.CCIPLane) { + lane.Logger.Info(). + Str("Source Network", lane.SourceNetworkName). + Str("Destination Network", lane.DestNetworkName). 
+ Msg("Starting load for lane") + sendMaxData := pointer.GetInt64(l.TestCfg.TestGroupInput.LoadProfile.SendMaxDataInEveryMsgCount) + ccipLoad := NewCCIPLoad( + l.TestCfg.Test, lane, l.TestCfg.TestGroupInput.PhaseTimeout.Duration(), + 100000, l.TestCfg.TestGroupInput.LoadProfile.MsgProfile, sendMaxData, + l.TestCfg.TestGroupInput.LoadProfile.SkipRequestIfAnotherRequestTriggeredWithin, + ) + ccipLoad.BeforeAllCall() + // if it's not multicall set the tokens to nil to free up some space, + // we have already formed the msg to be sent in load, there is no need to store the bridge tokens anymore + // In case of multicall we still need the BridgeTokens to transfer amount from mutlicall to owner + if !lane.Source.Common.MulticallEnabled { + lane.Source.Common.BridgeTokens = nil + lane.Dest.Common.BridgeTokens = nil + } + // no need for price registry in load + lane.Source.Common.PriceRegistry = nil + lane.Dest.Common.PriceRegistry = nil + lokiConfig := l.TestCfg.EnvInput.Logging.Loki + labels := make(map[string]string) + for k, v := range l.Labels { + labels[k] = v + } + labels["source_chain"] = lane.SourceNetworkName + labels["dest_chain"] = lane.DestNetworkName + waspCfg := &wasp.Config{ + T: l.TestCfg.Test, + GenName: fmt.Sprintf("lane %s-> %s", lane.SourceNetworkName, lane.DestNetworkName), + Schedule: l.scheduleForDest(lane.DestNetworkName), + LoadType: wasp.RPS, + RateLimitUnitDuration: l.TestCfg.TestGroupInput.LoadProfile.TimeUnit.Duration(), + CallResultBufLen: 10, // we keep the last 10 call results for each generator, as the detailed report is generated at the end of the test + CallTimeout: (l.TestCfg.TestGroupInput.PhaseTimeout.Duration()) * 5, + Gun: ccipLoad, + Logger: *ccipLoad.Lane.Logger, + LokiConfig: wasp.NewLokiConfig(lokiConfig.Endpoint, lokiConfig.TenantId, nil, nil), + Labels: labels, + FailOnErr: pointer.GetBool(l.TestCfg.TestGroupInput.LoadProfile.FailOnFirstErrorInLoad), + } + waspCfg.LokiConfig.Timeout = time.Minute + loadRunner, err := wasp.NewGenerator(waspCfg) + require.NoError(l.TestCfg.Test, err, "initiating loadgen for lane %s --> %s", + lane.SourceNetworkName, lane.DestNetworkName) + loadRunner.Run(false) + l.AddToRunnerGroup(loadRunner) + } + + for _, lane := range l.TestSetupArgs.Lanes { + lane := lane + l.LoadStarterWg.Add(1) + go func() { + defer l.LoadStarterWg.Done() + startLoad(lane.ForwardLane) + }() + if pointer.GetBool(l.TestSetupArgs.Cfg.TestGroupInput.BiDirectionalLane) { + l.LoadStarterWg.Add(1) + go func() { + defer l.LoadStarterWg.Done() + startLoad(lane.ReverseLane) + }() + } + } +} + +func (l *LoadArgs) AddToRunnerGroup(gen *wasp.Generator) { + // watch for pause signal + go func(gen *wasp.Generator) { + ticker := time.NewTicker(time.Second) + pausedOnce := false + resumedAlready := false + for { + select { + case <-ticker.C: + if l.pauseLoad.Load() && !pausedOnce { + gen.Pause() + pausedOnce = true + continue + } + if pausedOnce && !resumedAlready && !l.pauseLoad.Load() { + gen.Resume() + resumedAlready = true + } + case <-l.Ctx.Done(): + return + } + } + }(gen) + l.RunnerWg.Go(func() error { + _, failed := gen.Wait() + if failed { + return fmt.Errorf("load run is failed") + } + if len(gen.Errors()) > 0 { + return fmt.Errorf("error in load sequence call %v", gen.Errors()) + } + return nil + }) +} + +func (l *LoadArgs) Wait() { + l.lggr.Info().Msg("Waiting for load to start on all lanes") + // wait for load runner to start + l.LoadStarterWg.Wait() + l.lggr.Info().Msg("Waiting for load to finish on all lanes") + // wait for load runner to finish + 
err := l.RunnerWg.Wait() + require.NoError(l.t, err, "load run is failed") + l.lggr.Info().Msg("Load finished on all lanes") +} + +func (l *LoadArgs) ApplyChaos() { + testEnv := l.TestSetupArgs.Env + if testEnv == nil || testEnv.K8Env == nil { + l.lggr.Warn().Msg("test environment is nil, skipping chaos") + return + } + testEnv.ChaosLabelForCLNodes(l.TestCfg.Test) + + for _, exp := range l.ChaosExps { + if exp.WaitBetweenChaos > 0 { + l.lggr.Info().Msgf("sleeping for %s after chaos %s", exp.WaitBetweenChaos, exp.ChaosName) + time.Sleep(exp.WaitBetweenChaos) + } + l.lggr.Info().Msgf("Starting to apply chaos %s at %s", exp.ChaosName, time.Now().UTC()) + // apply chaos + chaosId, err := testEnv.K8Env.Chaos.Run(exp.ChaosFunc(testEnv.K8Env.Cfg.Namespace, exp.ChaosProps)) + require.NoError(l.t, err) + if chaosId != "" { + chaosDur, err := time.ParseDuration(exp.ChaosProps.DurationStr) + require.NoError(l.t, err) + err = testEnv.K8Env.Chaos.WaitForAllRecovered(chaosId, chaosDur+1*time.Minute) + require.NoError(l.t, err) + l.lggr.Info().Msgf("chaos %s is recovered at %s", exp.ChaosName, time.Now().UTC()) + err = testEnv.K8Env.Chaos.Stop(chaosId) + require.NoError(l.t, err) + l.lggr.Info().Msgf("stopped chaos %s at %s", exp.ChaosName, time.Now().UTC()) + } + } +} + +func (l *LoadArgs) TearDown() { + for _, tearDn := range l.LoadgenTearDowns { + tearDn() + } + if l.TestSetupArgs.TearDown != nil { + require.NoError(l.t, l.TestSetupArgs.TearDown()) + } +} + +func (l *LoadArgs) TriggerLoadBySource() { + require.NotNil(l.t, l.TestCfg.TestGroupInput.LoadProfile.TestDuration, "test duration input is nil") + require.GreaterOrEqual(l.t, 1, len(l.TestCfg.TestGroupInput.LoadProfile.RequestPerUnitTime), "time unit input must be specified") + l.TestSetupArgs.Reporter.SetDuration(l.TestCfg.TestGroupInput.LoadProfile.TestDuration.Duration()) + var laneBySource = make(map[string][]*actions.CCIPLane) + for _, lane := range l.TestSetupArgs.Lanes { + laneBySource[lane.ForwardLane.SourceNetworkName] = append(laneBySource[lane.ForwardLane.SourceNetworkName], lane.ForwardLane) + if lane.ReverseLane != nil { + laneBySource[lane.ReverseLane.SourceNetworkName] = append(laneBySource[lane.ReverseLane.SourceNetworkName], lane.ReverseLane) + } + } + for source, lanes := range laneBySource { + source := source + lanes := lanes + l.LoadStarterWg.Add(1) + go func() { + defer l.LoadStarterWg.Done() + l.lggr.Info(). + Str("Source Network", source). 
+ Msg("Starting load for source") + allLabels := make(map[string]string) + for k, v := range l.Labels { + allLabels[k] = v + } + allLabels["source_chain"] = source + multiCallGen, err := NewMultiCallLoadGenerator(l.TestCfg, lanes, l.TestCfg.TestGroupInput.LoadProfile.RequestPerUnitTime[0], allLabels) + require.NoError(l.t, err) + lokiConfig := l.TestCfg.EnvInput.Logging.Loki + loadRunner, err := wasp.NewGenerator(&wasp.Config{ + T: l.TestCfg.Test, + GenName: fmt.Sprintf("Source %s", source), + Schedule: wasp.Plain(1, l.TestCfg.TestGroupInput.LoadProfile.TestDuration.Duration()), // hardcoded request per unit time to 1 as we are using multiCallGen + LoadType: wasp.RPS, + RateLimitUnitDuration: l.TestCfg.TestGroupInput.LoadProfile.TimeUnit.Duration(), + CallResultBufLen: 10, // we keep the last 10 call results for each generator, as the detailed report is generated at the end of the test + CallTimeout: (l.TestCfg.TestGroupInput.PhaseTimeout.Duration()) * 5, + Gun: multiCallGen, + Logger: multiCallGen.logger, + LokiConfig: wasp.NewLokiConfig(lokiConfig.Endpoint, lokiConfig.TenantId, nil, nil), + Labels: allLabels, + FailOnErr: pointer.GetBool(l.TestCfg.TestGroupInput.LoadProfile.FailOnFirstErrorInLoad), + }) + require.NoError(l.TestCfg.Test, err, "initiating loadgen for source %s", source) + loadRunner.Run(false) + l.AddToRunnerGroup(loadRunner) + l.LoadgenTearDowns = append(l.LoadgenTearDowns, func() { + require.NoError(l.t, multiCallGen.Stop()) + }) + }() + } +} + +func NewLoadArgs(t *testing.T, lggr zerolog.Logger, chaosExps ...ChaosConfig) *LoadArgs { + wg, _ := errgroup.WithContext(testcontext.Get(t)) + ctx := testcontext.Get(t) + return &LoadArgs{ + t: t, + Ctx: ctx, + lggr: &lggr, + RunnerWg: wg, + TestCfg: testsetups.NewCCIPTestConfig(t, lggr, testconfig.Load), + ChaosExps: chaosExps, + LoadStarterWg: &sync.WaitGroup{}, + pauseLoad: atomic.NewBool(false), + } +} diff --git a/integration-tests/ccip-tests/smoke/ccip_test.go b/integration-tests/ccip-tests/smoke/ccip_test.go new file mode 100644 index 00000000000..9a34044a5d8 --- /dev/null +++ b/integration-tests/ccip-tests/smoke/ccip_test.go @@ -0,0 +1,1008 @@ +package smoke + +import ( + "fmt" + "math/big" + "testing" + "time" + + "github.com/AlekSi/pointer" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-testing-framework/logging" + "github.com/smartcontractkit/chainlink-testing-framework/utils/ptr" + + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/lock_release_token_pool" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/token_pool" + + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/actions" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/contracts" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testconfig" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testreporters" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testsetups" +) + +type testDefinition struct { + testName string + lane *actions.CCIPLane +} + +func TestSmokeCCIPForBidirectionalLane(t *testing.T) { + t.Parallel() + log := logging.GetTestLogger(t) + TestCfg := testsetups.NewCCIPTestConfig(t, log, testconfig.Smoke) + require.NotNil(t, TestCfg.TestGroupInput.MsgDetails.DestGasLimit) + gasLimit := big.NewInt(*TestCfg.TestGroupInput.MsgDetails.DestGasLimit) + setUpOutput := 
testsetups.CCIPDefaultTestSetUp(t, &log, "smoke-ccip", nil, TestCfg) + if len(setUpOutput.Lanes) == 0 { + log.Info().Msg("No lanes found") + return + } + + t.Cleanup(func() { + // If we are running a test that is a token transfer, we need to verify the balance. + // skip the balance check for existing deployment, there can be multiple external requests in progress for existing deployments + // other than token transfer initiated by the test, which can affect the balance check + // therefore we check the balance only for the ccip environment created by the test + if TestCfg.TestGroupInput.MsgDetails.IsTokenTransfer() && + !pointer.GetBool(TestCfg.TestGroupInput.USDCMockDeployment) && + !pointer.GetBool(TestCfg.TestGroupInput.ExistingDeployment) { + setUpOutput.Balance.Verify(t) + } + require.NoError(t, setUpOutput.TearDown()) + }) + + // Create test definitions for each lane. + var tests []testDefinition + for _, lane := range setUpOutput.Lanes { + tests = append(tests, testDefinition{ + testName: fmt.Sprintf("CCIP message transfer from network %s to network %s", + lane.ForwardLane.SourceNetworkName, lane.ForwardLane.DestNetworkName), + lane: lane.ForwardLane, + }) + if lane.ReverseLane != nil { + tests = append(tests, testDefinition{ + testName: fmt.Sprintf("CCIP message transfer from network %s to network %s", + lane.ReverseLane.SourceNetworkName, lane.ReverseLane.DestNetworkName), + lane: lane.ReverseLane, + }) + } + } + + // Execute tests. + log.Info().Int("Total Lanes", len(tests)).Msg("Starting CCIP test") + for _, test := range tests { + tc := test + t.Run(tc.testName, func(t *testing.T) { + t.Parallel() + tc.lane.Test = t + log.Info(). + Str("Source", tc.lane.SourceNetworkName). + Str("Destination", tc.lane.DestNetworkName). + Msgf("Starting lane %s -> %s", tc.lane.SourceNetworkName, tc.lane.DestNetworkName) + + tc.lane.RecordStateBeforeTransfer() + err := tc.lane.SendRequests(1, gasLimit) + require.NoError(t, err) + tc.lane.ValidateRequests() + }) + } +} + +func TestSmokeCCIPRateLimit(t *testing.T) { + t.Parallel() + log := logging.GetTestLogger(t) + TestCfg := testsetups.NewCCIPTestConfig(t, log, testconfig.Smoke) + require.True(t, TestCfg.TestGroupInput.MsgDetails.IsTokenTransfer(), "Test config should have token transfer message type") + setUpOutput := testsetups.CCIPDefaultTestSetUp(t, &log, "smoke-ccip", nil, TestCfg) + if len(setUpOutput.Lanes) == 0 { + return + } + t.Cleanup(func() { + require.NoError(t, setUpOutput.TearDown()) + }) + + var tests []testDefinition + for _, lane := range setUpOutput.Lanes { + tests = append(tests, testDefinition{ + testName: fmt.Sprintf("Network %s to network %s", + lane.ForwardLane.SourceNetworkName, lane.ForwardLane.DestNetworkName), + lane: lane.ForwardLane, + }) + } + + // if we are running in simulated or in testnet mode, we can set the rate limit to test friendly values + // For mainnet, we need to set this as false to avoid changing the deployed contract config + setRateLimit := true + AggregatedRateLimitCapacity := new(big.Int).Mul(big.NewInt(1e18), big.NewInt(30)) + AggregatedRateLimitRate := big.NewInt(1e17) + + TokenPoolRateLimitCapacity := new(big.Int).Mul(big.NewInt(1e17), big.NewInt(1)) + TokenPoolRateLimitRate := big.NewInt(1e14) + + for _, test := range tests { + tc := test + t.Run(fmt.Sprintf("%s - Rate Limit", tc.testName), func(t *testing.T) { + tc.lane.Test = t + src := tc.lane.Source + // add liquidity to pools on both networks + if !pointer.GetBool(TestCfg.TestGroupInput.ExistingDeployment) { + addLiquidity(t, src.Common, 
new(big.Int).Mul(AggregatedRateLimitCapacity, big.NewInt(20))) + addLiquidity(t, tc.lane.Dest.Common, new(big.Int).Mul(AggregatedRateLimitCapacity, big.NewInt(20))) + } + log.Info(). + Str("Source", tc.lane.SourceNetworkName). + Str("Destination", tc.lane.DestNetworkName). + Msgf("Starting lane %s -> %s", tc.lane.SourceNetworkName, tc.lane.DestNetworkName) + + // capture the rate limit config before we change it + prevRLOnRamp, err := src.OnRamp.Instance.CurrentRateLimiterState(nil) + require.NoError(t, err) + tc.lane.Logger.Info().Interface("rate limit", prevRLOnRamp).Msg("Initial OnRamp rate limiter state") + + prevOnRampRLTokenPool, err := src.Common.BridgeTokenPools[0].Instance.GetCurrentOutboundRateLimiterState( + nil, tc.lane.Source.DestChainSelector, + ) // TODO RENS maybe? + require.NoError(t, err) + tc.lane.Logger.Info(). + Interface("rate limit", prevOnRampRLTokenPool). + Str("pool", src.Common.BridgeTokenPools[0].Address()). + Str("onRamp", src.OnRamp.Address()). + Msg("Initial Token Pool rate limiter state") + + // some sanity checks + rlOffRamp, err := tc.lane.Dest.OffRamp.Instance.CurrentRateLimiterState(nil) + require.NoError(t, err) + tc.lane.Logger.Info().Interface("rate limit", rlOffRamp).Msg("Initial OffRamp rate limiter state") + if rlOffRamp.IsEnabled { + require.GreaterOrEqual(t, rlOffRamp.Capacity.Cmp(prevRLOnRamp.Capacity), 0, + "OffRamp Aggregated capacity should be greater than or equal to OnRamp Aggregated capacity", + ) + } + + prevOffRampRLTokenPool, err := tc.lane.Dest.Common.BridgeTokenPools[0].Instance.GetCurrentInboundRateLimiterState( + nil, tc.lane.Dest.SourceChainSelector, + ) // TODO RENS maybe? + require.NoError(t, err) + tc.lane.Logger.Info(). + Interface("rate limit", prevOffRampRLTokenPool). + Str("pool", tc.lane.Dest.Common.BridgeTokenPools[0].Address()). + Str("offRamp", tc.lane.Dest.OffRamp.Address()). 
+ Msg("Initial Token Pool rate limiter state") + if prevOffRampRLTokenPool.IsEnabled { + require.GreaterOrEqual(t, prevOffRampRLTokenPool.Capacity.Cmp(prevOnRampRLTokenPool.Capacity), 0, + "OffRamp Token Pool capacity should be greater than or equal to OnRamp Token Pool capacity", + ) + } + + AggregatedRateLimitChanged := false + TokenPoolRateLimitChanged := false + + // reset the rate limit config to what it was before the tc + t.Cleanup(func() { + if AggregatedRateLimitChanged { + require.NoError(t, src.OnRamp.SetRateLimit(evm_2_evm_onramp.RateLimiterConfig{ + IsEnabled: prevRLOnRamp.IsEnabled, + Capacity: prevRLOnRamp.Capacity, + Rate: prevRLOnRamp.Rate, + }), "setting rate limit") + require.NoError(t, src.Common.ChainClient.WaitForEvents(), "waiting for events") + } + if TokenPoolRateLimitChanged { + require.NoError(t, src.Common.BridgeTokenPools[0].SetRemoteChainRateLimits(src.DestChainSelector, + token_pool.RateLimiterConfig{ + Capacity: prevOnRampRLTokenPool.Capacity, + IsEnabled: prevOnRampRLTokenPool.IsEnabled, + Rate: prevOnRampRLTokenPool.Rate, + })) + require.NoError(t, src.Common.ChainClient.WaitForEvents(), "waiting for events") + } + }) + + if setRateLimit { + if prevRLOnRamp.Capacity.Cmp(AggregatedRateLimitCapacity) != 0 || + prevRLOnRamp.Rate.Cmp(AggregatedRateLimitRate) != 0 || + !prevRLOnRamp.IsEnabled { + require.NoError(t, src.OnRamp.SetRateLimit(evm_2_evm_onramp.RateLimiterConfig{ + IsEnabled: true, + Capacity: AggregatedRateLimitCapacity, + Rate: AggregatedRateLimitRate, + }), "setting rate limit on onramp") + require.NoError(t, src.Common.ChainClient.WaitForEvents(), "waiting for events") + AggregatedRateLimitChanged = true + } + } else { + AggregatedRateLimitCapacity = prevRLOnRamp.Capacity + AggregatedRateLimitRate = prevRLOnRamp.Rate + } + + rlOnRamp, err := src.OnRamp.Instance.CurrentRateLimiterState(nil) + require.NoError(t, err) + tc.lane.Logger.Info().Interface("rate limit", rlOnRamp).Msg("OnRamp rate limiter state") + require.True(t, rlOnRamp.IsEnabled, "OnRamp rate limiter should be enabled") + + tokenPrice, err := src.Common.PriceRegistry.Instance.GetTokenPrice(nil, src.Common.BridgeTokens[0].ContractAddress) + require.NoError(t, err) + tc.lane.Logger.Info().Str("tokenPrice.Value", tokenPrice.String()).Msg("Price Registry Token Price") + + totalTokensForOnRampCapacity := new(big.Int).Mul( + big.NewInt(1e18), + new(big.Int).Div(rlOnRamp.Capacity, tokenPrice), + ) + + tc.lane.Source.Common.ChainClient.ParallelTransactions(true) + + // current tokens are equal to the full capacity - should fail + src.TransferAmount[0] = rlOnRamp.Tokens + tc.lane.Logger.Info().Str("tokensToSend", rlOnRamp.Tokens.String()).Msg("Aggregated Capacity") + // approve the tokens + require.NoError(t, src.Common.BridgeTokens[0].Approve( + tc.lane.Source.Common.ChainClient.GetDefaultWallet(), src.Common.Router.Address(), src.TransferAmount[0]), + ) + require.NoError(t, tc.lane.Source.Common.ChainClient.WaitForEvents()) + failedTx, _, _, err := tc.lane.Source.SendRequest( + tc.lane.Dest.ReceiverDapp.EthAddress, + big.NewInt(actions.DefaultDestinationGasLimit), // gas limit + ) + require.NoError(t, err) + require.Error(t, tc.lane.Source.Common.ChainClient.WaitForEvents()) + errReason, v, err := tc.lane.Source.Common.ChainClient.RevertReasonFromTx(failedTx, evm_2_evm_onramp.EVM2EVMOnRampABI) + require.NoError(t, err) + tc.lane.Logger.Info(). + Str("Revert Reason", errReason). + Interface("Args", v). + Str("TokensSent", src.TransferAmount[0].String()). 
+ Str("Token", tc.lane.Source.Common.BridgeTokens[0].Address()). + Str("FailedTx", failedTx.Hex()). + Msg("Msg sent with tokens more than AggregateValueMaxCapacity") + require.Equal(t, "AggregateValueMaxCapacityExceeded", errReason) + + // 99% of the aggregated capacity - should succeed + tokensToSend := new(big.Int).Div(new(big.Int).Mul(totalTokensForOnRampCapacity, big.NewInt(99)), big.NewInt(100)) + tc.lane.Logger.Info().Str("tokensToSend", tokensToSend.String()).Msg("99% of Aggregated Capacity") + tc.lane.RecordStateBeforeTransfer() + src.TransferAmount[0] = tokensToSend + err = tc.lane.SendRequests(1, big.NewInt(actions.DefaultDestinationGasLimit)) + require.NoError(t, err) + + // try to send again with amount more than the amount refilled by rate and + // this should fail, as the refill rate is not enough to refill the capacity + src.TransferAmount[0] = new(big.Int).Mul(AggregatedRateLimitRate, big.NewInt(10)) + failedTx, _, _, err = tc.lane.Source.SendRequest( + tc.lane.Dest.ReceiverDapp.EthAddress, + big.NewInt(actions.DefaultDestinationGasLimit), // gas limit + ) + tc.lane.Logger.Info().Str("tokensToSend", src.TransferAmount[0].String()).Msg("More than Aggregated Rate") + require.NoError(t, err) + require.Error(t, tc.lane.Source.Common.ChainClient.WaitForEvents()) + errReason, v, err = tc.lane.Source.Common.ChainClient.RevertReasonFromTx(failedTx, evm_2_evm_onramp.EVM2EVMOnRampABI) + require.NoError(t, err) + tc.lane.Logger.Info(). + Str("Revert Reason", errReason). + Interface("Args", v). + Str("TokensSent", src.TransferAmount[0].String()). + Str("Token", tc.lane.Source.Common.BridgeTokens[0].Address()). + Str("FailedTx", failedTx.Hex()). + Msg("Msg sent with tokens more than AggregateValueRate") + require.Equal(t, "AggregateValueRateLimitReached", errReason) + + // validate the successful request was delivered to the destination + tc.lane.ValidateRequests() + + // now set the token pool rate limit + if setRateLimit { + if prevOnRampRLTokenPool.Capacity.Cmp(TokenPoolRateLimitCapacity) != 0 || + prevOnRampRLTokenPool.Rate.Cmp(TokenPoolRateLimitRate) != 0 || + !prevOnRampRLTokenPool.IsEnabled { + require.NoError(t, src.Common.BridgeTokenPools[0].SetRemoteChainRateLimits( + src.DestChainSelector, + token_pool.RateLimiterConfig{ + IsEnabled: true, + Capacity: TokenPoolRateLimitCapacity, + Rate: TokenPoolRateLimitRate, + }), "error setting rate limit on token pool") + require.NoError(t, src.Common.ChainClient.WaitForEvents(), "waiting for events") + TokenPoolRateLimitChanged = true + } + } else { + TokenPoolRateLimitCapacity = prevOnRampRLTokenPool.Capacity + TokenPoolRateLimitRate = prevOnRampRLTokenPool.Rate + } + + rlOnPool, err := src.Common.BridgeTokenPools[0].Instance.GetCurrentOutboundRateLimiterState(nil, src.DestChainSelector) + require.NoError(t, err) + require.True(t, rlOnPool.IsEnabled, "Token Pool rate limiter should be enabled") + + // try to send more than token pool capacity - should fail + tokensToSend = new(big.Int).Add(TokenPoolRateLimitCapacity, big.NewInt(2)) + + // wait for the AggregateCapacity to be refilled + onRampState, err := src.OnRamp.Instance.CurrentRateLimiterState(nil) + if err != nil { + return + } + if AggregatedRateLimitCapacity.Cmp(onRampState.Capacity) > 0 { + capacityToBeFilled := new(big.Int).Sub(AggregatedRateLimitCapacity, onRampState.Capacity) + durationToFill := time.Duration(new(big.Int).Div(capacityToBeFilled, AggregatedRateLimitRate).Int64()) + tc.lane.Logger.Info(). + Dur("wait duration", durationToFill). 
+ Str("current capacity", onRampState.Capacity.String()). + Str("tokensToSend", tokensToSend.String()). + Msg("Waiting for aggregated capacity to be available") + time.Sleep(durationToFill * time.Second) + } + + src.TransferAmount[0] = tokensToSend + tc.lane.Logger.Info().Str("tokensToSend", tokensToSend.String()).Msg("More than Token Pool Capacity") + + failedTx, _, _, err = tc.lane.Source.SendRequest( + tc.lane.Dest.ReceiverDapp.EthAddress, + big.NewInt(actions.DefaultDestinationGasLimit), // gas limit + ) + require.NoError(t, err) + require.Error(t, tc.lane.Source.Common.ChainClient.WaitForEvents()) + errReason, v, err = tc.lane.Source.Common.ChainClient.RevertReasonFromTx(failedTx, lock_release_token_pool.LockReleaseTokenPoolABI) + require.NoError(t, err) + tc.lane.Logger.Info(). + Str("Revert Reason", errReason). + Interface("Args", v). + Str("TokensSent", src.TransferAmount[0].String()). + Str("Token", tc.lane.Source.Common.BridgeTokens[0].Address()). + Str("FailedTx", failedTx.Hex()). + Msg("Msg sent with tokens more than token pool capacity") + require.Equal(t, "TokenMaxCapacityExceeded", errReason) + + // try to send 99% of token pool capacity - should succeed + tokensToSend = new(big.Int).Div(new(big.Int).Mul(TokenPoolRateLimitCapacity, big.NewInt(99)), big.NewInt(100)) + src.TransferAmount[0] = tokensToSend + tc.lane.Logger.Info().Str("tokensToSend", tokensToSend.String()).Msg("99% of Token Pool Capacity") + tc.lane.RecordStateBeforeTransfer() + err = tc.lane.SendRequests(1, big.NewInt(actions.DefaultDestinationGasLimit)) + require.NoError(t, err) + + // try to send again with amount more than the amount refilled by token pool rate and + // this should fail, as the refill rate is not enough to refill the capacity + tokensToSend = new(big.Int).Mul(TokenPoolRateLimitRate, big.NewInt(20)) + tc.lane.Logger.Info().Str("tokensToSend", tokensToSend.String()).Msg("More than TokenPool Rate") + src.TransferAmount[0] = tokensToSend + // approve the tokens + require.NoError(t, src.Common.BridgeTokens[0].Approve( + src.Common.ChainClient.GetDefaultWallet(), src.Common.Router.Address(), src.TransferAmount[0]), + ) + require.NoError(t, tc.lane.Source.Common.ChainClient.WaitForEvents()) + failedTx, _, _, err = tc.lane.Source.SendRequest( + tc.lane.Dest.ReceiverDapp.EthAddress, + big.NewInt(actions.DefaultDestinationGasLimit), + ) + require.NoError(t, err) + require.Error(t, tc.lane.Source.Common.ChainClient.WaitForEvents()) + errReason, v, err = tc.lane.Source.Common.ChainClient.RevertReasonFromTx(failedTx, lock_release_token_pool.LockReleaseTokenPoolABI) + require.NoError(t, err) + tc.lane.Logger.Info(). + Str("Revert Reason", errReason). + Interface("Args", v). + Str("TokensSent", src.TransferAmount[0].String()). + Str("Token", tc.lane.Source.Common.BridgeTokens[0].Address()). + Str("FailedTx", failedTx.Hex()). + Msg("Msg sent with tokens more than TokenPool Rate") + require.Equal(t, "TokenRateLimitReached", errReason) + + // validate that the successful transfers are reflected in destination + tc.lane.ValidateRequests() + }) + } +} + +func TestSmokeCCIPOnRampLimits(t *testing.T) { + t.Parallel() + + log := logging.GetTestLogger(t) + TestCfg := testsetups.NewCCIPTestConfig(t, log, testconfig.Smoke, testsetups.WithNoTokensPerMessage(4), testsetups.WithTokensPerChain(4)) + require.False(t, pointer.GetBool(TestCfg.TestGroupInput.ExistingDeployment), + "This test modifies contract state. 
Before running it, ensure you are willing and able to do so.", + ) + err := contracts.MatchContractVersionsOrAbove(map[contracts.Name]contracts.Version{ + contracts.OffRampContract: contracts.V1_5_0_dev, + contracts.OnRampContract: contracts.V1_5_0_dev, + }) + require.NoError(t, err, "Required contract versions not met") + + setUpOutput := testsetups.CCIPDefaultTestSetUp(t, &log, "smoke-ccip", nil, TestCfg) + if len(setUpOutput.Lanes) == 0 { + return + } + t.Cleanup(func() { + require.NoError(t, setUpOutput.TearDown()) + }) + + var tests []testDefinition + for _, lane := range setUpOutput.Lanes { + tests = append(tests, testDefinition{ + testName: fmt.Sprintf("Network %s to network %s", + lane.ForwardLane.SourceNetworkName, lane.ForwardLane.DestNetworkName), + lane: lane.ForwardLane, + }) + } + + var ( + capacityLimit = big.NewInt(1e16) + overCapacityAmount = new(big.Int).Add(capacityLimit, big.NewInt(1)) + + // token without any transfer config + freeTokenIndex = 0 + // token with bps non-zero, no agg rate limit + bpsTokenIndex = 1 + // token with bps zero, with agg rate limit on + aggRateTokenIndex = 2 + // token with both bps and agg rate limit + bpsAndAggTokenIndex = 3 + ) + + for _, tc := range tests { + t.Run(fmt.Sprintf("%s - OnRamp Limits", tc.testName), func(t *testing.T) { + tc.lane.Test = t + src := tc.lane.Source + dest := tc.lane.Dest + require.GreaterOrEqual(t, len(src.Common.BridgeTokens), 2, "At least two bridge tokens needed for test") + require.GreaterOrEqual(t, len(src.Common.BridgeTokenPools), 2, "At least two bridge token pools needed for test") + require.GreaterOrEqual(t, len(dest.Common.BridgeTokens), 2, "At least two bridge tokens needed for test") + require.GreaterOrEqual(t, len(dest.Common.BridgeTokenPools), 2, "At least two bridge token pools needed for test") + addLiquidity(t, src.Common, new(big.Int).Mul(capacityLimit, big.NewInt(20))) + addLiquidity(t, dest.Common, new(big.Int).Mul(capacityLimit, big.NewInt(20))) + + var ( + freeToken = src.Common.BridgeTokens[freeTokenIndex] + bpsToken = src.Common.BridgeTokens[bpsTokenIndex] + aggRateToken = src.Common.BridgeTokens[aggRateTokenIndex] + bpsAndAggToken = src.Common.BridgeTokens[bpsAndAggTokenIndex] + ) + tc.lane.Logger.Info(). + Str("Free Token", freeToken.ContractAddress.Hex()). + Str("BPS Token", bpsToken.ContractAddress.Hex()). + Str("Agg Rate Token", aggRateToken.ContractAddress.Hex()). + Str("BPS and Agg Rate Token", bpsAndAggToken.ContractAddress.Hex()). 
+ Msg("Tokens for rate limit testing") + err := tc.lane.DisableAllRateLimiting() + require.NoError(t, err, "Error disabling rate limits") + + // Set reasonable rate limits for the tokens + err = src.OnRamp.SetTokenTransferFeeConfig([]evm_2_evm_onramp.EVM2EVMOnRampTokenTransferFeeConfigArgs{ + { + Token: bpsToken.ContractAddress, + AggregateRateLimitEnabled: false, + DeciBps: 10, + }, + { + Token: aggRateToken.ContractAddress, + AggregateRateLimitEnabled: true, + }, + { + Token: bpsAndAggToken.ContractAddress, + AggregateRateLimitEnabled: true, + DeciBps: 10, + }, + }) + require.NoError(t, err, "Error setting OnRamp transfer fee config") + err = src.OnRamp.SetRateLimit(evm_2_evm_onramp.RateLimiterConfig{ + IsEnabled: true, + Capacity: capacityLimit, + Rate: new(big.Int).Mul(capacityLimit, big.NewInt(500)), // Set a high rate to avoid it getting in the way + }) + require.NoError(t, err, "Error setting OnRamp rate limits") + err = src.Common.ChainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + + // Send all tokens under their limits and ensure they succeed + src.TransferAmount[freeTokenIndex] = overCapacityAmount + src.TransferAmount[bpsTokenIndex] = overCapacityAmount + src.TransferAmount[aggRateTokenIndex] = big.NewInt(1) + src.TransferAmount[bpsAndAggTokenIndex] = big.NewInt(1) + tc.lane.RecordStateBeforeTransfer() + err = tc.lane.SendRequests(1, big.NewInt(actions.DefaultDestinationGasLimit)) + require.NoError(t, err) + tc.lane.ValidateRequests() + + // Check that capacity limits are enforced + src.TransferAmount[freeTokenIndex] = big.NewInt(0) + src.TransferAmount[bpsTokenIndex] = big.NewInt(0) + src.TransferAmount[aggRateTokenIndex] = overCapacityAmount + src.TransferAmount[bpsAndAggTokenIndex] = big.NewInt(0) + failedTx, _, _, err := tc.lane.Source.SendRequest(tc.lane.Dest.ReceiverDapp.EthAddress, big.NewInt(actions.DefaultDestinationGasLimit)) + require.Error(t, err, "Limited token transfer should immediately revert") + errReason, _, err := src.Common.ChainClient.RevertReasonFromTx(failedTx, evm_2_evm_onramp.EVM2EVMOnRampABI) + require.NoError(t, err) + require.Equal(t, "AggregateValueMaxCapacityExceeded", errReason, "Expected capacity limit reached error") + tc.lane.Logger. + Info(). + Str("Token", aggRateToken.ContractAddress.Hex()). + Msg("Limited token transfer failed on source chain (a good thing in this context)") + + src.TransferAmount[aggRateTokenIndex] = big.NewInt(0) + src.TransferAmount[bpsAndAggTokenIndex] = overCapacityAmount + failedTx, _, _, err = tc.lane.Source.SendRequest(tc.lane.Dest.ReceiverDapp.EthAddress, big.NewInt(actions.DefaultDestinationGasLimit)) + require.Error(t, err, "Limited token transfer should immediately revert") + errReason, _, err = src.Common.ChainClient.RevertReasonFromTx(failedTx, evm_2_evm_onramp.EVM2EVMOnRampABI) + require.NoError(t, err) + require.Equal(t, "AggregateValueMaxCapacityExceeded", errReason, "Expected capacity limit reached error") + tc.lane.Logger. + Info(). + Str("Token", aggRateToken.ContractAddress.Hex()). 
+ Msg("Limited token transfer failed on source chain (a good thing in this context)") + + // Set a high price for the tokens to more easily trigger aggregate rate limits + // Aggregate rate limits are based on USD price of the tokens + err = src.Common.PriceRegistry.UpdatePrices([]contracts.InternalTokenPriceUpdate{ + { + SourceToken: aggRateToken.ContractAddress, + UsdPerToken: big.NewInt(100), + }, + { + SourceToken: bpsAndAggToken.ContractAddress, + UsdPerToken: big.NewInt(100), + }, + }, []contracts.InternalGasPriceUpdate{}) + require.NoError(t, err, "Error updating prices") + // Enable aggregate rate limiting for the limited tokens + err = src.OnRamp.SetRateLimit(evm_2_evm_onramp.RateLimiterConfig{ + IsEnabled: true, + Capacity: new(big.Int).Mul(capacityLimit, big.NewInt(5000)), // Set a high capacity to avoid it getting in the way + Rate: big.NewInt(1), + }) + require.NoError(t, err, "Error setting OnRamp rate limits") + err = src.Common.ChainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + + // Send aggregate unlimited tokens and ensure they succeed + src.TransferAmount[freeTokenIndex] = overCapacityAmount + src.TransferAmount[bpsTokenIndex] = overCapacityAmount + src.TransferAmount[aggRateTokenIndex] = big.NewInt(0) + src.TransferAmount[bpsAndAggTokenIndex] = big.NewInt(0) + tc.lane.RecordStateBeforeTransfer() + err = tc.lane.SendRequests(1, big.NewInt(actions.DefaultDestinationGasLimit)) + require.NoError(t, err) + tc.lane.ValidateRequests() + + // Check that aggregate rate limits are enforced on limited tokens + src.TransferAmount[freeTokenIndex] = big.NewInt(0) + src.TransferAmount[bpsTokenIndex] = big.NewInt(0) + src.TransferAmount[aggRateTokenIndex] = capacityLimit + src.TransferAmount[bpsAndAggTokenIndex] = big.NewInt(0) + failedTx, _, _, err = tc.lane.Source.SendRequest(tc.lane.Dest.ReceiverDapp.EthAddress, big.NewInt(actions.DefaultDestinationGasLimit)) + require.Error(t, err, "Aggregate rate limited token transfer should immediately revert") + errReason, _, err = src.Common.ChainClient.RevertReasonFromTx(failedTx, evm_2_evm_onramp.EVM2EVMOnRampABI) + require.NoError(t, err) + require.Equal(t, "AggregateValueRateLimitReached", errReason, "Expected aggregate rate limit reached error") + tc.lane.Logger. + Info(). + Str("Token", aggRateToken.ContractAddress.Hex()). + Msg("Limited token transfer failed on source chain (a good thing in this context)") + + src.TransferAmount[aggRateTokenIndex] = big.NewInt(0) + src.TransferAmount[bpsAndAggTokenIndex] = capacityLimit + failedTx, _, _, err = tc.lane.Source.SendRequest(tc.lane.Dest.ReceiverDapp.EthAddress, big.NewInt(actions.DefaultDestinationGasLimit)) + require.Error(t, err, "Aggregate rate limited token transfer should immediately revert") + errReason, _, err = src.Common.ChainClient.RevertReasonFromTx(failedTx, evm_2_evm_onramp.EVM2EVMOnRampABI) + require.NoError(t, err) + require.Equal(t, "AggregateValueRateLimitReached", errReason, "Expected aggregate rate limit reached error") + tc.lane.Logger. + Info(). + Str("Token", aggRateToken.ContractAddress.Hex()). 
+ Msg("Limited token transfer failed on source chain (a good thing in this context)") + }) + } +} + +func TestSmokeCCIPOffRampCapacityLimit(t *testing.T) { + t.Parallel() + + capacityLimited := contracts.RateLimiterConfig{ + IsEnabled: true, + Capacity: big.NewInt(1e16), + Rate: new(big.Int).Mul(big.NewInt(1e16), big.NewInt(10)), // Set a high rate limit to avoid it getting in the way + } + testOffRampRateLimits(t, capacityLimited) +} + +func TestSmokeCCIPOffRampAggRateLimit(t *testing.T) { + t.Parallel() + + aggRateLimited := contracts.RateLimiterConfig{ + IsEnabled: true, + Capacity: new(big.Int).Mul(big.NewInt(1e16), big.NewInt(10)), // Set a high capacity limit to avoid it getting in the way + Rate: big.NewInt(1), + } + testOffRampRateLimits(t, aggRateLimited) +} + +func TestSmokeCCIPTokenPoolRateLimits(t *testing.T) { + t.Parallel() + + log := logging.GetTestLogger(t) + TestCfg := testsetups.NewCCIPTestConfig(t, log, testconfig.Smoke, testsetups.WithNoTokensPerMessage(4), testsetups.WithTokensPerChain(4)) + require.False(t, pointer.GetBool(TestCfg.TestGroupInput.ExistingDeployment), + "This test modifies contract state. Before running it, ensure you are willing and able to do so.", + ) + err := contracts.MatchContractVersionsOrAbove(map[contracts.Name]contracts.Version{ + contracts.OffRampContract: contracts.V1_5_0_dev, + contracts.OnRampContract: contracts.V1_5_0_dev, + }) + require.NoError(t, err, "Required contract versions not met") + + setUpOutput := testsetups.CCIPDefaultTestSetUp(t, &log, "smoke-ccip", nil, TestCfg) + if len(setUpOutput.Lanes) == 0 { + return + } + t.Cleanup(func() { + require.NoError(t, setUpOutput.TearDown()) + }) + + var tests []testDefinition + for _, lane := range setUpOutput.Lanes { + tests = append(tests, testDefinition{ + testName: fmt.Sprintf("Network %s to network %s", + lane.ForwardLane.SourceNetworkName, lane.ForwardLane.DestNetworkName), + lane: lane.ForwardLane, + }) + } + + var ( + capacityLimit = big.NewInt(1e16) + overCapacityAmount = new(big.Int).Add(capacityLimit, big.NewInt(1)) + + // token without any limits + freeTokenIndex = 0 + // token with rate limits + limitedTokenIndex = 1 + ) + + for _, tc := range tests { + t.Run(fmt.Sprintf("%s - Token Pool Rate Limits", tc.testName), func(t *testing.T) { + tc.lane.Test = t + src := tc.lane.Source + dest := tc.lane.Dest + require.GreaterOrEqual(t, len(src.Common.BridgeTokens), 2, "At least two bridge tokens needed for test") + require.GreaterOrEqual(t, len(src.Common.BridgeTokenPools), 2, "At least two bridge token pools needed for test") + require.GreaterOrEqual(t, len(dest.Common.BridgeTokens), 2, "At least two bridge tokens needed for test") + require.GreaterOrEqual(t, len(dest.Common.BridgeTokenPools), 2, "At least two bridge token pools needed for test") + addLiquidity(t, src.Common, new(big.Int).Mul(capacityLimit, big.NewInt(20))) + addLiquidity(t, dest.Common, new(big.Int).Mul(capacityLimit, big.NewInt(20))) + + var ( + freeToken = src.Common.BridgeTokens[freeTokenIndex] + limitedToken = src.Common.BridgeTokens[limitedTokenIndex] + limitedTokenPool = src.Common.BridgeTokenPools[limitedTokenIndex] + ) + tc.lane.Logger.Info(). + Str("Free Token", freeToken.ContractAddress.Hex()). + Str("Limited Token", limitedToken.ContractAddress.Hex()). 
+ Msg("Tokens for rate limit testing") + err := tc.lane.DisableAllRateLimiting() // Make sure this is pure + require.NoError(t, err, "Error disabling rate limits") + + // Check capacity limits + err = limitedTokenPool.SetRemoteChainRateLimits(src.DestChainSelector, token_pool.RateLimiterConfig{ + IsEnabled: true, + Capacity: capacityLimit, + Rate: new(big.Int).Sub(capacityLimit, big.NewInt(1)), // Set as high rate as possible to avoid it getting in the way + }) + require.NoError(t, err, "Error setting token pool rate limit") + err = src.Common.ChainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + + // Send all tokens under their limits and ensure they succeed + src.TransferAmount[freeTokenIndex] = overCapacityAmount + src.TransferAmount[limitedTokenIndex] = big.NewInt(1) + tc.lane.RecordStateBeforeTransfer() + err = tc.lane.SendRequests(1, big.NewInt(actions.DefaultDestinationGasLimit)) + require.NoError(t, err) + tc.lane.ValidateRequests() + + // Send limited token over capacity and ensure it fails + src.TransferAmount[freeTokenIndex] = big.NewInt(0) + src.TransferAmount[limitedTokenIndex] = overCapacityAmount + failedTx, _, _, err := tc.lane.Source.SendRequest(tc.lane.Dest.ReceiverDapp.EthAddress, big.NewInt(actions.DefaultDestinationGasLimit)) + require.Error(t, err, "Limited token transfer should immediately revert") + errReason, _, err := src.Common.ChainClient.RevertReasonFromTx(failedTx, lock_release_token_pool.LockReleaseTokenPoolABI) + require.NoError(t, err) + require.Equal(t, "TokenMaxCapacityExceeded", errReason, "Expected token capacity error") + tc.lane.Logger. + Info(). + Str("Token", limitedToken.ContractAddress.Hex()). + Msg("Limited token transfer failed on source chain (a good thing in this context)") + + // Check rate limit + err = limitedTokenPool.SetRemoteChainRateLimits(src.DestChainSelector, token_pool.RateLimiterConfig{ + IsEnabled: true, + Capacity: new(big.Int).Mul(capacityLimit, big.NewInt(2)), // Set a high capacity to avoid it getting in the way + Rate: big.NewInt(1), + }) + require.NoError(t, err, "Error setting token pool rate limit") + err = src.Common.ChainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + + // Send all tokens under their limits and ensure they succeed + src.TransferAmount[freeTokenIndex] = overCapacityAmount + src.TransferAmount[limitedTokenIndex] = capacityLimit + tc.lane.RecordStateBeforeTransfer() + err = tc.lane.SendRequests(1, big.NewInt(actions.DefaultDestinationGasLimit)) + require.NoError(t, err) + tc.lane.ValidateRequests() + + // Send limited token over rate limit and ensure it fails + src.TransferAmount[freeTokenIndex] = big.NewInt(0) + src.TransferAmount[limitedTokenIndex] = capacityLimit + failedTx, _, _, err = tc.lane.Source.SendRequest(tc.lane.Dest.ReceiverDapp.EthAddress, big.NewInt(actions.DefaultDestinationGasLimit)) + require.Error(t, err, "Limited token transfer should immediately revert") + errReason, _, err = src.Common.ChainClient.RevertReasonFromTx(failedTx, lock_release_token_pool.LockReleaseTokenPoolABI) + require.NoError(t, err) + require.Equal(t, "TokenRateLimitReached", errReason, "Expected rate limit reached error") + tc.lane.Logger. + Info(). + Str("Token", limitedToken.ContractAddress.Hex()). 
+ Msg("Limited token transfer failed on source chain (a good thing in this context)") + }) + } +} + +func TestSmokeCCIPMulticall(t *testing.T) { + t.Parallel() + log := logging.GetTestLogger(t) + TestCfg := testsetups.NewCCIPTestConfig(t, log, testconfig.Smoke) + // enable multicall in one tx for this test + TestCfg.TestGroupInput.MulticallInOneTx = ptr.Ptr(true) + setUpOutput := testsetups.CCIPDefaultTestSetUp(t, &log, "smoke-ccip", nil, TestCfg) + if len(setUpOutput.Lanes) == 0 { + return + } + t.Cleanup(func() { + require.NoError(t, setUpOutput.TearDown()) + }) + + var tests []testDefinition + for _, lane := range setUpOutput.Lanes { + tests = append(tests, testDefinition{ + testName: fmt.Sprintf("CCIP message transfer from network %s to network %s", + lane.ForwardLane.SourceNetworkName, lane.ForwardLane.DestNetworkName), + lane: lane.ForwardLane, + }) + if lane.ReverseLane != nil { + tests = append(tests, testDefinition{ + testName: fmt.Sprintf("CCIP message transfer from network %s to network %s", + lane.ReverseLane.SourceNetworkName, lane.ReverseLane.DestNetworkName), + lane: lane.ReverseLane, + }) + } + } + + log.Info().Int("Total Lanes", len(tests)).Msg("Starting CCIP test") + for _, test := range tests { + tc := test + t.Run(tc.testName, func(t *testing.T) { + t.Parallel() + tc.lane.Test = t + log.Info(). + Str("Source", tc.lane.SourceNetworkName). + Str("Destination", tc.lane.DestNetworkName). + Msgf("Starting lane %s -> %s", tc.lane.SourceNetworkName, tc.lane.DestNetworkName) + + tc.lane.RecordStateBeforeTransfer() + err := tc.lane.Multicall(TestCfg.TestGroupInput.NoOfSendsInMulticall, tc.lane.Source.Common.MulticallContract) + require.NoError(t, err) + tc.lane.ValidateRequests() + }) + } +} + +func TestSmokeCCIPManuallyExecuteAfterExecutionFailingDueToInsufficientGas(t *testing.T) { + t.Parallel() + log := logging.GetTestLogger(t) + TestCfg := testsetups.NewCCIPTestConfig(t, log, testconfig.Smoke) + setUpOutput := testsetups.CCIPDefaultTestSetUp(t, &log, "smoke-ccip", nil, TestCfg) + if len(setUpOutput.Lanes) == 0 { + return + } + t.Cleanup(func() { + if TestCfg.TestGroupInput.MsgDetails.IsTokenTransfer() { + setUpOutput.Balance.Verify(t) + } + require.NoError(t, setUpOutput.TearDown()) + }) + + var tests []testDefinition + for _, lane := range setUpOutput.Lanes { + tests = append(tests, testDefinition{ + testName: fmt.Sprintf("CCIP message transfer from network %s to network %s", + lane.ForwardLane.SourceNetworkName, lane.ForwardLane.DestNetworkName), + lane: lane.ForwardLane, + }) + if lane.ReverseLane != nil { + tests = append(tests, testDefinition{ + testName: fmt.Sprintf("CCIP message transfer from network %s to network %s", + lane.ReverseLane.SourceNetworkName, lane.ReverseLane.DestNetworkName), + lane: lane.ReverseLane, + }) + } + } + + log.Info().Int("Total Lanes", len(tests)).Msg("Starting CCIP test") + for _, test := range tests { + tc := test + t.Run(tc.testName, func(t *testing.T) { + t.Parallel() + tc.lane.Test = t + log.Info(). + Str("Source", tc.lane.SourceNetworkName). + Str("Destination", tc.lane.DestNetworkName). 
+ Msgf("Starting lane %s -> %s", tc.lane.SourceNetworkName, tc.lane.DestNetworkName) + + tc.lane.RecordStateBeforeTransfer() + // send with insufficient gas for ccip-receive to fail + err := tc.lane.SendRequests(1, big.NewInt(0)) + require.NoError(t, err) + tc.lane.ValidateRequests(actions.ExpectPhaseToFail(testreporters.ExecStateChanged)) + // wait for events + err = tc.lane.Dest.Common.ChainClient.WaitForEvents() + require.NoError(t, err) + // execute all failed ccip requests manually + err = tc.lane.ExecuteManually() + require.NoError(t, err) + if len(tc.lane.Source.TransferAmount) > 0 { + tc.lane.Source.UpdateBalance(int64(tc.lane.NumberOfReq), tc.lane.TotalFee, tc.lane.Balance) + tc.lane.Dest.UpdateBalance(tc.lane.Source.TransferAmount, int64(tc.lane.NumberOfReq), tc.lane.Balance) + } + }) + } +} + +// add liquidity to pools on both networks +func addLiquidity(t *testing.T, ccipCommon *actions.CCIPCommon, amount *big.Int) { + t.Helper() + + for i, btp := range ccipCommon.BridgeTokenPools { + token := ccipCommon.BridgeTokens[i] + err := btp.AddLiquidity( + token, token.OwnerWallet, amount, + ) + require.NoError(t, err) + } +} + +// testOffRampRateLimits tests the rate limiting functionality of the OffRamp contract +// it's broken into a helper to help parallelize and keep the tests DRY +func testOffRampRateLimits(t *testing.T, rateLimiterConfig contracts.RateLimiterConfig) { + t.Helper() + + log := logging.GetTestLogger(t) + TestCfg := testsetups.NewCCIPTestConfig(t, log, testconfig.Smoke) + require.False(t, pointer.GetBool(TestCfg.TestGroupInput.ExistingDeployment), + "This test modifies contract state. Before running it, ensure you are willing and able to do so.", + ) + err := contracts.MatchContractVersionsOrAbove(map[contracts.Name]contracts.Version{ + contracts.OffRampContract: contracts.V1_5_0_dev, + }) + require.NoError(t, err, "Required contract versions not met") + require.False(t, pointer.GetBool(TestCfg.TestGroupInput.ExistingDeployment), "This test modifies contract state and cannot be run on existing deployments") + + // Set the default permissionless exec threshold lower so that we can manually execute the transactions faster + // Tuning this too low stops any transactions from being realistically executed + actions.DefaultPermissionlessExecThreshold = 1 * time.Minute + + setUpOutput := testsetups.CCIPDefaultTestSetUp(t, &log, "smoke-ccip", nil, TestCfg) + if len(setUpOutput.Lanes) == 0 { + return + } + t.Cleanup(func() { + require.NoError(t, setUpOutput.TearDown()) + }) + + var tests []testDefinition + for _, lane := range setUpOutput.Lanes { + tests = append(tests, testDefinition{ + testName: fmt.Sprintf("Network %s to network %s", + lane.ForwardLane.SourceNetworkName, lane.ForwardLane.DestNetworkName), + lane: lane.ForwardLane, + }) + } + + var ( + freeTokenIndex = 0 + limitedTokenIndex = 1 + ) + + for _, tc := range tests { + t.Run(fmt.Sprintf("%s - OffRamp Limits", tc.testName), func(t *testing.T) { + tc.lane.Test = t + src := tc.lane.Source + dest := tc.lane.Dest + var ( + capacityLimit = rateLimiterConfig.Capacity + overLimitAmount = new(big.Int).Add(capacityLimit, big.NewInt(1)) + ) + require.GreaterOrEqual(t, len(src.Common.BridgeTokens), 2, "At least two bridge tokens needed for test") + require.GreaterOrEqual(t, len(src.Common.BridgeTokenPools), 2, "At least two bridge token pools needed for test") + require.GreaterOrEqual(t, len(dest.Common.BridgeTokens), 2, "At least two bridge tokens needed for test") + require.GreaterOrEqual(t, 
len(dest.Common.BridgeTokenPools), 2, "At least two bridge token pools needed for test") + addLiquidity(t, src.Common, new(big.Int).Mul(capacityLimit, big.NewInt(20))) + addLiquidity(t, dest.Common, new(big.Int).Mul(capacityLimit, big.NewInt(20))) + + var ( + freeSrcToken = src.Common.BridgeTokens[freeTokenIndex] + freeDestToken = dest.Common.BridgeTokens[freeTokenIndex] + limitedSrcToken = src.Common.BridgeTokens[limitedTokenIndex] + limitedDestToken = dest.Common.BridgeTokens[limitedTokenIndex] + ) + tc.lane.Logger.Info(). + Str("Free Source Token", freeSrcToken.Address()). + Str("Free Dest Token", freeDestToken.Address()). + Str("Limited Source Token", limitedSrcToken.Address()). + Str("Limited Dest Token", limitedDestToken.Address()). + Msg("Tokens for rate limit testing") + + err := tc.lane.DisableAllRateLimiting() + require.NoError(t, err, "Error disabling rate limits") + + // Send both tokens with no rate limits and ensure they succeed + src.TransferAmount[freeTokenIndex] = overLimitAmount + src.TransferAmount[limitedTokenIndex] = overLimitAmount + tc.lane.RecordStateBeforeTransfer() + err = tc.lane.SendRequests(1, big.NewInt(actions.DefaultDestinationGasLimit)) + require.NoError(t, err) + tc.lane.ValidateRequests() + + // Enable capacity limiting on the destination chain for the limited token + err = dest.AddRateLimitTokens([]*contracts.ERC20Token{limitedSrcToken}, []*contracts.ERC20Token{limitedDestToken}) + require.NoError(t, err, "Error setting destination rate limits") + err = dest.OffRamp.SetRateLimit(rateLimiterConfig) + require.NoError(t, err, "Error setting destination rate limits") + err = dest.Common.ChainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + tc.lane.Logger.Debug().Str("Token", limitedSrcToken.ContractAddress.Hex()).Msg("Enabled capacity limit on destination chain") + + // Send free token that should not have a rate limit and should succeed + src.TransferAmount[freeTokenIndex] = overLimitAmount + src.TransferAmount[limitedTokenIndex] = big.NewInt(0) + tc.lane.RecordStateBeforeTransfer() + err = tc.lane.SendRequests(1, big.NewInt(actions.DefaultDestinationGasLimit)) + require.NoError(t, err, "Free token transfer failed") + tc.lane.ValidateRequests() + tc.lane.Logger.Info().Str("Token", freeSrcToken.ContractAddress.Hex()).Msg("Free token transfer succeeded") + + // Send limited token with rate limit that should fail on the destination chain + src.TransferAmount[freeTokenIndex] = big.NewInt(0) + src.TransferAmount[limitedTokenIndex] = overLimitAmount + tc.lane.RecordStateBeforeTransfer() + err = tc.lane.SendRequests(1, big.NewInt(actions.DefaultDestinationGasLimit)) + require.NoError(t, err, "Failed to send rate limited token transfer") + + // We should see the ExecStateChanged phase fail on the OffRamp + tc.lane.ValidateRequests(actions.ExpectPhaseToFail(testreporters.ExecStateChanged)) + tc.lane.Logger.Info(). + Str("Token", limitedSrcToken.ContractAddress.Hex()). 
+ Msg("Limited token transfer failed on destination chain (a good thing in this context)") + + // Manually execute the rate limited token transfer and expect a similar error + tc.lane.Logger.Info().Str("Wait Time", actions.DefaultPermissionlessExecThreshold.String()).Msg("Waiting for Exec Threshold to Expire") + time.Sleep(actions.DefaultPermissionlessExecThreshold) // Give time to exit the window + // See above comment on timeout + err = tc.lane.ExecuteManually(actions.WithConfirmationTimeout(time.Minute)) + require.Error(t, err, "There should be errors executing manually at this point") + tc.lane.Logger.Debug().Str("Error", err.Error()).Msg("Manually executed rate limited token transfer failed as expected") + + // Change limits to make it viable + err = dest.OffRamp.SetRateLimit(contracts.RateLimiterConfig{ + IsEnabled: true, + Capacity: new(big.Int).Mul(capacityLimit, big.NewInt(100)), + Rate: new(big.Int).Mul(capacityLimit, big.NewInt(100)), + }) + require.NoError(t, err, "Error setting destination rate limits") + err = dest.Common.ChainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + + // Execute again manually and expect a pass + err = tc.lane.ExecuteManually() + require.NoError(t, err, "Error manually executing transaction after rate limit is lifted") + }) + } + +} diff --git a/integration-tests/ccip-tests/testconfig/README.md b/integration-tests/ccip-tests/testconfig/README.md new file mode 100644 index 00000000000..c32aee3d913 --- /dev/null +++ b/integration-tests/ccip-tests/testconfig/README.md @@ -0,0 +1,700 @@ +# CCIP Configuration + +The CCIP configuration is used to specify the test configuration for running the CCIP integration tests. +The configuration is specified in a TOML file. The configuration is used to specify the test environment, test type, test parameters, and other necessary details for running the tests. +The test config is read in following order: +- The test reads the default configuration from [ccip-default.toml](./tomls/ccip-default.toml). +- The default can be overridden by specifying the test config in a separate file. + - The file content needs to be encoded in base64 format and set in `BASE64_CCIP_CONFIG_OVERRIDE` environment variable. + - The config mentioned in this file will override the default config. + - Example override file - [override.toml.example](./examples/override.toml.example) +- If there are sensitive details like private keys, credentials in test config, they can be specified in a separate secret file. + - The file content needs to be encoded in base64 format and set in `BASE64_CCIP_SECRETS_CONFIG` environment variable. + - The config mentioned in this file will override the default and override config. + - Example secret file - [secrets.toml.example](./examples/secrets.toml.example) + +## CCIP.ContractVersions +Specifies contract versions of different contracts to be referred by test. +Supported versions are: +- **PriceRegistry**: '1.2.0', 'Latest' +- **OffRamp**: '1.2.0', 'Latest' +- **OnRamp**: '1.2.0', 'Latest' +- **TokenPool**: '1.4.0', 'Latest' +- **CommitStore**: '1.2.0', 'Latest' + +Example Usage: +```toml +[CCIP.ContractVersions] +PriceRegistry = "1.2.0" +OffRamp = "1.2.0" +OnRamp = "1.2.0" +TokenPool = "1.4.0" +CommitStore = "1.2.0" +``` + +## CCIP.Deployments +CCIP Deployment contains all necessary contract addresses for various networks. 
This is mandatory if the tests are to be run for [existing deployments](#ccipgroupstestgroupexistingdeployment).
+The deployment data can be specified:
+ - Under the `CCIP.Deployments.Data` field, with the value as stringified JSON.
+ - Under the `CCIP.Deployments.DataFile` field, with the value as the path to a file containing the deployment data in JSON format.
+
+The JSON schema is specified in https://github.com/smartcontractkit/ccip/blob/ccip-develop/integration-tests/ccip-tests/contracts/laneconfig/parse_contracts.go#L96
+
+Example Usage:
+```toml
+[CCIP.Deployments]
+Data = """
+{
+  "lane_configs": {
+    "Arbitrum Mainnet": {
+      "is_native_fee_token": true,
+      "fee_token": "0xf97f4df75117a78c1A5a0DBb814Af92458539FB4",
+      "bridge_tokens": ["0x82aF49447D8a07e3bd95BD0d56f35241523fBab1"],
+      "bridge_tokens_pools": ["0x82aF49947D8a07e3bd95BD0d56f35241523fBab1"],
+      "arm": "0xe06b0e8c4bd455153e8794ad7Ea8Ff5A14B64E4b",
+      "router": "0x141fa059441E0ca23ce184B6A78bafD2A517DdE8",
+      "price_registry": "0x13015e4E6f839E1Aa1016DF521ea458ecA20438c",
+      "wrapped_native": "0x82aF49447D8a07e3bd95BD0d56f35241523fBab1",
+      "src_contracts": {
+        "Ethereum Mainnet": {
+          "on_ramp": "0xCe11020D56e5FDbfE46D9FC3021641FfbBB5AdEE",
+          "deployed_at": 11111111
+        }
+      },
+      "dest_contracts": {
+        "Ethereum Mainnet": {
+          "off_ramp": "0x542ba1902044069330e8c5b36A84EC503863722f",
+          "commit_store": "0x060331fEdA35691e54876D957B4F9e3b8Cb47d20",
+          "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c"
+        }
+      }
+    },
+    "Ethereum Mainnet": {
+      "is_native_fee_token": true,
+      "fee_token": "0x514910771AF9Ca656af840dff83E8264EcF986CA",
+      "bridge_tokens": ["0x8B63b3DE93431C0f756A493644d128134291fA1b"],
+      "bridge_tokens_pools": ["0x8B63b3DE93431C0f756A493644d128134291fA1b"],
+      "arm": "0x8B63b3DE93431C0f756A493644d128134291fA1b",
+      "router": "0x80226fc0Ee2b096224EeAc085Bb9a8cba1146f7D",
+      "price_registry": "0x8c9b2Efb7c64C394119270bfecE7f54763b958Ad",
+      "wrapped_native": "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2",
+      "src_contracts": {
+        "Arbitrum Mainnet": {
+          "on_ramp": "0x925228D7B82d883Dde340A55Fe8e6dA56244A22C",
+          "deployed_at": 11111111
+        }
+      },
+      "dest_contracts": {
+        "Arbitrum Mainnet": {
+          "off_ramp": "0xeFC4a18af59398FF23bfe7325F2401aD44286F4d",
+          "commit_store": "0x9B2EEd6A1e16cB50Ed4c876D2dD69468B21b7749",
+          "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c"
+        }
+      }
+    }
+  }
+}
+"""
+```
+Or
+```toml
+[CCIP.Deployments]
+DataFile = ''
+```
+
+## CCIP.Env
+Specifies the environment details for the test to be run on.
+Mandatory fields are:
+- **Networks**: [CCIP.Env.Networks](#ccipenvnetworks)
+- **NewCLCluster**: [CCIP.Env.NewCLCluster](#ccipenvnewclcluster) - This is mandatory if the test needs to deploy Chainlink nodes.
+- **ExistingCLCluster**: [CCIP.Env.ExistingCLCluster](#ccipenvexistingclcluster) - This is mandatory if the test needs to run on existing Chainlink nodes to deploy ccip jobs.
+
+The test needs network/chain details to be set through configuration. This configuration is mandatory for running the tests.
+You have the option to set the network details in two ways:
+1. Using [CCIP.Env.Networks](#ccipenvnetworks)
+2. Using a separate network config file:
+   * refer to the example - [network_config.toml.example](./examples/network_config.toml.example)
+   * once all necessary values are set, encode the toml file content in base64 format,
+   * set the base64-encoded content in the `BASE64_NETWORK_CONFIG` environment variable.
+
+### CCIP.Env.Networks
+Specifies the network details for the test to be run.
+The NetworkConfig is imported from https://github.com/smartcontractkit/chainlink-testing-framework/blob/main/config/network.go#L39
+
+#### CCIP.Env.Networks.selected_networks
+It denotes the names of the networks on which the tests will be run. These networks are used to deploy ccip contracts and set up lanes between them.
+If more than 2 networks are specified, then lanes will be set up between all possible pairs of networks.
+
+For example, if `selected_networks = ['SIMULATED_1', 'SIMULATED_2', 'SIMULATED_3']`, lanes will be set up between SIMULATED_1 and SIMULATED_2, SIMULATED_1 and SIMULATED_3, and SIMULATED_2 and SIMULATED_3.
+This behaviour can be varied based on the [NoOfNetworks](#ccipgroupstestgroupnoofnetworks), [NetworkPairs](#ccipgroupstestgroupnetworkpairs), and [MaxNoOfLanes](#ccipgroupstestgroupmaxnooflanes) fields in the test config.
+
+The network names are taken from [known_networks](https://github.com/smartcontractkit/chainlink-testing-framework/blob/main/networks/known_networks.go#L884) in chainlink-testing-framework.
+If a network is not present in known_networks, then the network details can be specified in the config file itself under the `EVMNetworks` key described below.
+
+#### CCIP.Env.Network.EVMNetworks
+Specifies the network config to be used while creating the blockchain EVMClient for the test.
+It is a map of network name to `EVMNetwork`, where the key is a network name specified under `CCIP.Env.Networks.selected_networks` and the value is an `EVMNetwork`.
+The EVMNetwork is imported from [EVMNetwork](https://github.com/smartcontractkit/chainlink-testing-framework/blob/main/blockchain/config.go#L43) in chainlink-testing-framework.
+
+If `CCIP.Env.Network.EVMNetworks` config is not set for a network name specified under `CCIP.Env.Networks.selected_networks`, the test will try to find the corresponding network config among the networks defined in `MappedNetworks` under [known_networks.go](https://github.com/smartcontractkit/chainlink-testing-framework/blob/main/networks/known_networks.go).
+
+#### CCIP.Env.Network.AnvilConfigs
+If the test needs to run on chains created using Anvil, the AnvilConfigs can be specified.
+It is a map of network name to `AnvilConfig`, where the key is a network name specified under `CCIP.Env.Networks.selected_networks` and the value is an `AnvilConfig`.
+The AnvilConfig is imported from [AnvilConfig](https://github.com/smartcontractkit/chainlink-testing-framework/blob/main/config/network.go#L20) in chainlink-testing-framework.
+
+**The following network configs are required for tests running on live networks. They can be ignored if the tests are running on simulated networks.**
+Refer to [secrets.toml.example](./examples/secrets.toml.example) for details.
+
+#### CCIP.Env.Network.RpcHttpUrls
+RpcHttpUrls are the RPC HTTP endpoints for each network; the key is the network name as declared in the selected_networks slice.
+
+#### CCIP.Env.Network.RpcWsUrls
+RpcWsUrls are the RPC WS endpoints for each network; the key is the network name as declared in the selected_networks slice.
+
+#### CCIP.Env.Network.WalletKeys
+WalletKeys are the private keys for each network; the key is the network name as declared in the selected_networks slice.
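+
+As a rough illustration, the three live-network tables above could be filled in as in the sketch below. Treat it only as a shape hint: the exact layout should be taken from [secrets.toml.example](./examples/secrets.toml.example), and the network name `SEPOLIA` as well as every value shown is a placeholder, not a real endpoint or key. The full non-secret network example follows.
+
+```toml
+[CCIP.Env.Network.RpcHttpUrls]
+SEPOLIA = ['https://rpc.sepolia.example/REPLACE_ME']
+
+[CCIP.Env.Network.RpcWsUrls]
+SEPOLIA = ['wss://rpc.sepolia.example/REPLACE_ME']
+
+[CCIP.Env.Network.WalletKeys]
+SEPOLIA = ['REPLACE_WITH_FUNDED_PRIVATE_KEY']
+```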
+ +Example Usage of Network Config: + +```toml +[CCIP.Env.Network] +selected_networks= ['PRIVATE-CHAIN-1', 'PRIVATE-CHAIN-2'] + +[CCIP.Env.Network.EVMNetworks.PRIVATE-CHAIN-1] +evm_name = 'private-chain-1' +evm_chain_id = 2337 +evm_urls = ['wss://ignore-this-url.com'] +evm_http_urls = ['https://ignore-this-url.com'] +evm_keys = ['59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d'] +evm_simulated = true +client_implementation = 'Ethereum' +evm_chainlink_transaction_limit = 5000 +evm_transaction_timeout = '3m' +evm_minimum_confirmations = 1 +evm_gas_estimation_buffer = 1000 +evm_supports_eip1559 = true +evm_default_gas_limit = 6000000 +evm_finality_depth = 400 + +[CCIP.Env.Network.EVMNetworks.PRIVATE-CHAIN-2] +evm_name = 'private-chain-2' +evm_chain_id = 1337 +evm_urls = ['wss://ignore-this-url.com'] +evm_http_urls = ['https://ignore-this-url.com'] +evm_keys = ['ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80'] +evm_simulated = true +client_implementation = 'Ethereum' +evm_chainlink_transaction_limit = 5000 +evm_transaction_timeout = '3m' +evm_minimum_confirmations = 1 +evm_gas_estimation_buffer = 1000 +evm_supports_eip1559 = true +evm_default_gas_limit = 6000000 +evm_finality_depth = 400 + +[CCIP.Env.Network.AnvilConfigs.PRIVATE-CHAIN-1] +block_time = 1 + +[CCIP.Env.Network.AnvilConfigs.PRIVATE-CHAIN-2] +block_time = 1 +``` + +### CCIP.Env.NewCLCluster +The NewCLCluster config holds the overall deployment configuration for Chainlink nodes. + +#### CCIP.Env.NewCLCluster.NoOfNodes +Specifies the number of Chainlink nodes to be deployed. + +#### CCIP.Env.NewCLCluster.Common +Specifies the common configuration for all Chainlink nodes if they share the same configuration. +##### Name: +Name of the node. +##### NeedsUpgrade: +Indicates if the node needs an upgrade during test. +##### ChainlinkImage: +Configuration for the Chainlink image. + +##### ChainlinkUpgradeImage: +Configuration for the Chainlink upgrade image. It is used when the node needs an upgrade. + +##### BaseConfigTOML: +String containing the base configuration toml content for the Chainlink node config. + +##### CommonChainConfigTOML: +String containing the common chain configuration toml content for all EVMNodes in chainlink node config. + +##### ChainConfigTOMLByChain: +String containing the chain-specific configuration toml content for individual EVMNodes in chainlink node config. This is keyed by chain ID. + +##### DBImage: +Database image for the Chainlink node. + +##### DBTag: +Database tag/version for the Chainlink node. + +#### CCIP.Env.NewCLCluster.Nodes +Specifies the configuration for individual nodes if they differ from the common configuration. The fields are the same as the common configuration. + +#### CCIP.Env.NewCLCluster.NodeMemory +Specifies the memory to be allocated for each Chainlink node. This is valid only if the deployment is on Kubernetes. + +#### CCIP.Env.NewCLCluster.NodeCPU +Specifies the CPU to be allocated for each Chainlink node. This is valid only if the deployment is on Kubernetes. + +#### CCIP.Env.NewCLCluster.DBMemory +Specifies the memory to be allocated for the database. This is valid only if the deployment is on Kubernetes. + +#### CCIP.Env.NewCLCluster.DBCPU +Specifies the CPU to be allocated for the database. This is valid only if the deployment is on Kubernetes. + +#### CCIP.Env.NewCLCluster.IsStateful +Specifies whether the deployment is StatefulSet on Kubernetes. + +#### CCIP.Env.NewCLCluster.DBStorageClass +Specifies the storage class for the database. 
This is valid only if the deployment is StatefulSet on Kubernetes. + +#### CCIP.Env.NewCLCluster.DBCapacity +Specifies the capacity of the database. This is valid only if the deployment is StatefulSet on Kubernetes. + +#### CCIP.Env.NewCLCluster.PromPgExporter +Specifies whether to enable Prometheus PostgreSQL exporter. This is valid only if the deployment is on Kubernetes. + +#### CCIP.Env.NewCLCluster.DBArgs +Specifies the arguments to be passed to the database. This is valid only if the deployment is on Kubernetes. + +Example Usage: +```toml +[CCIP.Env.NewCLCluster] +NoOfNodes = 17 +NodeMemory = '12Gi' +NodeCPU = '6' +DBMemory = '10Gi' +DBCPU = '2' +DBStorageClass = 'gp3' +PromPgExporter = true +DBCapacity = '50Gi' +IsStateful = true +DBArgs = ['shared_buffers=2048MB', 'effective_cache_size=4096MB', 'work_mem=64MB'] + +[CCIP.Env.NewCLCluster.Common] +Name = 'node1' +DBImage = 'postgres' +DBTag = '13.12' +CommonChainConfigTOML = """ +[HeadTracker] +HistoryDepth = 400 + +[GasEstimator] +PriceMax = '200 gwei' +LimitDefault = 6000000 +FeeCapDefault = '200 gwei' +""" +``` + +### CCIP.Env.ExistingCLCluster +The ExistingCLCluster config holds the overall connection configuration for existing Chainlink nodes. +It is needed when the tests are to be run on Chainlink nodes already deployed on some environment. +If this is specified, test will not need to connect to k8 namespace using [CCIP.Env.EnvToConnect](#ccipenvenvtoconnect) . +Test can directly connect to the existing Chainlink nodes using node credentials without knowing the k8 namespace details. + +#### CCIP.Env.ExistingCLCluster.Name +Specifies the name of the existing Chainlink cluster. This is used to identify the cluster in the test. + +#### CCIP.Env.ExistingCLCluster.NoOfNodes +Specifies the number of Chainlink nodes in the existing cluster. + +#### CCIP.Env.ExistingCLCluster.NodeConfigs +Specifies the configuration for individual nodes in the existing cluster. Each node config contains the following fields to connect to the Chainlink node: +##### CCIP.Env.ExistingCLCluster.NodeConfigs.URL +The URL of the Chainlink node. +##### CCIP.Env.ExistingCLCluster.NodeConfigs.Email +The username/email of the Chainlink node credential. +##### CCIP.Env.ExistingCLCluster.NodeConfigs.Password +The password of the Chainlink node credential. +##### CCIP.Env.ExistingCLCluster.NodeConfigs.InternalIP +The internal IP of the Chainlink node. 
+
+Example Usage:
+```toml
+[CCIP.Env.ExistingCLCluster]
+Name = 'crib-sample'
+NoOfNodes = 5
+
+[[CCIP.Env.ExistingCLCluster.NodeConfigs]]
+URL = 'https://crib-sample-demo-node1.main.stage.cldev.sh/'
+Email = 'notreal@fakeemail.ch'
+Password = 'fj293fbBnlQ!f9vNs'
+InternalIP = 'app-node-1'
+
+[[CCIP.Env.ExistingCLCluster.NodeConfigs]]
+URL = 'https://crib-sample-demo-node2.main.stage.cldev.sh/'
+Email = 'notreal@fakeemail.ch'
+Password = 'fj293fbBnlQ!f9vNs'
+InternalIP = 'app-node-2'
+
+[[CCIP.Env.ExistingCLCluster.NodeConfigs]]
+URL = 'https://crib-sample-demo-node3.main.stage.cldev.sh/'
+Email = 'notreal@fakeemail.ch'
+Password = 'fj293fbBnlQ!f9vNs'
+InternalIP = 'app-node-3'
+
+[[CCIP.Env.ExistingCLCluster.NodeConfigs]]
+URL = 'https://crib-ani-demo-node4.main.stage.cldev.sh/'
+Email = 'notreal@fakeemail.ch'
+Password = 'fj293fbBnlQ!f9vNs'
+InternalIP = 'app-node-4'
+
+[[CCIP.Env.ExistingCLCluster.NodeConfigs]]
+URL = 'https://crib-sample-demo-node5.main.stage.cldev.sh/'
+Email = 'notreal@fakeemail.ch'
+Password = 'fj293fbBnlQ!f9vNs'
+InternalIP = 'app-node-5'
+```
+
+### CCIP.Env.EnvToConnect
+This is specified when the test needs to connect to an already existing k8s namespace. The user needs access to the k8s namespace, via a specific kubeconfig file, to run the tests.
+Example usage:
+```toml
+[CCIP.Env]
+EnvToConnect = "load-ccip-c8972"
+```
+
+### CCIP.Env.TTL
+Specifies the time to live for the k8s namespace. This is used to terminate the namespace after the tests are run. This is only valid if the tests are run on k8s.
+Example usage:
+```toml
+[CCIP.Env]
+TTL = "11h"
+```
+
+### CCIP.Env.Logging
+Specifies the logging configuration for the test. Imported from [LoggingConfig](https://github.com/smartcontractkit/chainlink-testing-framework/blob/main/config/logging.go#L11) in chainlink-testing-framework.
+Example usage:
+```toml
+[CCIP.Env.Logging]
+test_log_collect = false # if set to true, will save logs even if the test did not fail
+
+[CCIP.Env.Logging.LogStream]
+# supported targets: file, loki, in-memory. if empty no logs will be persisted
+log_targets = ["file"]
+# context timeout for starting log producer and also time-frame for requesting logs
+log_producer_timeout = "10s"
+# number of retries before log producer gives up and stops listening to logs
+log_producer_retry_limit = 10
+
+[CCIP.Env.Logging.Loki]
+tenant_id = "..."
+endpoint = "https://loki...."
+
+[CCIP.Env.Logging.Grafana]
+base_url = "https://grafana..../"
+dashboard_url = "/d/6vjVx-1V8/ccip-long-running-tests"
+```
+
+### CCIP.Env.Lane.LeaderLaneEnabled
+Specifies whether to enable the leader lane feature. This setting is only applicable for new deployments.
+
+## CCIP.Groups
+Specifies the test config specific to each test type. Available test types are:
+- **CCIP.Groups.load**
+- **CCIP.Groups.smoke**
+- **CCIP.Groups.chaos**
+
+### CCIP.Groups.[testgroup].KeepEnvAlive
+Specifies whether to keep the k8s namespace alive after the test is run. This is only valid if the tests are run on k8s.
+
+### CCIP.Groups.[testgroup].BiDirectionalLane
+Specifies whether to set up bi-directional lanes between networks.
+
+### CCIP.Groups.[testgroup].CommitAndExecuteOnSameDON
+Specifies whether commit and execution jobs are to be run on the same Chainlink node.
+
+### CCIP.Groups.[testgroup].NoOfCommitNodes
+Specifies the number of nodes on which commit jobs are to be run. This needs to be less than the total number of nodes mentioned in [CCIP.Env.NewCLCluster.NoOfNodes](#ccipenvnewclclusternoofnodes) or [CCIP.Env.ExistingCLCluster.NoOfNodes](#ccipenvexistingclclusternoofnodes).
+If the total number of nodes is `n`, the maximum value of NoOfCommitNodes should be less than `n-1`, as the first node is used for the bootstrap job.
+If NoOfCommitNodes is less than `n-1`, the remaining nodes are used for execution jobs when `CCIP.Groups.[testgroup].CommitAndExecuteOnSameDON` is set to false.
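+
+Putting the group-level fields above together, a minimal smoke-group override could look like the sketch below. The values are purely illustrative; pick them to match your cluster size and environment.
+
+```toml
+[CCIP.Groups.smoke]
+KeepEnvAlive = false
+BiDirectionalLane = true
+CommitAndExecuteOnSameDON = true
+NoOfCommitNodes = 5
+```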
+
+### CCIP.Groups.[testgroup].TokenConfig
+Specifies the token configuration for the test. The token configuration is used to set up tokens and token pools for all chains.
+
+#### CCIP.Groups.[testgroup].TokenConfig.NoOfTokensPerChain
+Specifies the number of tokens to be set up for each chain.
+
+#### CCIP.Groups.[testgroup].TokenConfig.WithPipeline
+Specifies whether to set up token pipelines in the commit jobspec. If set to false, the token prices will be set with DynamicPriceGetterConfig.
+
+#### CCIP.Groups.[testgroup].TokenConfig.TimeoutForPriceUpdate
+Specifies the timeout to wait for token and gas price updates to be available in the price registry for each chain.
+
+#### CCIP.Groups.[testgroup].TokenConfig.NoOfTokensWithDynamicPrice
+Specifies the number of tokens to be set up with dynamic price updates. The rest of the tokens will be set up with a static price. This is only valid if [WithPipeline](#ccipgroupstestgrouptokenconfigwithpipeline) is set to false.
+
+#### CCIP.Groups.[testgroup].TokenConfig.DynamicPriceUpdateInterval
+Specifies the interval for dynamic price updates for tokens. This is only valid if [NoOfTokensWithDynamicPrice](#ccipgroupstestgrouptokenconfignooftokenswithdynamicprice) is set to a value greater than zero.
+
+#### CCIP.Groups.[testgroup].TokenConfig.CCIPOwnerTokens
+Specifies whether the tokens are to be owned by the CCIP owner. If this is false, the tokens and pools will be owned by an address other than the rest of the CCIP contract admin addresses.
+This is applicable only if the contract versions are '1.5' or higher.
+
+Example Usage:
+```toml
+[CCIP.Groups.load.TokenConfig]
+TimeoutForPriceUpdate = '15m'
+NoOfTokensPerChain = 60
+NoOfTokensWithDynamicPrice = 15
+DynamicPriceUpdateInterval = '15s'
+CCIPOwnerTokens = true
+```
+
+### CCIP.Groups.[testgroup].MsgDetails
+Specifies the ccip message details to be sent by the test.
+
+#### CCIP.Groups.[testgroup].MsgDetails.MsgType
+Specifies the type of message to be sent. The supported message types are:
+- **Token**
+- **Data**
+- **DataWithToken**
+
+#### CCIP.Groups.[testgroup].MsgDetails.DestGasLimit
+Specifies the gas limit for the destination chain. This is used in the `ExtraArgs` field of the CCIPMessage. Change this to 0 if you are doing a ccip-send to an EOA on the destination chain.
+
+#### CCIP.Groups.[testgroup].MsgDetails.DataLength
+Specifies the length of data to be sent in the message. This is only valid if [MsgType](#ccipgroupstestgroupmsgdetailsmsgtype) is set to 'Data' or 'DataWithToken'.
+
+#### CCIP.Groups.[testgroup].MsgDetails.NoOfTokens
+Specifies the number of tokens to be sent in the message. This is only valid if [MsgType](#ccipgroupstestgroupmsgdetailsmsgtype) is set to 'Token' or 'DataWithToken'.
+It needs to be less than or equal to [NoOfTokensPerChain](#ccipgroupstestgrouptokenconfignooftokensperchain) specified in the test config.
+
+#### CCIP.Groups.[testgroup].MsgDetails.TokenAmount
+Specifies the amount of each token to be sent in the message.
+
+Example Usage:
+```toml
+[CCIP.Groups.smoke.MsgDetails]
+MsgType = 'DataWithToken'
+DestGasLimit = 100000
+DataLength = 1000
+NoOfTokens = 2
+AmountPerToken = 1
+```
+
+### CCIP.Groups.[testgroup].MulticallInOneTx
+Specifies whether to send multiple ccip messages in a single transaction.
+
+### CCIP.Groups.[testgroup].NoOfSendsInMulticall
+Specifies the number of ccip messages to be sent in a single transaction. This is only valid if [MulticallInOneTx](#ccipgroupstestgroupmulticallinonetx) is set to true.
+
+### CCIP.Groups.[testgroup].PhaseTimeout
+The test validates various events in a ccip request lifecycle, like commit, execute, etc. This field specifies the timeout for each phase in the lifecycle.
+The timeout is calculated from the time the previous phase event is received.
+The following contract events are validated:
+- **CCIPSendRequested on OnRamp**
+- **CCIPSendRequested event log to be Finalized**
+- **ReportAccepted on CommitStore**
+- **TaggedRootBlessed on ARM/RMN**
+- **ExecutionStateChanged on OffRamp**
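+
+For illustration only, these three fields could be combined as follows for the `load` group (placeholder values):
+```toml
+[CCIP.Groups.load]
+MulticallInOneTx = true
+NoOfSendsInMulticall = 5
+PhaseTimeout = '10m'
+```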
+
+### CCIP.Groups.[testgroup].LocalCluster
+Specifies whether the test is to be run on local Docker. If set to true, the test environment will be set up on local Docker.
+
+### CCIP.Groups.[testgroup].ExistingDeployment
+Specifies whether the test is to be run on existing deployments. If set to true, the test will use the deployment data specified in [CCIP.Deployments](#ccipdeployments) for interacting with the ccip contracts.
+If the deployment data does not contain the required contract addresses, the test will fail.
+
+### CCIP.Groups.[testgroup].ReuseContracts
+The test loads the contract/lane config from [contracts.json](../contracts/laneconfig/contracts.json) if no lane config is specified in [CCIP.Deployments](#ccipdeployments).
+If a certain contract is present in contracts.json, the test will use the contract address from contracts.json.
+This field specifies whether to reuse the contracts from [contracts.json](../contracts/laneconfig/contracts.json).
+For example, if contracts.json contains the contract address of the PriceRegistry for `Arbitrum Mainnet`, the test by default will use the contract address from contracts.json instead of redeploying the contract.
+If `ReuseContracts` is set to false, the test will redeploy the contract instead of using the contract address from contracts.json.
+
+### CCIP.Groups.[testgroup].NodeFunding
+Specifies the native token funding for each Chainlink node. It assumes that the native token has 18 decimals.
+The funding is done by the private key specified in [CCIP.Env.Networks](#ccipenvnetworks) for each network.
+The funding is done only if the test is run on local docker or k8s. It is not applicable if [existing deployments](#ccipgroupstestgroupexistingdeployment) is set to true.
+
+### CCIP.Groups.[testgroup].NetworkPairs
+Specifies the network pairs for which the test is to be run. The test will set up lanes only between the specified network pairs.
+If the network pairs are not specified, the test will set up lanes between all possible pairs of networks mentioned in selected_networks in [CCIP.Env.Networks](#ccipenvnetworksselectednetworks).
+
+### CCIP.Groups.[testgroup].NoOfNetworks
+Specifies the number of networks to be used for the test.
+
+If the number of networks is greater than the total number of networks specified in [CCIP.Env.Networks.selected_networks](#ccipenvnetworksselectednetworks):
+- the test will fail if the networks are live networks.
+- the test will create the required number of additional copies of the first network, each with a new chain id, if the networks are simulated networks.
+  For example, if `selected_networks` is ['SIMULATED_1','SIMULATED_2'] and `NoOfNetworks` is 3, the test will create one more network config by copying the network config of `SIMULATED_1` with a different chain id and use that as the 3rd network.
+
+### CCIP.Groups.[testgroup].NoOfRoutersPerPair
+Specifies the number of routers to be set up for each network.
+
+### CCIP.Groups.[testgroup].MaxNoOfLanes
+Specifies the maximum number of lanes to be set up between networks. If this value is not set, the test will set up lanes between all possible pairs of networks mentioned in `selected_networks` in [CCIP.Env.Networks](#ccipenvnetworksselectednetworks).
+For example, if `selected_networks = ['SIMULATED_1', 'SIMULATED_2', 'SIMULATED_3']` and `MaxNoOfLanes` is set to 2, the test will randomly select 2 lanes from all possible pairs of `SIMULATED_1`, `SIMULATED_2`, and `SIMULATED_3` for the test run.
+
+### CCIP.Groups.[testgroup].DenselyConnectedNetworkChainIds
+This is applicable only if [MaxNoOfLanes](#ccipgroupstestgroupmaxnooflanes) is specified.
+Specifies the chain ids of the networks to be densely connected. If this is provided, the test will include all possible pairs of the networks mentioned in `DenselyConnectedNetworkChainIds`.
+The rest of the networks will be connected randomly based on the value of `MaxNoOfLanes`.
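+
+For illustration, the topology and funding fields above might be combined like this for the `load` group (arbitrary placeholder values):
+```toml
+[CCIP.Groups.load]
+NodeFunding = 20.0
+NoOfNetworks = 3
+NoOfRoutersPerPair = 1
+MaxNoOfLanes = 2
+```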
+
+### CCIP.Groups.[testgroup].ChaosDuration
+Specifies the duration for which the chaos experiment is to be run. This is only valid if the test type is 'chaos'.
+
+### CCIP.Groups.[testgroup].USDCMockDeployment
+Specifies whether to deploy the USDC mock contract for the test. This is only valid if the test is not run on [existing deployments](#ccipgroupstestgroupexistingdeployment).
+
+The following fields are used for various parameters in OCR2 commit and execution jobspecs. All of these are only valid if the test is not run on [existing deployments](#ccipgroupstestgroupexistingdeployment).
+### CCIP.Groups.[testgroup].CommitOCRParams
+Specifies the OCR parameters for the commit job. This is only valid if the test is not run on [existing deployments](#ccipgroupstestgroupexistingdeployment).
+
+### CCIP.Groups.[testgroup].ExecOCRParams
+Specifies the OCR parameters for the execute job. This is only valid if the test is not run on [existing deployments](#ccipgroupstestgroupexistingdeployment).
+
+### CCIP.Groups.[testgroup].CommitInflightExpiry
+Specifies the value for `InflightExpiry` in the commit job's offchain config. This is only valid if the test is not run on [existing deployments](#ccipgroupstestgroupexistingdeployment).
+
+### CCIP.Groups.[testgroup].OffRampConfig
+Specifies the offramp configuration for the execution job. This is only valid if the test is not run on [existing deployments](#ccipgroupstestgroupexistingdeployment).
+This is used to set values for the following fields in the execution jobspec's offchain and onchain config:
+- **OffRampConfig.MaxDataBytes**
+- **OffRampConfig.BatchGasLimit**
+- **OffRampConfig.InflightExpiry**
+- **OffRampConfig.RootSnooze**
+
+Example Usage:
+```toml
+[CCIP.Groups.load]
+CommitInflightExpiry = '5m'
+
+[CCIP.Groups.load.CommitOCRParams]
+DeltaProgress = '2m'
+DeltaResend = '5s'
+DeltaRound = '75s'
+DeltaGrace = '5s'
+MaxDurationQuery = '100ms'
+MaxDurationObservation = '35s'
+MaxDurationReport = '10s'
+MaxDurationShouldAcceptFinalizedReport = '5s'
+MaxDurationShouldTransmitAcceptedReport = '10s'
+
+[CCIP.Groups.load.ExecOCRParams]
+DeltaProgress = '2m'
+DeltaResend = '5s'
+DeltaRound = '75s'
+DeltaGrace = '5s'
+MaxDurationQuery = '100ms'
+MaxDurationObservation = '35s'
+MaxDurationReport = '10s'
+MaxDurationShouldAcceptFinalizedReport = '5s'
+MaxDurationShouldTransmitAcceptedReport = '10s'
+
+[CCIP.Groups.load.OffRampConfig]
+BatchGasLimit = 11000000
+MaxDataBytes = 1000
+InflightExpiry = '5m'
+RootSnooze = '5m'
+```
+
+### CCIP.Groups.[testgroup].StoreLaneConfig
+This is only valid if the tests are run on remote runners in k8s. If set to true, the test will store the lane config in the remote runner.
+
+### CCIP.Groups.[testgroup].LoadProfile
+Specifies the load profile for the test. Only valid if the testgroup is 'load'.
+
+### CCIP.Groups.[testgroup].LoadProfile.LoadFrequency.[DestNetworkName]
+Specifies a per-destination-network override for the load frequency fields below (RequestPerUnitTime, TimeUnit, StepDuration). If it is not set for a destination network, the common values below apply to that network.
+
+#### CCIP.Groups.[testgroup].LoadProfile.RequestPerUnitTime
+Specifies the number of requests to be sent per unit time. This is applicable to all networks if [LoadFrequency](#ccipgroupstestgrouploadprofileloadfrequencydestnetworkname) is not specified for a destination network.
+
+#### CCIP.Groups.[testgroup].LoadProfile.TimeUnit
+Specifies the unit of time for the load profile. This is applicable to all networks if [LoadFrequency](#ccipgroupstestgrouploadprofileloadfrequencydestnetworkname) is not specified for a destination network.
+
+#### CCIP.Groups.[testgroup].LoadProfile.StepDuration
+Specifies the duration of each step in the load profile. This is applicable to all networks if [LoadFrequency](#ccipgroupstestgrouploadprofileloadfrequencydestnetworkname) is not specified for a destination network.
+
+#### CCIP.Groups.[testgroup].LoadProfile.TestDuration
+Specifies the total duration of the load test.
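+
+For illustration, a minimal load profile using only the common fields above (placeholder values; two steps of one hour each):
+```toml
+[CCIP.Groups.load.LoadProfile]
+RequestPerUnitTime = [1, 2]
+TimeUnit = '1m'
+StepDuration = ['1h', '1h']
+TestDuration = '2h'
+```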
+
+#### CCIP.Groups.[testgroup].LoadProfile.NetworkChaosDelay
+Specifies the network delay duration used for the `NetworkChaos` experiment. This is only valid if the test is run on k8s and not on [existing deployments](#ccipgroupstestgroupexistingdeployment).
+
+#### CCIP.Groups.[testgroup].LoadProfile.WaitBetweenChaosDuringLoad
+If there are multiple chaos experiments, this specifies the duration to wait between each chaos experiment. This is only valid if the test is run on k8s and not on [existing deployments](#ccipgroupstestgroupexistingdeployment).
+
+#### CCIP.Groups.[testgroup].LoadProfile.SkipRequestIfAnotherRequestTriggeredWithin
+If a request is triggered within this duration, the test will skip sending another request during the load run. For example, if `SkipRequestIfAnotherRequestTriggeredWithin` is set to `40m` and a request is triggered at the 0th second, the test will skip sending another request for another 40m.
+This field is used to avoid sending multiple requests in a short duration during the load run.
+
+#### CCIP.Groups.[testgroup].LoadProfile.OptimizeSpace
+This is used internally to optimize memory usage during the load run. If set to true, after the initial lane set-up is over, the test will discard the lane config to save memory.
+The test will only store the contract addresses strictly necessary to trigger/validate ccip-send requests.
+Except for the following contracts, all other contract addresses will be discarded after the initial lane set-up:
+- Router
+- ARM
+- CommitStore
+- OffRamp
+- OnRamp
+
+#### CCIP.Groups.[testgroup].LoadProfile.FailOnFirstErrorInLoad
+If set to true, the test will fail on the first error encountered during the load run. If set to false, the test will continue to run even if there are errors during the load run.
+
+#### CCIP.Groups.[testgroup].LoadProfile.SendMaxDataInEveryMsgCount
+Specifies the interval, in number of requests, at which a request with maximum data is sent.
+For example, if `SendMaxDataInEveryMsgCount` is set to 5, the test will send a ccip message with the max allowable data length (as set in the onRamp config) in every 5th request.
+
+#### CCIP.Groups.[testgroup].LoadProfile.TestRunName
+Specifies the name of the test run. This is used to identify the test run in the CCIP test dashboard or logs. If multiple tests are run with the same `TestRunName`, the test results will be aggregated under the same test run in the grafana dashboard.
+This is used when multiple iterations of tests are run against the same release version to aggregate the results under the same dashboard view.
+
+#### CCIP.Groups.[testgroup].LoadProfile.MsgProfile
+Specifies the message profile for the test. The message profile is used to set up multiple ccip message details during the load test.
+
+##### CCIP.Groups.[testgroup].LoadProfile.MsgProfile.Frequencies
+Specifies the frequency of each message profile.
+For example, if `Frequencies` is set to [1, 2, 3], the test will send the 1st message profile 1 time, the 2nd message profile 2 times, and the 3rd message profile 3 times in each iteration. Each iteration will be defined by (1+2+3) = 6 requests.
+Example Breakdown:
+- Frequencies = [4, 12, 3, 1]
+- Total Sum of Frequencies = 4 + 12 + 3 + 1 = 20
+- Percentages:
+  - Message Type 1: (4 / 20) * 100% = 20%
+  - Message Type 2: (12 / 20) * 100% = 60%
+  - Message Type 3: (3 / 20) * 100% = 15%
+  - Message Type 4: (1 / 20) * 100% = 5%
+  These percentages reflect how often each message type should appear in the total set of messages.
+  Please note: if the total number of messages is not a multiple of the sum of frequencies, the percentages will not be exact.
+
+##### CCIP.Groups.[testgroup].LoadProfile.MsgProfile.MsgDetails
+Specifies the message details for each message profile. The fields are the same as [CCIP.Groups.[testgroup].MsgDetails](#ccipgroupstestgroupmsgdetails).
+ +example usage: +```toml +# to represent 20%, 60%, 15%, 5% of the total messages +[CCIP.Groups.load.LoadProfile.MsgProfile] +Frequencies = [4,12,3,1] + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'Token' +DestGasLimit = 0 +DataLength = 0 +NoOfTokens = 5 +AmountPerToken = 1 + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'DataWithToken' +DestGasLimit = 500000 +DataLength = 5000 +NoOfTokens = 5 +AmountPerToken = 1 + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'Data' +DestGasLimit = 800000 +DataLength = 10000 + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'Data' +DestGasLimit = 2500000 +DataLength = 10000 +``` \ No newline at end of file diff --git a/integration-tests/ccip-tests/testconfig/ccip.go b/integration-tests/ccip-tests/testconfig/ccip.go new file mode 100644 index 00000000000..60d7055cb31 --- /dev/null +++ b/integration-tests/ccip-tests/testconfig/ccip.go @@ -0,0 +1,416 @@ +package testconfig + +import ( + "fmt" + "math/big" + "os" + + "github.com/AlekSi/pointer" + "github.com/pelletier/go-toml/v2" + "github.com/rs/zerolog" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + ctfconfig "github.com/smartcontractkit/chainlink-testing-framework/config" + ctfK8config "github.com/smartcontractkit/chainlink-testing-framework/k8s/config" + + ccipcontracts "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/contracts" + testutils "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/utils" + "github.com/smartcontractkit/chainlink/integration-tests/contracts" +) + +const ( + CONTRACTS_OVERRIDE_CONFIG string = "BASE64_CCIP_CONFIG_OVERRIDE_CONTRACTS" + TokenOnlyTransfer string = "Token" + DataOnlyTransfer string = "Data" + DataAndTokenTransfer string = "DataWithToken" +) + +type OffRampConfig struct { + MaxDataBytes *uint32 `toml:",omitempty"` + BatchGasLimit *uint32 `toml:",omitempty"` + InflightExpiry *config.Duration `toml:",omitempty"` + RootSnooze *config.Duration `toml:",omitempty"` +} + +type MsgDetails struct { + MsgType *string `toml:",omitempty"` + DestGasLimit *int64 `toml:",omitempty"` + DataLength *int64 `toml:",omitempty"` + NoOfTokens *int `toml:",omitempty"` + AmountPerToken *int64 `toml:",omitempty"` +} + +func (m *MsgDetails) IsTokenTransfer() bool { + return pointer.GetString(m.MsgType) == "Token" || pointer.GetString(m.MsgType) == "DataWithToken" +} + +func (m *MsgDetails) IsDataTransfer() bool { + return pointer.GetString(m.MsgType) == "Data" || pointer.GetString(m.MsgType) == "DataWithToken" +} + +func (m *MsgDetails) TransferAmounts() []*big.Int { + var transferAmounts []*big.Int + if m.IsTokenTransfer() { + for i := 0; i < pointer.GetInt(m.NoOfTokens); i++ { + transferAmounts = append(transferAmounts, big.NewInt(pointer.GetInt64(m.AmountPerToken))) + } + } + return transferAmounts +} + +func (m *MsgDetails) Validate() error { + if m == nil { + return fmt.Errorf("msg details should be set") + } + if m.MsgType == nil { + return fmt.Errorf("msg type should be set") + } + if m.IsDataTransfer() { + if m.DataLength == nil || *m.DataLength == 0 { + return fmt.Errorf("data length should be set and greater than 0") + } + } + if m.DestGasLimit == nil { + return fmt.Errorf("destination gas limit should be set") + } + if pointer.GetString(m.MsgType) != DataOnlyTransfer && + pointer.GetString(m.MsgType) != TokenOnlyTransfer && + pointer.GetString(m.MsgType) != DataAndTokenTransfer { + return fmt.Errorf("msg type should be - %s/%s/%s", DataOnlyTransfer, 
TokenOnlyTransfer, DataAndTokenTransfer) + } + + if m.IsTokenTransfer() { + if pointer.GetInt64(m.AmountPerToken) == 0 { + return fmt.Errorf("token amount should be greater than 0") + } + + if pointer.GetInt(m.NoOfTokens) == 0 { + return fmt.Errorf("number of tokens in msg should be greater than 0") + } + } + + return nil +} + +// TokenConfig defines the configuration for tokens in a CCIP test group +type TokenConfig struct { + NoOfTokensPerChain *int `toml:",omitempty"` + WithPipeline *bool `toml:",omitempty"` + TimeoutForPriceUpdate *config.Duration `toml:",omitempty"` + NoOfTokensWithDynamicPrice *int `toml:",omitempty"` + DynamicPriceUpdateInterval *config.Duration `toml:",omitempty"` + // CCIPOwnerTokens dictates if tokens are deployed and controlled by the default CCIP owner account + // By default, all tokens are deployed and owned by a separate address + CCIPOwnerTokens *bool `toml:",omitempty"` +} + +func (tc *TokenConfig) IsDynamicPriceUpdate() bool { + return tc.NoOfTokensWithDynamicPrice != nil && *tc.NoOfTokensWithDynamicPrice > 0 +} + +func (tc *TokenConfig) IsPipelineSpec() bool { + return pointer.GetBool(tc.WithPipeline) +} + +func (tc *TokenConfig) Validate() error { + if tc == nil { + return fmt.Errorf("token config should be set") + } + if tc.TimeoutForPriceUpdate == nil || tc.TimeoutForPriceUpdate.Duration().Minutes() == 0 { + return fmt.Errorf("timeout for price update should be set") + } + if tc.NoOfTokensWithDynamicPrice != nil && *tc.NoOfTokensWithDynamicPrice > 0 { + if tc.DynamicPriceUpdateInterval == nil || tc.DynamicPriceUpdateInterval.Duration().Minutes() == 0 { + return fmt.Errorf("dynamic price update interval should be set if NoOfTokensWithDynamicPrice is greater than 0") + } + } + return nil +} + +type MsgProfile struct { + MsgDetails *[]*MsgDetails `toml:",omitempty"` + Frequencies []int `toml:",omitempty"` + matrixByFreq []int + mapMsgDetails map[int]*MsgDetails +} + +// msgDetailsIndexMatrixByFrequency creates a matrix of msg details index based on their frequency +// This matrix is used to select a msg detail based on the iteration number +// For example, if we have 3 msg details (msg1,msg2,msg3) with frequencies 2, 3, 5 respectively, +// the matrixByFreq will be [0,0,1,1,1,2,2,2,2,2] +// and mapMsgDetails will be {0:msg1, 1:msg2, 2:msg3} +// So, for iteration 0, msg1 will be selected, for iteration 1, msg1 will be selected, for iteration 2, msg2 will be selected and so on +// This is useful to select a msg detail based on the iteration number +func (m *MsgProfile) msgDetailsIndexMatrixByFrequency() { + m.mapMsgDetails = make(map[int]*MsgDetails) + for i, msg := range *m.MsgDetails { + m.mapMsgDetails[i] = msg + } + m.matrixByFreq = make([]int, 0) + for i, freq := range m.Frequencies { + for j := 0; j < freq; j++ { + m.matrixByFreq = append(m.matrixByFreq, i) + } + } + // we do not need frequencies and msg details after creating the matrix + m.Frequencies = nil + m.MsgDetails = nil +} + +// MsgDetailsForIteration returns the msg details for the given iteration +// The iteration is used to select the msg details based on their frequency +// Refer to msgDetailsIndexMatrixByFrequency for more details +// If the iteration is greater than the number of matrixByFreq, it will loop back to the first msg detail +// if the final iteration in a load run is lesser than the number of matrixByFreq, there is a chance that some of the msg details might not be selected +func (m *MsgProfile) MsgDetailsForIteration(it int64) *MsgDetails { + index := (it - 1) % 
int64(len(m.matrixByFreq)) + return m.mapMsgDetails[m.matrixByFreq[index]] +} + +// MsgDetailWithMaxToken returns the msg details with the max no of tokens in the msg profile +func (m *MsgProfile) MsgDetailWithMaxToken() *MsgDetails { + allDetails := *m.MsgDetails + msgDetails := allDetails[0] + for _, msg := range allDetails { + if msg.NoOfTokens != nil && pointer.GetInt(msg.NoOfTokens) > pointer.GetInt(msgDetails.NoOfTokens) { + msgDetails = msg + } + } + return msgDetails +} + +func (m *MsgProfile) Validate() error { + if m == nil { + return fmt.Errorf("msg profile should be set") + } + if m.MsgDetails == nil { + return fmt.Errorf("msg details should be set") + } + allDetails := *m.MsgDetails + if len(allDetails) == 0 { + return fmt.Errorf("msg details should be set") + } + if len(m.Frequencies) == 0 { + return fmt.Errorf("frequencies should be set") + } + if len(allDetails) != len(m.Frequencies) { + return fmt.Errorf("number of msg details %d and frequencies %d should be same", len(allDetails), len(m.Frequencies)) + } + for _, msg := range allDetails { + if err := msg.Validate(); err != nil { + return err + } + } + return nil +} + +type LoadFrequency struct { + RequestPerUnitTime []int64 `toml:",omitempty"` + TimeUnit *config.Duration `toml:",omitempty"` + StepDuration []*config.Duration `toml:",omitempty"` +} + +type LoadProfile struct { + MsgProfile *MsgProfile `toml:",omitempty"` + FrequencyByDestination map[string]*LoadFrequency `toml:",omitempty"` + RequestPerUnitTime []int64 `toml:",omitempty"` + TimeUnit *config.Duration `toml:",omitempty"` + StepDuration []*config.Duration `toml:",omitempty"` + TestDuration *config.Duration `toml:",omitempty"` + NetworkChaosDelay *config.Duration `toml:",omitempty"` + WaitBetweenChaosDuringLoad *config.Duration `toml:",omitempty"` + SkipRequestIfAnotherRequestTriggeredWithin *config.Duration `toml:",omitempty"` + OptimizeSpace *bool `toml:",omitempty"` + FailOnFirstErrorInLoad *bool `toml:",omitempty"` + SendMaxDataInEveryMsgCount *int64 `toml:",omitempty"` + TestRunName string `toml:",omitempty"` +} + +func (l *LoadProfile) Validate() error { + if l == nil { + return fmt.Errorf("load profile should be set") + } + if err := l.MsgProfile.Validate(); err != nil { + return err + } + if len(l.RequestPerUnitTime) == 0 { + return fmt.Errorf("request per unit time should be set") + } + if l.TimeUnit == nil || l.TimeUnit.Duration().Minutes() == 0 { + return fmt.Errorf("time unit should be set") + } + if l.TestDuration == nil || l.TestDuration.Duration().Minutes() == 0 { + return fmt.Errorf("test duration should be set") + } + return nil +} + +func (l *LoadProfile) SetTestRunName(name string) { + if l.TestRunName == "" && name != "" { + l.TestRunName = name + } +} + +// CCIPTestGroupConfig defines configuration input to change how a particular CCIP test group should run +type CCIPTestGroupConfig struct { + Type string `toml:",omitempty"` + KeepEnvAlive *bool `toml:",omitempty"` + BiDirectionalLane *bool `toml:",omitempty"` + CommitAndExecuteOnSameDON *bool `toml:",omitempty"` + NoOfCommitNodes int `toml:",omitempty"` + MsgDetails *MsgDetails `toml:",omitempty"` + TokenConfig *TokenConfig `toml:",omitempty"` + MulticallInOneTx *bool `toml:",omitempty"` + NoOfSendsInMulticall int `toml:",omitempty"` + PhaseTimeout *config.Duration `toml:",omitempty"` + LocalCluster *bool `toml:",omitempty"` + ExistingDeployment *bool `toml:",omitempty"` + ReuseContracts *bool `toml:",omitempty"` + NodeFunding float64 `toml:",omitempty"` + NetworkPairs []string 
`toml:",omitempty"` + DenselyConnectedNetworkChainIds []string `toml:",omitempty"` + NoOfNetworks int `toml:",omitempty"` + NoOfRoutersPerPair int `toml:",omitempty"` + MaxNoOfLanes int `toml:",omitempty"` + ChaosDuration *config.Duration `toml:",omitempty"` + USDCMockDeployment *bool `toml:",omitempty"` + CommitOCRParams *contracts.OffChainAggregatorV2Config `toml:",omitempty"` + ExecOCRParams *contracts.OffChainAggregatorV2Config `toml:",omitempty"` + OffRampConfig *OffRampConfig `toml:",omitempty"` + CommitInflightExpiry *config.Duration `toml:",omitempty"` + StoreLaneConfig *bool `toml:",omitempty"` + LoadProfile *LoadProfile `toml:",omitempty"` +} + +func (c *CCIPTestGroupConfig) Validate() error { + if c.Type == Load { + if err := c.LoadProfile.Validate(); err != nil { + return err + } + if c.MsgDetails == nil { + c.MsgDetails = c.LoadProfile.MsgProfile.MsgDetailWithMaxToken() + } + c.LoadProfile.MsgProfile.msgDetailsIndexMatrixByFrequency() + if c.ExistingDeployment != nil && *c.ExistingDeployment { + if c.LoadProfile.TestRunName == "" && os.Getenv(ctfK8config.EnvVarJobImage) != "" { + return fmt.Errorf("test run name should be set if existing deployment is true and test is running in k8s") + } + } + } + err := c.MsgDetails.Validate() + if err != nil { + return err + } + if c.PhaseTimeout != nil && (c.PhaseTimeout.Duration().Minutes() < 1 || c.PhaseTimeout.Duration().Minutes() > 50) { + return fmt.Errorf("phase timeout should be between 1 and 50 minutes") + } + + if c.NoOfCommitNodes < 4 { + return fmt.Errorf("insuffcient number of commit nodes provided") + } + if err := c.TokenConfig.Validate(); err != nil { + return err + } + + if c.MsgDetails.IsTokenTransfer() { + if pointer.GetInt(c.TokenConfig.NoOfTokensPerChain) == 0 { + return fmt.Errorf("number of tokens per chain should be greater than 0") + } + } + if c.MulticallInOneTx != nil { + if c.NoOfSendsInMulticall == 0 { + return fmt.Errorf("number of sends in multisend should be greater than 0 if multisend is true") + } + } + + return nil +} + +type CCIPContractConfig struct { + DataFile *string `toml:",omitempty"` + Data string `toml:",omitempty"` +} + +func (c *CCIPContractConfig) DataFilePath() string { + return pointer.GetString(c.DataFile) +} + +// ContractsData reads the contract config passed in TOML +// CCIPContractConfig can accept contract config in string mentioned in Data field +// It also accepts DataFile. Data takes precedence over DataFile +// If you are providing contract config in DataFile, this will read the content of the file +// and set it to CONTRACTS_OVERRIDE_CONFIG env var in base 64 encoded format. +// This comes handy while running tests in remote runner. It ensures that you won't have to deal with copying the +// DataFile to remote runner pod. Instead, you can pass the base64ed content of the file with the help of +// an env var. 
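+//
+// Illustrative usage sketch (not part of the test code; the file path below is only an example):
+//
+//	cfg := &CCIPContractConfig{DataFile: pointer.ToString("path/to/contracts*.json")}
+//	data, err := cfg.ContractsData() // reads the first file matching the path and sets TEST_BASE64_CCIP_CONFIG_OVERRIDE_CONTRACTS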
+func (c *CCIPContractConfig) ContractsData() ([]byte, error) { + // check if CONTRACTS_OVERRIDE_CONFIG is provided + // load config from env var if specified for contracts + rawConfig := os.Getenv(CONTRACTS_OVERRIDE_CONFIG) + if rawConfig != "" { + err := DecodeConfig(rawConfig, &c) + if err != nil { + return nil, err + } + } + if c == nil { + return nil, nil + } + if c.Data != "" { + return []byte(c.Data), nil + } + // if DataFilePath is given, update c.Data with the content of file so that we can set CONTRACTS_OVERRIDE_CONFIG + // to pass the file content to remote runner with override config var + if c.DataFilePath() != "" { + // if there is regex provided in filepath, reformat the filepath with actual filepath matching the regex + filePath, err := testutils.FirstFileFromMatchingPath(c.DataFilePath()) + if err != nil { + return nil, fmt.Errorf("error finding contract config file %s: %w", c.DataFilePath(), err) + } + dataContent, err := os.ReadFile(filePath) + if err != nil { + return dataContent, fmt.Errorf("error reading contract config file %s : %w", filePath, err) + } + c.Data = string(dataContent) + // encode it to base64 and set to CONTRACTS_OVERRIDE_CONFIG so that the same content can be passed to remote runner + // we add TEST_ prefix to CONTRACTS_OVERRIDE_CONFIG to ensure the env var is ported to remote runner. + _, err = EncodeConfigAndSetEnv(c, fmt.Sprintf("TEST_%s", CONTRACTS_OVERRIDE_CONFIG)) + return dataContent, err + } + return nil, nil +} + +type CCIP struct { + Env *Common `toml:",omitempty"` + ContractVersions map[ccipcontracts.Name]ccipcontracts.Version `toml:",omitempty"` + Deployments *CCIPContractConfig `toml:",omitempty"` + Groups map[string]*CCIPTestGroupConfig `toml:",omitempty"` +} + +func (c *CCIP) Validate() error { + if c.Env != nil { + err := c.Env.Validate() + if err != nil { + return err + } + } + + for name, grp := range c.Groups { + grp.Type = name + if err := grp.Validate(); err != nil { + return err + } + } + return nil +} + +func (c *CCIP) ApplyOverrides(fromCfg *CCIP) error { + if fromCfg == nil { + return nil + } + logBytes, err := toml.Marshal(fromCfg) + if err != nil { + return err + } + return ctfconfig.BytesToAnyTomlStruct(zerolog.Logger{}, "", "", c, logBytes) +} diff --git a/integration-tests/ccip-tests/testconfig/examples/network_config.toml.example b/integration-tests/ccip-tests/testconfig/examples/network_config.toml.example new file mode 100644 index 00000000000..ffed99a7718 --- /dev/null +++ b/integration-tests/ccip-tests/testconfig/examples/network_config.toml.example @@ -0,0 +1,168 @@ +[RpcHttpUrls] +ETHEREUM_MAINNET = [ + 'https:....', + 'https:......', +] +AVALANCHE_MAINNET = [ + 'https:....', + 'https:......', +] +BASE_MAINNET = [ + 'https:....', + 'https:......', +] +ARBITRUM_MAINNET = [ + 'https:....', + 'https:......', +] +BSC_MAINNET = [ + 'https:....', + 'https:......', +] +OPTIMISM_MAINNET = [ + 'https:....', + 'https:......', +] +POLYGON_MAINNET = [ + 'https:....', + 'https:......', +] +WEMIX_MAINNET = [ + 'https:....', + 'https:......', +] +KROMA_MAINNET = [ + 'https:....', + 'https:......', +] + +OPTIMISM_SEPOLIA = [ + 'https:....', + 'https:......', +] +SEPOLIA = [ + 'https:....', + 'https:......', +] +AVALANCHE_FUJI = [ + 'https:....', + 'https:......', +] +ARBITRUM_SEPOLIA = [ + 'https:....', + 'https:......', +] +POLYGON_MUMBAI = [ + 'https:....', + 'https:......', +] +BASE_SEPOLIA = [ + 'https:....', + 'https:......', +] +BSC_TESTNET = [ + 'https:....', + 'https:......', +] +KROMA_SEPOLIA = [ + 'https:....', + 
'https:......', +] +WEMIX_TESTNET = [ + 'https:....', + 'https:......', +] + +[RpcWsUrls] +ETHEREUM_MAINNET = [ + 'wss://......', + 'wss://.........' +] +AVALANCHE_MAINNET = [ + 'wss://......', + 'wss://.........' +] +BASE_MAINNET = [ + 'wss://......', + 'wss://.........' +] +ARBITRUM_MAINNET = [ + 'wss://......', + 'wss://.........' +] +BSC_MAINNET = [ + 'wss://......', + 'wss://.........' +] +POLYGON_MAINNET = [ + 'wss://......', + 'wss://.........' +] +OPTIMISM_MAINNET = [ + 'wss://......', + 'wss://.........' +] +WEMIX_MAINNET = [ + 'wss://......', + 'wss://.........' +] +KROMA_MAINNET = [ + 'wss://......', + 'wss://.........' +] +OPTIMISM_SEPOLIA = [ + 'wss://......', + 'wss://.........' +] +SEPOLIA = [ + 'wss://......', + 'wss://.........' +] +AVALANCHE_FUJI = [ + 'wss://......', + 'wss://.........' +] +ARBITRUM_SEPOLIA = [ + 'wss://......', + 'wss://.........' +] +POLYGON_MUMBAI = [ + 'wss://......', + 'wss://.........' +] +BASE_SEPOLIA = [ + 'wss://......', + 'wss://.........' +] +BSC_TESTNET = [ + 'wss://......', + 'wss://.........' +] +KROMA_SEPOLIA = [ + 'wss://......', + 'wss://.........' +] +WEMIX_TESTNET = [ + 'wss://......', + 'wss://.........' +] + +[WalletKeys] +ETHEREUM_MAINNET = [''] +AVALANCHE_MAINNET = [''] +BASE_MAINNET = [''] +ARBITRUM_MAINNET = [''] +BSC_MAINNET = [''] +POLYGON_MAINNET = [''] +OPTIMISM_MAINNET = [''] +WEMIX_MAINNET = [''] +KROMA_MAINNET = [''] +OPTIMISM_SEPOLIA = [''] +SEPOLIA = [''] +AVALANCHE_FUJI = [''] +ARBITRUM_SEPOLIA = [''] +POLYGON_MUMBAI = [''] +BASE_SEPOLIA = [''] +BSC_TESTNET = [''] +KROMA_SEPOLIA = [''] +WEMIX_TESTNET = [''] \ No newline at end of file diff --git a/integration-tests/ccip-tests/testconfig/examples/override.toml.example b/integration-tests/ccip-tests/testconfig/examples/override.toml.example new file mode 100644 index 00000000000..281a4e8963f --- /dev/null +++ b/integration-tests/ccip-tests/testconfig/examples/override.toml.example @@ -0,0 +1,119 @@ + +[CCIP] +[CCIP.Deployments] +Data = """ +{ + "lane_configs": { + "geth_1337": { + "is_mock_arm": true, + "fee_token": "0x42699A7612A82f1d9C36148af9C77354759b210b", + "bridge_tokens": [ + "0x42699A7612A82f1d9C36148af9C77354759b210b" + ], + "bridge_tokens_pools": [ + "0xecb550de5c73e6690ab4521c03ec9d476617167e" + ], + "arm": "0x99c6a236913907dce5714cfa4a179d4f2c0b93d9", + "router": "0x96c1f5d31c4c627d6e84a046d4790cac4f17d3ed", + "price_registry": "0x625c70baf2dfb2cf06cf6673e6bbad1672427605", + "wrapped_native": "0x9B8397f1B0FEcD3a1a40CdD5E8221Fa461898517", + "src_contracts": { + "geth_2337": { + "on_ramp": "0xe62b93777e666224cc8029c21a31311554e2f10e", + "deployed_at": 71207 + } + }, + "dest_contracts": { + "geth_2337": { + "off_ramp": "0xba1a4f08001416a630e19e34abd260f039874e92", + "commit_store": "0x297e29cd7be020c211495f98a3d794a8ae000165", + "receiver_dapp": "" + } + } + }, + "geth_2337": { + "is_mock_arm": true, + "fee_token": "0x42699A7612A82f1d9C36148af9C77354759b210b", + "bridge_tokens": [ + "0x42699A7612A82f1d9C36148af9C77354759b210b" + ], + "bridge_tokens_pools": [ + "0xc7555581de61f6db45ea28547d6d5e0722ed6fbe" + ], + "arm": "0x2b33e63e99cbb1847a2735e08c61d9034b13a171", + "router": "0x27a107a95b36c4510ea926f0f886ff7772248e66", + "price_registry": "0x4a6ea541263c363478da333239e38e96e2cc8653", + "wrapped_native": "0x9B8397f1B0FEcD3a1a40CdD5E8221Fa461898517", + "src_contracts": { + "geth_1337": { + "on_ramp": "0x96c1f5d31c4c627d6e84a046d4790cac4f17d3ed", + "deployed_at": 71209 + } + }, + "dest_contracts": { + "geth_1337": { + "off_ramp": 
"0xe62b93777e666224cc8029c21a31311554e2f10e", + "commit_store": "0xa1dc9167b1a8f201d15b48bdd5d77f8360845ced", + "receiver_dapp": "" + } + } + } + } +} +""" + +[CCIP.Env] +EnvUser = 'crib-deployment' +Mockserver = 'http://mockserver:1080' + +[CCIP.Env.Network] +selected_networks = ['geth_1337', 'geth_2337'] + +[CCIP.Env.Network.EVMNetworks.geth_1337] +evm_name = 'geth_1337' +evm_chain_id = 1337 +evm_urls = ['wss://chain-alpha-rpc.nodeops.sand.cldev.sh/ws/'] +evm_http_urls = ['https://chain-alpha-rpc.nodeops.sand.cldev.sh/'] +evm_keys = ['8f2a55949038a9610f50fb23b5883af3b4ecb3c3bb792cbcefbd1542c692be63'] +evm_simulated = true +client_implementation = 'Ethereum' +evm_chainlink_transaction_limit = 500000 +evm_transaction_timeout = '2m' +evm_minimum_confirmations = 1 +evm_gas_estimation_buffer = 10000 +evm_supports_eip1559 = false +evm_default_gas_limit = 6000000 +evm_finality_depth = 1 + +[CCIP.Env.Network.EVMNetworks.geth_2337] +evm_name = 'geth_2337' +evm_chain_id = 2337 +evm_urls = ['wss://chain-beta-rpc.nodeops.sand.cldev.sh/ws/'] +evm_http_urls = ['https://chain-beta-rpc.nodeops.sand.cldev.sh/'] +evm_keys = ['8f2a55949038a9610f50fb23b5883af3b4ecb3c3bb792cbcefbd1542c692be63'] +evm_simulated = true +client_implementation = 'Ethereum' +evm_chainlink_transaction_limit = 500000 +evm_transaction_timeout = '2m' +evm_minimum_confirmations = 1 +evm_gas_estimation_buffer = 10000 +evm_supports_eip1559 = false +evm_default_gas_limit = 6000000 +evm_finality_depth = 1 + +[CCIP.Env.ExistingCLCluster] +Name = 'crib-mono-repo-test' + +[CCIP.Groups] +[CCIP.Groups.smoke] +LocalCluster = false +ExistingDeployment = true + + +[CCIP.Groups.smoke.MsgDetails] +MsgType = 'DataWithToken' +DestGasLimit = 100000 +DataLength = 1000 +NoOfTokens = 1 +AmountPerToken = 1 + diff --git a/integration-tests/ccip-tests/testconfig/examples/secrets.toml.example b/integration-tests/ccip-tests/testconfig/examples/secrets.toml.example new file mode 100644 index 00000000000..3045f51759d --- /dev/null +++ b/integration-tests/ccip-tests/testconfig/examples/secrets.toml.example @@ -0,0 +1,52 @@ +# This file contains all secret parameters for ccip tests. +# DO NOT UPDATE THIS FILE WITH ANY SECRET VALUES. +# Use this file as a template for the actual secret file and update all the parameter values accordingly. +# DO NOT COMMIT THE ACTUAL SECRET FILE TO THE REPOSITORY. +[CCIP] +[CCIP.Env] + +# ChainlinkImage is mandatory for all tests. +[CCIP.Env.NewCLCluster] +[CCIP.Env.NewCLCluster.Common] +[CCIP.Env.NewCLCluster.Common.ChainlinkImage] +image = "chainlink-ccip" +version = "latest" + +# Chainlink upgrade image is used only for upgrade tests +#[CCIP.Env.NewCLCluster.Common.ChainlinkUpgradeImage] +#image = "***.dkr.ecr.***.amazonaws.com/chainlink-ccip" +#version = "****" + + +# Networks configuration with rpc urls and wallet keys are mandatory only for tests running on live networks +# The following example is for 3 networks: Ethereum, Base and Arbitrum +# Network configuration can be ignored for tests running on simulated/private networks +[CCIP.Env.Network] +selected_networks= [ + 'ETHEREUM_MAINNET','BASE_MAINNET', 'ARBITRUM_MAINNET', +] + +[CCIP.Env.Network.RpcHttpUrls] +ETHEREUM_MAINNET = ['', ''] +BASE_MAINNET = ['', ''] +ARBITRUM_MAINNET = ['', ''] + +[CCIP.Env.Network.RpcWsUrls] +ETHEREUM_MAINNET = ['', ''] +BASE_MAINNET = ['', ''] +ARBITRUM_MAINNET = ['', ''] + +[CCIP.Env.Network.WalletKeys] +ETHEREUM_MAINNET = [''] +BASE_MAINNET = [''] +ARBITRUM_MAINNET = [''] + +# Used for tests using 1. loki logging for test results. 
+# Mandatory for load tests +[CCIP.Env.Logging.Loki] +tenant_id="" +endpoint="" + +[CCIP.Env.Logging.Grafana] +base_url="" +dashboard_url="/d/6vjVx-1V8/ccip-long-running-tests" diff --git a/integration-tests/ccip-tests/testconfig/global.go b/integration-tests/ccip-tests/testconfig/global.go new file mode 100644 index 00000000000..331737c5fbf --- /dev/null +++ b/integration-tests/ccip-tests/testconfig/global.go @@ -0,0 +1,457 @@ +package testconfig + +import ( + "bytes" + _ "embed" + "encoding/base64" + "fmt" + "os" + "strings" + + "github.com/AlekSi/pointer" + "github.com/pelletier/go-toml/v2" + "github.com/pkg/errors" + "github.com/rs/zerolog/log" + "github.com/smartcontractkit/seth" + + "github.com/smartcontractkit/chainlink-testing-framework/docker/test_env" + "github.com/smartcontractkit/chainlink-testing-framework/networks" + + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + "github.com/smartcontractkit/chainlink-testing-framework/utils/osutil" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + ctfconfig "github.com/smartcontractkit/chainlink-testing-framework/config" + + "github.com/smartcontractkit/chainlink/integration-tests/client" +) + +const ( + OVERIDECONFIG = "BASE64_CCIP_CONFIG_OVERRIDE" + + SECRETSCONFIG = "BASE64_CCIP_SECRETS_CONFIG" + ErrReadConfig = "failed to read TOML config" + ErrUnmarshalConfig = "failed to unmarshal TOML config" + Load string = "load" + Chaos string = "chaos" + Smoke string = "smoke" + ProductCCIP = "CCIP" +) + +var ( + //go:embed tomls/ccip-default.toml + DefaultConfig []byte +) + +func GlobalTestConfig() *Config { + var err error + cfg, err := NewConfig() + if err != nil { + log.Fatal().Err(err).Msg("Failed to load config") + } + return cfg +} + +// GenericConfig is an interface for all product based config types to implement +type GenericConfig interface { + Validate() error + ApplyOverrides(from interface{}) error +} + +// Config is the top level config struct. It contains config for all product based tests. 
+type Config struct { + CCIP *CCIP `toml:",omitempty"` +} + +func (c *Config) Validate() error { + return c.CCIP.Validate() +} + +func (c *Config) TOMLString() string { + buf := new(bytes.Buffer) + err := toml.NewEncoder(buf).Encode(c) + if err != nil { + log.Fatal().Err(err).Msg("Failed to encode config to TOML") + } + return buf.String() +} + +func DecodeConfig(rawConfig string, c any) error { + d, err := base64.StdEncoding.DecodeString(rawConfig) + if err != nil { + return errors.Wrap(err, ErrReadConfig) + } + err = toml.Unmarshal(d, c) + if err != nil { + return errors.Wrap(err, ErrUnmarshalConfig) + } + return nil +} + +// EncodeConfigAndSetEnv encodes the given struct to base64 +// and sets env var ( if not empty) with the encoded base64 string +func EncodeConfigAndSetEnv(c any, envVar string) (string, error) { + srcBytes, err := toml.Marshal(c) + if err != nil { + return "", err + } + encodedStr := base64.StdEncoding.EncodeToString(srcBytes) + if envVar == "" { + return encodedStr, nil + } + return encodedStr, os.Setenv(envVar, encodedStr) +} + +func NewConfig() (*Config, error) { + cfg := &Config{} + var override *Config + var secrets *Config + // load config from default file + err := config.DecodeTOML(bytes.NewReader(DefaultConfig), cfg) + if err != nil { + return nil, errors.Wrap(err, ErrReadConfig) + } + + // load config from env var if specified + rawConfig, _ := osutil.GetEnv(OVERIDECONFIG) + if rawConfig != "" { + err = DecodeConfig(rawConfig, &override) + if err != nil { + return nil, fmt.Errorf("failed to decode override config: %w", err) + } + } + if override != nil { + // apply overrides for all products + if override.CCIP != nil { + if cfg.CCIP == nil { + cfg.CCIP = override.CCIP + } else { + err = cfg.CCIP.ApplyOverrides(override.CCIP) + if err != nil { + return nil, err + } + } + } + } + // read secrets for all products + if cfg.CCIP != nil { + // load config from env var if specified for secrets + secretRawConfig, _ := osutil.GetEnv(SECRETSCONFIG) + if secretRawConfig != "" { + err = DecodeConfig(secretRawConfig, &secrets) + if err != nil { + return nil, fmt.Errorf("failed to decode secrets config: %w", err) + } + if secrets != nil { + // apply secrets for all products + if secrets.CCIP != nil { + err = cfg.CCIP.ApplyOverrides(secrets.CCIP) + if err != nil { + return nil, fmt.Errorf("failed to apply secrets: %w", err) + } + } + } + } + // validate all products + err = cfg.CCIP.Validate() + if err != nil { + return nil, err + } + } + + return cfg, nil +} + +// Common is the generic config struct which can be used with product specific configs. +// It contains generic DON and networks config which can be applied to all product based tests. 
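+//
+// In TOML this maps to the [CCIP.Env] table, for example (illustrative values only):
+//
+//	[CCIP.Env]
+//	EnvToConnect = "load-ccip-c8972"
+//	TTL = "11h"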
+type Common struct { + EnvUser string `toml:",omitempty"` + EnvToConnect *string `toml:",omitempty"` + TTL *config.Duration `toml:",omitempty"` + ExistingCLCluster *CLCluster `toml:",omitempty"` // ExistingCLCluster is the existing chainlink cluster to use, if specified it will be used instead of creating a new one + Mockserver *string `toml:",omitempty"` + NewCLCluster *ChainlinkDeployment `toml:",omitempty"` // NewCLCluster is the new chainlink cluster to create, if specified along with ExistingCLCluster this will be ignored + Network *ctfconfig.NetworkConfig `toml:",omitempty"` + PrivateEthereumNetworks map[string]*ctfconfig.EthereumNetworkConfig `toml:",omitempty"` + Logging *ctfconfig.LoggingConfig `toml:",omitempty"` +} + +func (p *Common) GetNodeConfig() *ctfconfig.NodeConfig { + return &ctfconfig.NodeConfig{ + BaseConfigTOML: p.NewCLCluster.Common.BaseConfigTOML, + CommonChainConfigTOML: p.NewCLCluster.Common.CommonChainConfigTOML, + ChainConfigTOMLByChainID: p.NewCLCluster.Common.ChainConfigTOMLByChain, + } +} + +func (p *Common) GetSethConfig() *seth.Config { + return nil +} + +func (p *Common) Validate() error { + if err := p.Logging.Validate(); err != nil { + return fmt.Errorf("error validating logging config %w", err) + } + if p.Network == nil { + return errors.New("no networks specified") + } + // read the default network config, if specified + p.Network.UpperCaseNetworkNames() + p.Network.OverrideURLsAndKeysFromEVMNetwork() + err := p.Network.Default() + if err != nil { + return fmt.Errorf("error reading default network config %w", err) + } + if err := p.Network.Validate(); err != nil { + return fmt.Errorf("error validating networks config %w", err) + } + if p.NewCLCluster == nil && p.ExistingCLCluster == nil { + return errors.New("no chainlink or existing cluster specified") + } + + for k, v := range p.PrivateEthereumNetworks { + // this is the only value we need to generate dynamically before starting a new simulated chain + if v.EthereumChainConfig != nil { + p.PrivateEthereumNetworks[k].EthereumChainConfig.GenerateGenesisTimestamp() + } + + builder := test_env.NewEthereumNetworkBuilder() + ethNetwork, err := builder.WithExistingConfig(*v).Build() + if err != nil { + return fmt.Errorf("error building private ethereum network ethNetworks %w", err) + } + + p.PrivateEthereumNetworks[k] = ðNetwork.EthereumNetworkConfig + } + + if p.ExistingCLCluster != nil { + if err := p.ExistingCLCluster.Validate(); err != nil { + return fmt.Errorf("error validating existing chainlink cluster config %w", err) + } + if p.Mockserver == nil { + return errors.New("no mockserver specified for existing chainlink cluster") + } + log.Warn().Msg("Using existing chainlink cluster, overriding new chainlink cluster config if specified") + p.NewCLCluster = nil + } else { + if p.NewCLCluster != nil { + if err := p.NewCLCluster.Validate(); err != nil { + return fmt.Errorf("error validating chainlink config %w", err) + } + } + } + return nil +} + +func (p *Common) EVMNetworks() ([]blockchain.EVMNetwork, []string, error) { + evmNetworks := networks.MustGetSelectedNetworkConfig(p.Network) + if len(p.Network.SelectedNetworks) != len(evmNetworks) { + return nil, p.Network.SelectedNetworks, fmt.Errorf("selected networks %v do not match evm networks %v", p.Network.SelectedNetworks, evmNetworks) + } + return evmNetworks, p.Network.SelectedNetworks, nil +} + +func (p *Common) GetLoggingConfig() *ctfconfig.LoggingConfig { + return p.Logging +} + +func (p *Common) GetChainlinkImageConfig() 
*ctfconfig.ChainlinkImageConfig { + return p.NewCLCluster.Common.ChainlinkImage +} + +func (p *Common) GetPyroscopeConfig() *ctfconfig.PyroscopeConfig { + return nil +} + +func (p *Common) GetPrivateEthereumNetworkConfig() *ctfconfig.EthereumNetworkConfig { + return nil +} + +func (p *Common) GetNetworkConfig() *ctfconfig.NetworkConfig { + return p.Network +} + +// Returns Grafana URL from Logging config +func (p *Common) GetGrafanaBaseURL() (string, error) { + if p.Logging.Grafana == nil || p.Logging.Grafana.BaseUrl == nil { + return "", errors.New("grafana base url not set") + } + + return strings.TrimSuffix(*p.Logging.Grafana.BaseUrl, "/"), nil +} + +// Returns Grafana Dashboard URL from Logging config +func (p *Common) GetGrafanaDashboardURL() (string, error) { + if p.Logging.Grafana == nil || p.Logging.Grafana.DashboardUrl == nil { + return "", errors.New("grafana dashboard url not set") + } + + url := *p.Logging.Grafana.DashboardUrl + if !strings.HasPrefix(url, "/") { + url = "/" + url + } + + return url, nil +} + +type CLCluster struct { + Name *string `toml:",omitempty"` + NoOfNodes *int `toml:",omitempty"` + NodeConfigs []*client.ChainlinkConfig `toml:",omitempty"` +} + +func (c *CLCluster) Validate() error { + if c.NoOfNodes == nil || len(c.NodeConfigs) == 0 { + return fmt.Errorf("no chainlink nodes specified") + } + if *c.NoOfNodes != len(c.NodeConfigs) { + return fmt.Errorf("number of nodes %d does not match number of node configs %d", *c.NoOfNodes, len(c.NodeConfigs)) + } + for i, nodeConfig := range c.NodeConfigs { + if nodeConfig.URL == "" { + return fmt.Errorf("node %d url not specified", i+1) + } + if nodeConfig.Password == "" { + return fmt.Errorf("node %d password not specified", i+1) + } + if nodeConfig.Email == "" { + return fmt.Errorf("node %d email not specified", i+1) + } + if nodeConfig.InternalIP == "" { + return fmt.Errorf("node %d internal ip not specified", i+1) + } + } + + return nil +} + +type ChainlinkDeployment struct { + Common *Node `toml:",omitempty"` + NodeMemory string `toml:",omitempty"` + NodeCPU string `toml:",omitempty"` + DBMemory string `toml:",omitempty"` + DBCPU string `toml:",omitempty"` + DBCapacity string `toml:",omitempty"` + DBStorageClass *string `toml:",omitempty"` + PromPgExporter *bool `toml:",omitempty"` + IsStateful *bool `toml:",omitempty"` + DBArgs []string `toml:",omitempty"` + NoOfNodes *int `toml:",omitempty"` + Nodes []*Node `toml:",omitempty"` // to be mentioned only if diff nodes follow diff configs; not required if all nodes follow CommonConfig +} + +func (c *ChainlinkDeployment) Validate() error { + if c.Common == nil { + return errors.New("common config can't be empty") + } + if c.Common.ChainlinkImage == nil { + return errors.New("chainlink image can't be empty") + } + if err := c.Common.ChainlinkImage.Validate(); err != nil { + return err + } + if c.Common.DBImage == "" || c.Common.DBTag == "" { + return errors.New("must provide db image and tag") + } + if c.NoOfNodes == nil { + return errors.New("chainlink config is invalid, NoOfNodes should be specified") + } + if c.Nodes != nil && len(c.Nodes) > 0 { + noOfNodes := pointer.GetInt(c.NoOfNodes) + if noOfNodes != len(c.Nodes) { + return errors.New("chainlink config is invalid, NoOfNodes and Nodes length mismatch") + } + for i := range c.Nodes { + // merge common config with node specific config + c.Nodes[i].Merge(c.Common) + node := c.Nodes[i] + if node.ChainlinkImage == nil { + return fmt.Errorf("node %s: chainlink image can't be empty", node.Name) + } + if err := 
node.ChainlinkImage.Validate(); err != nil { + return fmt.Errorf("node %s: %w", node.Name, err) + } + if node.DBImage == "" || node.DBTag == "" { + return fmt.Errorf("node %s: must provide db image and tag", node.Name) + } + } + } + return nil +} + +type Node struct { + Name string `toml:",omitempty"` + NeedsUpgrade *bool `toml:",omitempty"` + ChainlinkImage *ctfconfig.ChainlinkImageConfig `toml:"ChainlinkImage"` + ChainlinkUpgradeImage *ctfconfig.ChainlinkImageConfig `toml:"ChainlinkUpgradeImage"` + BaseConfigTOML string `toml:",omitempty"` + CommonChainConfigTOML string `toml:",omitempty"` + ChainConfigTOMLByChain map[string]string `toml:",omitempty"` // key is chainID + DBImage string `toml:",omitempty"` + DBTag string `toml:",omitempty"` +} + +// Merge merges non-empty values +func (n *Node) Merge(from *Node) { + if from == nil || n == nil { + return + } + if n.Name == "" { + n.Name = from.Name + } + if n.ChainlinkImage == nil { + if from.ChainlinkImage != nil { + n.ChainlinkImage = &ctfconfig.ChainlinkImageConfig{ + Image: from.ChainlinkImage.Image, + Version: from.ChainlinkImage.Version, + } + } + } else { + if n.ChainlinkImage.Image == nil && from.ChainlinkImage != nil { + n.ChainlinkImage.Image = from.ChainlinkImage.Image + } + if n.ChainlinkImage.Version == nil && from.ChainlinkImage != nil { + n.ChainlinkImage.Version = from.ChainlinkImage.Version + } + } + // merge upgrade image only if the nodes is marked as NeedsUpgrade to true + if pointer.GetBool(n.NeedsUpgrade) { + if n.ChainlinkUpgradeImage == nil { + if from.ChainlinkUpgradeImage != nil { + n.ChainlinkUpgradeImage = &ctfconfig.ChainlinkImageConfig{ + Image: from.ChainlinkUpgradeImage.Image, + Version: from.ChainlinkUpgradeImage.Version, + } + } + } else { + if n.ChainlinkUpgradeImage.Image == nil && from.ChainlinkUpgradeImage != nil { + n.ChainlinkUpgradeImage.Image = from.ChainlinkUpgradeImage.Image + } + if n.ChainlinkUpgradeImage.Version == nil && from.ChainlinkUpgradeImage != nil { + n.ChainlinkUpgradeImage.Version = from.ChainlinkUpgradeImage.Version + } + } + } + + if n.DBImage == "" { + n.DBImage = from.DBImage + } + if n.DBTag == "" { + n.DBTag = from.DBTag + } + if n.BaseConfigTOML == "" { + n.BaseConfigTOML = from.BaseConfigTOML + } + if n.CommonChainConfigTOML == "" { + n.CommonChainConfigTOML = from.CommonChainConfigTOML + } + if n.ChainConfigTOMLByChain == nil { + n.ChainConfigTOMLByChain = from.ChainConfigTOMLByChain + } else { + for k, v := range from.ChainConfigTOMLByChain { + if _, ok := n.ChainConfigTOMLByChain[k]; !ok { + n.ChainConfigTOMLByChain[k] = v + } + } + } +} diff --git a/integration-tests/ccip-tests/testconfig/override/mainnet-secondary.toml b/integration-tests/ccip-tests/testconfig/override/mainnet-secondary.toml new file mode 100644 index 00000000000..7d457774b02 --- /dev/null +++ b/integration-tests/ccip-tests/testconfig/override/mainnet-secondary.toml @@ -0,0 +1,712 @@ +[CCIP] +[CCIP.ContractVersions] +PriceRegistry = '1.2.0' +OffRamp = '1.2.0' +OnRamp = '1.2.0' +TokenPool = '1.4.0' +CommitStore = '1.2.0' + +[CCIP.Deployments] +Data = """ +{ + "lane_configs": { + "Arbitrum Mainnet": { + "is_native_fee_token": true, + "fee_token": "0xf97f4df75117a78c1A5a0DBb814Af92458539FB4", + "bridge_tokens": ["0x82aF49447D8a07e3bd95BD0d56f35241523fBab1"], + "bridge_tokens_pools": ["0x34700F5faE61Ba628c4269BdCbA12DA53bbfa726"], + "arm": "0xe06b0e8c4bd455153e8794ad7Ea8Ff5A14B64E4b", + "router": "0x141fa059441E0ca23ce184B6A78bafD2A517DdE8", + "price_registry": "0x13015e4E6f839E1Aa1016DF521ea458ecA20438c", + 
"wrapped_native": "0x82aF49447D8a07e3bd95BD0d56f35241523fBab1", + "version" : "1.4.0", + "src_contracts": { + "Avalanche Mainnet": { + "on_ramp": "0x05B723f3db92430FbE4395fD03E40Cc7e9D17988", + "deployed_at": 11111111 + }, + "Base Mainnet": { + "on_ramp": "0x77b60F85b25fD501E3ddED6C1fe7bF565C08A22A", + "deployed_at": 11111111 + }, + "BSC Mainnet": { + "on_ramp": "0x79f3ABeCe5A3AFFf32D47F4CFe45e7b65c9a2D91", + "deployed_at": 11111111 + }, + "Ethereum Mainnet": { + "on_ramp": "0xCe11020D56e5FDbfE46D9FC3021641FfbBB5AdEE", + "deployed_at": 11111111 + }, + "Optimism Mainnet": { + "on_ramp": "0xC09b72E8128620C40D89649019d995Cc79f030C3", + "deployed_at": 11111111 + }, + "Polygon Mainnet": { + "on_ramp": "0x122F05F49e90508F089eE8D0d868d1a4f3E5a809", + "deployed_at": 11111111 + }, + "WeMix Mainnet": { + "on_ramp": "0x66a0046ac9FA104eB38B04cfF391CcD0122E6FbC", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Avalanche Mainnet": { + "off_ramp": "0xe0109912157d5B75ea8b3181123Cf32c73bc9920", + "commit_store": "0xDaa61b8Cd85977820f92d1e749E1D9F55Da6CCEA", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Base Mainnet": { + "off_ramp": "0xdB19F77F87661f9be0F557cf9a1ebeCf7D8F206c", + "commit_store": "0x6e37f4c82d9A31cc42B445874dd3c3De97AB553f", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "BSC Mainnet": { + "off_ramp": "0xB1b705c2315fced1B38baE463BE7DDef531e47fA", + "commit_store": "0x310cECbFf14Ad0307EfF762F461a487C1abb90bf", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Ethereum Mainnet": { + "off_ramp": "0x542ba1902044069330e8c5b36A84EC503863722f", + "commit_store": "0x060331fEdA35691e54876D957B4F9e3b8Cb47d20", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Optimism Mainnet": { + "off_ramp": "0xeeed4D86F3E0e6d32A6Ad29d8De6A0Dc91963A5f", + "commit_store": "0xbbB563c4d98020b9c0f3Cc34c2C0Ef9676806E35", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Polygon Mainnet": { + "off_ramp": "0x9bDA7c8DCda4E39aFeB483cc0B7E3C1f6E0D5AB1", + "commit_store": "0x63a0AeaadAe851b990bBD9dc41f5C1B08b32026d", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "WeMix Mainnet": { + "off_ramp": "0xEEf5Fb4c4953F9cA9ab1f25cE590776AfFc2c455", + "commit_store": "0xD268286A277095a9C3C90205110831a84505881c", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "Avalanche Mainnet": { + "is_native_fee_token": true, + "fee_token": "0x5947BB275c521040051D82396192181b413227A3", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0xdFD6C0dc67666DE3bB36b31eec5c7B1542A82C1E", + "router": "0xF4c7E640EdA248ef95972845a62bdC74237805dB", + "price_registry": "0xfA4edD04eaAcDB07c8D73621bc1790eC50D8c489", + "wrapped_native": "0xB31f66AA3C1e785363F0875A1B74E27b85FD66c7", + "src_contracts": { + "Arbitrum Mainnet": { + "on_ramp": "0x98f51B041e493fc4d72B8BD33218480bA0c66DDF", + "deployed_at": 11111111 + }, + "Base Mainnet": { + "on_ramp": "0x268fb4311D2c6CB2bbA01CCA9AC073Fb3bfd1C7c", + "deployed_at": 11111111 + }, + "BSC Mainnet": { + "on_ramp": "0x8eaae6462816CB4957184c48B86afA7642D8Bf2B", + "deployed_at": 11111111 + }, + "Ethereum Mainnet": { + "on_ramp": "0xD0701FcC7818c31935331B02Eb21e91eC71a1704", + "deployed_at": 11111111 + }, + "Optimism Mainnet": { + "on_ramp": "0x8629008887E073260c5434D6CaCFc83C3001d211", + "deployed_at": 11111111 + }, + "Polygon Mainnet": { + "on_ramp": "0x97500490d9126f34cf9aA0126d64623E170319Ef", + "deployed_at": 11111111 + }, + "WeMix 
Mainnet": { + "on_ramp": "0x9b1ed9De069Be4d50957464b359f98eD0Bf34dd5", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Arbitrum Mainnet": { + "off_ramp": "0x770b1375F86E7a9bf30DBe3F97bea67193dC9135", + "commit_store": "0x23E2b34Ce8e12c53f8a39AD4b3FFCa845f8E617C", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Base Mainnet": { + "off_ramp": "0x4d6A796Bc85dcDF41ce9AaEB50B094C6b589748f", + "commit_store": "0xc4C4358FA01a04D6c6FE3b96a351946d4c2715C2", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "BSC Mainnet": { + "off_ramp": "0x83F53Fc798FEbfFbdF84830AD403b9989187a06C", + "commit_store": "0xD8ceCE2D7794385E00Ce3EF94550E732b0A0B959", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Ethereum Mainnet": { + "off_ramp": "0x5B833BD6456c604Eb396C0fBa477aD49e82B1A2a", + "commit_store": "0x23E23958D220B774680f91c2c91a6f2B2f610d7e", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Optimism Mainnet": { + "off_ramp": "0xb68A3EE8bD0A09eE221cf1859Dd5a4d5765188Fe", + "commit_store": "0x83DCeeCf822981F9F8552925eEfd88CAc1905dEA", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Polygon Mainnet": { + "off_ramp": "0x19250aBE66B88F214d02B6f3BF80F4118290C619", + "commit_store": "0x87A0935cE6254dB1252bBac90d1D07D04846aDCA", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "WeMix Mainnet": { + "off_ramp": "0x317dE8bc5c3292E494b6496586696d4966A922B0", + "commit_store": "0x97Fbf3d6DEac16adC721aE9187CeEa1e610aC7Af", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "Base Mainnet": { + "is_native_fee_token": true, + "fee_token": "0x88Fb150BDc53A65fe94Dea0c9BA0a6dAf8C6e196", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0x38660c8CC222c0192b635c2ac09687B4F25cCE5F", + "router": "0x881e3A65B4d4a04dD529061dd0071cf975F58bCD", + "price_registry": "0x6337a58D4BD7Ba691B66341779e8f87d4679923a", + "wrapped_native": "0x4200000000000000000000000000000000000006", + "src_contracts": { + "Arbitrum Mainnet": { + "on_ramp": "0x1E5Ca70d1e7A1B26061125738a880BBeA42FeB21", + "deployed_at": 11111111 + }, + "Avalanche Mainnet": { + "on_ramp": "0xBE5a9E336D9614024B4Fa10D8112671fc9A42d96", + "deployed_at": 11111111 + }, + "BSC Mainnet": { + "on_ramp": "0xdd4Fb402d41Beb0eEeF6CfB1bf445f50bDC8c981", + "deployed_at": 11111111 + }, + "Ethereum Mainnet": { + "on_ramp": "0xDEA286dc0E01Cb4755650A6CF8d1076b454eA1cb", + "deployed_at": 11111111 + }, + "Optimism Mainnet": { + "on_ramp": "0xd952FEAcDd5919Cc5E9454b53bF45d4E73dD6457", + "deployed_at": 11111111 + }, + "Polygon Mainnet": { + "on_ramp": "0x3DB8Bea142e41cA3633890d0e5640F99a895D6A5", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Arbitrum Mainnet": { + "off_ramp": "0x8531E63aE9279a1f0D09eba566CD1b092b95f3D5", + "commit_store": "0x327E13f54c7871a2416006B33B4822eAAD357916", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Avalanche Mainnet": { + "off_ramp": "0x8345F2fF67e5A65e85dc955DE1414832608E00aD", + "commit_store": "0xd0b13be4c53A6262b47C5DDd36F0257aa714F562", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "BSC Mainnet": { + "off_ramp": "0x48a51f5D38BE630Ddd6417Ea2D9052B8efc91a18", + "commit_store": "0xF97127e77252284EC9D4bc13C247c9D1A99F72B0", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Ethereum Mainnet": { + "off_ramp": "0xEC0cFe335a4d53dBA70CB650Ab56eEc32788F0BB", + "commit_store": 
"0x0ae3c2c7FB789bd05A450CD3075D11f6c2Ca4F77", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Optimism Mainnet": { + "off_ramp": "0xf50c0d2a8B6Db60f1D93E60f03d0413D56153E4F", + "commit_store": "0x16f72C15165f7C9d74c12fDF188E399d4d3724e4", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Polygon Mainnet": { + "off_ramp": "0x75F29f058b31106F99caFdc17c9b26ADfcC7b5D7", + "commit_store": "0xb719616E732581B570232DfB13Ca49D27667Af9f", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "BSC Mainnet": { + "is_native_fee_token": true, + "fee_token": "0x404460C6A5EdE2D891e8297795264fDe62ADBB75", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0x3DB43b96B2625F4232e9Df900d464dd2c64C0021", + "router": "0x34B03Cb9086d7D758AC55af71584F81A598759FE", + "price_registry": "0xd64aAbD70A71d9f0A00B99F6EFc1626aA2dD43C7", + "wrapped_native": "0xbb4CdB9CBd36B01bD1cBaEBF2De08d9173bc095c", + "src_contracts": { + "Avalanche Mainnet": { + "on_ramp": "0x6aa72a998859eF93356c6521B72155D355D0Cfd2", + "deployed_at": 11111111 + }, + "Arbitrum Mainnet": { + "on_ramp": "0x2788b46BAcFF49BD89562e6bA5c5FBbbE5Fa92F7", + "deployed_at": 11111111 + }, + "Base Mainnet": { + "on_ramp": "0x70bC7f7a6D936b289bBF5c0E19ECE35B437E2e36", + "deployed_at": 11111111 + }, + "Ethereum Mainnet": { + "on_ramp": "0x0Bf40b034872D0b364f3DCec04C7434a4Da1C8d9", + "deployed_at": 11111111 + }, + "Optimism Mainnet": { + "on_ramp": "0x4FEB11A454C9E8038A8d0aDF599Fe7612ce114bA", + "deployed_at": 11111111 + }, + "Polygon Mainnet": { + "on_ramp": "0x6bD4754D86fc87FE5b463D368f26a3587a08347c", + "deployed_at": 11111111 + }, + "WeMix Mainnet": { + "on_ramp": "0x1467fF8f249f5bc604119Af26a47035886f856BE", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Avalanche Mainnet": { + "off_ramp": "0x37a6fa55fe61061Ae97bF7314Ae270eCF71c5ED3", + "commit_store": "0x1f558F6dcf0224Ef1F78A24814FED548B9602c80", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Arbitrum Mainnet": { + "off_ramp": "0x3DA330fd8Ef10d93cFB7D4f8ecE7BC1F10811feC", + "commit_store": "0x86D55Ff492cfBBAf0c0D42D4EE615144E78b3D02", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Base Mainnet": { + "off_ramp": "0x574c697deab06B805D8780898B3F136a1F4892Dc", + "commit_store": "0x002B164b1dcf4E92F352DC625A01Be0E890EdEea", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Ethereum Mainnet": { + "off_ramp": "0x181Bb1E97b0bDD1D85E741ad0943552D3682cc35", + "commit_store": "0x3fF27A34fF0FA77921C3438e67f58da1a83e9Ce1", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Optimism Mainnet": { + "off_ramp": "0xE7E080C8d62d595a223C577C7C8d1f75d9A5E664", + "commit_store": "0xF4d53346bDb6d393C74B0B72Aa7D6689a3eAad79", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Polygon Mainnet": { + "off_ramp": "0x26af2046Da85d7f6712D5edCa81B9E3b2e7A60Ab", + "commit_store": "0x4C1dA405a789AC2853A69D8290B8B9b47a0374F8", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "WeMix Mainnet": { + "off_ramp": "0xC027C5AEb230008c243Be463A73571e581F94c13", + "commit_store": "0x2EB426C8C54D740d1FC856eB3Ff96feA03957978", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "Ethereum Mainnet": { + "is_native_fee_token": true, + "fee_token": "0x514910771AF9Ca656af840dff83E8264EcF986CA", + "bridge_tokens": ["0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"], + "bridge_tokens_pools": 
["0x69c24c970B65e22Ac26864aF10b2295B7d78f93A"], + "arm": "0x8B63b3DE93431C0f756A493644d128134291fA1b", + "router": "0x80226fc0Ee2b096224EeAc085Bb9a8cba1146f7D", + "price_registry": "0x8c9b2Efb7c64C394119270bfecE7f54763b958Ad", + "wrapped_native": "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2", + "src_contracts": { + "Arbitrum Mainnet": { + "on_ramp": "0x925228D7B82d883Dde340A55Fe8e6dA56244A22C", + "deployed_at": 11111111 + }, + "Avalanche Mainnet": { + "on_ramp": "0x3df8dAe2d123081c4D5E946E655F7c109B9Dd630", + "deployed_at": 11111111 + }, + "Base Mainnet": { + "on_ramp": "0xe2c2AB221AA0b957805f229d2AA57fBE2f4dADf7", + "deployed_at": 11111111 + }, + "BSC Mainnet": { + "on_ramp": "0x91D25A56Db77aD5147437d8B83Eb563D46eBFa69", + "deployed_at": 11111111 + }, + "Optimism Mainnet": { + "on_ramp": "0x86B47d8411006874eEf8E4584BdFD7be8e5549d1", + "deployed_at": 11111111 + }, + "Polygon Mainnet": { + "on_ramp": "0x35F0ca9Be776E4B38659944c257bDd0ba75F1B8B", + "deployed_at": 11111111 + }, + "WeMix Mainnet": { + "on_ramp": "0xCbE7e5DA76dC99Ac317adF6d99137005FDA4E2C4", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Arbitrum Mainnet": { + "off_ramp": "0xeFC4a18af59398FF23bfe7325F2401aD44286F4d", + "commit_store": "0x9B2EEd6A1e16cB50Ed4c876D2dD69468B21b7749", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Avalanche Mainnet": { + "off_ramp": "0x569940e02D4425eac61A7601632eC00d69f75c17", + "commit_store": "0x2aa101BF99CaeF7fc1355D4c493a1fe187A007cE", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Base Mainnet": { + "off_ramp": "0xdf85c8381954694E74abD07488f452b4c2Cddfb3", + "commit_store": "0x8DC27D621c41a32140e22E2a4dAf1259639BAe04", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "BSC Mainnet": { + "off_ramp": "0x7Afe7088aff57173565F4b034167643AA8b9171c", + "commit_store": "0x87c55D48DF6EF7B08153Ab079e76bFEcbb793D75", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Optimism Mainnet": { + "off_ramp": "0xB095900fB91db00E6abD247A5A5AD1cee3F20BF7", + "commit_store": "0x4af4B497c998007eF83ad130318eB2b925a79dc8", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Polygon Mainnet": { + "off_ramp": "0x0af338F0E314c7551bcE0EF516d46d855b0Ee395", + "commit_store": "0xD37a60E8C36E802D2E1a6321832Ee85556Beeb76", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "WeMix Mainnet": { + "off_ramp": "0x3a129e6C18b23d18BA9E6Aa14Dc2e79d1f91c6c5", + "commit_store": "0x31f6ab382DDeb9A316Ab61C3945a5292a50a89AB", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "Kroma Mainnet": { + "is_native_fee_token": true, + "fee_token": "0xC1F6f7622ad37C3f46cDF6F8AA0344ADE80BF450", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0xB59779d3364BC6d71168245f9ebb96469E5a5a98", + "router": "0xE93E8B0d1b1CEB44350C8758ed1E2799CCee31aB", + "price_registry": "0x8155B4710e7bbC90924E957104F94Afd4f95Eca2", + "wrapped_native": "0x4200000000000000000000000000000000000001", + "src_contracts": { + "WeMix Mainnet": { + "on_ramp": "0x3C5Ab46fA1dB1dECD854224654313a69bf9fcAD3", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "WeMix Mainnet": { + "off_ramp": "0x2B555774B3D1dcbcd76efb7751F3c5FbCFABC5C4", + "commit_store": "0x213124614aAf31eBCE7c612A12aac5f8aAD77DE4", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "Optimism Mainnet": { + "is_native_fee_token": true, + "fee_token": "0x350a791Bfc2C21F9Ed5d10980Dad2e2638ffa7f6", + 
"bridge_tokens": ["0x4200000000000000000000000000000000000006"], + "bridge_tokens_pools": ["0x86E715415D8C8435903d1e8204fA1e9784Aa7305"], + "arm": "0x8C7C2C3362a42308BB5c368677Ad321D11693b81", + "router": "0x3206695CaE29952f4b0c22a169725a865bc8Ce0f", + "price_registry": "0xb52545aECE8C73A97E52a146757EC15b90Ed8488", + "wrapped_native": "0x4200000000000000000000000000000000000006", + "src_contracts": { + "Arbitrum Mainnet": { + "on_ramp": "0x0C9BE7Cfd12c735E5aaE047C1dCB845d54E518C3", + "deployed_at": 11111111 + }, + "Avalanche Mainnet": { + "on_ramp": "0xD0D3E757bFBce7ae1881DDD7F6d798DDcE588445", + "deployed_at": 11111111 + }, + "Base Mainnet": { + "on_ramp": "0x0b1760A8112183303c5526C6b24569fd3A274f3B", + "deployed_at": 11111111 + }, + "BSC Mainnet": { + "on_ramp": "0xa3c9544B82846C45BE37593d5d9ACffbE61BF3A6", + "deployed_at": 11111111 + }, + "Ethereum Mainnet": { + "on_ramp": "0x55183Db1d2aE0b63e4c92A64bEF2CBfc2032B127", + "deployed_at": 11111111 + }, + "Polygon Mainnet": { + "on_ramp": "0x6B57145e322c877E7D91Ed8E31266eB5c02F7EfC", + "deployed_at": 11111111 + }, + "WeMix Mainnet": { + "on_ramp": "0x82e9f4C5ec4a84E310d60D462a12042E5cbA0954", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Arbitrum Mainnet": { + "off_ramp": "0x0C9BE7Cfd12c735E5aaE047C1dCB845d54E518C3", + "commit_store": "0x55028780918330FD00a34a61D9a7Efd3f43ca845", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Avalanche Mainnet": { + "off_ramp": "0x8dc6490A6204dF846BaBE809cB695ba17Df1F9B1", + "commit_store": "0xA190660787B6B183Dd82B243eA10e609327c7308", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Base Mainnet": { + "off_ramp": "0xBAE6560eCa9B77Cb047158C783e36F7735C86037", + "commit_store": "0x6168aDF58e1Ad446BaD45c6275Bef60Ef4FFBAb8", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "BSC Mainnet": { + "off_ramp": "0xE14501F2838F2fA1Ceb52E78ABdA289EcE1705EA", + "commit_store": "0xa8DD25B29787527Df283211C24Ac72B17150A696", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Ethereum Mainnet": { + "off_ramp": "0xd2D98Be6a1C241e86C807e51cED6ABb51d044203", + "commit_store": "0x4d75A5cE454b264b187BeE9e189aF1564a68408D", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Polygon Mainnet": { + "off_ramp": "0x7c6221880A1D62506b1A08Dab3Bf695A49AcDD22", + "commit_store": "0x0684076EE3595221861C50cDb9Cb66402Ec11Cb9", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "WeMix Mainnet": { + "off_ramp": "0x3e5B3b7559D39563a74434157b31781322dA712D", + "commit_store": "0x7954372FF6f80908e5A2dC2a19d796A1005f91D2", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "Polygon Mainnet": { + "is_native_fee_token": true, + "fee_token": "0xb0897686c545045aFc77CF20eC7A532E3120E0F1", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0xD7AcF65dA1E1f34b663aB199a474F209bF2b0523", + "router": "0x849c5ED5a80F5B408Dd4969b78c2C8fdf0565Bfe", + "price_registry": "0x30D873664Ba766C983984C7AF9A921ccE36D34e1", + "wrapped_native": "0x0d500B1d8E8eF31E21C99d1Db9A6444d3ADf1270", + "src_contracts": { + "Arbitrum Mainnet": { + "on_ramp": "0xD16D025330Edb91259EEA8ed499daCd39087c295", + "deployed_at": 11111111 + }, + "Avalanche Mainnet": { + "on_ramp": "0x5FA30697e90eB30954895c45b028F7C0dDD39b12", + "deployed_at": 11111111 + }, + "Base Mainnet": { + "on_ramp": "0x20B028A2e0F6CCe3A11f3CE5F2B8986F932e89b4", + "deployed_at": 11111111 + }, + "BSC Mainnet": { + "on_ramp": 
"0xF5b5A2fC11BF46B1669C3B19d98B19C79109Dca9", + "deployed_at": 11111111 + }, + "Ethereum Mainnet": { + "on_ramp": "0xFd77c53AA4eF0E3C01f5Ac012BF7Cc7A3ECf5168", + "deployed_at": 11111111 + }, + "Optimism Mainnet": { + "on_ramp": "0x3111cfbF5e84B5D9BD952dd8e957f4Ca75f728Cf", + "deployed_at": 11111111 + }, + "WeMix Mainnet": { + "on_ramp": "0x5060eF647a1F66BE6eE27FAe3046faf8D53CeB2d", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Arbitrum Mainnet": { + "off_ramp": "0xa8a9eDa2867c2E0CE0d5ECe273961F1EcC3CC25B", + "commit_store": "0xbD4480658dca8496a65046dfD1BDD44EF897Bdb5", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Avalanche Mainnet": { + "off_ramp": "0xB9e3680639c9F0C4e0b02FD81C445094426244Ae", + "commit_store": "0x8c63d4e67f7c4af6FEd2f56A34fB4e01CB807CFF", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Base Mainnet": { + "off_ramp": "0xD0FA7DE2D18A0c59D3fD7dfC7aB4e913C6Aa7b68", + "commit_store": "0xF88053B9DAC8Dd3039a4eFa8639159aaa3F2D4Cb", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "BSC Mainnet": { + "off_ramp": "0x592773924741F0Da889a0dfdab71171Dd11E054C", + "commit_store": "0xEC4d35E1A85f770f4D93BA43a462c9d87Ef7017e", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Ethereum Mainnet": { + "off_ramp": "0x45320085fF051361D301eC1044318213A5387A15", + "commit_store": "0x4Dc771B5ef21ef60c33e2987E092345f2b63aE08", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Optimism Mainnet": { + "off_ramp": "0xBa754ecd3CFA7E9093F688EAc3860cf9D07Fc0AC", + "commit_store": "0x04C0D5302E3D8Ca0A0019141a52a23B59cdb70e4", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "WeMix Mainnet": { + "off_ramp": "0xd7c877ea02310Cce9278D9A048Aa1Bb9aF72F00d", + "commit_store": "0x92A1C927E8E10Ab6A40E5A5154e2300D278d1a67", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "WeMix Mainnet": { + "is_native_fee_token": true, + "fee_token": "0x80f1FcdC96B55e459BF52b998aBBE2c364935d69", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0x07aaC8B69A62dB5bd3d244091916EbF2fac17b76", + "router": "0x7798b795Fde864f4Cd1b124a38Ba9619B7F8A442", + "price_registry": "0x252863688762aD86868D3d3076233Eacd80c7055", + "wrapped_native": "0x7D72b22a74A216Af4a002a1095C8C707d6eC1C5f", + "src_contracts": { + "Arbitrum Mainnet": { + "on_ramp": "0x9aBfd6f4C865610692AB6fb1Be862575809fFabf", + "deployed_at": 11111111 + }, + "Avalanche Mainnet": { + "on_ramp": "0xbE0Cfae74677F8dd16a246a3a5c8cbB1973118f4", + "deployed_at": 11111111 + }, + "BSC Mainnet": { + "on_ramp": "0x56657ec4D15C71f7F3C17ba2b21C853A24Dc5381", + "deployed_at": 11111111 + }, + "Optimism Mainnet": { + "on_ramp": "0x70f3b0FD7e6a4B9B623e9AB859604A9EE03e48BD", + "deployed_at": 11111111 + }, + "Polygon Mainnet": { + "on_ramp": "0x777058C1e1dcE4eB8001F38631a1cd9450816e5a", + "deployed_at": 11111111 + }, + "Ethereum Mainnet": { + "on_ramp": "0x190bcE84CF2d500B878966F4Cf98a50d78f2675E", + "deployed_at": 11111111 + }, + "Kroma Mainnet": { + "on_ramp": "0x47E9AE0A815C94836202E696748A5d5476aD8735", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Arbitrum Mainnet": { + "off_ramp": "0x2ba68a395B72a6E3498D312efeD755ed2f3CF223", + "commit_store": "0xdAeC234DA83F68707Bb8AcB2ee6a01a5FD4c2391", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Avalanche Mainnet": { + "off_ramp": "0xFac907F9a1087B846Faa75A14C5d34A8639233d8", + "commit_store": 
"0xF2812063446c7deD2CA306c67A68364BdDcbEfc5", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "BSC Mainnet": { + "off_ramp": "0x6ec9ca4Cba62cA17c55F05ad2000B46192f02035", + "commit_store": "0x84534BE763366a69710E119c100832955795B34B", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Optimism Mainnet": { + "off_ramp": "0x87220D01DF0fF27149B47227897074653788fd23", + "commit_store": "0xF8dD2be2C6FA43e48A17146380CbEBBB4291807b", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Polygon Mainnet": { + "off_ramp": "0x8f0229804513A9Bc00c1308414AB279Dbc718ae1", + "commit_store": "0x3A85D1b8641d83a87957C6ECF1b62151213e0842", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Ethereum Mainnet": { + "off_ramp": "0xF92Fa796F5307b029c65CA26f322a6D86f211194", + "commit_store": "0xbeC110FF43D52be2066B06525304A9924E16b73b", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Kroma Mainnet": { + "off_ramp": "0xF886d8DC64E544af4835cbf91e5678A54D95B80e", + "commit_store": "0x8794C9534658fdCC44f2FF6645Bf31cf9F6d2d5D", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + } + } +} +""" + +[CCIP.Env] +TTL = '8h' + +[CCIP.Env.Network] +selected_networks = [ + 'ETHEREUM_MAINNET', + 'ARBITRUM_MAINNET', + 'OPTIMISM_MAINNET' + ] + +[CCIP.Groups.load] +NetworkPairs = [ + 'ETHEREUM_MAINNET,OPTIMISM_MAINNET', + 'ETHEREUM_MAINNET,ARBITRUM_MAINNET', + 'ARBITRUM_MAINNET,OPTIMISM_MAINNET' # added as batch 1 +] + +BiDirectionalLane = true +PhaseTimeout = '40m' +ExistingDeployment = true + +[CCIP.Groups.load.TokenConfig] +NoOfTokensPerChain = 1 +CCIPOwnerTokens = true + +[CCIP.Groups.load.LoadProfile] +RequestPerUnitTime = [1] +TimeUnit = '3h' +TestDuration = '24h' +TestRunName = 'mainnet-2.7-ccip1.2' +FailOnFirstErrorInLoad = true +SkipRequestIfAnotherRequestTriggeredWithin = '40m' + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'Data' +DestGasLimit = 0 +DataLength = 100 +NoOfTokens = 1 +AmountPerToken = 1 + +[CCIP.Groups.smoke] +# these are all the valid network pairs +NetworkPairs = [ + 'ETHEREUM_MAINNET,OPTIMISM_MAINNET', + 'ETHEREUM_MAINNET,ARBITRUM_MAINNET', + 'ARBITRUM_MAINNET,OPTIMISM_MAINNET' +] + +BiDirectionalLane = true +DestGasLimit = 0 +PhaseTimeout = '20m' +LocalCluster = false +ExistingDeployment = true +ReuseContracts = true + +[CCIP.Groups.smoke.TokenConfig] +NoOfTokensPerChain = 1 +CCIPOwnerTokens = true + +[CCIP.Groups.smoke.MsgDetails] +MsgType = 'Data' +DestGasLimit = 0 +DataLength = 100 +NoOfTokens = 1 +AmountPerToken = 1 \ No newline at end of file diff --git a/integration-tests/ccip-tests/testconfig/override/mainnet.toml b/integration-tests/ccip-tests/testconfig/override/mainnet.toml new file mode 100644 index 00000000000..72695ba7545 --- /dev/null +++ b/integration-tests/ccip-tests/testconfig/override/mainnet.toml @@ -0,0 +1,767 @@ +[CCIP] +[CCIP.ContractVersions] +PriceRegistry = '1.2.0' +OffRamp = '1.2.0' +OnRamp = '1.2.0' +TokenPool = '1.4.0' +CommitStore = '1.2.0' + +[CCIP.Deployments] +Data = """ +{ + "lane_configs": { + "Arbitrum Mainnet": { + "is_native_fee_token": true, + "fee_token": "0xf97f4df75117a78c1A5a0DBb814Af92458539FB4", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0xe06b0e8c4bd455153e8794ad7Ea8Ff5A14B64E4b", + "router": "0x141fa059441E0ca23ce184B6A78bafD2A517DdE8", + "price_registry": "0x13015e4E6f839E1Aa1016DF521ea458ecA20438c", + "wrapped_native": "0x82aF49447D8a07e3bd95BD0d56f35241523fBab1", + "version" : "1.4.0", + 
"src_contracts": { + "Avalanche Mainnet": { + "on_ramp": "0x05B723f3db92430FbE4395fD03E40Cc7e9D17988", + "deployed_at": 11111111 + }, + "Base Mainnet": { + "on_ramp": "0x77b60F85b25fD501E3ddED6C1fe7bF565C08A22A", + "deployed_at": 11111111 + }, + "BSC Mainnet": { + "on_ramp": "0x79f3ABeCe5A3AFFf32D47F4CFe45e7b65c9a2D91", + "deployed_at": 11111111 + }, + "Ethereum Mainnet": { + "on_ramp": "0xCe11020D56e5FDbfE46D9FC3021641FfbBB5AdEE", + "deployed_at": 11111111 + }, + "Optimism Mainnet": { + "on_ramp": "0xC09b72E8128620C40D89649019d995Cc79f030C3", + "deployed_at": 11111111 + }, + "Polygon Mainnet": { + "on_ramp": "0x122F05F49e90508F089eE8D0d868d1a4f3E5a809", + "deployed_at": 11111111 + }, + "WeMix Mainnet": { + "on_ramp": "0x66a0046ac9FA104eB38B04cfF391CcD0122E6FbC", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Avalanche Mainnet": { + "off_ramp": "0xe0109912157d5B75ea8b3181123Cf32c73bc9920", + "commit_store": "0xDaa61b8Cd85977820f92d1e749E1D9F55Da6CCEA", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Base Mainnet": { + "off_ramp": "0xdB19F77F87661f9be0F557cf9a1ebeCf7D8F206c", + "commit_store": "0x6e37f4c82d9A31cc42B445874dd3c3De97AB553f", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "BSC Mainnet": { + "off_ramp": "0xB1b705c2315fced1B38baE463BE7DDef531e47fA", + "commit_store": "0x310cECbFf14Ad0307EfF762F461a487C1abb90bf", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Ethereum Mainnet": { + "off_ramp": "0x542ba1902044069330e8c5b36A84EC503863722f", + "commit_store": "0x060331fEdA35691e54876D957B4F9e3b8Cb47d20", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Optimism Mainnet": { + "off_ramp": "0xeeed4D86F3E0e6d32A6Ad29d8De6A0Dc91963A5f", + "commit_store": "0xbbB563c4d98020b9c0f3Cc34c2C0Ef9676806E35", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Polygon Mainnet": { + "off_ramp": "0x9bDA7c8DCda4E39aFeB483cc0B7E3C1f6E0D5AB1", + "commit_store": "0x63a0AeaadAe851b990bBD9dc41f5C1B08b32026d", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "WeMix Mainnet": { + "off_ramp": "0xEEf5Fb4c4953F9cA9ab1f25cE590776AfFc2c455", + "commit_store": "0xD268286A277095a9C3C90205110831a84505881c", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "Avalanche Mainnet": { + "is_native_fee_token": true, + "fee_token": "0x5947BB275c521040051D82396192181b413227A3", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0xdFD6C0dc67666DE3bB36b31eec5c7B1542A82C1E", + "router": "0xF4c7E640EdA248ef95972845a62bdC74237805dB", + "price_registry": "0xfA4edD04eaAcDB07c8D73621bc1790eC50D8c489", + "wrapped_native": "0xB31f66AA3C1e785363F0875A1B74E27b85FD66c7", + "src_contracts": { + "Arbitrum Mainnet": { + "on_ramp": "0x98f51B041e493fc4d72B8BD33218480bA0c66DDF", + "deployed_at": 11111111 + }, + "Base Mainnet": { + "on_ramp": "0x268fb4311D2c6CB2bbA01CCA9AC073Fb3bfd1C7c", + "deployed_at": 11111111 + }, + "BSC Mainnet": { + "on_ramp": "0x8eaae6462816CB4957184c48B86afA7642D8Bf2B", + "deployed_at": 11111111 + }, + "Ethereum Mainnet": { + "on_ramp": "0xD0701FcC7818c31935331B02Eb21e91eC71a1704", + "deployed_at": 11111111 + }, + "Optimism Mainnet": { + "on_ramp": "0x8629008887E073260c5434D6CaCFc83C3001d211", + "deployed_at": 11111111 + }, + "Polygon Mainnet": { + "on_ramp": "0x97500490d9126f34cf9aA0126d64623E170319Ef", + "deployed_at": 11111111 + }, + "WeMix Mainnet": { + "on_ramp": "0x9b1ed9De069Be4d50957464b359f98eD0Bf34dd5", + "deployed_at": 
11111111 + } + }, + "dest_contracts": { + "Arbitrum Mainnet": { + "off_ramp": "0x770b1375F86E7a9bf30DBe3F97bea67193dC9135", + "commit_store": "0x23E2b34Ce8e12c53f8a39AD4b3FFCa845f8E617C", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Base Mainnet": { + "off_ramp": "0x4d6A796Bc85dcDF41ce9AaEB50B094C6b589748f", + "commit_store": "0xc4C4358FA01a04D6c6FE3b96a351946d4c2715C2", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "BSC Mainnet": { + "off_ramp": "0x83F53Fc798FEbfFbdF84830AD403b9989187a06C", + "commit_store": "0xD8ceCE2D7794385E00Ce3EF94550E732b0A0B959", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Ethereum Mainnet": { + "off_ramp": "0x5B833BD6456c604Eb396C0fBa477aD49e82B1A2a", + "commit_store": "0x23E23958D220B774680f91c2c91a6f2B2f610d7e", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Optimism Mainnet": { + "off_ramp": "0xb68A3EE8bD0A09eE221cf1859Dd5a4d5765188Fe", + "commit_store": "0x83DCeeCf822981F9F8552925eEfd88CAc1905dEA", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Polygon Mainnet": { + "off_ramp": "0x19250aBE66B88F214d02B6f3BF80F4118290C619", + "commit_store": "0x87A0935cE6254dB1252bBac90d1D07D04846aDCA", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "WeMix Mainnet": { + "off_ramp": "0x317dE8bc5c3292E494b6496586696d4966A922B0", + "commit_store": "0x97Fbf3d6DEac16adC721aE9187CeEa1e610aC7Af", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "Base Mainnet": { + "is_native_fee_token": true, + "fee_token": "0x88Fb150BDc53A65fe94Dea0c9BA0a6dAf8C6e196", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0x38660c8CC222c0192b635c2ac09687B4F25cCE5F", + "router": "0x881e3A65B4d4a04dD529061dd0071cf975F58bCD", + "price_registry": "0x6337a58D4BD7Ba691B66341779e8f87d4679923a", + "wrapped_native": "0x4200000000000000000000000000000000000006", + "src_contracts": { + "Arbitrum Mainnet": { + "on_ramp": "0x1E5Ca70d1e7A1B26061125738a880BBeA42FeB21", + "deployed_at": 11111111 + }, + "Avalanche Mainnet": { + "on_ramp": "0xBE5a9E336D9614024B4Fa10D8112671fc9A42d96", + "deployed_at": 11111111 + }, + "BSC Mainnet": { + "on_ramp": "0xdd4Fb402d41Beb0eEeF6CfB1bf445f50bDC8c981", + "deployed_at": 11111111 + }, + "Ethereum Mainnet": { + "on_ramp": "0xDEA286dc0E01Cb4755650A6CF8d1076b454eA1cb", + "deployed_at": 11111111 + }, + "Optimism Mainnet": { + "on_ramp": "0xd952FEAcDd5919Cc5E9454b53bF45d4E73dD6457", + "deployed_at": 11111111 + }, + "Polygon Mainnet": { + "on_ramp": "0x3DB8Bea142e41cA3633890d0e5640F99a895D6A5", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Arbitrum Mainnet": { + "off_ramp": "0x8531E63aE9279a1f0D09eba566CD1b092b95f3D5", + "commit_store": "0x327E13f54c7871a2416006B33B4822eAAD357916", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Avalanche Mainnet": { + "off_ramp": "0x8345F2fF67e5A65e85dc955DE1414832608E00aD", + "commit_store": "0xd0b13be4c53A6262b47C5DDd36F0257aa714F562", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "BSC Mainnet": { + "off_ramp": "0x48a51f5D38BE630Ddd6417Ea2D9052B8efc91a18", + "commit_store": "0xF97127e77252284EC9D4bc13C247c9D1A99F72B0", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Ethereum Mainnet": { + "off_ramp": "0xEC0cFe335a4d53dBA70CB650Ab56eEc32788F0BB", + "commit_store": "0x0ae3c2c7FB789bd05A450CD3075D11f6c2Ca4F77", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + 
"Optimism Mainnet": { + "off_ramp": "0xf50c0d2a8B6Db60f1D93E60f03d0413D56153E4F", + "commit_store": "0x16f72C15165f7C9d74c12fDF188E399d4d3724e4", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Polygon Mainnet": { + "off_ramp": "0x75F29f058b31106F99caFdc17c9b26ADfcC7b5D7", + "commit_store": "0xb719616E732581B570232DfB13Ca49D27667Af9f", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "BSC Mainnet": { + "is_native_fee_token": true, + "fee_token": "0x404460C6A5EdE2D891e8297795264fDe62ADBB75", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0x3DB43b96B2625F4232e9Df900d464dd2c64C0021", + "router": "0x34B03Cb9086d7D758AC55af71584F81A598759FE", + "price_registry": "0xd64aAbD70A71d9f0A00B99F6EFc1626aA2dD43C7", + "wrapped_native": "0xbb4CdB9CBd36B01bD1cBaEBF2De08d9173bc095c", + "src_contracts": { + "Avalanche Mainnet": { + "on_ramp": "0x6aa72a998859eF93356c6521B72155D355D0Cfd2", + "deployed_at": 11111111 + }, + "Arbitrum Mainnet": { + "on_ramp": "0x2788b46BAcFF49BD89562e6bA5c5FBbbE5Fa92F7", + "deployed_at": 11111111 + }, + "Base Mainnet": { + "on_ramp": "0x70bC7f7a6D936b289bBF5c0E19ECE35B437E2e36", + "deployed_at": 11111111 + }, + "Ethereum Mainnet": { + "on_ramp": "0x0Bf40b034872D0b364f3DCec04C7434a4Da1C8d9", + "deployed_at": 11111111 + }, + "Optimism Mainnet": { + "on_ramp": "0x4FEB11A454C9E8038A8d0aDF599Fe7612ce114bA", + "deployed_at": 11111111 + }, + "Polygon Mainnet": { + "on_ramp": "0x6bD4754D86fc87FE5b463D368f26a3587a08347c", + "deployed_at": 11111111 + }, + "WeMix Mainnet": { + "on_ramp": "0x1467fF8f249f5bc604119Af26a47035886f856BE", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Avalanche Mainnet": { + "off_ramp": "0x37a6fa55fe61061Ae97bF7314Ae270eCF71c5ED3", + "commit_store": "0x1f558F6dcf0224Ef1F78A24814FED548B9602c80", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Arbitrum Mainnet": { + "off_ramp": "0x3DA330fd8Ef10d93cFB7D4f8ecE7BC1F10811feC", + "commit_store": "0x86D55Ff492cfBBAf0c0D42D4EE615144E78b3D02", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Base Mainnet": { + "off_ramp": "0x574c697deab06B805D8780898B3F136a1F4892Dc", + "commit_store": "0x002B164b1dcf4E92F352DC625A01Be0E890EdEea", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Ethereum Mainnet": { + "off_ramp": "0x181Bb1E97b0bDD1D85E741ad0943552D3682cc35", + "commit_store": "0x3fF27A34fF0FA77921C3438e67f58da1a83e9Ce1", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Optimism Mainnet": { + "off_ramp": "0xE7E080C8d62d595a223C577C7C8d1f75d9A5E664", + "commit_store": "0xF4d53346bDb6d393C74B0B72Aa7D6689a3eAad79", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Polygon Mainnet": { + "off_ramp": "0x26af2046Da85d7f6712D5edCa81B9E3b2e7A60Ab", + "commit_store": "0x4C1dA405a789AC2853A69D8290B8B9b47a0374F8", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "WeMix Mainnet": { + "off_ramp": "0xC027C5AEb230008c243Be463A73571e581F94c13", + "commit_store": "0x2EB426C8C54D740d1FC856eB3Ff96feA03957978", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "Ethereum Mainnet": { + "is_native_fee_token": true, + "fee_token": "0x514910771AF9Ca656af840dff83E8264EcF986CA", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0x8B63b3DE93431C0f756A493644d128134291fA1b", + "router": "0x80226fc0Ee2b096224EeAc085Bb9a8cba1146f7D", + "price_registry": "0x8c9b2Efb7c64C394119270bfecE7f54763b958Ad", + 
"wrapped_native": "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2", + "src_contracts": { + "Arbitrum Mainnet": { + "on_ramp": "0x925228D7B82d883Dde340A55Fe8e6dA56244A22C", + "deployed_at": 11111111 + }, + "Avalanche Mainnet": { + "on_ramp": "0x3df8dAe2d123081c4D5E946E655F7c109B9Dd630", + "deployed_at": 11111111 + }, + "Base Mainnet": { + "on_ramp": "0xe2c2AB221AA0b957805f229d2AA57fBE2f4dADf7", + "deployed_at": 11111111 + }, + "BSC Mainnet": { + "on_ramp": "0x91D25A56Db77aD5147437d8B83Eb563D46eBFa69", + "deployed_at": 11111111 + }, + "Optimism Mainnet": { + "on_ramp": "0x86B47d8411006874eEf8E4584BdFD7be8e5549d1", + "deployed_at": 11111111 + }, + "Polygon Mainnet": { + "on_ramp": "0x35F0ca9Be776E4B38659944c257bDd0ba75F1B8B", + "deployed_at": 11111111 + }, + "WeMix Mainnet": { + "on_ramp": "0xCbE7e5DA76dC99Ac317adF6d99137005FDA4E2C4", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Arbitrum Mainnet": { + "off_ramp": "0xeFC4a18af59398FF23bfe7325F2401aD44286F4d", + "commit_store": "0x9B2EEd6A1e16cB50Ed4c876D2dD69468B21b7749", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Avalanche Mainnet": { + "off_ramp": "0x569940e02D4425eac61A7601632eC00d69f75c17", + "commit_store": "0x2aa101BF99CaeF7fc1355D4c493a1fe187A007cE", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Base Mainnet": { + "off_ramp": "0xdf85c8381954694E74abD07488f452b4c2Cddfb3", + "commit_store": "0x8DC27D621c41a32140e22E2a4dAf1259639BAe04", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "BSC Mainnet": { + "off_ramp": "0x7Afe7088aff57173565F4b034167643AA8b9171c", + "commit_store": "0x87c55D48DF6EF7B08153Ab079e76bFEcbb793D75", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Optimism Mainnet": { + "off_ramp": "0xB095900fB91db00E6abD247A5A5AD1cee3F20BF7", + "commit_store": "0x4af4B497c998007eF83ad130318eB2b925a79dc8", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Polygon Mainnet": { + "off_ramp": "0x0af338F0E314c7551bcE0EF516d46d855b0Ee395", + "commit_store": "0xD37a60E8C36E802D2E1a6321832Ee85556Beeb76", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "WeMix Mainnet": { + "off_ramp": "0x3a129e6C18b23d18BA9E6Aa14Dc2e79d1f91c6c5", + "commit_store": "0x31f6ab382DDeb9A316Ab61C3945a5292a50a89AB", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "Kroma Mainnet": { + "is_native_fee_token": true, + "fee_token": "0xC1F6f7622ad37C3f46cDF6F8AA0344ADE80BF450", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0xB59779d3364BC6d71168245f9ebb96469E5a5a98", + "router": "0xE93E8B0d1b1CEB44350C8758ed1E2799CCee31aB", + "price_registry": "0x8155B4710e7bbC90924E957104F94Afd4f95Eca2", + "wrapped_native": "0x4200000000000000000000000000000000000001", + "src_contracts": { + "WeMix Mainnet": { + "on_ramp": "0x3C5Ab46fA1dB1dECD854224654313a69bf9fcAD3", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "WeMix Mainnet": { + "off_ramp": "0x2B555774B3D1dcbcd76efb7751F3c5FbCFABC5C4", + "commit_store": "0x213124614aAf31eBCE7c612A12aac5f8aAD77DE4", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "Optimism Mainnet": { + "is_native_fee_token": true, + "fee_token": "0x350a791Bfc2C21F9Ed5d10980Dad2e2638ffa7f6", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0x8C7C2C3362a42308BB5c368677Ad321D11693b81", + "router": "0x3206695CaE29952f4b0c22a169725a865bc8Ce0f", + "price_registry": "0xb52545aECE8C73A97E52a146757EC15b90Ed8488", + 
"wrapped_native": "0x4200000000000000000000000000000000000006", + "src_contracts": { + "Arbitrum Mainnet": { + "on_ramp": "0x0C9BE7Cfd12c735E5aaE047C1dCB845d54E518C3", + "deployed_at": 11111111 + }, + "Avalanche Mainnet": { + "on_ramp": "0xD0D3E757bFBce7ae1881DDD7F6d798DDcE588445", + "deployed_at": 11111111 + }, + "Base Mainnet": { + "on_ramp": "0x0b1760A8112183303c5526C6b24569fd3A274f3B", + "deployed_at": 11111111 + }, + "BSC Mainnet": { + "on_ramp": "0xa3c9544B82846C45BE37593d5d9ACffbE61BF3A6", + "deployed_at": 11111111 + }, + "Ethereum Mainnet": { + "on_ramp": "0x55183Db1d2aE0b63e4c92A64bEF2CBfc2032B127", + "deployed_at": 11111111 + }, + "Polygon Mainnet": { + "on_ramp": "0x6B57145e322c877E7D91Ed8E31266eB5c02F7EfC", + "deployed_at": 11111111 + }, + "WeMix Mainnet": { + "on_ramp": "0x82e9f4C5ec4a84E310d60D462a12042E5cbA0954", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Arbitrum Mainnet": { + "off_ramp": "0x0C9BE7Cfd12c735E5aaE047C1dCB845d54E518C3", + "commit_store": "0x55028780918330FD00a34a61D9a7Efd3f43ca845", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Avalanche Mainnet": { + "off_ramp": "0x8dc6490A6204dF846BaBE809cB695ba17Df1F9B1", + "commit_store": "0xA190660787B6B183Dd82B243eA10e609327c7308", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Base Mainnet": { + "off_ramp": "0xBAE6560eCa9B77Cb047158C783e36F7735C86037", + "commit_store": "0x6168aDF58e1Ad446BaD45c6275Bef60Ef4FFBAb8", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "BSC Mainnet": { + "off_ramp": "0xE14501F2838F2fA1Ceb52E78ABdA289EcE1705EA", + "commit_store": "0xa8DD25B29787527Df283211C24Ac72B17150A696", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Ethereum Mainnet": { + "off_ramp": "0xd2D98Be6a1C241e86C807e51cED6ABb51d044203", + "commit_store": "0x4d75A5cE454b264b187BeE9e189aF1564a68408D", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Polygon Mainnet": { + "off_ramp": "0x7c6221880A1D62506b1A08Dab3Bf695A49AcDD22", + "commit_store": "0x0684076EE3595221861C50cDb9Cb66402Ec11Cb9", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "WeMix Mainnet": { + "off_ramp": "0x3e5B3b7559D39563a74434157b31781322dA712D", + "commit_store": "0x7954372FF6f80908e5A2dC2a19d796A1005f91D2", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "Polygon Mainnet": { + "is_native_fee_token": true, + "fee_token": "0xb0897686c545045aFc77CF20eC7A532E3120E0F1", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0xD7AcF65dA1E1f34b663aB199a474F209bF2b0523", + "router": "0x849c5ED5a80F5B408Dd4969b78c2C8fdf0565Bfe", + "price_registry": "0x30D873664Ba766C983984C7AF9A921ccE36D34e1", + "wrapped_native": "0x0d500B1d8E8eF31E21C99d1Db9A6444d3ADf1270", + "src_contracts": { + "Arbitrum Mainnet": { + "on_ramp": "0xD16D025330Edb91259EEA8ed499daCd39087c295", + "deployed_at": 11111111 + }, + "Avalanche Mainnet": { + "on_ramp": "0x5FA30697e90eB30954895c45b028F7C0dDD39b12", + "deployed_at": 11111111 + }, + "Base Mainnet": { + "on_ramp": "0x20B028A2e0F6CCe3A11f3CE5F2B8986F932e89b4", + "deployed_at": 11111111 + }, + "BSC Mainnet": { + "on_ramp": "0xF5b5A2fC11BF46B1669C3B19d98B19C79109Dca9", + "deployed_at": 11111111 + }, + "Ethereum Mainnet": { + "on_ramp": "0xFd77c53AA4eF0E3C01f5Ac012BF7Cc7A3ECf5168", + "deployed_at": 11111111 + }, + "Optimism Mainnet": { + "on_ramp": "0x3111cfbF5e84B5D9BD952dd8e957f4Ca75f728Cf", + "deployed_at": 11111111 + }, + "WeMix Mainnet": { + "on_ramp": 
"0x5060eF647a1F66BE6eE27FAe3046faf8D53CeB2d", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Arbitrum Mainnet": { + "off_ramp": "0xa8a9eDa2867c2E0CE0d5ECe273961F1EcC3CC25B", + "commit_store": "0xbD4480658dca8496a65046dfD1BDD44EF897Bdb5", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Avalanche Mainnet": { + "off_ramp": "0xB9e3680639c9F0C4e0b02FD81C445094426244Ae", + "commit_store": "0x8c63d4e67f7c4af6FEd2f56A34fB4e01CB807CFF", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Base Mainnet": { + "off_ramp": "0xD0FA7DE2D18A0c59D3fD7dfC7aB4e913C6Aa7b68", + "commit_store": "0xF88053B9DAC8Dd3039a4eFa8639159aaa3F2D4Cb", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "BSC Mainnet": { + "off_ramp": "0x592773924741F0Da889a0dfdab71171Dd11E054C", + "commit_store": "0xEC4d35E1A85f770f4D93BA43a462c9d87Ef7017e", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Ethereum Mainnet": { + "off_ramp": "0x45320085fF051361D301eC1044318213A5387A15", + "commit_store": "0x4Dc771B5ef21ef60c33e2987E092345f2b63aE08", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Optimism Mainnet": { + "off_ramp": "0xBa754ecd3CFA7E9093F688EAc3860cf9D07Fc0AC", + "commit_store": "0x04C0D5302E3D8Ca0A0019141a52a23B59cdb70e4", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "WeMix Mainnet": { + "off_ramp": "0xd7c877ea02310Cce9278D9A048Aa1Bb9aF72F00d", + "commit_store": "0x92A1C927E8E10Ab6A40E5A5154e2300D278d1a67", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "WeMix Mainnet": { + "is_native_fee_token": true, + "fee_token": "0x80f1FcdC96B55e459BF52b998aBBE2c364935d69", + "bridge_tokens": [], + "bridge_tokens_pools": [], + "arm": "0x07aaC8B69A62dB5bd3d244091916EbF2fac17b76", + "router": "0x7798b795Fde864f4Cd1b124a38Ba9619B7F8A442", + "price_registry": "0x252863688762aD86868D3d3076233Eacd80c7055", + "wrapped_native": "0x7D72b22a74A216Af4a002a1095C8C707d6eC1C5f", + "src_contracts": { + "Arbitrum Mainnet": { + "on_ramp": "0x9aBfd6f4C865610692AB6fb1Be862575809fFabf", + "deployed_at": 11111111 + }, + "Avalanche Mainnet": { + "on_ramp": "0xbE0Cfae74677F8dd16a246a3a5c8cbB1973118f4", + "deployed_at": 11111111 + }, + "BSC Mainnet": { + "on_ramp": "0x56657ec4D15C71f7F3C17ba2b21C853A24Dc5381", + "deployed_at": 11111111 + }, + "Optimism Mainnet": { + "on_ramp": "0x70f3b0FD7e6a4B9B623e9AB859604A9EE03e48BD", + "deployed_at": 11111111 + }, + "Polygon Mainnet": { + "on_ramp": "0x777058C1e1dcE4eB8001F38631a1cd9450816e5a", + "deployed_at": 11111111 + }, + "Ethereum Mainnet": { + "on_ramp": "0x190bcE84CF2d500B878966F4Cf98a50d78f2675E", + "deployed_at": 11111111 + }, + "Kroma Mainnet": { + "on_ramp": "0x47E9AE0A815C94836202E696748A5d5476aD8735", + "deployed_at": 11111111 + } + }, + "dest_contracts": { + "Arbitrum Mainnet": { + "off_ramp": "0x2ba68a395B72a6E3498D312efeD755ed2f3CF223", + "commit_store": "0xdAeC234DA83F68707Bb8AcB2ee6a01a5FD4c2391", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Avalanche Mainnet": { + "off_ramp": "0xFac907F9a1087B846Faa75A14C5d34A8639233d8", + "commit_store": "0xF2812063446c7deD2CA306c67A68364BdDcbEfc5", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "BSC Mainnet": { + "off_ramp": "0x6ec9ca4Cba62cA17c55F05ad2000B46192f02035", + "commit_store": "0x84534BE763366a69710E119c100832955795B34B", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Optimism Mainnet": { + "off_ramp": 
"0x87220D01DF0fF27149B47227897074653788fd23", + "commit_store": "0xF8dD2be2C6FA43e48A17146380CbEBBB4291807b", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Polygon Mainnet": { + "off_ramp": "0x8f0229804513A9Bc00c1308414AB279Dbc718ae1", + "commit_store": "0x3A85D1b8641d83a87957C6ECF1b62151213e0842", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Ethereum Mainnet": { + "off_ramp": "0xF92Fa796F5307b029c65CA26f322a6D86f211194", + "commit_store": "0xbeC110FF43D52be2066B06525304A9924E16b73b", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Kroma Mainnet": { + "off_ramp": "0xF886d8DC64E544af4835cbf91e5678A54D95B80e", + "commit_store": "0x8794C9534658fdCC44f2FF6645Bf31cf9F6d2d5D", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + } + } +} +""" + +[CCIP.Env] +TTL = '8h' + +[CCIP.Env.Network] +selected_networks = [ + 'ETHEREUM_MAINNET', + 'ARBITRUM_MAINNET', + 'BASE_MAINNET', + 'WEMIX_MAINNET', + 'OPTIMISM_MAINNET', + 'POLYGON_MAINNET', + 'AVALANCHE_MAINNET', + 'BSC_MAINNET', + 'KROMA_MAINNET' + ] + +[CCIP.Groups.load] +NetworkPairs = [ + 'ETHEREUM_MAINNET,OPTIMISM_MAINNET', + 'ETHEREUM_MAINNET,AVALANCHE_MAINNET', + 'ETHEREUM_MAINNET,POLYGON_MAINNET', + 'ETHEREUM_MAINNET,BSC_MAINNET', + 'ETHEREUM_MAINNET,ARBITRUM_MAINNET', + 'ETHEREUM_MAINNET,BASE_MAINNET', + 'ETHEREUM_MAINNET,WEMIX_MAINNET', + 'AVALANCHE_MAINNET,POLYGON_MAINNET', + 'BASE_MAINNET,OPTIMISM_MAINNET', + 'BASE_MAINNET,ARBITRUM_MAINNET', + 'AVALANCHE_MAINNET,BSC_MAINNET', + 'BSC_MAINNET,POLYGON_MAINNET', + 'OPTIMISM_MAINNET,POLYGON_MAINNET', + 'BASE_MAINNET,BSC_MAINNET', + 'POLYGON_MAINNET,ARBITRUM_MAINNET', # added as batch 1 + 'ARBITRUM_MAINNET,BSC_MAINNET', # added as batch 1 + 'ARBITRUM_MAINNET,OPTIMISM_MAINNET', # added as batch 1 + 'AVALANCHE_MAINNET,OPTIMISM_MAINNET', # added as batch 2 + 'AVALANCHE_MAINNET,ARBITRUM_MAINNET', # added as batch 2 + 'BASE_MAINNET,POLYGON_MAINNET', # added as batch 2 + 'BSC_MAINNET,OPTIMISM_MAINNET', # added as batch 2 + 'AVALANCHE_MAINNET,BASE_MAINNET', # added as batch 2 + 'WEMIX_MAINNET,KROMA_MAINNET', + 'BSC_MAINNET,WEMIX_MAINNET', # added as batch 2 + 'AVALANCHE_MAINNET,WEMIX_MAINNET', # added as batch 2 + 'POLYGON_MAINNET,WEMIX_MAINNET', # added as batch 2 + 'WEMIX_MAINNET,ARBITRUM_MAINNET', # added as batch 2 + 'OPTIMISM_MAINNET,WEMIX_MAINNET' # added as batch 2 +] + +BiDirectionalLane = true +PhaseTimeout = '20m' +ExistingDeployment = true + +[CCIP.Groups.load.TokenConfig] +NoOfTokensPerChain = 1 + +[CCIP.Groups.load.LoadProfile] +RequestPerUnitTime = [1] +TimeUnit = '1h' +TestDuration = '5h' +TestRunName = 'mainnet-2.7-ccip1.2' +FailOnFirstErrorInLoad = true +SkipRequestIfAnotherRequestTriggeredWithin = '40m' + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'Data' +DestGasLimit = 0 +DataLength = 100 +NoOfTokens = 1 +AmountPerToken = 1 + +[CCIP.Groups.smoke] +# these are all the valid network pairs +NetworkPairs = [ + 'ETHEREUM_MAINNET,OPTIMISM_MAINNET', + 'ETHEREUM_MAINNET,AVALANCHE_MAINNET', + 'ETHEREUM_MAINNET,POLYGON_MAINNET', + 'ETHEREUM_MAINNET,BSC_MAINNET', + 'ETHEREUM_MAINNET,ARBITRUM_MAINNET', + 'ETHEREUM_MAINNET,BASE_MAINNET', + 'ETHEREUM_MAINNET,WEMIX_MAINNET', + 'AVALANCHE_MAINNET,POLYGON_MAINNET', + 'BASE_MAINNET,OPTIMISM_MAINNET', + 'BASE_MAINNET,ARBITRUM_MAINNET', + 'AVALANCHE_MAINNET,BSC_MAINNET', + 'BSC_MAINNET,POLYGON_MAINNET', + 'OPTIMISM_MAINNET,POLYGON_MAINNET', + 'BASE_MAINNET,BSC_MAINNET', + 'POLYGON_MAINNET,ARBITRUM_MAINNET', # added as batch 1 + 
'ARBITRUM_MAINNET,BSC_MAINNET', # added as batch 1 + 'ARBITRUM_MAINNET,OPTIMISM_MAINNET', # added as batch 1 + 'AVALANCHE_MAINNET,OPTIMISM_MAINNET', # added as batch 2 + 'AVALANCHE_MAINNET,ARBITRUM_MAINNET', # added as batch 2 + 'BASE_MAINNET,POLYGON_MAINNET', # added as batch 2 + 'BSC_MAINNET,OPTIMISM_MAINNET', # added as batch 2 + 'AVALANCHE_MAINNET,BASE_MAINNET', # added as batch 2 + 'WEMIX_MAINNET,KROMA_MAINNET', + 'BSC_MAINNET,WEMIX_MAINNET', # added as batch 2 + 'AVALANCHE_MAINNET,WEMIX_MAINNET', # added as batch 2 + 'POLYGON_MAINNET,WEMIX_MAINNET', # added as batch 2 + 'WEMIX_MAINNET,ARBITRUM_MAINNET', # added as batch 2 + 'OPTIMISM_MAINNET,WEMIX_MAINNET' # added as batch 2 +] + +BiDirectionalLane = true +PhaseTimeout = '20m' +LocalCluster = false +ExistingDeployment = true +ReuseContracts = true + + +[CCIP.Groups.smoke.TokenConfig] +NoOfTokensPerChain = 1 +CCIPOwnerTokens = true + +[CCIP.Groups.smoke.MsgDetails] +MsgType = 'Data' +DestGasLimit = 0 +DataLength = 100 +NoOfTokens = 1 +AmountPerToken = 1 \ No newline at end of file diff --git a/integration-tests/ccip-tests/testconfig/tomls/ccip-crib.toml b/integration-tests/ccip-tests/testconfig/tomls/ccip-crib.toml new file mode 100644 index 00000000000..12afcea791f --- /dev/null +++ b/integration-tests/ccip-tests/testconfig/tomls/ccip-crib.toml @@ -0,0 +1,96 @@ +[CCIP] +[CCIP.Env] +Mockserver = 'http://mockserver:1080' + +[CCIP.Env.Network] +selected_networks = ['AVALANCHE_FUJI', 'BSC_TESTNET'] + +[CCIP.Env.Network.EVMNetworks.AVALANCHE_FUJI] +evm_name = 'Avalanche Fuji' +evm_chain_id = 43113 +evm_urls = ['wss://...'] +evm_http_urls = ['https://...'] +evm_keys = [''] +evm_simulated = false +client_implementation = 'Ethereum' +evm_chainlink_transaction_limit = 50000 +evm_transaction_timeout = '2m' +evm_minimum_confirmations = 1 +evm_gas_estimation_buffer = 1000 +evm_supports_eip1559 = true +evm_default_gas_limit = 6000000 +evm_finality_tag = true + +[CCIP.Env.Network.EVMNetworks.BSC_TESTNET] +evm_name = 'BSC Testnet' +evm_chain_id = 97 +evm_urls = ['wss://...'] +evm_http_urls = ['https://...'] +evm_keys = [''] +evm_simulated = false +client_implementation = 'BSC' +evm_chainlink_transaction_limit = 50000 +evm_transaction_timeout = '2m' +evm_minimum_confirmations = 3 +evm_gas_estimation_buffer = 0 +evm_supports_eip1559 = true +evm_default_gas_limit = 6000000 +evm_finality_tag = true + +[CCIP.Env.ExistingCLCluster] +Name = 'crib-ani' +NoOfNodes = 6 + +[[CCIP.Env.ExistingCLCluster.NodeConfigs]] +URL = 'https://crib-ani-demo-node1.main.stage.cldev.sh/' +Email = 'notreal@fakeemail.ch' +Password = 'fj293fbBnlQ!f9vNs' +InternalIP = 'app-node-1' + + +[[CCIP.Env.ExistingCLCluster.NodeConfigs]] +URL = 'https://crib-ani-demo-node2.main.stage.cldev.sh/' +Email = 'notreal@fakeemail.ch' +Password = 'fj293fbBnlQ!f9vNs' +InternalIP = 'app-node-2' + +[[CCIP.Env.ExistingCLCluster.NodeConfigs]] +URL = 'https://crib-ani-demo-node3.main.stage.cldev.sh/' +Email = 'notreal@fakeemail.ch' +Password = 'fj293fbBnlQ!f9vNs' +InternalIP = 'app-node-3' + +[[CCIP.Env.ExistingCLCluster.NodeConfigs]] +URL = 'https://crib-ani-demo-node4.main.stage.cldev.sh/' +Email = 'notreal@fakeemail.ch' +Password = 'fj293fbBnlQ!f9vNs' +InternalIP = 'app-node-4' + +[[CCIP.Env.ExistingCLCluster.NodeConfigs]] +URL = 'https://crib-ani-demo-node5.main.stage.cldev.sh/' +Email = 'notreal@fakeemail.ch' +Password = 'fj293fbBnlQ!f9vNs' +InternalIP = 'app-node-5' + +[[CCIP.Env.ExistingCLCluster.NodeConfigs]] +URL = 'https://crib-ani-demo-node6.main.stage.cldev.sh/' +Email = 
'notreal@fakeemail.ch' +Password = 'fj293fbBnlQ!f9vNs' +InternalIP = 'app-node-6' + +[CCIP.Groups] +[CCIP.Groups.smoke] +LocalCluster = false +TestRunName = 'crib-ani-demo' +NodeFunding = 1000.0 + + +[CCIP.Groups.load] +LocalCluster = false + +[CCIP.Groups.load.LoadProfile] +TestRunName = 'crib-ani-demo' +TimeUnit = '1s' +TestDuration = '15m' +RequestPerUnitTime = [1] +NodeFunding = 1000.0 diff --git a/integration-tests/ccip-tests/testconfig/tomls/ccip-default.toml b/integration-tests/ccip-tests/testconfig/tomls/ccip-default.toml new file mode 100644 index 00000000000..0157ac24fb4 --- /dev/null +++ b/integration-tests/ccip-tests/testconfig/tomls/ccip-default.toml @@ -0,0 +1,440 @@ +# this file contains the default configuration for the test +# all secrets must be stored in a .env file and sourced before running the test +[CCIP] +[CCIP.ContractVersions] +PriceRegistry = 'latest' +OffRamp = 'latest' +OnRamp = 'latest' +CommitStore = 'latest' +TokenPool = 'latest' + +# all variables to set up the test environment +[CCIP.Env] +TTL = '5h' +# networks between which lanes will be set up and the messages will be sent +# if more than 2 networks are specified, then lanes will be set up between all possible pairs of networks +# for example, if Networks = ['SIMULATED_1', 'SIMULATED_2', 'SIMULATED_3'], +# then lanes will be set up between SIMULATED_1 and SIMULATED_2, SIMULATED_1 and SIMULATED_3, SIMULATED_2 and SIMULATED_3 +# default value is ['SIMULATED_1', 'SIMULATED_2'] which means that the test will create two private geth networks from scratch and set up lanes between them +[CCIP.Env.Network] +selected_networks = ['SIMULATED_1', 'SIMULATED_2'] + +# PrivateEthereumNetworks.NETWORK_NAME contains the configuration of a private ethereum network that includes ethereum version, evm node client, chain id, +# certain chain configurations, addresses to fund or custom docker images to be used. These are non-dev networks, but they all run just a single node. +[CCIP.Env.PrivateEthereumNetworks.SIMULATED_1] +# either eth1 or eth2 (for post-Merge); for eth2 Prysm is used for consensus layer. +ethereum_version = "eth1" +# geth, besu, erigon or nethermind +execution_layer = "geth" +# eth2-only, if set to true environment startup will wait until at least 1 epoch has been finalised +wait_for_finalization=false + +[CCIP.Env.PrivateEthereumNetworks.SIMULATED_1.EthereumChainConfig] +# eth2-only, the lower the value the faster the block production (3 is minimum) +seconds_per_slot = 3 +# eth2-only, the lower the value the faster the epoch finalisation (2 is minimum) +slots_per_epoch = 2 +# eth2-only, the lower the value the faster the chain starts (10 is minimum) +genesis_delay = 15 +# eth2-only, number of validators +validator_count = 4 +chain_id = 1337 +# addresses that should be funded in genesis with ETH +addresses_to_fund = [ + "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", + "0x70997970C51812dc3A010C7d01b50e0d17dc79C8", +] + +[CCIP.Env.PrivateEthereumNetworks.SIMULATED_1.EthereumChainConfig.HardForkEpochs] +# eth2-only, epoch at which chain will upgrade to Dencun or Deneb/Cancun (1 is minimum) +Deneb = 500 + +#[CCIP.Env.PrivateEthereumNetworks.SIMULATED_1.CustomDockerImages] +# custom docker image that will be used for execution layer client. It has to be one of: hyperledger/besu, nethermind/nethermind, thorax/erigon or ethereum/client-go.
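To make the lane-pairing rule from the [CCIP.Env.Network] comments above concrete, here is a minimal, hypothetical Go sketch (not part of the test framework; the function name is illustrative only) that enumerates every unique pair derived from a selected_networks list:

```go
package main

import "fmt"

// lanePairs returns every unique pair of networks; with more than two entries in
// selected_networks this mirrors the "all possible pairs" behaviour described above.
func lanePairs(networks []string) [][2]string {
	var pairs [][2]string
	for i := 0; i < len(networks); i++ {
		for j := i + 1; j < len(networks); j++ {
			pairs = append(pairs, [2]string{networks[i], networks[j]})
		}
	}
	return pairs
}

func main() {
	selected := []string{"SIMULATED_1", "SIMULATED_2", "SIMULATED_3"}
	for _, p := range lanePairs(selected) {
		// With BiDirectionalLane = true each pair is later exercised in both directions.
		fmt.Printf("%s <-> %s\n", p[0], p[1])
	}
}
```

With the three simulated networks used in the example comment this prints the same three combinations listed there; with the default two networks it yields a single lane.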
+# instead of using a specific tag you can also use "latest_available" to use the latest published tag in Github or "latest_stable" to use the latest stable release from Github +# (if the corresponding Docker image on Docker Hub has not been published, environment creation will fail). +#execution_layer="hyperledger/besu:latest_stable" + +[CCIP.Env.PrivateEthereumNetworks.SIMULATED_2] +ethereum_version = "eth1" +execution_layer = "geth" + +[CCIP.Env.PrivateEthereumNetworks.SIMULATED_2.EthereumChainConfig] +seconds_per_slot = 3 +slots_per_epoch = 2 +genesis_delay = 15 +validator_count = 4 +chain_id = 2337 +addresses_to_fund = [ + "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", + "0x70997970C51812dc3A010C7d01b50e0d17dc79C8", +] + +[CCIP.Env.PrivateEthereumNetworks.SIMULATED_2.EthereumChainConfig.HardForkEpochs] +Deneb = 500 + +[CCIP.Env.Logging] +test_log_collect = false # if set to true will save logs even if test did not fail + +[CCIP.Env.Logging.LogStream] +# supported targets: file, loki, in-memory. if empty no logs will be persisted +log_targets = ["file"] +# context timeout for starting log producer and also time-frame for requesting logs +log_producer_timeout = "10s" +# number of retries before log producer gives up and stops listening to logs +log_producer_retry_limit = 10 + +# these values will be used to set up chainlink DON +# along with these values, the secrets need to be specified as part of .env variables +# +[CCIP.Env.NewCLCluster] +NoOfNodes = 6 # number of chainlink nodes to be set up in DON, including one bootstrap node +# if tests are run in k8s, then the following values will be used to set up chainlink nodes and postgresql database, +# in case of local deployment through docker container, these values will be ignored +# for k8s deployment, helm charts are used from https://github.com/smartcontractkit/chainlink-testing-framework/tree/main/charts/chainlink/templates +NodeMemory = '4Gi' # memory to be allocated to each chainlink node; only used if tests are in k8s +NodeCPU = '2' # cpu to be allocated to each chainlink node ; only used if tests are in k8s +DBMemory = '4Gi' # memory to be allocated to postgresql database ; only used if tests are in k8s +DBCPU = '2' # cpu to be allocated to postgresql database ; only used if tests are in k8s +DBCapacity = '10Gi' # disk space to be allocated to postgresql database ; only used if tests are in k8s in stateful deployment +IsStateful = true # if true, chainlink nodes and postgresql database will be deployed as stateful set in k8s +DBArgs = [ + 'shared_buffers=1536MB', + 'effective_cache_size=4096MB', + 'work_mem=64MB', +] # postgresql database arguments ; only used if tests are in k8s + +# these values will be used to set up chainlink DON, if all the chainlink nodes are deployed with the same configuration +[CCIP.Env.NewCLCluster.Common] +Name = 'node1' # name of the chainlink node, used as prefix for all the chainlink node names, used for k8s deployment +DBImage = 'postgres' # postgresql database image to be used for k8s deployment +DBTag = '13.12' # postgresql database image tag to be used for k8s deployment +# override config toml file for chainlink nodes +BaseConfigTOML = """ +[Feature] +LogPoller = true +CCIP = true + +[Log] +Level = 'debug' +JSONConsole = true + +[Log.File] +MaxSize = '0b' + +[WebServer] +AllowOrigins = '*' +HTTPPort = 6688 +SecureCookies = false +HTTPWriteTimeout = '1m' + +[WebServer.RateLimit] +Authenticated = 2000 +Unauthenticated = 1000 + +[WebServer.TLS] +HTTPSPort = 0 + +[Database] +MaxIdleConns = 10 +MaxOpenConns = 20
+MigrateOnStartup = true + +[OCR2] +Enabled = true +DefaultTransactionQueueDepth = 0 + +[OCR] +Enabled = false +DefaultTransactionQueueDepth = 0 + +[P2P] +[P2P.V2] +Enabled = true +ListenAddresses = ['0.0.0.0:6690'] +AnnounceAddresses = ['0.0.0.0:6690'] +DeltaDial = '500ms' +DeltaReconcile = '5s' +""" + +# override config toml related to EVMNode configs for chainlink nodes; applicable to all EVM node configs in chainlink toml +CommonChainConfigTOML = """ +[GasEstimator] +PriceMax = '200 gwei' +LimitDefault = 6000000 +FeeCapDefault = '200 gwei' +""" + +# chainlink override config toml for EVMNode config specific to EVM chains with chain id as mentioned in the key +[CCIP.Env.NewCLCluster.Common.ChainConfigTOMLByChain] +# applicable for arbitrum-goerli chain +421613 = """ +[GasEstimator] +PriceMax = '400 gwei' +LimitDefault = 100000000 +FeeCapDefault = '200 gwei' +BumpThreshold = 60 +BumpPercent = 20 +BumpMin = '100 gwei' +""" + +# applicable for optimism-goerli chain +420 = """ +[GasEstimator] +PriceMax = '150 gwei' +LimitDefault = 6000000 +FeeCapDefault = '150 gwei' +BumpThreshold = 60 +BumpPercent = 20 +BumpMin = '100 gwei' + +[GasEstimator.BlockHistory] +BlockHistorySize = 200 +EIP1559FeeCapBufferBlocks = 0 +""" + +# applicable for base-goerli chain +84531 = """ +[GasEstimator] +PriceMax = '150 gwei' +LimitDefault = 6000000 +FeeCapDefault = '150 gwei' +BumpThreshold = 60 +BumpPercent = 20 +BumpMin = '100 gwei' + +[GasEstimator.BlockHistory] +BlockHistorySize = 200 +EIP1559FeeCapBufferBlocks = 0 +""" + +# applicable for avalanche-fuji chain +43113 = """ +[GasEstimator] +PriceMax = '200 gwei' +LimitDefault = 6000000 +FeeCapDefault = '200 gwei' +BumpThreshold = 60 +""" + +# applicable for sepolia chain +11155111 = """ +[GasEstimator] +PriceMax = '200 gwei' +LimitDefault = 6000000 +FeeCapDefault = '200 gwei' + +[GasEstimator.BlockHistory] +BlockHistorySize = 200 +EIP1559FeeCapBufferBlocks = 0 +""" + +# the following configs are specific to each test type, smoke, load, chaos, etc... +[CCIP.Groups] +[CCIP.Groups.smoke] +# uncomment the following with specific values of lane combinations to be tested, if you want to run your tests only on these specific network pairs +# if specific network pairs are not mentioned, then all the network pairs will be tested based on values in CCIP.Env.NetworkPairs and CCIP.Groups..NoOfNetworks +# if specified, CCIP.Groups..NetworkPairs takes precedence over CCIP.Env.NetworkPairs and CCIP.Groups..NoOfNetworks +#NetworkPairs = ['SEPOLIA,OPTIMISM_GOERLI','SEPOLIA,POLYGON_MUMBAI','AVALANCHE_FUJI,SEPOLIA','SEPOLIA,BASE_GOERLI','SEPOLIA,BSC_TESTNET','SEPOLIA,WEMIX_TESTNET','AVALANCHE_FUJI,OPTIMISM_GOERLI','AVALANCHE_FUJI,POLYGON_MUMBAI','AVALANCHE_FUJI,BSC_TESTNET','AVALANCHE_FUJI,BASE_GOERLI','OPTIMISM_GOERLI,BASE_GOERLI','OPTIMISM_GOERLI,POLYGON_MUMBAI','BSC_TESTNET,POLYGON_MUMBAI','BSC_TESTNET,BASE_GOERLI','WEMIX_TESTNET,KROMA_SEPOLIA'] + +KeepEnvAlive = false # if true, the test will not tear down the test environment after the test is finished +CommitAndExecuteOnSameDON = true # if true, and the test is building the env from scratch, the same chainlink nodes will be used for Commit and Execution jobs. +# Otherwise Commit and Execution jobs will be set up on different nodes based on the number of nodes specified in NoOfCommitNodes and CCIP.Env.NewCLCluster.NoOfNodes +BiDirectionalLane = true # If true, both directions of each lane are used. If false, only a one-way lane is set up.
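The group comments above describe a precedence rule: an explicit NetworkPairs list wins; otherwise pairs are derived from selected_networks, optionally limited to the first NoOfNetworks entries. The following is a small, hypothetical Go sketch of that resolution logic, based only on the comments and not on the framework's actual implementation (the function and parameter names are illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// resolvePairs mimics the documented precedence: an explicit NetworkPairs list is used
// as-is; otherwise all unique pairs are built from the first noOfNetworks selected networks.
func resolvePairs(networkPairs []string, selected []string, noOfNetworks int) [][2]string {
	var pairs [][2]string
	if len(networkPairs) > 0 {
		for _, p := range networkPairs {
			parts := strings.SplitN(p, ",", 2)
			if len(parts) != 2 {
				continue // skip malformed entries in this sketch
			}
			pairs = append(pairs, [2]string{parts[0], parts[1]})
		}
		return pairs
	}
	if noOfNetworks > 0 && noOfNetworks < len(selected) {
		selected = selected[:noOfNetworks]
	}
	for i := 0; i < len(selected); i++ {
		for j := i + 1; j < len(selected); j++ {
			pairs = append(pairs, [2]string{selected[i], selected[j]})
		}
	}
	return pairs
}

func main() {
	selected := []string{"SIMULATED_1", "SIMULATED_2", "SIMULATED_3"}
	// No explicit pairs: only the first NoOfNetworks (= 2) networks are paired.
	fmt.Println(resolvePairs(nil, selected, 2))
	// An explicit NetworkPairs list takes precedence over selected_networks and NoOfNetworks.
	fmt.Println(resolvePairs([]string{"SEPOLIA,OPTIMISM_GOERLI"}, selected, 2))
}
```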
+NoOfCommitNodes = 5 # no of chainlink nodes with Commit job +PhaseTimeout = '10m' # Duration to wait for each phase validation (SendRequested, Commit, RMN Blessing, Execution) to time-out. +LocalCluster = true # if true, the test will use the local docker container, otherwise it will use the k8s cluster +ExistingDeployment = false # true if the tests are run on an existing environment with already set-up jobs, smart contracts, etc... +# In this case the test will only be used to send and verify ccip requests considering that lanes are already functioning. +# In case of ExistingDeployment = false, the test will deploy its own contracts and spin up new chainlink nodes with ccip jobs. It will then use +# the newly deployed contracts to send and verify ccip requests. + +ReuseContracts = true # Whether to reuse the contracts deployed in the previous run. Default value is true unless specified +NodeFunding = 1.0 # Amount of native currency to fund the chainlink node with for each network. Default value is 1 for smoke and 20 for load unless specified +NoOfRoutersPerPair = 1 # denotes the number of routers to be deployed per network. Mostly required for scalability tests. +MulticallInOneTx = false # if set to true, multiple ccip-sends are grouped under one blockchain transaction +NoOfSendsInMulticall = 5 # if MulticallInOneTx=true, this denotes the number of ccip-sends to group in one transaction + +NoOfNetworks = 2 # this is used with Networks in `CCIP.Env`; if `NoOfNetworks < len(CCIP.Env.Networks)`, the test only uses the first NoOfNetworks from `CCIP.Env.Networks`. +# This value is ignored if CCIP.Groups..NetworkPairs is provided + + +[CCIP.Groups.smoke.MsgDetails] +MsgType = 'DataWithToken' # type of message to be sent, either 'Token' or 'DataWithToken' or 'Data' +DestGasLimit = 100000 # change this to 0 gas limit if you are doing ccip-send to an EOA +DataLength = 1000 # length of the data to be sent in ccip message if MsgType = 'Data'/'DataWithToken' +NoOfTokens = 2 # number of bridge tokens to be sent in ccip message if MsgType = 'Token'/'DataWithToken' +AmountPerToken = 1 # amount to be sent for each bridge token in ccip message if MsgType = 'Token'/'DataWithToken' + +[CCIP.Groups.smoke.TokenConfig] +TimeoutForPriceUpdate = '15m' # Duration to wait for the price update to time-out. +# Now testing only with dynamic price getter (no pipeline). +# Could be removed once the pipeline is completely removed.
+WithPipeline = false +NoOfTokensPerChain = 2 # number of bridge tokens to be deployed per network; if MsgType = 'Token'/'DataWithToken' +CCIPOwnerTokens = false # if true, the test will use deploy the tokens by the CCIPOwner, otherwise the tokens will be deployed by a non-owner account, only applicable for 1.5 pools and onwards + +#NoOfTokensWithDynamicPrice = 15 # number of tokens with dynamic price to be deployed +#DynamicPriceUpdateInterval ='15s' # Periodic interval to update the price of tokens, if there are tokens with dynamic price + +# uncomment the following if you want to run your tests with specific number of lanes; +# in this case out of all the possible lane combinations, only the ones with the specified number of lanes will be considered +# for example, if you have provided CCIP.Env.Networks = ['SIMULATED_1', 'SIMULATED_2', 'SIMULATED_3'] and CCIP.Groups..MaxNoOfLanes = 2, +# then only random combinations of 2 lanes from the following will be considered for the test : +# ['SIMULATED_1', 'SIMULATED_2'], ['SIMULATED_1', 'SIMULATED_3'], ['SIMULATED_2', 'SIMULATED_3'] +#MaxNoOfLanes = # maximum number of lanes to be added in the test; mainly used for scalability tests + + +[CCIP.Groups.load] +# uncomment the following with specific values of lane combinations to be tested, if you want to run your tests to run only on these specific network pairs +# if specific network pairs are not mentioned, then all the network pairs will be tested based on values in CCIP.Env.NetworkPairs and CCIP.Groups..NoOfNetworks +# if specified, CCIP.Groups..NetworkPairs takes precedence over CCIP.Env.NetworkPairs and CCIP.Groups..NoOfNetworks +#NetworkPairs = ['SEPOLIA,OPTIMISM_GOERLI','SEPOLIA,POLYGON_MUMBAI','AVALANCHE_FUJI,SEPOLIA','SEPOLIA,BASE_GOERLI','SEPOLIA,BSC_TESTNET','SEPOLIA,WEMIX_TESTNET','AVALANCHE_FUJI,OPTIMISM_GOERLI','AVALANCHE_FUJI,POLYGON_MUMBAI','AVALANCHE_FUJI,BSC_TESTNET','AVALANCHE_FUJI,BASE_GOERLI','OPTIMISM_GOERLI,BASE_GOERLI','OPTIMISM_GOERLI,POLYGON_MUMBAI','BSC_TESTNET,POLYGON_MUMBAI','BSC_TESTNET,BASE_GOERLI','WEMIX_TESTNET,KROMA_SEPOLIA'] + +KeepEnvAlive = false # same as above +CommitAndExecuteOnSameDON = true # same as above +BiDirectionalLane = true # same as above +NoOfCommitNodes = 5 # same as above +PhaseTimeout = '10m' # same as above +LocalCluster = false # same as above +ExistingDeployment = false # same as above +ReuseContracts = true # same as above +NodeFunding = 20.0 # same as above +NoOfRoutersPerPair = 1 # same as above +MulticallInOneTx = false # same as above +NoOfSendsInMulticall = 5 # same as above +NoOfNetworks = 2 # same as above + +[CCIP.Groups.load.OffRampConfig] +BatchGasLimit = 11000000 + +[CCIP.Groups.load.LoadProfile] +RequestPerUnitTime = [1] # number of ccip requests to be sent per unit time +TimeUnit = '10s' # unit of time for RequestPerUnitTime +TestDuration = '10m' # load test duration, not used for smoke tests +WaitBetweenChaosDuringLoad = '2m' # Duration to wait between each chaos injection during load test; only valid for chaos tests +NetworkChaosDelay = '100ms' # Duration for network chaos delay; only valid for chaos tests using network chaos + +# uncomment the following if you want your test results to be reflected under CCIP test grafana dashboard with namespace label same as the value of the following variable +# TestRunName = __ i.e prod-testnet-2.7.1-ccip1.2.1-beta +# Message Frequency Distribution Example + +# The 'Frequencies' array configures the relative frequency of different message types. 
+# Each value in the array represents the relative frequency of a message type, +# determining how often each type appears relative to the others. +#[CCIP.Groups.load.LoadProfile.MsgProfile] +#Frequencies = [4, 12, 3, 1] + +# Example Breakdown: +# - Frequencies = [4, 12, 3, 1] +# - Total Sum of Frequencies = 4 + 12 + 3 + 1 = 20 +# - Percentages: +# - Message Type 1: (4 / 20) * 100% = 20% +# - Message Type 2: (12 / 20) * 100% = 60% +# - Message Type 3: (3 / 20) * 100% = 15% +# - Message Type 4: (1 / 20) * 100% = 5% +# These percentages reflect how often each message type should appear in the total set of messages. +# Please note - if the total set of messages is not equal to the multiple of sum of frequencies, the percentages will not be accurate. +[CCIP.Groups.load.LoadProfile.MsgProfile] +Frequencies = [1] # frequency of each message type in the MsgDetails + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'DataWithToken' # type of message to be sent, either 'Token' or 'DataWithToken' Or 'Data' +DestGasLimit = 100000 # change this to 0 gas limit if you are doing ccip-send to an EOA +DataLength = 1000 # length of the data to be sent in ccip message if MsgType = 'Data'/'DataWithToken' +NoOfTokens = 2 # number of bridge tokens to be sent in ccip message if MsgType = 'Token'/'DataWithToken' +AmountPerToken = 1 # amount to be sent for each bridge token in ccip message if MsgType = 'Token'/'DataWithToken' + + +[CCIP.Groups.load.TokenConfig] +TimeoutForPriceUpdate = '15m' # Duration to wait for the price update to time-out. +# Now testing only with dynamic price getter (no pipeline). +# Could be removed once the pipeline is completely removed. +WithPipeline = false +NoOfTokensPerChain = 2 # number of bridge tokens to be deployed per network; if MsgType = 'Token'/'DataWithToken' +CCIPOwnerTokens = false # if true, the test will use deploy the tokens by the CCIPOwner, otherwise the tokens and pools will be deployed by a non-owner account, +# only applicable for 1.5 pools and onwards, if you are running with pre-1.5 pools, then set this to true to deploy token pools by CCIPOwner, otherwise +# the test will fail + +#NoOfTokensWithDynamicPrice = 15 # number of tokens with dynamic price to be deployed +#DynamicPriceUpdateInterval ='15s' # Periodic interval to update the price of tokens, if there are tokens with dynamic price + +# uncomment the following if you want to run your tests with specific number of lanes; +# in this case out of all the possible lane combinations, only the ones with the specified number of lanes will be considered +# for example, if you have provided CCIP.Env.Networks = ['SIMULATED_1', 'SIMULATED_2', 'SIMULATED_3'] and CCIP.Groups..MaxNoOfLanes = 2, +# then only random combinations of 2 lanes from the following will be considered for the test : +# ['SIMULATED_1', 'SIMULATED_2'], ['SIMULATED_1', 'SIMULATED_3'], ['SIMULATED_2', 'SIMULATED_3'] +#MaxNoOfLanes = # maximum number of lanes to be added in the test; mainly used for scalability tests +# + +# Uncomment the following if you want to run your tests with updated OCR params +# otherwise test will use default OCR params from - +# https://github.com/smartcontractkit/chainlink/blob/develop/integration-tests/ccip-tests/contracts/contract_deployer.go#L729-L751 +## OCR Params +#CommitInflightExpiry = '2m' +#ExecInflightExpiry = '2m' +# +#[CCIP.Groups.load.CommitOCRParams] +#DeltaProgress = '2m' +#DeltaResend = '5s' +#DeltaRound = '75s' +#DeltaGrace = '5s' +#MaxDurationQuery = '100ms' +#MaxDurationObservation = 
'35s' +#MaxDurationReport = '10s' +#MaxDurationShouldAcceptFinalizedReport = '5s' +#MaxDurationShouldTransmitAcceptedReport = '10s' +# +#[CCIP.Groups.load.ExecOCRParams] +#DeltaProgress = '100s' +#DeltaResend = '5s' +#DeltaRound = '40s' +#DeltaGrace = '5s' +#MaxDurationQuery = '100ms' +#MaxDurationObservation = '20s' +#MaxDurationReport = '8s' +#MaxDurationShouldAcceptFinalizedReport = '5s' +#MaxDurationShouldTransmitAcceptedReport = '8s' + +[CCIP.Groups.chaos] +# uncomment the following with specific values of lane combinations to be tested, if you want to run your tests to run only on these specific network pairs +# if specific network pairs are not mentioned, then all the network pairs will be tested based on values in CCIP.Env.NetworkPairs and CCIP.Groups..NoOfNetworks +# if specified, CCIP.Groups..NetworkPairs takes precedence over CCIP.Env.NetworkPairs and CCIP.Groups..NoOfNetworks +#NetworkPairs = ['SEPOLIA,OPTIMISM_GOERLI','SEPOLIA,POLYGON_MUMBAI','AVALANCHE_FUJI,SEPOLIA','SEPOLIA,BASE_GOERLI','SEPOLIA,BSC_TESTNET','SEPOLIA,WEMIX_TESTNET','AVALANCHE_FUJI,OPTIMISM_GOERLI','AVALANCHE_FUJI,POLYGON_MUMBAI','AVALANCHE_FUJI,BSC_TESTNET','AVALANCHE_FUJI,BASE_GOERLI','OPTIMISM_GOERLI,BASE_GOERLI','OPTIMISM_GOERLI,POLYGON_MUMBAI','BSC_TESTNET,POLYGON_MUMBAI','BSC_TESTNET,BASE_GOERLI','WEMIX_TESTNET,KROMA_SEPOLIA'] +KeepEnvAlive = false +CommitAndExecuteOnSameDON = false +BiDirectionalLane = true +NoOfCommitNodes = 5 +PhaseTimeout = '50m' +LocalCluster = false +ExistingDeployment = false +ReuseContracts = true +NodeFunding = 20.0 +NoOfRoutersPerPair = 1 +MulticallInOneTx = false +NoOfSendsInMulticall = 5 +NoOfNetworks = 2 +# chaos test settings +ChaosDuration = '10m' # Duration for whichever chaos will be injected; only valid for chaos tests + + +[CCIP.Groups.chaos.MsgDetails] +MsgType = 'DataWithToken' # type of message to be sent, either 'Token' or 'DataWithToken' Or 'Data' +DestGasLimit = 100000 # change this to 0 gas limit if you are doing ccip-send to an EOA +DataLength = 1000 # length of the data to be sent in ccip message if MsgType = 'Data'/'DataWithToken' +NoOfTokens = 2 # number of bridge tokens to be sent in ccip message if MsgType = 'Token'/'DataWithToken' +AmountPerToken = 1 # amount to be sent for each bridge token in ccip message if MsgType = 'Token'/'DataWithToken' + +[CCIP.Groups.chaos.TokenConfig] +TimeoutForPriceUpdate = '15m' # Duration to wait for the price update to time-out. +# Now testing only with dynamic price getter (no pipeline). +# Could be removed once the pipeline is completely removed. 
+WithPipeline = false +NoOfTokensPerChain = 2 # number of bridge tokens to be deployed per network; if MsgType = 'Token'/'DataWithToken' + +# uncomment the following if you want to run your tests with specific number of lanes; +# in this case out of all the possible lane combinations, only the ones with the specified number of lanes will be considered +# for example, if you have provided CCIP.Env.Networks = ['SIMULATED_1', 'SIMULATED_2', 'SIMULATED_3'] and CCIP.Groups..MaxNoOfLanes = 2, +# then only random combinations of 2 lanes from the following will be considered for the test : +# ['SIMULATED_1', 'SIMULATED_2'], ['SIMULATED_1', 'SIMULATED_3'], ['SIMULATED_2', 'SIMULATED_3'] +#MaxNoOfLanes = # maximum number of lanes to be added in the test; mainly used for scalability tests diff --git a/integration-tests/ccip-tests/testconfig/tomls/ccip1.4-stress/baseline.toml b/integration-tests/ccip-tests/testconfig/tomls/ccip1.4-stress/baseline.toml new file mode 100644 index 00000000000..d48c0b0f797 --- /dev/null +++ b/integration-tests/ccip-tests/testconfig/tomls/ccip1.4-stress/baseline.toml @@ -0,0 +1,189 @@ +## Baseline performance test on simulated environment (with chaos) +## 40 chains / 400 lanes +## historyDepth 200 / finalityDepth 200 +## block_time = 1s +## throughput 1msg / 5s +## 20% Token, 60% DataWithToken, 15% Regular size msgs, 5% Large msgs +## +## make test_load_ccip testimage=.dkr.ecr..amazonaws.com/chainlink-ccip-tests:ccip-develop \ +## testname=TestLoadCCIPStableRequestTriggeringWithNetworkChaos \ +## override_toml=./testconfig/tomls/ccip-1.4-stress/baseline.toml \ +## secret_toml=./testconfig/tomls/secrets.toml + +[CCIP] +[CCIP.ContractVersions] +PriceRegistry = '1.2.0' +OffRamp = '1.2.0' +OnRamp = '1.2.0' +TokenPool = '1.4.0' +CommitStore = '1.2.0' + +[CCIP.Env] +TTL = '8h' + +[CCIP.Env.Network] +selected_networks= ['PRIVATE-CHAIN-1', 'PRIVATE-CHAIN-2'] + +[CCIP.Env.Network.EVMNetworks.PRIVATE-CHAIN-1] +evm_name = 'private-chain-1' +evm_chain_id = 2337 +evm_urls = ['wss://ignore-this-url.com'] +evm_http_urls = ['https://ignore-this-url.com'] +evm_keys = ['59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d'] +evm_simulated = true +client_implementation = 'Ethereum' +evm_chainlink_transaction_limit = 5000 +evm_transaction_timeout = '3m' +evm_minimum_confirmations = 1 +evm_gas_estimation_buffer = 1000 +evm_supports_eip1559 = true +evm_default_gas_limit = 6000000 +evm_finality_depth = 200 + +[CCIP.Env.Network.EVMNetworks.PRIVATE-CHAIN-2] +evm_name = 'private-chain-2' +evm_chain_id = 1337 +evm_urls = ['wss://ignore-this-url.com'] +evm_http_urls = ['https://ignore-this-url.com'] +evm_keys = ['ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80'] +evm_simulated = true +client_implementation = 'Ethereum' +evm_chainlink_transaction_limit = 5000 +evm_transaction_timeout = '3m' +evm_minimum_confirmations = 1 +evm_gas_estimation_buffer = 1000 +evm_supports_eip1559 = true +evm_default_gas_limit = 6000000 +evm_finality_depth = 200 + +[CCIP.Env.Network.AnvilConfigs.PRIVATE-CHAIN-1] +block_time = 1 + +# +[CCIP.Env.Network.AnvilConfigs.PRIVATE-CHAIN-2] +block_time = 1 + +[CCIP.Env.NewCLCluster] +NoOfNodes = 17 +NodeMemory = '10Gi' +NodeCPU = '6' +DBMemory = '16Gi' +DBCPU = '4' +DBStorageClass = 'gp3' +PromPgExporter = true +DBCapacity = '50Gi' +IsStateful = true +DBArgs = ['shared_buffers=4096MB', 'effective_cache_size=8192MB', 'work_mem=128MB'] + +[CCIP.Env.NewCLCluster.Common] +BaseConfigTOML = """ +[Feature] +LogPoller = true +CCIP = true + +[Log] +Level = 
'debug' +JSONConsole = true + +[Log.File] +MaxSize = '0b' + +[WebServer] +AllowOrigins = '*' +HTTPPort = 6688 +SecureCookies = false +HTTPWriteTimeout = '1m' + +[WebServer.RateLimit] +Authenticated = 2000 +Unauthenticated = 1000 + +[WebServer.TLS] +HTTPSPort = 0 + +[Database] +MaxIdleConns = 20 +MaxOpenConns = 30 +MigrateOnStartup = true + +[OCR2] +Enabled = true +DefaultTransactionQueueDepth = 0 + +[OCR] +Enabled = false +DefaultTransactionQueueDepth = 0 + +[P2P] +[P2P.V2] +Enabled = true +ListenAddresses = ['0.0.0.0:6690'] +AnnounceAddresses = ['0.0.0.0:6690'] +DeltaDial = '500ms' +DeltaReconcile = '5s' +""" + +CommonChainConfigTOML = """ +[HeadTracker] +HistoryDepth = 200 + +[GasEstimator] +PriceMax = '200 gwei' +LimitDefault = 6000000 +FeeCapDefault = '200 gwei' +""" + +[CCIP.Groups] +[CCIP.Groups.load] +KeepEnvAlive = true +NoOfCommitNodes = 16 +PhaseTimeout = '40m' +NodeFunding = 1000.0 +NoOfRoutersPerPair = 2 +NoOfNetworks = 40 +MaxNoOfLanes = 400 + +[CCIP.Groups.load.OffRampConfig] +BatchGasLimit = 11000000 + +[CCIP.Groups.load.TokenConfig] +TimeoutForPriceUpdate = '15m' +NoOfTokensPerChain = 10 +NoOfTokensWithDynamicPrice = 10 +DynamicPriceUpdateInterval ='15s' +CCIPOwnerTokens = true + +[CCIP.Groups.load.LoadProfile] +TestDuration = '4h' +TimeUnit = '5s' +RequestPerUnitTime = [1] +OptimizeSpace = true +NetworkChaosDelay = '100ms' + +# to represent 20%, 60%, 15%, 5% of the total messages +[CCIP.Groups.load.LoadProfile.MsgProfile] +Frequencies = [4,12,3,1] + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'Token' +DestGasLimit = 0 +DataLength = 0 +NoOfTokens = 1 +AmountPerToken = 1 + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'DataWithToken' +DestGasLimit = 500000 +DataLength = 5000 +NoOfTokens = 1 +AmountPerToken = 1 + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'Data' +DestGasLimit = 800000 +DataLength = 10000 + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'Data' +DestGasLimit = 2500000 +DataLength = 10000 diff --git a/integration-tests/ccip-tests/testconfig/tomls/ccip1.4-stress/prod-testnet.toml b/integration-tests/ccip-tests/testconfig/tomls/ccip1.4-stress/prod-testnet.toml new file mode 100644 index 00000000000..f8321584c84 --- /dev/null +++ b/integration-tests/ccip-tests/testconfig/tomls/ccip1.4-stress/prod-testnet.toml @@ -0,0 +1,964 @@ +[CCIP] +[CCIP.ContractVersions] +PriceRegistry = '1.2.0' +OffRamp = '1.2.0' +OnRamp = '1.2.0' +TokenPool = '1.4.0' +CommitStore = '1.2.0' + + +[CCIP.Deployments] +Data = """ +{ + "lane_configs": { + "Arbitrum Sepolia": { + "is_native_fee_token": true, + "fee_token": "0xb1D4538B4571d411F07960EF2838Ce337FE1E80E", + "bridge_tokens": [ + ], + "bridge_tokens_pools": [ + ], + "price_aggregators": null, + "arm": "0x5EF7a726Fd21Fd9D77D34E3C56cfDD8691F7F0ac", + "router": "0x2a9C5afB0d0e4BAb2BCdaE109EC4b0c4Be15a165", + "price_registry": "0x89D5b13908b9063abCC6791dc724bF7B7c93634C", + "wrapped_native": "0xE591bf0A0CF924A0674d7792db046B23CEbF5f34", + "src_contracts": { + "Avalanche Fuji": { + "on_ramp": "0x1Cb56374296ED19E86F68fA437ee679FD7798DaA", + "deployed_at": 33999325 + }, + "Base Sepolia": { + "on_ramp": "0x7854E73C73e7F9bb5b0D5B4861E997f4C6E8dcC6", + "deployed_at": 9199926 + }, + "Gnosis Chiado": { + "on_ramp": "0x973CbE752258D32AE82b60CD1CB656Eebb588dF0", + "deployed_at": 42809650 + }, + "Optimism Sepolia": { + "on_ramp": "0x701Fe16916dd21EFE2f535CA59611D818B017877", + "deployed_at": 35180131 + }, + "Sepolia Testnet": { + "on_ramp": 
"0x4205E1Ca0202A248A5D42F5975A8FE56F3E302e9", + "deployed_at": 35180131 + }, + "WeMix Testnet": { + "on_ramp": "0xBD4106fBE4699FE212A34Cc21b10BFf22b02d959", + "deployed_at": 18816676 + } + }, + "dest_contracts": { + "Avalanche Fuji": { + "off_ramp": "0xcab0EF91Bee323d1A617c0a027eE753aFd6997E4", + "commit_store": "0x0d90b9b96cBFa0D01635ce12982ccE1b70827c7a", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Base Sepolia": { + "off_ramp": "0xc1982985720B959E66c19b64F783361Eb9B60F26", + "commit_store": "0x28F66bB336f6db713d6ad2a3bd1B7a531282A159", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Gnosis Chiado": { + "off_ramp": "0x935C26F9a9122E5F9a27f2d3803e74c75B94f5a3", + "commit_store": "0xEdb963Ec5c2E5AbdFdCF137eF44A445a7fa4787A", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Optimism Sepolia": { + "off_ramp": "0xfD404A89e1d195F0c65be1A9042C77745197659e", + "commit_store": "0x84B7B012c95f8A152B44Ab3e952f2dEE424fA8e1", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Sepolia Testnet": { + "off_ramp": "0x1c71f141b4630EBE52d6aF4894812960abE207eB", + "commit_store": "0xaB0c8Ba51E7Fa3E5693a4Fbb39473520FD85d173", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "WeMix Testnet": { + "off_ramp": "0x262e16C8D42aa07bE13e58F81e7D9F62F6DE2830", + "commit_store": "0xc132eFAf929299E5ee704Fa6D9796CFa23Bb8b2C", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "Avalanche Fuji": { + "fee_token": "0x0b9d5D9136855f6FEc3c0993feE6E9CE8a297846", + "bridge_tokens": [ + ], + "bridge_tokens_pools": [ + ], + "price_aggregators": null, + "arm": "0x0ea0D7B2b78DD3A926fC76d6875a287F0AEB158F", + "router": "0xF694E193200268f9a4868e4Aa017A0118C9a8177", + "price_registry": "0x19e157E5fb1DAec1aE4BaB113fdf077F980704AA", + "wrapped_native": "0xd00ae08403B9bbb9124bB305C09058E32C39A48c", + "src_contracts": { + "Arbitrum Sepolia": { + "on_ramp": "0x8bB16BEDbFd62D1f905ACe8DBBF2954c8EEB4f66", + "deployed_at": 31888860 + }, + "BSC Testnet": { + "on_ramp": "0xF25ECF1Aad9B2E43EDc2960cF66f325783245535", + "deployed_at": 33214865 + }, + "Base Sepolia": { + "on_ramp": "0x1A674645f3EB4147543FCA7d40C5719cbd997362", + "deployed_at": 31235262 + }, + "Gnosis Chiado": { + "on_ramp": "0x1532e5b204ee2b2244170c78E743CB9c168F4DF9", + "deployed_at": 32817266 + }, + "Optimism Sepolia": { + "on_ramp": "0xC334DE5b020e056d0fE766dE46e8d9f306Ffa1E2", + "deployed_at": 30396804 + }, + "Polygon Amoy": { + "on_ramp": "0x610F76A35E17DA4542518D85FfEa12645eF111Fc", + "deployed_at": 31982368 + }, + "Sepolia Testnet": { + "on_ramp": "0x5724B4Cc39a9690135F7273b44Dfd3BA6c0c69aD", + "deployed_at": 33214865 + }, + "WeMix Testnet": { + "on_ramp": "0x677B5ab5C8522d929166c064d5700F147b15fa33", + "deployed_at": 30436465 + } + }, + "dest_contracts": { + "Arbitrum Sepolia": { + "off_ramp": "0x90A74072e7B0c2d59e13aB4d8f93c8198c413194", + "commit_store": "0xf3458CFd2fdf4a6CF0Ce296d520DD21eB194828b", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "BSC Testnet": { + "off_ramp": "0x10b28009E5D776F1f5AAA73941CE8953B8f42d26", + "commit_store": "0xacDD582F271eCF22FAd6764cCDe1c4a534b732A8", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Base Sepolia": { + "off_ramp": "0xdBdE8510226d1E060A3bf982b67705C67f5697e2", + "commit_store": "0x8Ee73BC9492b4182D289E5C1e66e40CD876CC00F", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Gnosis Chiado": { + "off_ramp": 
"0x56dF55aF5F0A4689f3364230587a68eD6A314fAd", + "commit_store": "0xabA7ff98094c4cc7A075812EefF2CD21f6400235", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Optimism Sepolia": { + "off_ramp": "0x3d7CbC95DCC33257F14D6Eb780c88Bd56C6335BB", + "commit_store": "0x1fcDC02edDfb405f378ba53cF9E6104feBcB7542", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Polygon Amoy": { + "off_ramp": "0x3e33290B90fD0FF30a3FA138934DF028E4eCA348", + "commit_store": "0xCFe3556Aa42d40be09BD23aa80448a19443BE5B1", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Sepolia Testnet": { + "off_ramp": "0x9e5e4324F8608D54A50a317832d456a392E4F8C2", + "commit_store": "0x92A51eD3F041B39EbD1e464C1f7cb1e8f8A8c63f", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "WeMix Testnet": { + "off_ramp": "0xD0D338318bC6837b091FC7AB5F2a94B7783507d5", + "commit_store": "0xd9D479208235c7355848ff4aF26eB5aacfDC30c6", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "BSC Testnet": { + "is_native_fee_token": true, + "fee_token": "0x84b9B910527Ad5C03A9Ca831909E21e236EA7b06", + "bridge_tokens": [ + ], + "bridge_tokens_pools": [ + ], + "price_aggregators": null, + "arm": "0xF9a21B587111e7E8745Fb8b13750014f19DB0014", + "router": "0xE1053aE1857476f36A3C62580FF9b016E8EE8F6f", + "price_registry": "0xCCDf022c9d31DC26Ebab4FB92432724a5b79809a", + "wrapped_native": "0xae13d989daC2f0dEbFf460aC112a837C89BAa7cd", + "src_contracts": { + "Avalanche Fuji": { + "on_ramp": "0xa2515683E99F50ADbE177519A46bb20FfdBaA5de", + "deployed_at": 40500000 + }, + "Base Sepolia": { + "on_ramp": "0x3E807220Ca84b997c0d1928162227b46C618e0c5", + "deployed_at": 37115558 + }, + "Gnosis Chiado": { + "on_ramp": "0x8735f991d41eA9cA9D2CC75cD201e4B7C866E63e", + "deployed_at": 40228352 + }, + "Polygon Amoy": { + "on_ramp": "0xf37CcbfC04adc1B56a46B36F811D52C744a1AF78", + "deployed_at": 39572254 + }, + "Sepolia Testnet": { + "on_ramp": "0xB1DE44B04C00eaFe9915a3C07a0CaeA4410537dF", + "deployed_at": 38150066 + }, + "WeMix Testnet": { + "on_ramp": "0x89268Afc1BEA0782a27ba84124E3F42b196af927", + "deployed_at": 38184995 + } + }, + "dest_contracts": { + "Avalanche Fuji": { + "off_ramp": "0x6e6fFCb6B4BED91ff0CC8C2e57EC029dA7DB80C2", + "commit_store": "0x38Bc38Bd824b6eE87571f9D3CFbe6D6E28E3Dc62", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Base Sepolia": { + "off_ramp": "0x2C61FD7E93Dc79422861282145c59B56dFbc3a8c", + "commit_store": "0x42fAe5B3605804CF6d08632d7A25864e24F792Ae", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Gnosis Chiado": { + "off_ramp": "0x71a44a60832B0F8B63232C9516e7E6aEc3A373Dc", + "commit_store": "0xAC24299a91b72d1Cb5B31147e3CF54964D896974", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Polygon Amoy": { + "off_ramp": "0x63440C7747d37bc6154b5538AE32b54FE0965AfA", + "commit_store": "0xAD22fA198CECfC534927aE1D480c460d5bB3460F", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Sepolia Testnet": { + "off_ramp": "0xf1c128Fe52Ea78CcAAB407509292E61ce38C1523", + "commit_store": "0x59dFD870dC4bd76A7B879A4f705Fdcd2595f85f9", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "WeMix Testnet": { + "off_ramp": "0xfd9B19c3725da5B517aA705B848ff3f21F98280e", + "commit_store": "0x3c1F1412563188aBc8FE3fd53E8F1Cb601CaB4f9", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "Base Sepolia": { + "is_native_fee_token": true, + 
"fee_token": "0xE4aB69C077896252FAFBD49EFD26B5D171A32410", + "bridge_tokens": [ + ], + "bridge_tokens_pools": [ + ], + "price_aggregators": null, + "arm": "0x5aA82cA372782d6CC33AA4C830Df2a91017A7e1b", + "router": "0xD3b06cEbF099CE7DA4AcCf578aaebFDBd6e88a93", + "price_registry": "0x4D20536e60832bE579Cd38E89Dc03d11E1741FbA", + "wrapped_native": "0x4200000000000000000000000000000000000006", + "src_contracts": { + "Arbitrum Sepolia": { + "on_ramp": "0x58622a80c6DdDc072F2b527a99BE1D0934eb2b50", + "deployed_at": 5146539 + }, + "Avalanche Fuji": { + "on_ramp": "0xAbA09a1b7b9f13E05A6241292a66793Ec7d43357", + "deployed_at": 7810235 + }, + "BSC Testnet": { + "on_ramp": "0xD806966beAB5A3C75E5B90CDA4a6922C6A9F0c9d", + "deployed_at": 5144127 + }, + "Gnosis Chiado": { + "on_ramp": "0x2Eff2d1BF5C557d6289D208a7a43608f5E3FeCc2", + "deployed_at": 9817141 + }, + "Optimism Sepolia": { + "on_ramp": "0x3b39Cd9599137f892Ad57A4f54158198D445D147", + "deployed_at": 5147649 + }, + "Sepolia Testnet": { + "on_ramp": "0x6486906bB2d85A6c0cCEf2A2831C11A2059ebfea", + "deployed_at": 7810235 + }, + "ethereum-testnet-sepolia-mode-1": { + "on_ramp": "0x3d0115386C01436870a2c47e6297962284E70BA6", + "deployed_at": 10409731 + } + }, + "dest_contracts": { + "Arbitrum Sepolia": { + "off_ramp": "0xd364C06ac99a82a00d3eFF9F2F78E4Abe4b9baAA", + "commit_store": "0xdE8d0f47a71eA3fDFBD3162271652f2847939097", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Avalanche Fuji": { + "off_ramp": "0xAd91214efFee446500940c764DF77AF18427294F", + "commit_store": "0x1242b6c5e0e349b8d4BCf0938f961C4B4f7EA3Fa", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "BSC Testnet": { + "off_ramp": "0xd5E9508921434e8758f4540D55c1c066b7cc1598", + "commit_store": "0x1a86b29364D1B3fA3386329A361aA98A104b2742", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Gnosis Chiado": { + "off_ramp": "0x9Bb7e398ef9Acfe9cA584C39B1E233Cba62BB9f7", + "commit_store": "0x1F4B82cDebaC5e3a0Dd53183D47e51808B4a64cB", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Optimism Sepolia": { + "off_ramp": "0x86a3910908eCaAA31Fcd9F0fC8841D8E98f1511d", + "commit_store": "0xE99a87C9b5ed4D2b6060195DEea5106ffF655736", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Sepolia Testnet": { + "off_ramp": "0x189F61D9B886Dd2975D5Abc893c8Cf5f5effda71", + "commit_store": "0xEE7e27346DCD1e711348D0F7f7ECB53a9a3a08a7", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "ethereum-testnet-sepolia-mode-1": { + "off_ramp": "0xB26647A23e8b4284375e5C74b77c9557aE709D03", + "commit_store": "0x4b4fEB401d3E613e1D6242E155C83A80BF9ac2C9", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "Gnosis Chiado": { + "is_native_fee_token": true, + "fee_token": "0xDCA67FD8324990792C0bfaE95903B8A64097754F", + "bridge_tokens": [ + ], + "bridge_tokens_pools": [ + ], + "price_aggregators": null, + "arm": "0xb6f1Fe2CDE891eFd5Efd2A563C4C2F2549163718", + "router": "0x19b1bac554111517831ACadc0FD119D23Bb14391", + "price_registry": "0x2F4ACd1f8986c6B1788159C4c9a5fC3fceCCE363", + "wrapped_native": "0x18c8a7ec7897177E4529065a7E7B0878358B3BfF", + "src_contracts": { + "Arbitrum Sepolia": { + "on_ramp": "0x473b49fb592B54a4BfCD55d40E048431982879C9", + "deployed_at": 9718588 + }, + "Avalanche Fuji": { + "on_ramp": "0x610F76A35E17DA4542518D85FfEa12645eF111Fc", + "deployed_at": 9718676 + }, + "BSC Testnet": { + "on_ramp": "0xE48E6AA1fc7D0411acEA95F8C6CaD972A37721D4", + "deployed_at": 
9718302 + }, + "Base Sepolia": { + "on_ramp": "0x41b4A51cAfb699D9504E89d19D71F92E886028a8", + "deployed_at": 9718513 + }, + "Optimism Sepolia": { + "on_ramp": "0xAae733212981e06D9C978Eb5148F8af03F54b6EF", + "deployed_at": 9718420 + }, + "Polygon Amoy": { + "on_ramp": "0x01800fCDd892e37f7829937271840A6F041bE62E", + "deployed_at": 9718194 + }, + "Sepolia Testnet": { + "on_ramp": "0x4ac7FBEc2A7298AbDf0E0F4fDC45015836C4bAFe", + "deployed_at": 8487681 + } + }, + "dest_contracts": { + "Arbitrum Sepolia": { + "off_ramp": "0x9aA82DBB53bf02096B771D40e9432A323a78fB26", + "commit_store": "0x5CdbA91aBC0cD81FC56bc10Ad1835C9E5fB38e5F", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Avalanche Fuji": { + "off_ramp": "0x3e33290B90fD0FF30a3FA138934DF028E4eCA348", + "commit_store": "0xCFe3556Aa42d40be09BD23aa80448a19443BE5B1", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "BSC Testnet": { + "off_ramp": "0xbc4AD54e91b213D4279af92c0C5518c0b96cf62D", + "commit_store": "0xff84e8Dd4Fd17eaBb23b6AeA6e1981830e54389C", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Base Sepolia": { + "off_ramp": "0x4117953A5ceeF12f5B8C1E973b470ab83a8CebA6", + "commit_store": "0x94ad41296186E81f31e1ed0B1BcF5fa9e1721C27", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Optimism Sepolia": { + "off_ramp": "0x33d2898F8fb7714FD1661791766f40754982a343", + "commit_store": "0x55d6Df194472f02CD481e506A277c4A29D0D1bCc", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Polygon Amoy": { + "off_ramp": "0x450543b1d85ca79885851D7b74dc982981b78229", + "commit_store": "0x23B79d940A769FE31b4C867A8BAE80117f24Ca81", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Sepolia Testnet": { + "off_ramp": "0xbf9036529123DE264bFA0FC7362fE25B650D4B16", + "commit_store": "0x5f7F1abD5c5EdaF2636D58B980e85355AF0Ef80d", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "Kroma Sepolia": { + "is_native_fee_token": true, + "fee_token": "0xa75cCA5b404ec6F4BB6EC4853D177FE7057085c8", + "bridge_tokens": [ + ], + "bridge_tokens_pools": [ + ], + "price_aggregators": null, + "arm": "0x1E4e4e0d6f6631A45C616F71a1A5cF208DB9eCDe", + "router": "0xA8C0c11bf64AF62CDCA6f93D3769B88BdD7cb93D", + "price_registry": "0xa1ed3A3aA29166C9c8448654A8cA6b7916BC8379", + "wrapped_native": "0x4200000000000000000000000000000000000001", + "src_contracts": { + "WeMix Testnet": { + "on_ramp": "0x6ea155Fc77566D9dcE01B8aa5D7968665dc4f0C5", + "deployed_at": 10290904 + } + }, + "dest_contracts": { + "WeMix Testnet": { + "off_ramp": "0xB602B6E5Caf08ac0C920EAE585aed100a8cF6f3B", + "commit_store": "0x89D5b13908b9063abCC6791dc724bF7B7c93634C", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "Optimism Sepolia": { + "is_native_fee_token": true, + "fee_token": "0xE4aB69C077896252FAFBD49EFD26B5D171A32410", + "bridge_tokens": [ + ], + "bridge_tokens_pools": [ + ], + "price_aggregators": null, + "arm": "0xf06Ff5D2084295909119ca541E93635E7D582FFc", + "router": "0x114A20A10b43D4115e5aeef7345a1A71d2a60C57", + "price_registry": "0x782a7Ba95215f2F7c3dD4C153cbB2Ae3Ec2d3215", + "wrapped_native": "0x4200000000000000000000000000000000000006", + "src_contracts": { + "Arbitrum Sepolia": { + "on_ramp": "0x1a86b29364D1B3fA3386329A361aA98A104b2742", + "deployed_at": 10841494 + }, + "Avalanche Fuji": { + "on_ramp": "0x6b38CC6Fa938D5AB09Bdf0CFe580E226fDD793cE", + "deployed_at": 8677537 + }, + "Base Sepolia": { + "on_ramp": 
"0xe284D2315a28c4d62C419e8474dC457b219DB969", + "deployed_at": 7130524 + }, + "Gnosis Chiado": { + "on_ramp": "0x835a5b8e6CA17c2bB5A336c93a4E22478E6F1C8A", + "deployed_at": 11799783 + }, + "Polygon Amoy": { + "on_ramp": "0x2Cf26fb01E9ccDb831414B766287c0A9e4551089", + "deployed_at": 10813146 + }, + "Sepolia Testnet": { + "on_ramp": "0xC8b93b46BF682c39B3F65Aa1c135bC8A95A5E43a", + "deployed_at": 12165583 + }, + "WeMix Testnet": { + "on_ramp": "0xc7E53f6aB982af7A7C3e470c8cCa283d3399BDAd", + "deployed_at": 8733017 + } + }, + "dest_contracts": { + "Arbitrum Sepolia": { + "off_ramp": "0xDc2c7A3d8068C6F09F0F3648d24C84e372F6014d", + "commit_store": "0xb1aFb5cbE3c29b5Db71F21442BA9EfD450BC23C3", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Avalanche Fuji": { + "off_ramp": "0x1F350718e015EB20E5065C09F4A7a3f66888aEeD", + "commit_store": "0x98650A8EB59f75D93563aB34FcF603b1A30e4CBF", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Base Sepolia": { + "off_ramp": "0x0a750ca77369e03613d7640548F4b2b1c695c3Bb", + "commit_store": "0x8fEBC74C26129C8d7E60288C6dCCc75eb494aA3C", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Gnosis Chiado": { + "off_ramp": "0xCE2CE7F940B7c839384e5D7e079A6aE80e8AD6dB", + "commit_store": "0x1b9D78Ec1CEEC439F0b7eA6C428A1a607D9FA7e4", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Polygon Amoy": { + "off_ramp": "0xD667b5706592D0b040C78fEe5EE17D243b7dCB41", + "commit_store": "0x96101BA5250EE9295c193693C1e08A55bC593664", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Sepolia Testnet": { + "off_ramp": "0x260AF9b83e0d2Bb6C9015fC9f0BfF8858A0CCE68", + "commit_store": "0x7a0bB92Bc8663abe6296d0162A9b41a2Cb2E0358", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "WeMix Testnet": { + "off_ramp": "0x9C08B7712af0344188aa5087D9e6aD0f47191037", + "commit_store": "0x4BE6DB0B884169a6A207fe5cad01eB4C025a13dB", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "Polygon Amoy": { + "is_native_fee_token": true, + "fee_token": "0x0Fd9e8d3aF1aaee056EB9e802c3A762a667b1904", + "bridge_tokens": [ + ], + "bridge_tokens_pools": [ + ], + "price_aggregators": null, + "arm": "0x50b023c5b33AEe5Adef15C2E95C2fEC690a52fa1", + "router": "0x9C32fCB86BF0f4a1A8921a9Fe46de3198bb884B2", + "price_registry": "0xfb2f2A207dC428da81fbAFfDDe121761f8Be1194", + "wrapped_native": "0x360ad4f9a9A8EFe9A8DCB5f461c4Cc1047E1Dcf9", + "src_contracts": { + "Avalanche Fuji": { + "on_ramp": "0x8Fb98b3837578aceEA32b454f3221FE18D7Ce903", + "deployed_at": 6004551 + }, + "BSC Testnet": { + "on_ramp": "0xC6683ac4a0F62803Bec89a5355B36495ddF2C38b", + "deployed_at": 6005330 + }, + "Gnosis Chiado": { + "on_ramp": "0x2331F6D614C9Fd613Ff59a1aB727f1EDf6c37A68", + "deployed_at": 6897885 + }, + "Optimism Sepolia": { + "on_ramp": "0xA52cDAeb43803A80B3c0C2296f5cFe57e695BE11", + "deployed_at": 6004902 + }, + "Sepolia Testnet": { + "on_ramp": "0x35347A2fC1f2a4c5Eae03339040d0b83b09e6FDA", + "deployed_at": 6004056 + }, + "WeMix Testnet": { + "on_ramp": "0x26546096F64B5eF9A1DcDAe70Df6F4f8c2E10C61", + "deployed_at": 6005611 + } + }, + "dest_contracts": { + "Avalanche Fuji": { + "off_ramp": "0xa733ce82a84335b2E9D864312225B0F3D5d80600", + "commit_store": "0x09B0F93fC2111aE439e853884173AC5b2F809885", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "BSC Testnet": { + "off_ramp": "0x948dfaa4842fc23e0e362Fe8D4396AaE4E6DF7EA", + "commit_store": 
"0x7F4e739D40E58BBd59dAD388171d18e37B26326f", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Gnosis Chiado": { + "off_ramp": "0x17c542a28e08AEF5697251601C7b2B621d153D42", + "commit_store": "0x811250c20fAB9a1b7ca245453aC214ba637fBEB5", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Optimism Sepolia": { + "off_ramp": "0xfFdE9E8c34A27BEBeaCcAcB7b3044A0A364455C9", + "commit_store": "0x74ED442ad211050e9C05Dc9A267E037E3d74A03B", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Sepolia Testnet": { + "off_ramp": "0xFb04129aD1EEDB741CC705ebC1978a7aB63e51f6", + "commit_store": "0x63f875240149d29136053C954Ca164a9BfA81F77", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "WeMix Testnet": { + "off_ramp": "0xdE8451E952Eb43350614839cCAA84f7C8701a09C", + "commit_store": "0xaCdaBa07ECad81dc634458b98673931DD9d3Bc14", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "Sepolia Testnet": { + "is_native_fee_token": true, + "fee_token": "0x779877A7B0D9E8603169DdbD7836e478b4624789", + "bridge_tokens": [ + ], + "bridge_tokens_pools": [ + ], + "price_aggregators": null, + "arm": "0xB4d360459F32Dd641Ef5A6985fFbAC5c4e5521aA", + "router": "0x0BF3dE8c5D3e8A2B34D2BEeB17ABfCeBaf363A59", + "price_registry": "0x9EF7D57a4ea30b9e37794E55b0C75F2A70275dCc", + "wrapped_native": "0x097D90c9d3E0B50Ca60e1ae45F6A81010f9FB534", + "src_contracts": { + "Arbitrum Sepolia": { + "on_ramp": "0xe4Dd3B16E09c016402585a8aDFdB4A18f772a07e", + "deployed_at": 5737506 + }, + "Avalanche Fuji": { + "on_ramp": "0x0477cA0a35eE05D3f9f424d88bC0977ceCf339D4", + "deployed_at": 5944649 + }, + "BSC Testnet": { + "on_ramp": "0xD990f8aFA5BCB02f95eEd88ecB7C68f5998bD618", + "deployed_at": 5383500 + }, + "Base Sepolia": { + "on_ramp": "0x2B70a05320cB069e0fB55084D402343F832556E7", + "deployed_at": 5619657 + }, + "Gnosis Chiado": { + "on_ramp": "0x3E842E3A79A00AFdd03B52390B1caC6306Ea257E", + "deployed_at": 5386355 + }, + "Optimism Sepolia": { + "on_ramp": "0x69CaB5A0a08a12BaFD8f5B195989D709E396Ed4d", + "deployed_at": 5937506 + }, + "Polygon Amoy": { + "on_ramp": "0x9f656e0361Fb5Df2ac446102c8aB31855B591692", + "deployed_at": 5723315 + }, + "WeMix Testnet": { + "on_ramp": "0xedFc22336Eb0B9B11Ff37C07777db27BCcDe3C65", + "deployed_at": 5393931 + }, + "celo-testnet-alfajores": { + "on_ramp": "0x3C86d16F52C10B2ff6696a0e1b8E0BcfCC085948", + "deployed_at": 5704643 + }, + "ethereum-testnet-sepolia-blast-1": { + "on_ramp": "0xDB75E9D9ca7577CcBd7232741be954cf26194a66", + "deployed_at": 6040848 + }, + "ethereum-testnet-sepolia-metis-1": { + "on_ramp": "0x1C4640914cd57c5f02a68048A0fbb0E12d904223", + "deployed_at": 6002793 + }, + "ethereum-testnet-sepolia-mode-1": { + "on_ramp": "0xc630fbD4D0F6AEB00aD0793FB827b54fBB78e981", + "deployed_at": 5970819 + } + }, + "dest_contracts": { + "Arbitrum Sepolia": { + "off_ramp": "0xF18896AB20a09A29e64fdEbA99FDb8EC328f43b1", + "commit_store": "0x93Ff9Dd39Dc01eac1fc4d2c9211D95Ee458CAB94", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Avalanche Fuji": { + "off_ramp": "0x000b26f604eAadC3D874a4404bde6D64a97d95ca", + "commit_store": "0x2dD9273F8208B8393350508131270A6574A69784", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "BSC Testnet": { + "off_ramp": "0xdE2d8E126e08d675fCD7fFa5a6CE49925f3Dc692", + "commit_store": "0x0050ac355a82caB31194507f94174297bf0655A7", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Base Sepolia": { + "off_ramp": 
"0x31c0B81832B333419f0DfD36A69F502cF9094aed", + "commit_store": "0xDFcde9d698a2B32DB2537DC9B752Cadd1D846a52", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Gnosis Chiado": { + "off_ramp": "0x7db0115A0b3AAb01d30bf81123c5DD7B0C41Add5", + "commit_store": "0x6640723Ea801178c4383FA016b9781e7ef1016EF", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Optimism Sepolia": { + "off_ramp": "0xD50590D4438411EDe47029b0FD7901A7145E5Df6", + "commit_store": "0xe85EEE9Fd434A7b8a586Ee086E828abF41839479", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Polygon Amoy": { + "off_ramp": "0x5032cbC0C4aEeD25bb6E45D8B3fAF05DB0688C5d", + "commit_store": "0xe6201C9996Cc7B6E828E10CbE937E693d577D318", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "WeMix Testnet": { + "off_ramp": "0x46b639a3C1a4CBfD326b94a2dB7415c27157282f", + "commit_store": "0x7b74554678816b045c1e7409327E086bD436aa46", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "celo-testnet-alfajores": { + "off_ramp": "0xB435E0f73c18C5a12C324CA1d02F81F2C3e6e761", + "commit_store": "0xbc5d74957F171e75F92c8F0E1C317A25a56a416D", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "ethereum-testnet-sepolia-blast-1": { + "off_ramp": "0x4e897e5cF3aC307F0541B2151A88bCD781c153a3", + "commit_store": "0xB656652841F347178e193951C4663652aCe36B74", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "ethereum-testnet-sepolia-metis-1": { + "off_ramp": "0x4DB693A93E9d5196ECD42EC56CDEAe99dFC652ED", + "commit_store": "0xBfACd78F1412B6f93Ac23409bf456aFec1ABd845", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "ethereum-testnet-sepolia-mode-1": { + "off_ramp": "0xbEfd8D65F6643De54F0b1268A3bf4618ff85dcB4", + "commit_store": "0x0C161D3470b45Cc677661654C30ce4AdE6aCD288", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "WeMix Testnet": { + "is_native_fee_token": true, + "fee_token": "0x3580c7A817cCD41f7e02143BFa411D4EeAE78093", + "bridge_tokens": [ + ], + "bridge_tokens_pools": [ + ], + "price_aggregators": null, + "arm": "0x46fF31494651593973D9b38a872ED5B06f45A693", + "router": "0xA8C0c11bf64AF62CDCA6f93D3769B88BdD7cb93D", + "price_registry": "0x89D17571DB7C9540eeB36760E3c749C8fb984569", + "wrapped_native": "0xbE3686643c05f00eC46e73da594c78098F7a9Ae7", + "src_contracts": { + "Arbitrum Sepolia": { + "on_ramp": "0xA9DE3F7A617D67bC50c56baaCb9E0373C15EbfC6", + "deployed_at": 51216113 + }, + "Avalanche Fuji": { + "on_ramp": "0xC4aC84da458ba8e40210D2dF94C76E9a41f70069", + "deployed_at": 51214769 + }, + "BSC Testnet": { + "on_ramp": "0x5AD6eed6Be0ffaDCA4105050CF0E584D87E0c2F1", + "deployed_at": 51213771 + }, + "Kroma Sepolia": { + "on_ramp": "0x428C4dc89b6Bf908B82d77C9CBceA786ea8cc7D0", + "deployed_at": 51239062 + }, + "Optimism Sepolia": { + "on_ramp": "0x1961a7De751451F410391c251D4D4F98D71B767D", + "deployed_at": 51216748 + }, + "Polygon Amoy": { + "on_ramp": "0xd55148e841e76265B484d399eC71b7076ecB1216", + "deployed_at": 55378685 + }, + "Sepolia Testnet": { + "on_ramp": "0x4d57C6d8037C65fa66D6231844785a428310a735", + "deployed_at": 51239309 + } + }, + "dest_contracts": { + "Arbitrum Sepolia": { + "off_ramp": "0xeB1dFaB2464Bf0574D43e764E0c758f92e7ecAFb", + "commit_store": "0xcEaCa2B7890065c485f3E58657358a185Ad33791", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Avalanche Fuji": { + "off_ramp": "0x98e811Df9D2512f1aaf58D534607F583D6c54A4F", + 
"commit_store": "0x8e538351F6E5B2daF3c90C565C3738bca69a2716", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "BSC Testnet": { + "off_ramp": "0xB0e7f0fCcD3c961C473E7c44D939C1cDb4Cec1cB", + "commit_store": "0x4B56D8d53f1A6e0117B09700067De99581aA5542", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Kroma Sepolia": { + "off_ramp": "0xD685D2d224dd6D0Db2D56497db6270D77D9a7966", + "commit_store": "0x7e062D6880779a0347e7742058C1b1Ee4AA0B137", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Optimism Sepolia": { + "off_ramp": "0xA5f97Bc69Bf06e7C37B93265c5457420A92c5F4b", + "commit_store": "0xd48b9213583074f518D8f4336FDf35370D450132", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Polygon Amoy": { + "off_ramp": "0x6c8f5999B06FDE17B11E4e3C1062b761766F960f", + "commit_store": "0x957c3c2056192e58A8485eF31165fC490d474239", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Sepolia Testnet": { + "off_ramp": "0x8AB103843ED9D28D2C5DAf5FdB9c3e1CE2B6c876", + "commit_store": "0x7d5297c5506ee2A7Ef121Da9bE02b6a6AD30b392", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "celo-testnet-alfajores": { + "is_native_fee_token": true, + "fee_token": "0x32E08557B14FaD8908025619797221281D439071", + "bridge_tokens": [ + ], + "bridge_tokens_pools": [ + ], + "price_aggregators": null, + "arm": "0xbE8FD4b84ca8CC2cFAeeEf8dc1388E44860eeEeb", + "router": "0xb00E95b773528E2Ea724DB06B75113F239D15Dca", + "price_registry": "0x8F048206D11B2c69b8963E2EBd5968D141e022f4", + "wrapped_native": "0x99604d0e2EfE7ABFb58BdE565b5330Bb46Ab3Dca", + "src_contracts": { + "Sepolia Testnet": { + "on_ramp": "0x16a020c4bbdE363FaB8481262D30516AdbcfcFc8", + "deployed_at": 23561364 + } + }, + "dest_contracts": { + "Sepolia Testnet": { + "off_ramp": "0xa1b97F92D806BA040daf419AFC2765DC723683a4", + "commit_store": "0xcd92C0599Ac515e7588865cC45Eee21A74816aFc", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "ethereum-testnet-sepolia-blast-1": { + "is_native_fee_token": true, + "fee_token": "0x02c359ebf98fc8BF793F970F9B8302bb373BdF32", + "bridge_tokens": [ + ], + "bridge_tokens_pools": [ + ], + "price_aggregators": null, + "arm": "0x9C32fCB86BF0f4a1A8921a9Fe46de3198bb884B2", + "router": "0xfb2f2A207dC428da81fbAFfDDe121761f8Be1194", + "price_registry": "0xc8acE9dF450FaD007755C6C9AB4f0e9c8626E29C", + "wrapped_native": "0x4200000000000000000000000000000000000023", + "src_contracts": { + "Sepolia Testnet": { + "on_ramp": "0x85Ef19FC4C63c70744995DC38CAAEC185E0c619f", + "deployed_at": 6429339 + } + }, + "dest_contracts": { + "Sepolia Testnet": { + "off_ramp": "0x92cD24C278D34C726f377703E50875d8f9535dC2", + "commit_store": "0xcE1b4D50CeD56850182Bd58Ace91171cB249B873", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "ethereum-testnet-sepolia-metis-1": { + "is_native_fee_token": true, + "fee_token": "0x9870D6a0e05F867EAAe696e106741843F7fD116D", + "bridge_tokens": [ + ], + "bridge_tokens_pools": [ + ], + "price_aggregators": null, + "arm": "0x26546096F64B5eF9A1DcDAe70Df6F4f8c2E10C61", + "router": "0xaCdaBa07ECad81dc634458b98673931DD9d3Bc14", + "price_registry": "0x5DCE866b3ae6E0Ed153f0e149D7203A1B266cdF5", + "wrapped_native": "0x5c48e07062aC4E2Cf4b9A768a711Aef18e8fbdA0", + "src_contracts": { + "Sepolia Testnet": { + "on_ramp": "0x2Eff2d1BF5C557d6289D208a7a43608f5E3FeCc2", + "deployed_at": 858864 + } + }, + "dest_contracts": { + "Sepolia Testnet": { + "off_ramp": 
"0x9Bb7e398ef9Acfe9cA584C39B1E233Cba62BB9f7", + "commit_store": "0x1F4B82cDebaC5e3a0Dd53183D47e51808B4a64cB", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + }, + "ethereum-testnet-sepolia-mode-1": { + "is_native_fee_token": true, + "fee_token": "0x925a4bfE64AE2bFAC8a02b35F78e60C29743755d", + "bridge_tokens": [ + ], + "bridge_tokens_pools": [ + ], + "price_aggregators": null, + "arm": "0x11545812A8d64e4A3A0Ec36b6F70D87b42Ce4a01", + "router": "0xc49ec0eB4beb48B8Da4cceC51AA9A5bD0D0A4c43", + "price_registry": "0xa733ce82a84335b2E9D864312225B0F3D5d80600", + "wrapped_native": "0x4200000000000000000000000000000000000006", + "src_contracts": { + "Base Sepolia": { + "on_ramp": "0x73f7E074bd7291706a0C5412f51DB46441B1aDCB", + "deployed_at": 14359909 + }, + "Sepolia Testnet": { + "on_ramp": "0xfFdE9E8c34A27BEBeaCcAcB7b3044A0A364455C9", + "deployed_at": 14359680 + } + }, + "dest_contracts": { + "Base Sepolia": { + "off_ramp": "0x137a38c6b1Ad20101F93516aB2159Df525309168", + "commit_store": "0x8F43d867969F14619895d71E0A5b89E0bb20bF70", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + }, + "Sepolia Testnet": { + "off_ramp": "0xcD44cec849B6a8eBd5551D6DFeEcA452257Dfe4d", + "commit_store": "0xbA66f08733E6715D33edDfb5a5947676bb45d0e0", + "receiver_dapp": "0x1A2A69e3eB1382FE34Bc579AdD5Bae39e31d4A2c" + } + } + } + } +} +""" + +[CCIP.Env] +TTL = '8h' + +[CCIP.Env.Network] +selected_networks = [ + 'ARBITRUM_SEPOLIA', + 'AVALANCHE_FUJI', + 'OPTIMISM_SEPOLIA', + 'BASE_SEPOLIA', + 'BSC_TESTNET', + 'WEMIX_TESTNET', + 'SEPOLIA', + 'POLYGON_AMOY', + 'KROMA_SEPOLIA', + 'BLAST_SEPOLIA' +] + +[CCIP.Groups.load] +NetworkPairs = [ + 'AVALANCHE_FUJI,SEPOLIA', + 'OPTIMISM_SEPOLIA,BASE_SEPOLIA' +] + +BiDirectionalLane = true +PhaseTimeout = '45m' +ExistingDeployment = true + +NoOfTokensPerChain = 1 + +[CCIP.Groups.load.LoadProfile] +RequestPerUnitTime = [1] +TimeUnit = '5s' +TestDuration = '1h' +TestRunName = 'v2.12.0-ccip1.4.16-load' + +# to represent 20%, 60%, 15%, 5% of the total messages +[CCIP.Groups.load.LoadProfile.MsgProfile] +Frequencies = [4,12,3,1] + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'Token' +DestGasLimit = 0 +DataLength = 0 +NoOfTokens = 5 +AmountPerToken = 1 + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'DataWithToken' +DestGasLimit = 500000 +DataLength = 5000 +NoOfTokens = 5 +AmountPerToken = 1 + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'Data' +DestGasLimit = 800000 +DataLength = 10000 + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'Data' +DestGasLimit = 2500000 +DataLength = 10000 \ No newline at end of file diff --git a/integration-tests/ccip-tests/testconfig/tomls/ccip1.4-stress/sample-scalability.toml b/integration-tests/ccip-tests/testconfig/tomls/ccip1.4-stress/sample-scalability.toml new file mode 100644 index 00000000000..872a6ae565c --- /dev/null +++ b/integration-tests/ccip-tests/testconfig/tomls/ccip1.4-stress/sample-scalability.toml @@ -0,0 +1,129 @@ +[CCIP] +[CCIP.ContractVersions] +PriceRegistry = '1.2.0' +OffRamp = '1.2.0' +OnRamp = '1.2.0' +TokenPool = '1.4.0' +CommitStore = '1.2.0' + +[CCIP.Env] +TTL = '15h' + +[CCIP.Env.Network] +selected_networks= ['PRIVATE-CHAIN-1', 'PRIVATE-CHAIN-2'] + +[CCIP.Env.Network.EVMNetworks.PRIVATE-CHAIN-1] +evm_name = 'private-chain-1' +evm_chain_id = 2337 +evm_urls = ['wss://ignore-this-url.com'] +evm_http_urls = ['https://ignore-this-url.com'] +evm_keys = 
['59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d'] +evm_simulated = true +client_implementation = 'Ethereum' +evm_chainlink_transaction_limit = 5000 +evm_transaction_timeout = '3m' +evm_minimum_confirmations = 1 +evm_gas_estimation_buffer = 1000 +evm_supports_eip1559 = true +evm_default_gas_limit = 6000000 +evm_finality_depth = 1 + +[CCIP.Env.Network.EVMNetworks.PRIVATE-CHAIN-2] +evm_name = 'private-chain-2' +evm_chain_id = 1337 +evm_urls = ['wss://ignore-this-url.com'] +evm_http_urls = ['https://ignore-this-url.com'] +evm_keys = ['ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80'] +evm_simulated = true +client_implementation = 'Ethereum' +evm_chainlink_transaction_limit = 5000 +evm_transaction_timeout = '3m' +evm_minimum_confirmations = 1 +evm_gas_estimation_buffer = 1000 +evm_supports_eip1559 = true +evm_default_gas_limit = 6000000 +evm_finality_depth = 1 + +[CCIP.Env.Network.AnvilConfigs.PRIVATE-CHAIN-1] +block_time = 1 +# +[CCIP.Env.Network.AnvilConfigs.PRIVATE-CHAIN-2] +block_time = 1 + +[CCIP.Env.NewCLCluster] +NoOfNodes = 17 +NodeMemory = '12Gi' +NodeCPU = '6' +DBMemory = '10Gi' +DBCPU = '2' +DBStorageClass = 'gp3' +PromPgExporter = true +DBCapacity = '50Gi' +IsStateful = true +DBArgs = ['shared_buffers=2048MB', 'effective_cache_size=4096MB', 'work_mem=64MB'] + +#[CCIP.Env.NewCLCluster.Common] +#CommonChainConfigTOML = """ +#[HeadTracker] +#HistoryDepth = 3000 +# +#[GasEstimator] +#PriceMax = '200 gwei' +#LimitDefault = 6000000 +#FeeCapDefault = '200 gwei' +#""" + +[CCIP.Groups] +[CCIP.Groups.load] +KeepEnvAlive = true +NoOfCommitNodes = 16 +PhaseTimeout = '40m' +NodeFunding = 1000.0 +NoOfRoutersPerPair = 2 +NoOfNetworks = 40 +MaxNoOfLanes = 200 + +[CCIP.Groups.load.OffRampConfig] +BatchGasLimit = 11000000 + +[CCIP.Groups.load.TokenConfig] +TimeoutForPriceUpdate = '15m' +NoOfTokensPerChain = 60 +NoOfTokensWithDynamicPrice = 15 +DynamicPriceUpdateInterval ='15s' +CCIPOwnerTokens = true + +[CCIP.Groups.load.LoadProfile] +TestDuration = '4h' +TimeUnit = '5s' +RequestPerUnitTime = [1] +OptimizeSpace = true +NetworkChaosDelay = '100ms' + +# to represent 20%, 60%, 15%, 5% of the total messages +[CCIP.Groups.load.LoadProfile.MsgProfile] +Frequencies = [4,12,3,1] + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'Token' +DestGasLimit = 0 +DataLength = 0 +NoOfTokens = 5 +AmountPerToken = 1 + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'DataWithToken' +DestGasLimit = 500000 +DataLength = 5000 +NoOfTokens = 5 +AmountPerToken = 1 + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'Data' +DestGasLimit = 800000 +DataLength = 10000 + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'Data' +DestGasLimit = 2500000 +DataLength = 10000 \ No newline at end of file diff --git a/integration-tests/ccip-tests/testconfig/tomls/ccip1.4-stress/tier-a.toml b/integration-tests/ccip-tests/testconfig/tomls/ccip1.4-stress/tier-a.toml new file mode 100644 index 00000000000..5270de7f6d4 --- /dev/null +++ b/integration-tests/ccip-tests/testconfig/tomls/ccip1.4-stress/tier-a.toml @@ -0,0 +1,240 @@ +## Baseline performance test on simulated environment (with chaos) +## 40 chains / 400 lanes +## historyDepth 200 / finalityDepth 200 +## block_time = 1s +## throughput 1msg / 5s +## 20% Token, 60% DataWithToken, 15% Regular size msgs, 5% Large msgs +## +## make test_load_ccip testimage=.dkr.ecr..amazonaws.com/chainlink-ccip-tests:ccip-develop \ +## testname=TestLoadCCIPStableRequestTriggeringWithNetworkChaos \ 
+## override_toml=./testconfig/tomls/ccip1.4-stress/tier-a.toml \ +## secret_toml=./testconfig/tomls/secrets.toml + +[CCIP] +[CCIP.ContractVersions] +PriceRegistry = '1.2.0' +OffRamp = '1.2.0' +OnRamp = '1.2.0' +TokenPool = '1.4.0' +CommitStore = '1.2.0' + +[CCIP.Env] +TTL = '8h' + +[CCIP.Env.Network] +selected_networks= ['PRIVATE-CHAIN-1', 'SLOW-CHAIN-1', 'SLOW-CHAIN-2', 'SLOW-CHAIN-3'] + +[CCIP.Env.Network.EVMNetworks.PRIVATE-CHAIN-1] +evm_name = 'private-chain-1' +evm_chain_id = 2337 +evm_urls = ['wss://ignore-this-url.com'] +evm_http_urls = ['https://ignore-this-url.com'] +evm_keys = ['59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d'] +evm_simulated = true +client_implementation = 'Ethereum' +evm_chainlink_transaction_limit = 5000 +evm_transaction_timeout = '3m' +evm_minimum_confirmations = 1 +evm_gas_estimation_buffer = 1000 +evm_supports_eip1559 = true +evm_default_gas_limit = 6000000 +evm_finality_depth = 1 + +[CCIP.Env.Network.EVMNetworks.SLOW-CHAIN-1] +evm_name = 'slow-chain-1' +evm_chain_id = 90000001 +evm_urls = ['wss://ignore-this-url.com'] +evm_http_urls = ['https://ignore-this-url.com'] +evm_keys = ['ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80'] +evm_simulated = true +client_implementation = 'Ethereum' +evm_chainlink_transaction_limit = 5000 +evm_transaction_timeout = '3m' +evm_minimum_confirmations = 1 +evm_gas_estimation_buffer = 1000 +evm_supports_eip1559 = true +evm_default_gas_limit = 6000000 +evm_finality_depth = 1 + +[CCIP.Env.Network.EVMNetworks.SLOW-CHAIN-2] +evm_name = 'slow-chain-2' +evm_chain_id = 90000002 +evm_urls = ['wss://ignore-this-url.com'] +evm_http_urls = ['https://ignore-this-url.com'] +evm_keys = ['ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80'] +evm_simulated = true +client_implementation = 'Ethereum' +evm_chainlink_transaction_limit = 5000 +evm_transaction_timeout = '3m' +evm_minimum_confirmations = 1 +evm_gas_estimation_buffer = 1000 +evm_supports_eip1559 = true +evm_default_gas_limit = 6000000 +evm_finality_depth = 1 + +[CCIP.Env.Network.EVMNetworks.SLOW-CHAIN-3] +evm_name = 'slow-chain-3' +evm_chain_id = 1337 +evm_urls = ['wss://ignore-this-url.com'] +evm_http_urls = ['https://ignore-this-url.com'] +evm_keys = ['ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80'] +evm_simulated = true +client_implementation = 'Ethereum' +evm_chainlink_transaction_limit = 5000 +evm_transaction_timeout = '3m' +evm_minimum_confirmations = 1 +evm_gas_estimation_buffer = 1000 +evm_supports_eip1559 = true +evm_default_gas_limit = 6000000 +evm_finality_depth = 1 + +[CCIP.Env.Network.AnvilConfigs.PRIVATE-CHAIN-1] +block_time = 2 + +# +[CCIP.Env.Network.AnvilConfigs.SLOW-CHAIN-1] +block_time = 12 + +[CCIP.Env.Network.AnvilConfigs.SLOW-CHAIN-2] +block_time = 12 + +[CCIP.Env.Network.AnvilConfigs.SLOW-CHAIN-3] +block_time = 12 + +[CCIP.Env.NewCLCluster] +NoOfNodes = 17 +NodeMemory = '10Gi' +NodeCPU = '6' +DBMemory = '16Gi' +DBCPU = '4' +DBStorageClass = 'gp3' +PromPgExporter = true +DBCapacity = '50Gi' +IsStateful = true +DBArgs = ['shared_buffers=4096MB', 'effective_cache_size=8192MB', 'work_mem=128MB'] + +[CCIP.Env.NewCLCluster.Common] +BaseConfigTOML = """ +[Feature] +LogPoller = true +CCIP = true + +[Log] +Level = 'debug' +JSONConsole = true + +[Log.File] +MaxSize = '0b' + +[WebServer] +AllowOrigins = '*' +HTTPPort = 6688 +SecureCookies = false +HTTPWriteTimeout = '1m' + +[WebServer.RateLimit] +Authenticated = 2000 +Unauthenticated = 1000 + +[WebServer.TLS] +HTTPSPort = 0 + +[Database] +MaxIdleConns = 
20 +MaxOpenConns = 30 +MigrateOnStartup = true + +[OCR2] +Enabled = true +DefaultTransactionQueueDepth = 0 + +[OCR] +Enabled = false +DefaultTransactionQueueDepth = 0 + +[P2P] +[P2P.V2] +Enabled = true +ListenAddresses = ['0.0.0.0:6690'] +AnnounceAddresses = ['0.0.0.0:6690'] +DeltaDial = '500ms' +DeltaReconcile = '5s' +""" + +#CommonChainConfigTOML = """ +#[HeadTracker] +#HistoryDepth = 200 +# +#[GasEstimator] +#PriceMax = '200 gwei' +#LimitDefault = 6000000 +#FeeCapDefault = '200 gwei' +#""" + +[CCIP.Groups] +[CCIP.Groups.load] +DenselyConnectedNetworkChainIds = ['90000001', '90000002', '1337'] +KeepEnvAlive = true +NoOfCommitNodes = 16 +PhaseTimeout = '40m' +NodeFunding = 1000.0 +NoOfRoutersPerPair = 2 +NoOfNetworks = 40 +MaxNoOfLanes = 400 + +[CCIP.Groups.load.OffRampConfig] +BatchGasLimit = 11000000 + +[CCIP.Groups.load.TokenConfig] +TimeoutForPriceUpdate = '15m' +NoOfTokensPerChain = 60 +NoOfTokensWithDynamicPrice = 15 +DynamicPriceUpdateInterval ='5m' +CCIPOwnerTokens = true + +[CCIP.Groups.load.LoadProfile] +TestDuration = '4h' +OptimizeSpace = true +NetworkChaosDelay = '100ms' +TimeUnit = '5s' +RequestPerUnitTime = [1] + +[CCIP.Groups.load.LoadProfile.LoadFrequency.slow-chain-1] +TimeUnit = '10s' +RequestPerUnitTime = [1] + +[CCIP.Groups.load.LoadProfile.LoadFrequency.slow-chain-2] +TimeUnit = '10s' +RequestPerUnitTime = [1] + +[CCIP.Groups.load.LoadProfile.LoadFrequency.slow-chain-3] +TimeUnit = '10s' +RequestPerUnitTime = [1] + +# to represent 20%, 60%, 15%, 5% of the total messages +[CCIP.Groups.load.LoadProfile.MsgProfile] +Frequencies = [4,12,3,1] + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'Token' +DestGasLimit = 0 +DataLength = 0 +NoOfTokens = 5 +AmountPerToken = 1 + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'DataWithToken' +DestGasLimit = 500000 +DataLength = 5000 +NoOfTokens = 5 +AmountPerToken = 1 + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'Data' +DestGasLimit = 800000 +DataLength = 10000 + +[[CCIP.Groups.load.LoadProfile.MsgProfile.MsgDetails]] +MsgType = 'Data' +DestGasLimit = 2500000 +DataLength = 10000 \ No newline at end of file diff --git a/integration-tests/ccip-tests/testconfig/tomls/ccip1.4-stress/tier-b.toml b/integration-tests/ccip-tests/testconfig/tomls/ccip1.4-stress/tier-b.toml new file mode 100644 index 00000000000..e69de29bb2d diff --git a/integration-tests/ccip-tests/testconfig/tomls/contract-version1.4.toml b/integration-tests/ccip-tests/testconfig/tomls/contract-version1.4.toml new file mode 100644 index 00000000000..392b058e5c8 --- /dev/null +++ b/integration-tests/ccip-tests/testconfig/tomls/contract-version1.4.toml @@ -0,0 +1,13 @@ +[CCIP] +[CCIP.ContractVersions] +PriceRegistry = '1.2.0' +OffRamp = '1.2.0' +OnRamp = '1.2.0' +TokenPool = '1.4.0' +CommitStore = '1.2.0' + +[CCIP.Groups.smoke.TokenConfig] +CCIPOwnerTokens = true + +[CCIP.Groups.load.TokenConfig] +CCIPOwnerTokens = true \ No newline at end of file diff --git a/integration-tests/ccip-tests/testconfig/tomls/db-compatibility.toml b/integration-tests/ccip-tests/testconfig/tomls/db-compatibility.toml new file mode 100644 index 00000000000..9de5925cb11 --- /dev/null +++ b/integration-tests/ccip-tests/testconfig/tomls/db-compatibility.toml @@ -0,0 +1,34 @@ +[CCIP] +[CCIP.Env] +[CCIP.Env.NewCLCluster] +NoOfNodes = 6 + +[[CCIP.Env.NewCLCluster.Nodes]] +Name = 'node1' +DBImage = 'postgres' +DBTag = '13.14' + +[[CCIP.Env.NewCLCluster.Nodes]] +Name = 'node2' +DBImage = 'postgres' +DBTag = '12.18' + 
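+# The remaining entries pin nodes to Postgres 14.x and 15.x images, so a single run exercises
+# database compatibility across several major versions.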
+[[CCIP.Env.NewCLCluster.Nodes]] +Name = 'node3' +DBImage = 'postgres' +DBTag = '14.11' + +[[CCIP.Env.NewCLCluster.Nodes]] +Name = 'node4' +DBImage = 'postgres' +DBTag = '14.8' + +[[CCIP.Env.NewCLCluster.Nodes]] +Name = 'node5' +DBImage = 'postgres' +DBTag = '15.6' + +[[CCIP.Env.NewCLCluster.Nodes]] +Name = 'node6' +DBImage = 'postgres' +DBTag = '15.6' diff --git a/integration-tests/ccip-tests/testconfig/tomls/leader-lane.toml b/integration-tests/ccip-tests/testconfig/tomls/leader-lane.toml new file mode 100644 index 00000000000..76b97ad97ba --- /dev/null +++ b/integration-tests/ccip-tests/testconfig/tomls/leader-lane.toml @@ -0,0 +1,4 @@ +[CCIP] +[CCIP.Groups.smoke] +NoOfNetworks = 4 +NoOfRoutersPerPair = 2 \ No newline at end of file diff --git a/integration-tests/ccip-tests/testconfig/tomls/load-with-arm-curse-uncurse.toml b/integration-tests/ccip-tests/testconfig/tomls/load-with-arm-curse-uncurse.toml new file mode 100644 index 00000000000..e49f1184af7 --- /dev/null +++ b/integration-tests/ccip-tests/testconfig/tomls/load-with-arm-curse-uncurse.toml @@ -0,0 +1,20 @@ +[CCIP] +[CCIP.Env] +TTL = '15h' + +[CCIP.Groups] +[CCIP.Groups.load] +KeepEnvAlive = true +PhaseTimeout = '50m' +NodeFunding = 1000.0 + + +[CCIP.Groups.load.LoadProfile] +RequestPerUnitTime = [1] +TimeUnit = '1m' +TestDuration = '30m' +SendMaxDataInEveryMsgCount = 0 + + + + diff --git a/integration-tests/ccip-tests/testconfig/tomls/node-post-upgrade-compatibility.toml b/integration-tests/ccip-tests/testconfig/tomls/node-post-upgrade-compatibility.toml new file mode 100644 index 00000000000..c1c6c651442 --- /dev/null +++ b/integration-tests/ccip-tests/testconfig/tomls/node-post-upgrade-compatibility.toml @@ -0,0 +1,45 @@ +[CCIP] +[CCIP.ContractVersions] +PriceRegistry = '1.2.0' +OffRamp = '1.2.0' +OnRamp = '1.2.0' +TokenPool = '1.4.0' +CommitStore = '1.2.0' + +[CCIP.Deployments] +DataFile = 'lane-config/.*.json' + +[CCIP.Env] +[CCIP.Env.NewCLCluster] +NoOfNodes = 6 + +[[CCIP.Env.NewCLCluster.Nodes]] +Name = 'node-1' + +[[CCIP.Env.NewCLCluster.Nodes]] +Name = 'node-2' +NeedsUpgrade = true + +[[CCIP.Env.NewCLCluster.Nodes]] +Name = 'node-3' +NeedsUpgrade = true + +[[CCIP.Env.NewCLCluster.Nodes]] +Name = 'node-4' +NeedsUpgrade = true + +[[CCIP.Env.NewCLCluster.Nodes]] +Name = 'node-5' +NeedsUpgrade = true + +[[CCIP.Env.NewCLCluster.Nodes]] +Name = 'node-6' +NeedsUpgrade = true + +[CCIP.Groups] +[CCIP.Groups.load] +LocalCluster = false +ExistingDeployment = true + +[CCIP.Groups.load.LoadProfile] +TestRunName = 'upgrade-test' \ No newline at end of file diff --git a/integration-tests/ccip-tests/testconfig/tomls/node-pre-upgrade-compatibility.toml b/integration-tests/ccip-tests/testconfig/tomls/node-pre-upgrade-compatibility.toml new file mode 100644 index 00000000000..36ada834193 --- /dev/null +++ b/integration-tests/ccip-tests/testconfig/tomls/node-pre-upgrade-compatibility.toml @@ -0,0 +1,13 @@ +[CCIP] +[CCIP.ContractVersions] +PriceRegistry = '1.2.0' +OffRamp = '1.2.0' +OnRamp = '1.2.0' +TokenPool = '1.4.0' +CommitStore = '1.2.0' + +[CCIP.Groups] +[CCIP.Groups.smoke] +LocalCluster = false +KeepEnvAlive = true +StoreLaneConfig = true \ No newline at end of file diff --git a/integration-tests/ccip-tests/testconfig/tomls/usdc_mock_deployment.toml b/integration-tests/ccip-tests/testconfig/tomls/usdc_mock_deployment.toml new file mode 100644 index 00000000000..82a3d492163 --- /dev/null +++ b/integration-tests/ccip-tests/testconfig/tomls/usdc_mock_deployment.toml @@ -0,0 +1,10 @@ +[CCIP] +[CCIP.Groups] +[CCIP.Groups.smoke] 
+USDCMockDeployment = true + +[CCIP.Groups.smoke.TokenConfig] +NoOfTokensPerChain = 2 + +[CCIP.Groups.smoke.MsgDetails] +NoOfTokens = 3 \ No newline at end of file diff --git a/integration-tests/ccip-tests/testconfig/tomls/varied-block-time-sample.toml b/integration-tests/ccip-tests/testconfig/tomls/varied-block-time-sample.toml new file mode 100644 index 00000000000..dfe947af115 --- /dev/null +++ b/integration-tests/ccip-tests/testconfig/tomls/varied-block-time-sample.toml @@ -0,0 +1,47 @@ +[CCIP] +[CCIP.Env] +[CCIP.Env.Network] +selected_networks= ['PRIVATE-CHAIN-1', 'PRIVATE-CHAIN-2'] + +[CCIP.Env.Network.EVMNetworks.PRIVATE-CHAIN-1] +evm_name = 'private-chain-1' +evm_chain_id = 2337 +evm_urls = ['wss://ignore-this-url.com'] +evm_http_urls = ['https://ignore-this-url.com'] +evm_keys = ['59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d'] +evm_simulated = true +client_implementation = 'Ethereum' +evm_chainlink_transaction_limit = 5000 +evm_transaction_timeout = '3m' +evm_minimum_confirmations = 1 +evm_gas_estimation_buffer = 1000 +evm_supports_eip1559 = true +evm_default_gas_limit = 6000000 +evm_finality_depth = 100 # with 50 blocks of finality, and 12s block time, we have 20 minutes of finality + +[CCIP.Env.Network.EVMNetworks.PRIVATE-CHAIN-2] +evm_name = 'private-chain-2' +evm_chain_id = 1337 +evm_urls = ['wss://ignore-this-url.com'] +evm_http_urls = ['https://ignore-this-url.com'] +evm_keys = ['ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80'] +evm_simulated = true +client_implementation = 'Ethereum' +evm_chainlink_transaction_limit = 5000 +evm_transaction_timeout = '3m' +evm_minimum_confirmations = 1 +evm_gas_estimation_buffer = 1000 +evm_supports_eip1559 = true +evm_default_gas_limit = 6000000 +evm_finality_depth = 1 # with 1 block of finality, and 1s block time, we have instant finality + + +[CCIP.Env.Network.AnvilConfigs.PRIVATE-CHAIN-1] +block_time = 12 + +[CCIP.Env.Network.AnvilConfigs.PRIVATE-CHAIN-2] +block_time = 1 + +[CCIP.Groups] +[CCIP.Groups.smoke] +LocalCluster = false \ No newline at end of file diff --git a/integration-tests/ccip-tests/testreporters/ccip.go b/integration-tests/ccip-tests/testreporters/ccip.go new file mode 100644 index 00000000000..b567f6a6291 --- /dev/null +++ b/integration-tests/ccip-tests/testreporters/ccip.go @@ -0,0 +1,476 @@ +package testreporters + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/slack-go/slack" + + "github.com/smartcontractkit/chainlink-testing-framework/k8s/config" + "github.com/smartcontractkit/chainlink-testing-framework/testreporters" +) + +type Phase string +type Status string + +const ( + // These are the different phases of a CCIP transaction lifecycle + // You can see an illustration of the flow here: https://docs.chain.link/images/ccip/ccip-diagram-04_v04.webp + TX Phase = "CCIP-Send Transaction" // The initial transaction is sent from the client to the OnRamp + CCIPSendRe Phase = "CCIPSendRequested" // The OnRamp emits the CCIPSendRequested event which acknowledges the transaction requesting a CCIP transfer + SourceLogFinalized Phase = "SourceLogFinalizedTentatively" // The source chain finalizes the transaction where the CCIPSendRequested event was emitted + Commit Phase = "Commit-ReportAccepted" // The destination chain commits to the transaction and emits the ReportAccepted event + ReportBlessed Phase = "ReportBlessedByARM" // The destination chain emits the ReportBlessed event. 
This is triggered by the RMN, for tests we usually mock it. + E2E Phase = "CommitAndExecute" // This is effectively an alias for the below phase, but it's used to represent the end-to-end flow + ExecStateChanged Phase = "ExecutionStateChanged" // The destination chain emits the ExecutionStateChanged event. This indicates that the transaction has been executed + + Success Status = "✅" + Failure Status = "❌" + Unsure = "⚠️" + slackFile string = "payload_ccip.json" +) + +type AggregatorMetrics struct { + Min float64 `json:"min_duration_for_successful_requests(s),omitempty"` + Max float64 `json:"max_duration_for_successful_requests(s),omitempty"` + Avg float64 `json:"avg_duration_for_successful_requests(s),omitempty"` + sum float64 + count int +} +type TransactionStats struct { + Fee string `json:"fee,omitempty"` + MsgID string `json:"msg_id,omitempty"` + GasUsed uint64 `json:"gas_used,omitempty"` + TxHash string `json:"tx_hash,omitempty"` + NoOfTokensSent int `json:"no_of_tokens_sent,omitempty"` + MessageBytesLength int64 `json:"message_bytes_length,omitempty"` + FinalizedByBlock string `json:"finalized_block_num,omitempty"` + FinalizedAt string `json:"finalized_at,omitempty"` + CommitRoot string `json:"commit_root,omitempty"` +} + +type PhaseStat struct { + SeqNum uint64 `json:"seq_num,omitempty"` + Duration float64 `json:"duration,omitempty"` + Status Status `json:"success"` + SendTransactionStats *TransactionStats `json:"ccip_send_data,omitempty"` +} + +type RequestStat struct { + ReqNo int64 + SeqNum uint64 + SourceNetwork string + DestNetwork string + StatusByPhase map[Phase]PhaseStat `json:"status_by_phase,omitempty"` +} + +func (stat *RequestStat) UpdateState( + lggr *zerolog.Logger, + seqNum uint64, + step Phase, + duration time.Duration, + state Status, + sendTransactionStats *TransactionStats, +) { + durationInSec := duration.Seconds() + stat.SeqNum = seqNum + phaseDetails := PhaseStat{ + SeqNum: seqNum, + Duration: durationInSec, + Status: state, + SendTransactionStats: sendTransactionStats, + } + + event := lggr.Info() + if seqNum != 0 { + event.Uint64("seq num", seqNum) + } + // if any of the phase fails mark the E2E as failed + if state == Failure || state == Unsure { + stat.StatusByPhase[E2E] = PhaseStat{ + SeqNum: seqNum, + Status: state, + } + stat.StatusByPhase[step] = phaseDetails + lggr.Info(). + Str(fmt.Sprint(E2E), string(state)). + Msgf("reqNo %d", stat.ReqNo) + event.Str(string(step), string(state)).Msgf("reqNo %d", stat.ReqNo) + } else { + event.Str(string(step), string(Success)).Msgf("reqNo %d", stat.ReqNo) + // we don't want to save phase details for TX and CCIPSendRe to avoid redundancy if these phases are successful + if step != TX && step != CCIPSendRe { + stat.StatusByPhase[step] = phaseDetails + } + if step == Commit || step == ReportBlessed || step == ExecStateChanged { + stat.StatusByPhase[E2E] = PhaseStat{ + SeqNum: seqNum, + Status: state, + Duration: stat.StatusByPhase[step].Duration + stat.StatusByPhase[E2E].Duration, + } + if step == ExecStateChanged { + lggr.Info(). + Str(fmt.Sprint(E2E), string(Success)). 
+ Msgf("reqNo %d", stat.ReqNo) + } + } + } +} + +func NewCCIPRequestStats(reqNo int64, source, dest string) *RequestStat { + return &RequestStat{ + ReqNo: reqNo, + StatusByPhase: make(map[Phase]PhaseStat), + SourceNetwork: source, + DestNetwork: dest, + } +} + +type CCIPLaneStats struct { + lane string + lggr *zerolog.Logger + TotalRequests int64 `json:"total_requests,omitempty"` // TotalRequests is the total number of requests made + SuccessCountsByPhase map[Phase]int64 `json:"success_counts_by_phase,omitempty"` // SuccessCountsByPhase is the number of requests that succeeded in each phase + FailedCountsByPhase map[Phase]int64 `json:"failed_counts_by_phase,omitempty"` // FailedCountsByPhase is the number of requests that failed in each phase + DurationStatByPhase map[Phase]AggregatorMetrics `json:"duration_stat_by_phase,omitempty"` // DurationStatByPhase is the duration statistics for each phase + statusByPhaseByRequests sync.Map +} + +func (testStats *CCIPLaneStats) UpdatePhaseStatsForReq(stat *RequestStat) { + testStats.statusByPhaseByRequests.Store(stat.ReqNo, stat.StatusByPhase) +} + +func (testStats *CCIPLaneStats) Aggregate(phase Phase, durationInSec float64) { + if prevDur, ok := testStats.DurationStatByPhase[phase]; !ok { + testStats.DurationStatByPhase[phase] = AggregatorMetrics{ + Min: durationInSec, + Max: durationInSec, + sum: durationInSec, + count: 1, + } + } else { + if prevDur.Min > durationInSec { + prevDur.Min = durationInSec + } + if prevDur.Max < durationInSec { + prevDur.Max = durationInSec + } + prevDur.sum = prevDur.sum + durationInSec + prevDur.count++ + testStats.DurationStatByPhase[phase] = prevDur + } +} + +func (testStats *CCIPLaneStats) Finalize(lane string) { + phases := []Phase{E2E, TX, CCIPSendRe, SourceLogFinalized, Commit, ReportBlessed, ExecStateChanged} + events := make(map[Phase]*zerolog.Event) + testStats.statusByPhaseByRequests.Range(func(key, value interface{}) bool { + if reqNo, ok := key.(int64); ok { + if stat, ok := value.(map[Phase]PhaseStat); ok { + for phase, phaseStat := range stat { + if phaseStat.Status == Success { + testStats.SuccessCountsByPhase[phase]++ + testStats.Aggregate(phase, phaseStat.Duration) + } else { + testStats.FailedCountsByPhase[phase]++ + } + } + } + if reqNo > testStats.TotalRequests { + testStats.TotalRequests = reqNo + } + } + return true + }) + // if no phase stats are found return + if testStats.TotalRequests <= 0 { + return + } + testStats.lggr.Info().Int64("Total Requests Triggerred", testStats.TotalRequests).Msg("Test Run Completed") + for _, phase := range phases { + events[phase] = testStats.lggr.Info().Str("Phase", string(phase)) + if phaseStat, ok := testStats.DurationStatByPhase[phase]; ok { + testStats.DurationStatByPhase[phase] = AggregatorMetrics{ + Min: phaseStat.Min, + Max: phaseStat.Max, + Avg: phaseStat.sum / float64(phaseStat.count), + } + events[phase]. + Str("Min Duration for Successful Requests", fmt.Sprintf("%.02f", testStats.DurationStatByPhase[phase].Min)). + Str("Max Duration for Successful Requests", fmt.Sprintf("%.02f", testStats.DurationStatByPhase[phase].Max)). 
+ Str("Average Duration for Successful Requests", fmt.Sprintf("%.02f", testStats.DurationStatByPhase[phase].Avg)) + } + if failed, ok := testStats.FailedCountsByPhase[phase]; ok { + events[phase].Int64("Failed Count", failed) + } + if s, ok := testStats.SuccessCountsByPhase[phase]; ok { + events[phase].Int64("Successful Count", s) + } + events[phase].Msgf("Phase Stats for Lane %s", lane) + } +} + +type CCIPTestReporter struct { + t *testing.T + logger *zerolog.Logger + startTime int64 + endTime int64 + grafanaURLProvider testreporters.GrafanaURLProvider + grafanaURL string + grafanaQueryParams []string + namespace string + reportFilePath string + duration time.Duration // duration is the duration of the test + FailedLanes map[string]Phase `json:"failed_lanes_and_phases,omitempty"` // FailedLanes is the list of lanes that failed and the phase at which it failed + LaneStats map[string]*CCIPLaneStats `json:"lane_stats"` // LaneStats is the statistics for each lane + mu *sync.Mutex + sendSlackReport bool +} + +func (r *CCIPTestReporter) SetSendSlackReport(sendSlackReport bool) { + r.sendSlackReport = sendSlackReport +} + +func (r *CCIPTestReporter) CompleteGrafanaDashboardURL() error { + if r.grafanaURLProvider == nil { + return fmt.Errorf("grafana URL provider is not set") + } + grafanaUrl, err := r.grafanaURLProvider.GetGrafanaBaseURL() + if err != nil { + return err + } + + dashboardUrl, err := r.grafanaURLProvider.GetGrafanaDashboardURL() + if err != nil { + return err + } + r.grafanaURL = fmt.Sprintf("%s%s", grafanaUrl, dashboardUrl) + err = r.AddToGrafanaDashboardQueryParams( + fmt.Sprintf("from=%d", r.startTime), + fmt.Sprintf("to=%d", r.endTime), + fmt.Sprintf("var-remote_runner=%s", r.namespace)) + if err != nil { + return err + } + + err = r.FormatGrafanaURLWithQueryParameters() + if err != nil { + return fmt.Errorf("error formatting grafana URL: %w", err) + } + r.logger.Info().Str("Dashboard", r.grafanaURL).Msg("Dashboard URL") + return nil +} + +// FormatGrafanaURLWithQueryParameters adds query params to the grafana URL +// The query params are added in the format ?key=value if the grafana URL does not have any query params +// If the grafana URL already has query params, the query params are added in the format &key=value +// The function parameter qParam should be in the format key=value +// If the function parameter qParam does not contain an =, an error is returned +func (r *CCIPTestReporter) FormatGrafanaURLWithQueryParameters() error { + for _, v := range r.grafanaQueryParams { + if !strings.Contains(v, "=") { + return fmt.Errorf("invalid query param %s", v) + } + if strings.Contains(r.grafanaURL, "?") { + r.grafanaURL = fmt.Sprintf("%s&%s", r.grafanaURL, v) + continue + } + r.grafanaURL = fmt.Sprintf("%s?%s", r.grafanaURL, v) + } + return nil +} + +// AddToGrafanaDashboardQueryParams adds query params to the QueryParams slice +// The function parameter qParam should be in the format key=value +// If the function parameter qParam does not contain an =, an error is returned +func (r *CCIPTestReporter) AddToGrafanaDashboardQueryParams(qParams ...string) error { + for _, qParam := range qParams { + if !strings.Contains(qParam, "=") { + return fmt.Errorf("invalid query param %s", qParam) + } + r.grafanaQueryParams = append(r.grafanaQueryParams, qParam) + } + return nil +} + +// SendSlackNotification sends a slack notification to the specified channel set in the environment variable "SLACK_CHANNEL" +// notifying the user set in the environment variable "SLACK_USER" +// The 
function returns an error if the slack notification fails +func (r *CCIPTestReporter) SendSlackNotification(t *testing.T, slackClient *slack.Client, _ testreporters.GrafanaURLProvider) error { + if r.sendSlackReport { + r.logger.Info().Msg("Sending Slack notification") + } else { + r.logger.Info().Msg("Slack notification not enabled") + return nil + } + if testreporters.SlackAPIKey == "" || testreporters.SlackChannel == "" || testreporters.SlackUserID == "" { + r.logger.Warn().Msg("Slack API Key, Channel or User ID not set. Skipping Slack notification") + return nil + } + if slackClient == nil { + slackClient = slack.New(testreporters.SlackAPIKey) + } + + var msgTexts []string + headerText := ":white_check_mark: CCIP Test PASSED :white_check_mark:" + if t.Failed() { + headerText = ":x: CCIP Test FAILED :x:" + } + // If grafanaURLProvider is not set, form the message notifying about the failed lanes with the report file path + if r.grafanaURLProvider == nil { + for name, lane := range r.LaneStats { + if lane.FailedCountsByPhase[E2E] > 0 { + msgTexts = append(msgTexts, + fmt.Sprintf("lane %s :x:", name), + fmt.Sprintf( + "\nNumber of ccip-send= %d"+ + "\nNo of failed requests = %d", lane.TotalRequests, lane.FailedCountsByPhase[E2E])) + } + } + + msgTexts = append(msgTexts, fmt.Sprintf( + "\nTest Run Summary created on _remote-test-runner_ at _%s_\nNotifying <@%s>", + r.reportFilePath, testreporters.SlackUserID)) + } else { + // If grafanaURLProvider is set, form the message with the grafana dashboard link + err := r.CompleteGrafanaDashboardURL() + if err != nil { + return fmt.Errorf("error formatting grafana dashboard URL: %w", err) + } + msgTexts = append(msgTexts, fmt.Sprintf( + "\nTest Run Completed \nNotifying <@%s>\n<%s|CCIP Long Running Tests Dashboard>", + testreporters.SlackUserID, r.grafanaURL)) + } + + messageBlocks := testreporters.SlackNotifyBlocks(headerText, r.namespace, msgTexts) + ts, err := testreporters.SendSlackMessage(slackClient, slack.MsgOptionBlocks(messageBlocks...)) + if err != nil { + fmt.Println(messageBlocks) + return fmt.Errorf("failed to send slack message: %w", err) + } + // if grafanaURLProvider is set, we don't want to write the report in a file + // the report will be shared in terms of grafana dashboard link + if r.grafanaURLProvider == nil { + return testreporters.UploadSlackFile(slackClient, slack.FileUploadParameters{ + Title: fmt.Sprintf("CCIP Test Report %s", r.namespace), + Filetype: "json", + Filename: fmt.Sprintf("ccip_report_%s.csv", r.namespace), + File: r.reportFilePath, + InitialComment: fmt.Sprintf("CCIP Test Report %s.", r.namespace), + Channels: []string{testreporters.SlackChannel}, + ThreadTimestamp: ts, + }) + } + return nil +} + +func (r *CCIPTestReporter) WriteReport(folderPath string) error { + l := r.logger + for k := range r.LaneStats { + r.LaneStats[k].Finalize(k) + // if E2E for the lane has failed + if _, ok := r.LaneStats[k].FailedCountsByPhase[E2E]; ok { + // find the phase at which it failed + for phase, count := range r.LaneStats[k].FailedCountsByPhase { + if count > 0 && phase != E2E { + r.FailedLanes[k] = phase + break + } + } + } + } + if len(r.FailedLanes) > 0 { + r.logger.Info().Interface("List of Failed Lanes", r.FailedLanes).Msg("Failed Lanes") + } + + // if grafanaURLProvider is set, we don't want to write the report in a file + // the report will be shared in terms of grafana dashboard link + if r.grafanaURLProvider != nil { + return nil + } + l.Debug().Str("Folder Path", folderPath).Msg("Writing CCIP Test Report") + if 
err := testreporters.MkdirIfNotExists(folderPath); err != nil { + return err + } + reportLocation := filepath.Join(folderPath, slackFile) + r.reportFilePath = reportLocation + slackFile, err := os.Create(reportLocation) + defer func() { + err = slackFile.Close() + if err != nil { + l.Error().Err(err).Msg("Error closing slack file") + } + }() + if err != nil { + return err + } + stats, err := json.MarshalIndent(r, "", " ") + if err != nil { + return err + } + _, err = slackFile.Write(stats) + if err != nil { + return err + } + return nil +} + +// SetNamespace sets the namespace of the report for clean reports +func (r *CCIPTestReporter) SetNamespace(namespace string) { + // if the test is run in remote runner, the namespace will be set to the remote runner's namespace + if value, set := os.LookupEnv(config.EnvVarNamespace); set && value != "" { + r.namespace = value + return + } + // if the namespace is not set, set it to the namespace provided + r.namespace = namespace +} + +// SetDuration sets the duration of the test +func (r *CCIPTestReporter) SetDuration(d time.Duration) { + r.duration = d +} + +func (r *CCIPTestReporter) SetGrafanaURLProvider(provider testreporters.GrafanaURLProvider) { + r.grafanaURLProvider = provider +} + +func (r *CCIPTestReporter) AddNewLane(name string, lggr *zerolog.Logger) *CCIPLaneStats { + r.mu.Lock() + defer r.mu.Unlock() + i := &CCIPLaneStats{ + lane: name, + lggr: lggr, + FailedCountsByPhase: make(map[Phase]int64), + SuccessCountsByPhase: make(map[Phase]int64), + DurationStatByPhase: make(map[Phase]AggregatorMetrics), + } + r.LaneStats[name] = i + return i +} + +func (r *CCIPTestReporter) SendReport(t *testing.T, namespace string, slackSend bool) error { + logsPath := filepath.Join("logs", fmt.Sprintf("%s-%s-%d", t.Name(), namespace, time.Now().Unix())) + r.SetNamespace(namespace) + r.endTime = time.Now().UTC().UnixMilli() + r.SetSendSlackReport(r.namespace != "" && slackSend) + return testreporters.SendReport(t, namespace, logsPath, r, nil) +} + +func NewCCIPTestReporter(t *testing.T, lggr *zerolog.Logger) *CCIPTestReporter { + return &CCIPTestReporter{ + LaneStats: make(map[string]*CCIPLaneStats), + startTime: time.Now().UTC().UnixMilli(), + logger: lggr, + t: t, + mu: &sync.Mutex{}, + FailedLanes: make(map[string]Phase), + } +} diff --git a/integration-tests/ccip-tests/testsetups/ccip.go b/integration-tests/ccip-tests/testsetups/ccip.go new file mode 100644 index 00000000000..207773aace4 --- /dev/null +++ b/integration-tests/ccip-tests/testsetups/ccip.go @@ -0,0 +1,1436 @@ +package testsetups + +import ( + "context" + "fmt" + "math/big" + "math/rand" + "os" + "regexp" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/AlekSi/pointer" + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" + "go.uber.org/multierr" + "go.uber.org/zap/zapcore" + "golang.org/x/sync/errgroup" + + chainselectors "github.com/smartcontractkit/chain-selectors" + + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + ctfClient "github.com/smartcontractkit/chainlink-testing-framework/client" + ctf_config "github.com/smartcontractkit/chainlink-testing-framework/config" + ctf_config_types "github.com/smartcontractkit/chainlink-testing-framework/config/types" + ctftestenv "github.com/smartcontractkit/chainlink-testing-framework/docker/test_env" + 
"github.com/smartcontractkit/chainlink-testing-framework/k8s/config" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" + "github.com/smartcontractkit/chainlink-testing-framework/networks" + "github.com/smartcontractkit/chainlink-testing-framework/utils/testcontext" + + integrationactions "github.com/smartcontractkit/chainlink/integration-tests/actions" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/actions" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/contracts" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/contracts/laneconfig" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testconfig" + ccipconfig "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testconfig" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/testreporters" + testutils "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/utils" + "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" +) + +var ( + GethResourceProfile = map[string]interface{}{ + "requests": map[string]interface{}{ + "cpu": "4", + "memory": "6Gi", + }, + "limits": map[string]interface{}{ + "cpu": "4", + "memory": "6Gi", + }, + } + // to set default values through test config use sync.once + setContractVersion sync.Once + setOCRParams sync.Once + setConfigOverrides sync.Once +) + +type NetworkPair struct { + NetworkA blockchain.EVMNetwork + NetworkB blockchain.EVMNetwork + ChainClientA blockchain.EVMClient + ChainClientB blockchain.EVMClient +} + +// LeaderLane is to hold the details of leader lane source and destination network +type LeaderLane struct { + source string + dest string +} + +type CCIPTestConfig struct { + Test *testing.T + EnvInput *testconfig.Common + TestGroupInput *testconfig.CCIPTestGroupConfig + VersionInput map[contracts.Name]contracts.Version + ContractsInput *testconfig.CCIPContractConfig + AllNetworks map[string]blockchain.EVMNetwork + SelectedNetworks []blockchain.EVMNetwork + NetworkPairs []NetworkPair + LeaderLanes []LeaderLane + GethResourceProfile map[string]interface{} +} + +func (c *CCIPTestConfig) useExistingDeployment() bool { + return pointer.GetBool(c.TestGroupInput.ExistingDeployment) +} + +func (c *CCIPTestConfig) useSeparateTokenDeployer() bool { + return contracts.NeedTokenAdminRegistry() && + !pointer.GetBool(c.TestGroupInput.TokenConfig.CCIPOwnerTokens) && + !c.useExistingDeployment() +} + +func (c *CCIPTestConfig) MultiCallEnabled() bool { + return pointer.GetBool(c.TestGroupInput.MulticallInOneTx) +} + +func (c *CCIPTestConfig) localCluster() bool { + return pointer.GetBool(c.TestGroupInput.LocalCluster) +} + +func (c *CCIPTestConfig) ExistingCLCluster() bool { + return c.EnvInput.ExistingCLCluster != nil +} + +func (c *CCIPTestConfig) CLClusterNeedsUpgrade() bool { + if c.EnvInput.NewCLCluster == nil { + return false + } + if c.EnvInput.NewCLCluster.Common != nil && c.EnvInput.NewCLCluster.Common.ChainlinkUpgradeImage != nil { + return true + } + for _, node := range c.EnvInput.NewCLCluster.Nodes { + if node.ChainlinkUpgradeImage != nil { + return true + } + } + return false +} + +func (c *CCIPTestConfig) AddPairToNetworkList(networkA, networkB blockchain.EVMNetwork) { + if c.AllNetworks == nil { + c.AllNetworks = make(map[string]blockchain.EVMNetwork) + } + firstOfPairs := []blockchain.EVMNetwork{networkA} + secondOfPairs := []blockchain.EVMNetwork{networkB} + // if no of lanes per pair is greater than 1, copy common 
contracts from the same network + // if no of lanes per pair is more than 1, the networks are added into the inputs.AllNetworks with a suffix of - + // for example, if no of lanes per pair is 2, and the network pairs are called "testnetA", "testnetB", + // the network will be added as "testnetA-1", testnetA-2","testnetB-1", testnetB-2" + // to deploy 4 lanes between same network pair "testnetA", "testnetB". + // lanes - testnetA-1<->testnetB-1, testnetA-1<-->testnetB-2 , testnetA-2<--> testnetB-1, testnetA-2<--> testnetB-2 + if c.TestGroupInput.NoOfRoutersPerPair > 1 { + firstOfPairs[0].Name = fmt.Sprintf("%s-%d", firstOfPairs[0].Name, 1) + secondOfPairs[0].Name = fmt.Sprintf("%s-%d", secondOfPairs[0].Name, 1) + for i := 1; i < c.TestGroupInput.NoOfRoutersPerPair; i++ { + netsA := networkA + netsA.Name = fmt.Sprintf("%s-%d", netsA.Name, i+1) + netsB := networkB + netsB.Name = fmt.Sprintf("%s-%d", netsB.Name, i+1) + firstOfPairs = append(firstOfPairs, netsA) + secondOfPairs = append(secondOfPairs, netsB) + } + } + + for i := range firstOfPairs { + c.AllNetworks[firstOfPairs[i].Name] = firstOfPairs[i] + c.AllNetworks[secondOfPairs[i].Name] = secondOfPairs[i] + c.NetworkPairs = append(c.NetworkPairs, NetworkPair{ + NetworkA: firstOfPairs[i], + NetworkB: secondOfPairs[i], + }) + } +} + +func (c *CCIPTestConfig) SetNetworkPairs(lggr zerolog.Logger) error { + var allError error + var err error + var inputNetworks []string + c.SelectedNetworks, inputNetworks, err = c.EnvInput.EVMNetworks() + if err != nil { + allError = multierr.Append(allError, fmt.Errorf("failed to get networks: %w", err)) + return allError + } + + networkByChainName := make(map[string]blockchain.EVMNetwork) + for i, net := range c.SelectedNetworks { + networkByChainName[inputNetworks[i]] = net + } + // if network pairs are provided, then use them + if c.TestGroupInput.NetworkPairs != nil { + networkPairs := c.TestGroupInput.NetworkPairs + + for _, pair := range networkPairs { + networkNames := strings.Split(pair, ",") + if len(networkNames) != 2 { + allError = multierr.Append(allError, fmt.Errorf("invalid network pair")) + } + // check if the network names are valid + network1, ok := networkByChainName[networkNames[0]] + if !ok { + allError = multierr.Append(allError, fmt.Errorf("network %s not found in network config", networkNames[0])) + } + network2, ok := networkByChainName[networkNames[1]] + if !ok { + allError = multierr.Append(allError, fmt.Errorf("network %s not found in network config", networkNames[1])) + } + c.AddPairToNetworkList(network1, network2) + } + lggr.Info().Int("Pairs", len(c.NetworkPairs)).Msg("No Of Lanes") + return allError + } + + if c.TestGroupInput.NoOfNetworks == 0 { + c.TestGroupInput.NoOfNetworks = len(c.SelectedNetworks) + } + // TODO remove this when CTF network timeout is fixed + for i := range c.SelectedNetworks { + c.SelectedNetworks[i].Timeout = blockchain.StrDuration{ + Duration: 3 * time.Minute, + } + } + simulated := c.SelectedNetworks[0].Simulated + for i := 1; i < len(c.SelectedNetworks); i++ { + if c.SelectedNetworks[i].Simulated != simulated { + lggr.Fatal().Msg("networks must be of the same type either simulated or real") + } + } + + // if the networks are not simulated use the first p.NoOfNetworks networks from the selected networks + if !simulated && len(c.SelectedNetworks) != c.TestGroupInput.NoOfNetworks { + if len(c.SelectedNetworks) < c.TestGroupInput.NoOfNetworks { + allError = multierr.Append(allError, fmt.Errorf("not enough networks provided")) + } else { + 
c.SelectedNetworks = c.SelectedNetworks[:c.TestGroupInput.NoOfNetworks] + } + } + // If provided networks is lesser than the required number of networks + // and the provided networks are simulated network, create replicas of the provided networks with + // different chain ids + if simulated && len(c.SelectedNetworks) < c.TestGroupInput.NoOfNetworks { + actualNoOfNetworks := len(c.SelectedNetworks) + n := c.SelectedNetworks[0] + var chainIDs []int64 + existingChainIDs := make(map[uint64]struct{}) + for _, net := range c.SelectedNetworks { + existingChainIDs[uint64(net.ChainID)] = struct{}{} + } + for _, id := range chainselectors.TestChainIds() { + // if the chain id already exists in the already provided selected networks, skip it + if _, exists := existingChainIDs[id]; exists { + continue + } + chainIDs = append(chainIDs, int64(id)) + } + for i := 0; i < c.TestGroupInput.NoOfNetworks-actualNoOfNetworks; i++ { + chainID := chainIDs[i] + // if i is greater than the number of simulated pvt keys, rotate the keys + if i > len(networks.AdditionalSimulatedPvtKeys)-1 { + networks.AdditionalSimulatedPvtKeys = append(networks.AdditionalSimulatedPvtKeys, networks.AdditionalSimulatedPvtKeys...) + } + name := fmt.Sprintf("private-chain-%d", len(c.SelectedNetworks)+1) + c.SelectedNetworks = append(c.SelectedNetworks, blockchain.EVMNetwork{ + Name: name, + ChainID: chainID, + Simulated: true, + PrivateKeys: []string{ + networks.AdditionalSimulatedPvtKeys[i], + "ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80", // second key for token deployments + }, + ChainlinkTransactionLimit: n.ChainlinkTransactionLimit, + Timeout: n.Timeout, + MinimumConfirmations: n.MinimumConfirmations, + GasEstimationBuffer: n.GasEstimationBuffer + 1000, + ClientImplementation: n.ClientImplementation, + DefaultGasLimit: n.DefaultGasLimit, + FinalityDepth: n.FinalityDepth, + SupportsEIP1559: true, + }) + if existing, ok := c.EnvInput.Network.AnvilConfigs[strings.ToUpper(n.Name)]; c.EnvInput.Network.AnvilConfigs != nil && ok { + c.EnvInput.Network.AnvilConfigs[strings.ToUpper(name)] = existing + } + + chainConfig := &ctf_config.EthereumChainConfig{} + err := chainConfig.Default() + if err != nil { + allError = multierr.Append(allError, fmt.Errorf("failed to get default chain config: %w", err)) + } else { + chainConfig.ChainID = int(chainID) + eth1 := ctf_config_types.EthereumVersion_Eth1 + geth := ctf_config_types.ExecutionLayer_Geth + + c.EnvInput.PrivateEthereumNetworks[fmt.Sprint(chainID)] = &ctf_config.EthereumNetworkConfig{ + EthereumVersion: ð1, + ExecutionLayer: &geth, + EthereumChainConfig: chainConfig, + } + } + } + } + + if c.TestGroupInput.NoOfNetworks > 2 { + c.FormNetworkPairCombinations() + } else { + c.AddPairToNetworkList(c.SelectedNetworks[0], c.SelectedNetworks[1]) + } + + // if the number of lanes is lesser than the number of network pairs, choose first c.TestGroupInput.MaxNoOfLanes pairs + if c.TestGroupInput.MaxNoOfLanes > 0 && c.TestGroupInput.MaxNoOfLanes < len(c.NetworkPairs) { + var newNetworkPairs []NetworkPair + denselyConnectedNetworks := make(map[string]struct{}) + // if densely connected networks are provided, choose all the network pairs containing the networks mentioned in the list for DenselyConnectedNetworkChainIds + if c.TestGroupInput.DenselyConnectedNetworkChainIds != nil && len(c.TestGroupInput.DenselyConnectedNetworkChainIds) > 0 { + for _, n := range c.TestGroupInput.DenselyConnectedNetworkChainIds { + denselyConnectedNetworks[n] = struct{}{} + } + for _, pair := range 
c.NetworkPairs { + if _, exists := denselyConnectedNetworks[strconv.FormatInt(pair.NetworkA.ChainID, 10)]; exists { + newNetworkPairs = append(newNetworkPairs, pair) + } + } + } + // shuffle the network pairs, we want to randomly distribute the network pairs among all available networks + rand.Shuffle(len(c.NetworkPairs), func(i, j int) { + c.NetworkPairs[i], c.NetworkPairs[j] = c.NetworkPairs[j], c.NetworkPairs[i] + }) + // now add the remaining network pairs by skipping the already covered networks + // and adding the remaining pair from the shuffled list + i := len(newNetworkPairs) + j := 0 + for i < c.TestGroupInput.MaxNoOfLanes { + pair := c.NetworkPairs[j] + // if the network is already covered, skip it + if _, exists := denselyConnectedNetworks[strconv.FormatInt(pair.NetworkA.ChainID, 10)]; !exists { + newNetworkPairs = append(newNetworkPairs, pair) + i++ + } + j++ + } + c.NetworkPairs = newNetworkPairs + } + + // setting leader lane details to network pairs if it is enabled and only in simulated environments + if !pointer.GetBool(c.TestGroupInput.ExistingDeployment) { + c.defineLeaderLanes(lggr) + } + for _, n := range c.NetworkPairs { + lggr.Info(). + Str("NetworkA", fmt.Sprintf("%s-%d", n.NetworkA.Name, n.NetworkA.ChainID)). + Str("NetworkB", fmt.Sprintf("%s-%d", n.NetworkB.Name, n.NetworkB.ChainID)). + Msg("Network Pairs") + } + for _, lane := range c.LeaderLanes { + lggr.Info(). + Str("Source", lane.source). + Str("Destination", lane.dest). + Msg("Leader Lane: ") + } + lggr.Info().Int("Pairs", len(c.NetworkPairs)).Msg("No Of Lanes") + + return allError +} + +// defineLeaderLanes goes over the available network pairs and define one leader lane per destination +func (c *CCIPTestConfig) defineLeaderLanes(lggr zerolog.Logger) { + if !isLeaderLaneFeatureEnabled(&lggr) { + return + } + // the way we are defining leader lane is simply by tagging the destination as key along with the first source network + // as value to the map. + leaderLanes := make(map[string]string) + for _, n := range c.NetworkPairs { + if _, ok := leaderLanes[n.NetworkB.Name]; !ok { + leaderLanes[n.NetworkB.Name] = n.NetworkA.Name + } + if pointer.GetBool(c.TestGroupInput.BiDirectionalLane) { + if _, ok := leaderLanes[n.NetworkA.Name]; !ok { + leaderLanes[n.NetworkA.Name] = n.NetworkB.Name + } + } + } + for k, v := range leaderLanes { + c.LeaderLanes = append(c.LeaderLanes, LeaderLane{ + source: v, + dest: k, + }) + } +} + +// isPriceReportingDisabled checks the given lane is leader lane and return boolean accordingly +func (c *CCIPTestConfig) isPriceReportingDisabled(lggr *zerolog.Logger, source, dest string) bool { + for _, leader := range c.LeaderLanes { + if leader.source == source && leader.dest == dest { + lggr.Debug(). + Str("Source", source). + Str("Destination", dest). 
+ Msg("Non-leader lane") + return true + } + } + return false +} + +func isLeaderLaneFeatureEnabled(lggr *zerolog.Logger) bool { + if err := contracts.MatchContractVersionsOrAbove(map[contracts.Name]contracts.Version{ + contracts.OffRampContract: contracts.V1_2_0, + contracts.OnRampContract: contracts.V1_2_0, + }); err != nil { + lggr.Info().Str("Required contract version", contracts.V1_2_0.String()).Msg("Leader lane feature is not enabled") + return false + } + return true +} + +func (c *CCIPTestConfig) FormNetworkPairCombinations() { + for i := 0; i < c.TestGroupInput.NoOfNetworks; i++ { + for j := i + 1; j < c.TestGroupInput.NoOfNetworks; j++ { + c.AddPairToNetworkList(c.SelectedNetworks[i], c.SelectedNetworks[j]) + } + } +} + +func (c *CCIPTestConfig) SetContractVersion() error { + if c.VersionInput == nil { + return nil + } + for contractName, version := range c.VersionInput { + err := contracts.CheckVersionSupported(contractName, version) + if err != nil { + return err + } + contracts.VersionMap[contractName] = version + } + return nil +} + +func (c *CCIPTestConfig) SetOCRParams() error { + if c.TestGroupInput.OffRampConfig != nil { + if c.TestGroupInput.OffRampConfig.InflightExpiry != nil && + c.TestGroupInput.OffRampConfig.InflightExpiry.Duration() > 0 { + actions.InflightExpiryExec = c.TestGroupInput.OffRampConfig.InflightExpiry.Duration() + } + if pointer.GetUint32(c.TestGroupInput.OffRampConfig.BatchGasLimit) > 0 { + actions.BatchGasLimit = pointer.GetUint32(c.TestGroupInput.OffRampConfig.BatchGasLimit) + } + if pointer.GetUint32(c.TestGroupInput.OffRampConfig.MaxDataBytes) > 0 { + actions.MaxDataBytes = pointer.GetUint32(c.TestGroupInput.OffRampConfig.MaxDataBytes) + } + if c.TestGroupInput.OffRampConfig.RootSnooze != nil && + c.TestGroupInput.OffRampConfig.RootSnooze.Duration() > 0 { + actions.RootSnoozeTime = c.TestGroupInput.OffRampConfig.RootSnooze.Duration() + } + } + if c.TestGroupInput.CommitInflightExpiry != nil && c.TestGroupInput.CommitInflightExpiry.Duration() > 0 { + actions.InflightExpiryCommit = c.TestGroupInput.CommitInflightExpiry.Duration() + } + return nil +} + +// TestConfigOverrideOption is a function that modifies the test config and overrides any values passed in by test files +// This is useful for setting up test specific configurations. +// The return should be a short, explanatory string that describes the change made by the override. +// This is logged at the beginning of the test run. 
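As a usage sketch of the override mechanism described in the comment above (illustrative only, not taken from the suite: it assumes a `smoke` group exists in the loaded TOML, that the snippet lives in a test file of the `testsetups` package, and that a zerolog logger is constructed locally), a test could compose several of the override options defined below when building its config:

```go
package testsetups

import (
	"os"
	"testing"

	"github.com/rs/zerolog"
)

// TestCCIPTokenTransferSketch is a hypothetical example, not part of the suite.
// It shows how TestConfigOverrideOption values compose when passed to
// NewCCIPTestConfig; the "smoke" group key is an assumption for the example.
func TestCCIPTokenTransferSketch(t *testing.T) {
	lggr := zerolog.New(os.Stderr).With().Timestamp().Logger()

	// Each override mutates the group config and returns a short description
	// string that NewCCIPTestConfig logs before the run starts.
	cfg := NewCCIPTestConfig(t, lggr, "smoke",
		UseCCIPOwnerTokens(true),  // deploy test tokens from the CCIP owner account
		WithTokensPerChain(2),     // limit token deployments per chain
		WithNoTokensPerMessage(1), // send a single token per CCIP message
	)
	_ = cfg // normally handed on to CCIPDefaultTestSetUp afterwards
}
```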
+type TestConfigOverrideOption func(*CCIPTestConfig) string + +// UseCCIPOwnerTokens defines whether all tokens are deployed by the same address as the CCIP owner +func UseCCIPOwnerTokens(yes bool) TestConfigOverrideOption { + return func(c *CCIPTestConfig) string { + c.TestGroupInput.TokenConfig.CCIPOwnerTokens = pointer.ToBool(yes) + return fmt.Sprintf("CCIPOwnerTokens set to %t", yes) + } +} + +// WithTokensPerChain sets the number of tokens to deploy on each chain +func WithTokensPerChain(count int) TestConfigOverrideOption { + return func(c *CCIPTestConfig) string { + c.TestGroupInput.TokenConfig.NoOfTokensPerChain = pointer.ToInt(count) + return fmt.Sprintf("NoOfTokensPerChain set to %d", count) + } +} + +// WithMsgDetails sets the message details for the test +func WithMsgDetails(details *testconfig.MsgDetails) TestConfigOverrideOption { + return func(c *CCIPTestConfig) string { + c.TestGroupInput.MsgDetails = details + return "Message set" + } +} + +// WithNoTokensPerMessage sets how many tokens can be sent in a single message +func WithNoTokensPerMessage(noOfTokens int) TestConfigOverrideOption { + return func(c *CCIPTestConfig) string { + c.TestGroupInput.MsgDetails.NoOfTokens = pointer.ToInt(noOfTokens) + return fmt.Sprintf("MsgDetails.NoOfTokens set to %d", noOfTokens) + } +} + +// NewCCIPTestConfig reads the CCIP test config from TOML files, applies any overrides, and configures the test environment +func NewCCIPTestConfig(t *testing.T, lggr zerolog.Logger, tType string, overrides ...TestConfigOverrideOption) *CCIPTestConfig { + testCfg := ccipconfig.GlobalTestConfig() + groupCfg, exists := testCfg.CCIP.Groups[tType] + if !exists { + t.Fatalf("group config for %s does not exist", tType) + } + if tType == ccipconfig.Load { + if testCfg.CCIP.Env.Logging == nil || testCfg.CCIP.Env.Logging.Loki == nil { + t.Fatal("loki config is required to be set for load test") + } + if testCfg.CCIP.Env.Logging == nil || testCfg.CCIP.Env.Logging.Grafana == nil { + t.Fatal("grafana config is required for load test") + } + } + if pointer.GetBool(groupCfg.KeepEnvAlive) { + err := os.Setenv(config.EnvVarKeepEnvironments, "ALWAYS") + if err != nil { + t.Fatal(err) + } + } + ccipTestConfig := &CCIPTestConfig{ + Test: t, + EnvInput: testCfg.CCIP.Env, + ContractsInput: testCfg.CCIP.Deployments, + VersionInput: testCfg.CCIP.ContractVersions, + TestGroupInput: groupCfg, + GethResourceProfile: GethResourceProfile, + } + setContractVersion.Do(func() { + err := ccipTestConfig.SetContractVersion() + if err != nil { + t.Fatal(err) + } + }) + setOCRParams.Do(func() { + err := ccipTestConfig.SetOCRParams() + if err != nil { + t.Fatal(err) + } + }) + setConfigOverrides.Do(func() { + overrideMessages := []string{} + for _, override := range overrides { + if override != nil { + overrideMessages = append(overrideMessages, override(ccipTestConfig)) + } + } + if len(overrideMessages) > 0 { + lggr.Debug().Int("Overrides", len(overrideMessages)).Msg("Test Specific Config Overrides Applied") + for _, msg := range overrideMessages { + lggr.Debug().Msg(msg) + } + } + }) + err := ccipTestConfig.SetNetworkPairs(lggr) + if err != nil { + t.Fatal(err) + } + + return ccipTestConfig +} + +type BiDirectionalLaneConfig struct { + NetworkA blockchain.EVMNetwork + NetworkB blockchain.EVMNetwork + ForwardLane *actions.CCIPLane + ReverseLane *actions.CCIPLane +} + +type CCIPTestSetUpOutputs struct { + SetUpContext context.Context + Cfg *CCIPTestConfig + LaneContractsByNetwork *sync.Map + laneMutex *sync.Mutex + Lanes 
[]*BiDirectionalLaneConfig + Reporter *testreporters.CCIPTestReporter + LaneConfigFile string + LaneConfig *laneconfig.Lanes + TearDown func() error + Env *actions.CCIPTestEnv + Balance *actions.BalanceSheet + BootstrapAdded *atomic.Bool + JobAddGrp *errgroup.Group +} + +func (o *CCIPTestSetUpOutputs) AddToLanes(lane *BiDirectionalLaneConfig) { + o.laneMutex.Lock() + defer o.laneMutex.Unlock() + o.Lanes = append(o.Lanes, lane) +} + +func (o *CCIPTestSetUpOutputs) ReadLanes() []*BiDirectionalLaneConfig { + o.laneMutex.Lock() + defer o.laneMutex.Unlock() + return o.Lanes +} + +func (o *CCIPTestSetUpOutputs) DeployChainContracts( + lggr *zerolog.Logger, + chainClient blockchain.EVMClient, + networkCfg blockchain.EVMNetwork, + noOfTokens int, + tokenDeployerFns []blockchain.ContractDeployer, +) error { + var k8Env *environment.Environment + ccipEnv := o.Env + if ccipEnv != nil { + k8Env = ccipEnv.K8Env + } + if k8Env != nil && chainClient.NetworkSimulated() { + networkCfg.URLs = k8Env.URLs[chainClient.GetNetworkConfig().Name] + } + + mainChainClient, err := blockchain.ConcurrentEVMClient(networkCfg, k8Env, chainClient, *lggr) + if err != nil { + return errors.WithStack(fmt.Errorf("failed to create chain client for %s: %w", networkCfg.Name, err)) + } + + mainChainClient.ParallelTransactions(true) + defer mainChainClient.Close() + ccipCommon, err := actions.DefaultCCIPModule( + lggr, o.Cfg.TestGroupInput, mainChainClient, + ) + if err != nil { + return errors.WithStack(fmt.Errorf("failed to create ccip common module for %s: %w", networkCfg.Name, err)) + } + + cfg := o.LaneConfig.ReadLaneConfig(networkCfg.Name) + + err = ccipCommon.DeployContracts(noOfTokens, tokenDeployerFns, cfg) + if err != nil { + return errors.WithStack(fmt.Errorf("failed to deploy common ccip contracts for %s: %w", networkCfg.Name, err)) + } + ccipCommon.WriteLaneConfig(cfg) + o.LaneContractsByNetwork.Store(networkCfg.Name, cfg) + + return nil +} + +func (o *CCIPTestSetUpOutputs) SetupDynamicTokenPriceUpdates() error { + interval := o.Cfg.TestGroupInput.TokenConfig.DynamicPriceUpdateInterval.Duration() + covered := make(map[string]struct{}) + for _, lanes := range o.ReadLanes() { + lane := lanes.ForwardLane + if _, exists := covered[lane.SourceNetworkName]; !exists { + covered[lane.SourceNetworkName] = struct{}{} + err := lane.Source.Common.UpdateTokenPricesAtRegularInterval(lane.Context, lane.Logger, interval, o.LaneConfig.ReadLaneConfig(lane.SourceNetworkName)) + if err != nil { + return err + } + } + if _, exists := covered[lane.DestNetworkName]; !exists { + covered[lane.DestNetworkName] = struct{}{} + err := lane.Dest.Common.UpdateTokenPricesAtRegularInterval(lane.Context, lane.Logger, interval, o.LaneConfig.ReadLaneConfig(lane.DestNetworkName)) + if err != nil { + return err + } + } + } + return nil +} + +func (o *CCIPTestSetUpOutputs) AddLanesForNetworkPair( + lggr *zerolog.Logger, + networkA, networkB blockchain.EVMNetwork, + chainClientA, chainClientB blockchain.EVMClient, +) error { + var ( + t = o.Cfg.Test + allErrors atomic.Error + k8Env *environment.Environment + ccipEnv = o.Env + namespace = "" + ) + + if o.Cfg.TestGroupInput.LoadProfile != nil { + namespace = o.Cfg.TestGroupInput.LoadProfile.TestRunName + } + bidirectional := pointer.GetBool(o.Cfg.TestGroupInput.BiDirectionalLane) + if ccipEnv != nil { + k8Env = ccipEnv.K8Env + if k8Env != nil { + namespace = k8Env.Cfg.Namespace + } + } + + setUpFuncs, ctx := errgroup.WithContext(testcontext.Get(t)) + + // Use new set of 
clients(sourceChainClient,destChainClient) + // with new header subscriptions(otherwise transactions + // on one lane will keep on waiting for transactions on other lane for the same network) + // Currently for simulated network clients(from same network) created with NewEVMClient does not sync nonce + // ConcurrentEVMClient is a work-around for that. + sourceChainClientA2B, err := blockchain.ConcurrentEVMClient(networkA, k8Env, chainClientA, *lggr) + if err != nil { + return errors.WithStack(fmt.Errorf("failed to create chain client for %s: %w", networkA.Name, err)) + } + + sourceChainClientA2B.ParallelTransactions(true) + + destChainClientA2B, err := blockchain.ConcurrentEVMClient(networkB, k8Env, chainClientB, *lggr) + if err != nil { + return errors.WithStack(fmt.Errorf("failed to create chain client for %s: %w", networkB.Name, err)) + } + destChainClientA2B.ParallelTransactions(true) + + ccipLaneA2B := &actions.CCIPLane{ + Test: t, + SourceChain: sourceChainClientA2B, + DestChain: destChainClientA2B, + SourceNetworkName: actions.NetworkName(networkA.Name), + DestNetworkName: actions.NetworkName(networkB.Name), + ValidationTimeout: o.Cfg.TestGroupInput.PhaseTimeout.Duration(), + SentReqs: make(map[common.Hash][]actions.CCIPRequest), + TotalFee: big.NewInt(0), + Balance: o.Balance, + Context: testcontext.Get(t), + } + // if it non leader lane, disable the price reporting + ccipLaneA2B.PriceReportingDisabled = len(o.Cfg.LeaderLanes) > 0 && + !o.Cfg.isPriceReportingDisabled(lggr, ccipLaneA2B.SourceNetworkName, ccipLaneA2B.DestNetworkName) + + contractsA, ok := o.LaneContractsByNetwork.Load(networkA.Name) + if !ok { + return errors.WithStack(fmt.Errorf("failed to load lane contracts for %s", networkA.Name)) + } + srcCfg := contractsA.(*laneconfig.LaneConfig) + ccipLaneA2B.SrcNetworkLaneCfg = srcCfg + contractsB, ok := o.LaneContractsByNetwork.Load(networkB.Name) + if !ok { + return errors.WithStack(fmt.Errorf("failed to load lane contracts for %s", networkB.Name)) + } + destCfg := contractsB.(*laneconfig.LaneConfig) + ccipLaneA2B.DstNetworkLaneCfg = destCfg + + a2blogger := lggr.With().Str("env", namespace).Str("Lane", + fmt.Sprintf("%s-->%s", ccipLaneA2B.SourceNetworkName, ccipLaneA2B.DestNetworkName)).Logger() + ccipLaneA2B.Logger = &a2blogger + ccipLaneA2B.Reports = o.Reporter.AddNewLane(fmt.Sprintf("%s To %s", + networkA.Name, networkB.Name), ccipLaneA2B.Logger) + + bidirectionalLane := &BiDirectionalLaneConfig{ + NetworkA: networkA, + NetworkB: networkB, + ForwardLane: ccipLaneA2B, + } + + var ccipLaneB2A *actions.CCIPLane + + if bidirectional { + sourceChainClientB2A, err := blockchain.ConcurrentEVMClient(networkB, k8Env, chainClientB, *lggr) + if err != nil { + return errors.WithStack(fmt.Errorf("failed to create chain client for %s: %w", networkB.Name, err)) + } + sourceChainClientB2A.ParallelTransactions(true) + + destChainClientB2A, err := blockchain.ConcurrentEVMClient(networkA, k8Env, chainClientA, *lggr) + if err != nil { + return errors.WithStack(fmt.Errorf("failed to create chain client for %s: %w", networkA.Name, err)) + } + destChainClientB2A.ParallelTransactions(true) + + ccipLaneB2A = &actions.CCIPLane{ + Test: t, + SourceNetworkName: actions.NetworkName(networkB.Name), + DestNetworkName: actions.NetworkName(networkA.Name), + SourceChain: sourceChainClientB2A, + DestChain: destChainClientB2A, + ValidationTimeout: o.Cfg.TestGroupInput.PhaseTimeout.Duration(), + Balance: o.Balance, + SentReqs: make(map[common.Hash][]actions.CCIPRequest), + TotalFee: big.NewInt(0), + 
Context: testcontext.Get(t), + SrcNetworkLaneCfg: ccipLaneA2B.DstNetworkLaneCfg, + DstNetworkLaneCfg: ccipLaneA2B.SrcNetworkLaneCfg, + } + // if it non leader lane, disable the price reporting + ccipLaneB2A.PriceReportingDisabled = len(o.Cfg.LeaderLanes) > 0 && + !o.Cfg.isPriceReportingDisabled(lggr, ccipLaneB2A.SourceNetworkName, ccipLaneB2A.DestNetworkName) + b2aLogger := lggr.With().Str("env", namespace).Str("Lane", + fmt.Sprintf("%s-->%s", ccipLaneB2A.SourceNetworkName, ccipLaneB2A.DestNetworkName)).Logger() + ccipLaneB2A.Logger = &b2aLogger + ccipLaneB2A.Reports = o.Reporter.AddNewLane( + fmt.Sprintf("%s To %s", networkB.Name, networkA.Name), ccipLaneB2A.Logger) + bidirectionalLane.ReverseLane = ccipLaneB2A + } + o.AddToLanes(bidirectionalLane) + + setUpFuncs.Go(func() error { + lggr.Info().Msgf("Setting up lane %s to %s", networkA.Name, networkB.Name) + err := ccipLaneA2B.DeployNewCCIPLane( + o.SetUpContext, o.Env, + o.Cfg.TestGroupInput, o.BootstrapAdded, o.JobAddGrp, + ) + if err != nil { + allErrors.Store(multierr.Append(allErrors.Load(), fmt.Errorf("deploying lane %s to %s; err - %w", networkA.Name, networkB.Name, errors.WithStack(err)))) + return err + } + err = o.LaneConfig.WriteLaneConfig(networkA.Name, ccipLaneA2B.SrcNetworkLaneCfg) + if err != nil { + lggr.Error().Err(err).Msgf("error deploying lane %s to %s", networkA.Name, networkB.Name) + allErrors.Store(multierr.Append(allErrors.Load(), fmt.Errorf("writing lane config for %s; err - %w", networkA.Name, errors.WithStack(err)))) + return err + } + err = o.LaneConfig.WriteLaneConfig(networkB.Name, ccipLaneA2B.DstNetworkLaneCfg) + if err != nil { + allErrors.Store(multierr.Append(allErrors.Load(), fmt.Errorf("writing lane config for %s; err - %w", networkB.Name, errors.WithStack(err)))) + return err + } + + // we need to set the remote chains on the pool after the lane is deployed + // it's sufficient to do this only for the forward lane, as the destination pools will also be updated with source pool updates + // The reverse lane will have the same pools as the forward lane but in reverse order of source and destination + err = ccipLaneA2B.SetRemoteChainsOnPool() + if err != nil { + allErrors.Store(multierr.Append(allErrors.Load(), fmt.Errorf("error setting remote chains; err - %w", errors.WithStack(err)))) + return err + } + lggr.Info().Msgf("done setting up lane %s to %s", networkA.Name, networkB.Name) + if o.Cfg.TestGroupInput.LoadProfile != nil && pointer.GetBool(o.Cfg.TestGroupInput.LoadProfile.OptimizeSpace) { + // This is to optimize memory space for load tests with high number of networks, lanes, tokens + ccipLaneA2B.OptimizeStorage() + } + return nil + }) + + setUpFuncs.Go(func() error { + if bidirectional { + lggr.Info().Msgf("Setting up lane %s to %s", networkB.Name, networkA.Name) + err := ccipLaneB2A.DeployNewCCIPLane( + o.SetUpContext, o.Env, + o.Cfg.TestGroupInput, o.BootstrapAdded, o.JobAddGrp, + ) + if err != nil { + lggr.Error().Err(err).Msgf("error deploying lane %s to %s", networkB.Name, networkA.Name) + allErrors.Store(multierr.Append(allErrors.Load(), fmt.Errorf("deploying lane %s to %s; err - %w", networkB.Name, networkA.Name, errors.WithStack(err)))) + return err + } + + err = o.LaneConfig.WriteLaneConfig(networkB.Name, ccipLaneB2A.SrcNetworkLaneCfg) + if err != nil { + allErrors.Store(multierr.Append(allErrors.Load(), fmt.Errorf("writing lane config for %s; err - %w", networkA.Name, errors.WithStack(err)))) + return err + } + err = o.LaneConfig.WriteLaneConfig(networkA.Name, 
ccipLaneB2A.DstNetworkLaneCfg) + if err != nil { + allErrors.Store( + multierr.Append( + allErrors.Load(), + fmt.Errorf("writing lane config for %s; err - %w", networkB.Name, errors.WithStack(err)), + ), + ) + return err + } + lggr.Info().Msgf("done setting up lane %s to %s", networkB.Name, networkA.Name) + if o.Cfg.TestGroupInput.LoadProfile != nil && pointer.GetBool(o.Cfg.TestGroupInput.LoadProfile.OptimizeSpace) { + // This is to optimize memory space for load tests with high number of networks, lanes, tokens + ccipLaneB2A.OptimizeStorage() + } + return nil + } + return nil + }) + + errs := make(chan error, 1) + go func() { + errs <- setUpFuncs.Wait() + }() + + // wait for either context to get cancelled or all the error-groups to finish execution + for { + select { + case err := <-errs: + // check if there has been any error while waiting for the error groups + // to finish execution + return err + case <-ctx.Done(): + lggr.Print(ctx.Err()) + return allErrors.Load() + } + } +} + +func (o *CCIPTestSetUpOutputs) StartEventWatchers() { + for _, lane := range o.ReadLanes() { + err := lane.ForwardLane.StartEventWatchers() + require.NoError(o.Cfg.Test, err) + if lane.ReverseLane != nil { + err = lane.ReverseLane.StartEventWatchers() + require.NoError(o.Cfg.Test, err) + } + } +} + +func (o *CCIPTestSetUpOutputs) WaitForPriceUpdates() { + t := o.Cfg.Test + priceUpdateGrp, _ := errgroup.WithContext(o.SetUpContext) + for _, lanes := range o.ReadLanes() { + lanes := lanes + forwardLane := lanes.ForwardLane + reverseLane := lanes.ReverseLane + waitForUpdate := func(lane *actions.CCIPLane) error { + defer func() { + lane.Logger.Info(). + Str("source_chain", lane.Source.Common.ChainClient.GetNetworkName()). + Uint64("dest_chain", lane.Source.DestinationChainId). + Str("price_registry", lane.Source.Common.PriceRegistry.Address()). + Msg("Stopping price update watch") + + }() + var allTokens []common.Address + for _, token := range lane.Source.Common.BridgeTokens { + allTokens = append(allTokens, token.ContractAddress) + } + allTokens = append(allTokens, lane.Source.Common.FeeToken.EthAddress) + lane.Logger.Info(). + Str("source_chain", lane.Source.Common.ChainClient.GetNetworkName()). + Uint64("dest_chain", lane.Source.DestinationChainId). + Str("price_registry", lane.Source.Common.PriceRegistry.Address()). 
+ Msgf("Waiting for price update") + err := lane.Source.Common.WaitForPriceUpdates( + o.SetUpContext, lane.Logger, + o.Cfg.TestGroupInput.TokenConfig.TimeoutForPriceUpdate.Duration(), + lane.Source.DestinationChainId, + allTokens, + ) + if err != nil { + return errors.Wrapf(err, "waiting for price update failed on lane %s-->%s", lane.SourceNetworkName, lane.DestNetworkName) + } + return nil + } + + priceUpdateGrp.Go(func() error { + return waitForUpdate(forwardLane) + }) + if lanes.ReverseLane != nil { + priceUpdateGrp.Go(func() error { + return waitForUpdate(reverseLane) + }) + } + } + + require.NoError(t, priceUpdateGrp.Wait()) +} + +// CheckGasUpdateTransaction checks the gas update transactions count +func (o *CCIPTestSetUpOutputs) CheckGasUpdateTransaction(lggr *zerolog.Logger) error { + transactionsBySource := make(map[string]string) + destToSourcesList := make(map[string][]string) + // create a map to hold the unique destination with list of sources + for _, n := range o.Cfg.NetworkPairs { + destToSourcesList[n.NetworkB.Name] = append(destToSourcesList[n.NetworkB.Name], n.NetworkA.Name) + if pointer.GetBool(o.Cfg.TestGroupInput.BiDirectionalLane) { + destToSourcesList[n.NetworkA.Name] = append(destToSourcesList[n.NetworkA.Name], n.NetworkB.Name) + } + } + lggr.Debug().Interface("list", destToSourcesList).Msg("Dest to Source") + // a function to read the gas update events and create a map with unique source and store the tx hash + filterGasUpdateEventTxBySource := func(lane *actions.CCIPLane) error { + for _, g := range lane.Source.Common.GasUpdateEvents { + if g.Value == nil { + return fmt.Errorf("gas update value should not be nil in tx %s", g.Tx) + } + if _, ok := transactionsBySource[g.Source]; !ok { + transactionsBySource[g.Source] = g.Tx + } + } + return nil + } + + for _, lane := range o.ReadLanes() { + if err := filterGasUpdateEventTxBySource(lane.ForwardLane); err != nil { + return fmt.Errorf("error in filtering gas update transactions in the lane source: %s and destination: %s, error: %w", + lane.ForwardLane.SourceNetworkName, lane.ForwardLane.DestNetworkName, err) + } + if lane.ReverseLane != nil { + if err := filterGasUpdateEventTxBySource(lane.ReverseLane); err != nil { + return fmt.Errorf("error in filtering gas update transactions in the lane source: %s and destination: %s, error: %w", + lane.ReverseLane.SourceNetworkName, lane.ReverseLane.DestNetworkName, err) + } + } + } + + lggr.Debug().Interface("Tx hashes by source", transactionsBySource).Msg("Checked Gas Update Transactions by Source") + // when leader lane setup is enabled, number of unique transaction from the source + // should match the number of leader lanes defined. + if len(transactionsBySource) != len(o.Cfg.LeaderLanes) { + lggr.Error(). + Int("Tx hashes expected", len(o.Cfg.LeaderLanes)). + Int("Tx hashes received", len(transactionsBySource)). + Int("Leader lanes count", len(o.Cfg.LeaderLanes)). + Msg("Checked Gas Update transactions count doesn't match") + return fmt.Errorf("checked Gas Update transactions count doesn't match") + } + lggr.Debug(). + Int("Tx hashes by source", len(transactionsBySource)). + Int("Leader lanes count", len(o.Cfg.LeaderLanes)). + Msg("Checked Gas Update transactions count matches") + + return nil +} + +// CCIPDefaultTestSetUp sets up the environment for CCIP tests +// if configureCLNode is set as false, it assumes: +// 1. contracts are already deployed on live networks +// 2. CL nodes are set up and configured with existing contracts +// 3. 
No k8 env deployment is needed +// It reuses already deployed contracts from the addresses provided in ../contracts/ccip/laneconfig/contracts.json +// +// If bidirectional is true it sets up two-way lanes between NetworkA and NetworkB. Same CL nodes are used for both the lanes. +// If bidirectional is false only one way lane is set up. +// +// Returns - +// 1. CCIPLane for NetworkA --> NetworkB +// 2. If bidirectional is true, CCIPLane for NetworkB --> NetworkA +// 3. If configureCLNode is true, the tearDown func to call when environment needs to be destroyed +func CCIPDefaultTestSetUp( + t *testing.T, + lggr *zerolog.Logger, + envName string, + tokenDeployerFns []blockchain.ContractDeployer, + testConfig *CCIPTestConfig, +) *CCIPTestSetUpOutputs { + var err error + reportPath := "tmp_laneconfig" + filepath := fmt.Sprintf("./%s/tmp_%s.json", reportPath, strings.ReplaceAll(t.Name(), "/", "_")) + reportFile := testutils.FileNameFromPath(filepath) + parent, cancel := context.WithCancel(context.Background()) + defer cancel() + setUpArgs := &CCIPTestSetUpOutputs{ + SetUpContext: parent, + Cfg: testConfig, + Reporter: testreporters.NewCCIPTestReporter(t, lggr), + LaneConfigFile: filepath, + LaneContractsByNetwork: &sync.Map{}, + Balance: actions.NewBalanceSheet(), + BootstrapAdded: atomic.NewBool(false), + JobAddGrp: &errgroup.Group{}, + laneMutex: &sync.Mutex{}, + } + + contractsData, err := setUpArgs.Cfg.ContractsInput.ContractsData() + require.NoError(t, err, "error reading existing lane config") + + chainClientByChainID := setUpArgs.CreateEnvironment(lggr, envName, reportPath) + // if test is run in remote runner, register a clean-up to copy the laneconfig file + if value, set := os.LookupEnv(config.EnvVarJobImage); set && value != "" && + (setUpArgs.Env != nil && setUpArgs.Env.K8Env != nil) && + pointer.GetBool(setUpArgs.Cfg.TestGroupInput.StoreLaneConfig) { + t.Cleanup(func() { + path := fmt.Sprintf("reports/%s/%s", reportPath, reportFile) + dir, err := os.Getwd() + require.NoError(t, err) + destPath := fmt.Sprintf("%s/%s", dir, reportFile) + lggr.Info().Str("srcPath", path).Str("dstPath", destPath).Msg("copying lane config") + err = setUpArgs.Env.K8Env.CopyFromPod("app=runner-data", + "remote-test-runner-data-files", path, destPath) + require.NoError(t, err, "error getting lane config") + }) + } + if setUpArgs.Env != nil { + ccipEnv := setUpArgs.Env + if ccipEnv.K8Env != nil && ccipEnv.K8Env.WillUseRemoteRunner() { + return setUpArgs + } + } + + setUpArgs.LaneConfig, err = laneconfig.ReadLanesFromExistingDeployment(contractsData) + require.NoError(t, err) + + if setUpArgs.LaneConfig == nil { + setUpArgs.LaneConfig = &laneconfig.Lanes{LaneConfigs: make(map[string]*laneconfig.LaneConfig)} + } + laneCfgFile, err := os.Stat(setUpArgs.LaneConfigFile) + if err == nil && laneCfgFile.Size() > 0 { + // remove the existing lane config file + err = os.Remove(setUpArgs.LaneConfigFile) + require.NoError(t, err, "error while removing existing lane config file - %s", setUpArgs.LaneConfigFile) + } + + configureCLNode := !testConfig.useExistingDeployment() + + // if no of lanes per pair is greater than 1, copy common contracts from the same network + // if no of lanes per pair is more than 1, the networks are added into the testConfig.AllNetworks with a suffix of - + // for example, if no of lanes per pair is 2, and the network pairs are called "testnetA", "testnetB", + // the network will be added as "testnetA-1", testnetA-2","testnetB-1", testnetB-2" + // to deploy 2 lanes between same network pair 
"testnetA", "testnetB". + // In the following the common contracts will be copied from "testnetA" to "testnetA-1" and "testnetA-2" and + // from "testnetB" to "testnetB-1" and "testnetB-2" + for n := range testConfig.AllNetworks { + if setUpArgs.Cfg.TestGroupInput.NoOfRoutersPerPair > 1 { + regex := regexp.MustCompile(`-(\d+)$`) + networkNameToReadCfg := regex.ReplaceAllString(n, "") + reuse := pointer.GetBool(testConfig.TestGroupInput.ReuseContracts) + // if reuse contracts is true, copy common contracts from the same network except the router contract + setUpArgs.LaneConfig.CopyCommonContracts( + networkNameToReadCfg, n, + reuse, testConfig.TestGroupInput.MsgDetails.IsTokenTransfer(), + ) + } + } + + // deploy all chain specific common contracts + chainAddGrp, _ := errgroup.WithContext(setUpArgs.SetUpContext) + lggr.Info().Msg("Deploying common contracts") + + // If we have a token admin registry, we need to use a separate to deploy our test tokens from so that the tokens + // are not owned by the same account that owns the other CCIP contracts. This emulates self-serve token setups where + // the token owner is different from the CCIP contract owner. + if testConfig.useSeparateTokenDeployer() { + for _, net := range testConfig.AllNetworks { + chainClient := chainClientByChainID[net.ChainID] + require.NotNil(t, chainClient, "Chain client not found for chainID %d", net.ChainID) + require.GreaterOrEqual(t, len(chainClient.GetWallets()), 2, "The test is using a TokenAdminRegistry, and has CCIPOwnerTokens set to 'false'. The test needs a second wallet to deploy token contracts from. Please add a second wallet to the 'evm_clients' config option.") + tokenDeployerWallet := chainClient.GetWallets()[1] + // TODO: This is a total guess at how much funds we need to deploy the tokens. This could be way off, especially on live chains. + // There aren't a lot of good ways to estimate this though. See CCIP-2471. + recommendedTokenBalance := new(big.Int).Mul(big.NewInt(5e18), big.NewInt(int64(pointer.GetInt(testConfig.TestGroupInput.TokenConfig.NoOfTokensPerChain)))) + currentTokenBalance, err := chainClient.BalanceAt(context.Background(), common.HexToAddress(tokenDeployerWallet.Address())) + require.NoError(t, err) + if currentTokenBalance.Cmp(recommendedTokenBalance) < 0 { + lggr.Warn(). + Str("Token Deployer Address", tokenDeployerWallet.Address()). + Uint64("Current Balance", currentTokenBalance.Uint64()). + Uint64("Recommended Balance", recommendedTokenBalance.Uint64()). + Msg("Token Deployer wallet may be underfunded. 
Please ensure it has enough funds to deploy the tokens.") + } + } + } + + for _, net := range testConfig.AllNetworks { + chainClient := chainClientByChainID[net.ChainID] + net := net + net.HTTPURLs = chainClient.GetNetworkConfig().HTTPURLs + net.URLs = chainClient.GetNetworkConfig().URLs + chainAddGrp.Go(func() error { + return setUpArgs.DeployChainContracts( + lggr, chainClient, net, + pointer.GetInt(testConfig.TestGroupInput.TokenConfig.NoOfTokensPerChain), + tokenDeployerFns, + ) + }) + } + require.NoError(t, chainAddGrp.Wait(), "Deploying common contracts shouldn't fail") + + // set up mock server for price pipeline and usdc attestation if not using existing deployment + if !pointer.GetBool(setUpArgs.Cfg.TestGroupInput.ExistingDeployment) { + var killgrave *ctftestenv.Killgrave + if setUpArgs.Env.LocalCluster != nil { + killgrave = setUpArgs.Env.LocalCluster.MockAdapter + } + if setUpArgs.Cfg.TestGroupInput.TokenConfig.IsPipelineSpec() { + // set up mock server for price pipeline. need to set it once for all the lanes as the price pipeline path uses + // regex to match the path for all tokens across all lanes + actions.SetMockserverWithTokenPriceValue(killgrave, setUpArgs.Env.MockServer) + } + if pointer.GetBool(setUpArgs.Cfg.TestGroupInput.USDCMockDeployment) { + // if it's a new USDC deployment, set up mock server for attestation, + // we need to set it only once for all the lanes as the attestation path uses regex to match the path for + // all messages across all lanes + err = actions.SetMockServerWithUSDCAttestation(killgrave, setUpArgs.Env.MockServer) + require.NoError(t, err, "failed to set up mock server for attestation") + } + } + // deploy all lane specific contracts + lggr.Info().Msg("Deploying lane specific contracts") + laneAddGrp, _ := errgroup.WithContext(setUpArgs.SetUpContext) + // for memory management set a batch size for active lane deployment group + laneAddGrp.SetLimit(200) + for _, networkPair := range testConfig.NetworkPairs { + n := networkPair + var ok bool + n.ChainClientA, ok = chainClientByChainID[n.NetworkA.ChainID] + require.True(t, ok, "Chain client for chainID %d not found", n.NetworkA.ChainID) + n.ChainClientB, ok = chainClientByChainID[n.NetworkB.ChainID] + require.True(t, ok, "Chain client for chainID %d not found", n.NetworkB.ChainID) + + n.NetworkA.HTTPURLs = n.ChainClientA.GetNetworkConfig().HTTPURLs + n.NetworkA.URLs = n.ChainClientA.GetNetworkConfig().URLs + n.NetworkB.HTTPURLs = n.ChainClientB.GetNetworkConfig().HTTPURLs + n.NetworkB.URLs = n.ChainClientB.GetNetworkConfig().URLs + + laneAddGrp.Go(func() error { + return setUpArgs.AddLanesForNetworkPair( + lggr, n.NetworkA, n.NetworkB, + chainClientByChainID[n.NetworkA.ChainID], chainClientByChainID[n.NetworkB.ChainID], + ) + }) + } + require.NoError(t, laneAddGrp.Wait()) + err = laneconfig.WriteLanesToJSON(setUpArgs.LaneConfigFile, setUpArgs.LaneConfig) + require.NoError(t, err) + + require.Equal(t, len(setUpArgs.Lanes), len(testConfig.NetworkPairs), + "Number of bi-directional lanes should be equal to number of network pairs") + // only required for env set up + setUpArgs.LaneContractsByNetwork = nil + + if configureCLNode { + // wait for all jobs to get created + lggr.Info().Msg("Waiting for jobs to be created") + require.NoError(t, setUpArgs.JobAddGrp.Wait(), "Creating jobs shouldn't fail") + // wait for price updates to be available + setUpArgs.WaitForPriceUpdates() + if isLeaderLaneFeatureEnabled(lggr) && !pointer.GetBool(setUpArgs.Cfg.TestGroupInput.ExistingDeployment) { + 
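+ // When the leader-lane feature is enabled on a fresh deployment, gas price updates are expected
+ // to originate only from the configured leader lanes, so CheckGasUpdateTransaction asserts that
+ // the number of distinct sources emitting a gas update equals the number of leader lanes.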
require.NoError(t, setUpArgs.CheckGasUpdateTransaction(lggr), "gas update transaction check shouldn't fail") + } + // if dynamic price update is required + if setUpArgs.Cfg.TestGroupInput.TokenConfig.IsDynamicPriceUpdate() { + require.NoError(t, setUpArgs.SetupDynamicTokenPriceUpdates(), "setting up dynamic price update should not fail") + } + } + + // start event watchers for all lanes + setUpArgs.StartEventWatchers() + // now that lane configs are already dumped to file, we can clean up the lane config map + setUpArgs.LaneConfig = nil + setUpArgs.TearDown = func() error { + var errs error + for _, lanes := range setUpArgs.Lanes { + // if existing deployment is true, don't attempt to pay ccip fees + err := lanes.ForwardLane.CleanUp(configureCLNode) + if err != nil { + errs = multierr.Append(errs, err) + } + if lanes.ReverseLane != nil { + // if existing deployment is true, don't attempt to pay ccip fees + err := lanes.ReverseLane.CleanUp(configureCLNode) + if err != nil { + errs = multierr.Append(errs, err) + } + } + } + return errs + } + lggr.Info().Msg("Test setup completed") + return setUpArgs +} + +// CreateEnvironment creates the environment for the test and registers the test clean-up function to tear down the set-up environment +// It returns the map of chainID to EVMClient +func (o *CCIPTestSetUpOutputs) CreateEnvironment( + lggr *zerolog.Logger, + envName string, + reportPath string, +) map[int64]blockchain.EVMClient { + var ( + testConfig = o.Cfg + t = o.Cfg.Test + + ccipEnv *actions.CCIPTestEnv + k8Env *environment.Environment + err error + chains []blockchain.EVMClient + local *test_env.CLClusterTestEnv + deployCL func() error + ) + + envConfig := createEnvironmentConfig(t, envName, testConfig, reportPath) + + configureCLNode := !testConfig.useExistingDeployment() || pointer.GetString(testConfig.EnvInput.EnvToConnect) != "" + namespace := "" + if testConfig.TestGroupInput.LoadProfile != nil { + namespace = testConfig.TestGroupInput.LoadProfile.TestRunName + } + require.False(t, testConfig.localCluster() && testConfig.ExistingCLCluster(), + "local cluster and existing cluster cannot be true at the same time") + // if it's a new deployment, deploy the env + // Or if EnvToConnect is given connect to that k8 environment + if configureCLNode { + if !testConfig.ExistingCLCluster() { + // if it's a local cluster, deploy the local cluster in docker + if testConfig.localCluster() { + local, deployCL = DeployLocalCluster(t, testConfig) + ccipEnv = &actions.CCIPTestEnv{ + LocalCluster: local, + } + namespace = "local-docker-deployment" + } else { + // Otherwise, deploy the k8s env + lggr.Info().Msg("Deploying test environment") + // deploy the env if configureCLNode is true + k8Env = DeployEnvironments(t, envConfig, testConfig) + ccipEnv = &actions.CCIPTestEnv{K8Env: k8Env} + namespace = ccipEnv.K8Env.Cfg.Namespace + } + } else { + // if there is already a cluster, use the existing cluster to connect to the nodes + ccipEnv = &actions.CCIPTestEnv{} + mockserverURL := pointer.GetString(testConfig.EnvInput.Mockserver) + require.NotEmpty(t, mockserverURL, "mockserver URL cannot be nil") + ccipEnv.MockServer = ctfClient.NewMockserverClient(&ctfClient.MockserverConfig{ + LocalURL: mockserverURL, + ClusterURL: mockserverURL, + }) + } + ccipEnv.CLNodeWithKeyReady, _ = errgroup.WithContext(o.SetUpContext) + o.Env = ccipEnv + if ccipEnv.K8Env != nil && ccipEnv.K8Env.WillUseRemoteRunner() { + return nil + } + } else { + // if configureCLNode is false it means we don't need to deploy any additional 
pods, + // use a placeholder env to create just the remote runner in it. + if value, set := os.LookupEnv(config.EnvVarJobImage); set && value != "" { + k8Env = environment.New(envConfig) + err = k8Env.Run() + require.NoErrorf(t, err, "error creating environment remote runner") + o.Env = &actions.CCIPTestEnv{K8Env: k8Env} + if k8Env.WillUseRemoteRunner() { + return nil + } + } + } + if o.Cfg.TestGroupInput.LoadProfile != nil { + o.Cfg.TestGroupInput.LoadProfile.SetTestRunName(namespace) + } + chainByChainID := make(map[int64]blockchain.EVMClient) + if pointer.GetBool(testConfig.TestGroupInput.LocalCluster) { + require.NotNil(t, ccipEnv.LocalCluster, "Local cluster shouldn't be nil") + for _, n := range ccipEnv.LocalCluster.EVMNetworks { + if evmClient, err := blockchain.NewEVMClientFromNetwork(*n, *lggr); err == nil { + chainByChainID[evmClient.GetChainID().Int64()] = evmClient + chains = append(chains, evmClient) + } else { + lggr.Error().Err(err).Msgf("EVMClient for chainID %d not found", n.ChainID) + } + } + } else { + for _, n := range testConfig.SelectedNetworks { + if _, ok := chainByChainID[n.ChainID]; ok { + continue + } + var ec blockchain.EVMClient + if k8Env == nil { + ec, err = blockchain.ConnectEVMClient(n, *lggr) + } else { + log.Info().Interface("urls", k8Env.URLs).Msg("URLs") + ec, err = blockchain.NewEVMClient(n, k8Env, *lggr) + } + require.NoError(t, err, "Connecting to blockchain nodes shouldn't fail") + chains = append(chains, ec) + chainByChainID[n.ChainID] = ec + } + } + if configureCLNode { + ccipEnv.CLNodeWithKeyReady.Go(func() error { + var totalNodes int + if !o.Cfg.ExistingCLCluster() { + if ccipEnv.LocalCluster != nil { + err = deployCL() + if err != nil { + return err + } + } + err = ccipEnv.ConnectToDeployedNodes() + if err != nil { + return fmt.Errorf("error connecting to chainlink nodes: %w", err) + } + totalNodes = pointer.GetInt(testConfig.EnvInput.NewCLCluster.NoOfNodes) + } else { + totalNodes = pointer.GetInt(testConfig.EnvInput.ExistingCLCluster.NoOfNodes) + err = ccipEnv.ConnectToExistingNodes(o.Cfg.EnvInput) + if err != nil { + return fmt.Errorf("error deploying and connecting to chainlink nodes: %w", err) + } + } + err = ccipEnv.SetUpNodeKeysAndFund(lggr, big.NewFloat(testConfig.TestGroupInput.NodeFunding), chains) + if err != nil { + return fmt.Errorf("error setting up nodes and keys %w", err) + } + // first node is the bootstrapper + ccipEnv.CommitNodeStartIndex = 1 + ccipEnv.ExecNodeStartIndex = 1 + ccipEnv.NumOfCommitNodes = testConfig.TestGroupInput.NoOfCommitNodes + ccipEnv.NumOfExecNodes = ccipEnv.NumOfCommitNodes + if !pointer.GetBool(testConfig.TestGroupInput.CommitAndExecuteOnSameDON) { + if len(ccipEnv.CLNodesWithKeys) < 11 { + return fmt.Errorf("not enough CL nodes for separate commit and execution nodes") + } + if testConfig.TestGroupInput.NoOfCommitNodes >= totalNodes { + return fmt.Errorf("number of commit nodes can not be greater than total number of nodes in DON") + } + // first two nodes are reserved for bootstrap commit and bootstrap exec + ccipEnv.CommitNodeStartIndex = 2 + ccipEnv.ExecNodeStartIndex = 2 + testConfig.TestGroupInput.NoOfCommitNodes + ccipEnv.NumOfExecNodes = totalNodes - (2 + testConfig.TestGroupInput.NoOfCommitNodes) + if ccipEnv.NumOfExecNodes < 4 { + return fmt.Errorf("insufficient number of exec nodes") + } + } + ccipEnv.NumOfAllowedFaultyExec = (ccipEnv.NumOfExecNodes - 1) / 3 + ccipEnv.NumOfAllowedFaultyCommit = (ccipEnv.NumOfCommitNodes - 1) / 3 + return nil + }) + } + + t.Cleanup(func() { + if 
configureCLNode { + if ccipEnv.LocalCluster != nil { + err := ccipEnv.LocalCluster.Terminate() + require.NoError(t, err, "Local cluster termination shouldn't fail") + require.NoError(t, o.Reporter.SendReport(t, namespace, false), "Aggregating and sending report shouldn't fail") + return + } + if pointer.GetBool(testConfig.TestGroupInput.KeepEnvAlive) || testConfig.ExistingCLCluster() { + require.NoError(t, o.Reporter.SendReport(t, namespace, true), "Aggregating and sending report shouldn't fail") + return + } + lggr.Info().Msg("Tearing down the environment") + err = integrationactions.TeardownSuite(t, nil, ccipEnv.K8Env, ccipEnv.CLNodes, o.Reporter, zapcore.DPanicLevel, o.Cfg.EnvInput) + require.NoError(t, err, "Environment teardown shouldn't fail") + } else { + //just send the report + require.NoError(t, o.Reporter.SendReport(t, namespace, true), "Aggregating and sending report shouldn't fail") + } + }) + return chainByChainID +} + +func createEnvironmentConfig(t *testing.T, envName string, testConfig *CCIPTestConfig, reportPath string) *environment.Config { + envConfig := &environment.Config{ + NamespacePrefix: envName, + Test: t, + // PreventPodEviction: true, //TODO: enable this once we have a way to handle pod eviction + } + if pointer.GetBool(testConfig.TestGroupInput.StoreLaneConfig) { + envConfig.ReportPath = reportPath + } + // if there is already existing namespace, no need to update any manifest there, we just connect to it + existingEnv := pointer.GetString(testConfig.EnvInput.EnvToConnect) + if existingEnv != "" { + envConfig.Namespace = existingEnv + envConfig.NamespacePrefix = "" + envConfig.SkipManifestUpdate = true + envConfig.RunnerName = fmt.Sprintf("%s-%s", environment.REMOTE_RUNNER_NAME, uuid.NewString()[0:5]) + } + if testConfig.EnvInput.TTL != nil { + envConfig.TTL = testConfig.EnvInput.TTL.Duration() + } + if testConfig.TestGroupInput.LoadProfile != nil && testConfig.TestGroupInput.LoadProfile.TestDuration != nil { + approxDur := testConfig.TestGroupInput.LoadProfile.TestDuration.Duration() + 3*time.Hour + if envConfig.TTL < approxDur { + envConfig.TTL = approxDur + } + } + return envConfig +} diff --git a/integration-tests/ccip-tests/testsetups/test_env.go b/integration-tests/ccip-tests/testsetups/test_env.go new file mode 100644 index 00000000000..63018c9fe44 --- /dev/null +++ b/integration-tests/ccip-tests/testsetups/test_env.go @@ -0,0 +1,594 @@ +package testsetups + +import ( + "bytes" + "fmt" + "os" + "strconv" + "strings" + "testing" + + "github.com/AlekSi/pointer" + "github.com/pkg/errors" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + ctf_config "github.com/smartcontractkit/chainlink-testing-framework/config" + ctf_config_types "github.com/smartcontractkit/chainlink-testing-framework/config/types" + "github.com/smartcontractkit/chainlink-testing-framework/networks" + "github.com/smartcontractkit/chainlink-testing-framework/utils/conversions" + + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/foundry" + + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver" + mockservercfg "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver-cfg" + + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + ctftestenv "github.com/smartcontractkit/chainlink-testing-framework/docker/test_env" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment" + "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink" + 
"github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/reorg" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + k8config "github.com/smartcontractkit/chainlink-testing-framework/k8s/config" + + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/actions" + "github.com/smartcontractkit/chainlink/integration-tests/ccip-tests/types/config/node" + "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" + integrationnodes "github.com/smartcontractkit/chainlink/integration-tests/types/config/node" + evmcfg "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml" + corechainlink "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" +) + +func SetResourceProfile(cpu, mem string) map[string]interface{} { + return map[string]interface{}{ + "requests": map[string]interface{}{ + "cpu": cpu, + "memory": mem, + }, + "limits": map[string]interface{}{ + "cpu": cpu, + "memory": mem, + }, + } +} + +func setNodeConfig(nets []blockchain.EVMNetwork, nodeConfig, commonChain string, configByChain map[string]string) (*corechainlink.Config, string, error) { + var tomlCfg *corechainlink.Config + var err error + var commonChainConfig *evmcfg.Chain + if commonChain != "" { + err = config.DecodeTOML(bytes.NewReader([]byte(commonChain)), &commonChainConfig) + if err != nil { + return nil, "", err + } + } + configByChainMap := make(map[int64]evmcfg.Chain) + for k, v := range configByChain { + var chain evmcfg.Chain + err = config.DecodeTOML(bytes.NewReader([]byte(v)), &chain) + if err != nil { + return nil, "", err + } + chainId, err := strconv.ParseInt(k, 10, 64) + if err != nil { + return nil, "", err + } + configByChainMap[chainId] = chain + } + if nodeConfig == "" { + tomlCfg = integrationnodes.NewConfig( + integrationnodes.NewBaseConfig(), + node.WithPrivateEVMs(nets, commonChainConfig, configByChainMap)) + } else { + tomlCfg, err = node.NewConfigFromToml([]byte(nodeConfig), node.WithPrivateEVMs(nets, commonChainConfig, configByChainMap)) + if err != nil { + return nil, "", err + } + } + tomlStr, err := tomlCfg.TOMLString() + return tomlCfg, tomlStr, err +} + +func ChainlinkPropsForUpdate( + t *testing.T, + testInputs *CCIPTestConfig, +) (map[string]any, int) { + updateProps := make(map[string]any) + upgradeImage := pointer.GetString(testInputs.EnvInput.NewCLCluster.Common.ChainlinkUpgradeImage.Image) + upgradeTag := pointer.GetString(testInputs.EnvInput.NewCLCluster.Common.ChainlinkUpgradeImage.Version) + noOfNodesToUpgrade := 0 + if len(testInputs.EnvInput.NewCLCluster.Nodes) > 0 { + var nodesMap []map[string]any + for _, clNode := range testInputs.EnvInput.NewCLCluster.Nodes { + if !pointer.GetBool(clNode.NeedsUpgrade) { + continue + } + upgradeImage = pointer.GetString(clNode.ChainlinkUpgradeImage.Image) + upgradeTag = pointer.GetString(clNode.ChainlinkUpgradeImage.Version) + if upgradeImage == "" || upgradeTag == "" { + continue + } + nodeConfig := clNode.BaseConfigTOML + commonChainConfig := clNode.CommonChainConfigTOML + chainConfigByChain := clNode.ChainConfigTOMLByChain + if nodeConfig == "" { + nodeConfig = testInputs.EnvInput.NewCLCluster.Common.BaseConfigTOML + } + if commonChainConfig == "" { + commonChainConfig = testInputs.EnvInput.NewCLCluster.Common.CommonChainConfigTOML + } + if chainConfigByChain == nil { + chainConfigByChain = testInputs.EnvInput.NewCLCluster.Common.ChainConfigTOMLByChain + } + + _, tomlStr, err := setNodeConfig( + testInputs.SelectedNetworks, + nodeConfig, commonChainConfig, 
chainConfigByChain, + ) + require.NoError(t, err) + nodesMap = append(nodesMap, map[string]any{ + "name": clNode.Name, + "chainlink": map[string]any{ + "image": map[string]any{ + "image": upgradeImage, + "version": upgradeTag, + }, + }, + "toml": tomlStr, + }) + noOfNodesToUpgrade++ + } + updateProps["nodes"] = nodesMap + } else { + if upgradeImage == "" || upgradeTag == "" { + return nil, 0 + } + updateProps["chainlink"] = map[string]interface{}{ + "image": map[string]interface{}{ + "image": upgradeImage, + "version": upgradeTag, + }, + } + _, tomlStr, err := setNodeConfig( + testInputs.SelectedNetworks, + testInputs.EnvInput.NewCLCluster.Common.BaseConfigTOML, + testInputs.EnvInput.NewCLCluster.Common.CommonChainConfigTOML, + testInputs.EnvInput.NewCLCluster.Common.ChainConfigTOMLByChain, + ) + require.NoError(t, err) + updateProps["toml"] = tomlStr + noOfNodesToUpgrade = pointer.GetInt(testInputs.EnvInput.NewCLCluster.NoOfNodes) + } + return updateProps, noOfNodesToUpgrade +} + +func ChainlinkChart( + t *testing.T, + testInputs *CCIPTestConfig, + nets []blockchain.EVMNetwork, +) environment.ConnectedChart { + require.NotNil(t, testInputs.EnvInput.NewCLCluster.Common, "Chainlink Common config is not specified") + clProps := make(map[string]interface{}) + clProps["prometheus"] = true + var formattedArgs []string + if len(testInputs.EnvInput.NewCLCluster.DBArgs) > 0 { + for _, arg := range testInputs.EnvInput.NewCLCluster.DBArgs { + formattedArgs = append(formattedArgs, "-c") + formattedArgs = append(formattedArgs, arg) + } + } + clProps["db"] = map[string]interface{}{ + "resources": SetResourceProfile(testInputs.EnvInput.NewCLCluster.DBCPU, testInputs.EnvInput.NewCLCluster.DBMemory), + "additionalArgs": formattedArgs, + "stateful": pointer.GetBool(testInputs.EnvInput.NewCLCluster.IsStateful), + "capacity": testInputs.EnvInput.NewCLCluster.DBCapacity, + "storageClassName": pointer.GetString(testInputs.EnvInput.NewCLCluster.DBStorageClass), + "enablePrometheusPostgresExporter": pointer.GetBool(testInputs.EnvInput.NewCLCluster.PromPgExporter), + "image": map[string]any{ + "image": testInputs.EnvInput.NewCLCluster.Common.DBImage, + "version": testInputs.EnvInput.NewCLCluster.Common.DBTag, + }, + } + clProps["chainlink"] = map[string]interface{}{ + "resources": SetResourceProfile(testInputs.EnvInput.NewCLCluster.NodeCPU, testInputs.EnvInput.NewCLCluster.NodeMemory), + "image": map[string]any{ + "image": pointer.GetString(testInputs.EnvInput.NewCLCluster.Common.ChainlinkImage.Image), + "version": pointer.GetString(testInputs.EnvInput.NewCLCluster.Common.ChainlinkImage.Version), + }, + } + + require.NotNil(t, testInputs.EnvInput, "no env test input specified") + + if len(testInputs.EnvInput.NewCLCluster.Nodes) > 0 { + var nodesMap []map[string]any + for _, clNode := range testInputs.EnvInput.NewCLCluster.Nodes { + nodeConfig := clNode.BaseConfigTOML + commonChainConfig := clNode.CommonChainConfigTOML + chainConfigByChain := clNode.ChainConfigTOMLByChain + if nodeConfig == "" { + nodeConfig = testInputs.EnvInput.NewCLCluster.Common.BaseConfigTOML + } + if commonChainConfig == "" { + commonChainConfig = testInputs.EnvInput.NewCLCluster.Common.CommonChainConfigTOML + } + if chainConfigByChain == nil { + chainConfigByChain = testInputs.EnvInput.NewCLCluster.Common.ChainConfigTOMLByChain + } + + _, tomlStr, err := setNodeConfig(nets, nodeConfig, commonChainConfig, chainConfigByChain) + require.NoError(t, err) + nodesMap = append(nodesMap, map[string]any{ + "name": clNode.Name, + "chainlink": 
map[string]any{ + "image": map[string]any{ + "image": pointer.GetString(clNode.ChainlinkImage.Image), + "version": pointer.GetString(clNode.ChainlinkImage.Version), + }, + }, + "db": map[string]any{ + "image": map[string]any{ + "image": clNode.DBImage, + "version": clNode.DBTag, + }, + "storageClassName": "gp3", + }, + "toml": tomlStr, + }) + } + clProps["nodes"] = nodesMap + return chainlink.New(0, clProps) + } + clProps["replicas"] = pointer.GetInt(testInputs.EnvInput.NewCLCluster.NoOfNodes) + _, tomlStr, err := setNodeConfig( + nets, + testInputs.EnvInput.NewCLCluster.Common.BaseConfigTOML, + testInputs.EnvInput.NewCLCluster.Common.CommonChainConfigTOML, + testInputs.EnvInput.NewCLCluster.Common.ChainConfigTOMLByChain, + ) + require.NoError(t, err) + clProps["toml"] = tomlStr + return chainlink.New(0, clProps) +} + +func DeployLocalCluster( + t *testing.T, + testInputs *CCIPTestConfig, +) (*test_env.CLClusterTestEnv, func() error) { + selectedNetworks := testInputs.SelectedNetworks + + privateEthereumNetworks := []*ctf_config.EthereumNetworkConfig{} + for _, network := range testInputs.EnvInput.PrivateEthereumNetworks { + privateEthereumNetworks = append(privateEthereumNetworks, network) + + for _, networkCfg := range networks.MustGetSelectedNetworkConfig(testInputs.EnvInput.Network) { + for _, key := range networkCfg.PrivateKeys { + address, err := conversions.PrivateKeyHexToAddress(key) + require.NoError(t, err, "failed to convert private key to address: %w", err) + network.EthereumChainConfig.AddressesToFund = append( + network.EthereumChainConfig.AddressesToFund, address.Hex(), + ) + } + } + } + + if len(selectedNetworks) > len(privateEthereumNetworks) { + seen := make(map[int64]bool) + missing := []blockchain.EVMNetwork{} + + for _, network := range privateEthereumNetworks { + seen[int64(network.EthereumChainConfig.ChainID)] = true + } + + for _, network := range selectedNetworks { + if !seen[network.ChainID] { + missing = append(missing, network) + } + } + + for _, network := range missing { + chainConfig := &ctf_config.EthereumChainConfig{} + err := chainConfig.Default() + if err != nil { + require.NoError(t, err, "failed to get default chain config: %w", err) + } else { + chainConfig.ChainID = int(network.ChainID) + eth1 := ctf_config_types.EthereumVersion_Eth1 + geth := ctf_config_types.ExecutionLayer_Geth + + privateEthereumNetworks = append(privateEthereumNetworks, &ctf_config.EthereumNetworkConfig{ + EthereumVersion: ð1, + ExecutionLayer: &geth, + EthereumChainConfig: chainConfig, + }) + } + } + + require.Equal(t, len(selectedNetworks), len(privateEthereumNetworks), "failed to create undefined selected networks. Maybe some of them had the same chain ids?") + } + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestConfig(testInputs.EnvInput). + WithTestInstance(t). + WithPrivateEthereumNetworks(privateEthereumNetworks). + WithMockAdapter(). + WithoutCleanup(). + Build() + require.NoError(t, err) + // the builder builds network with a static network config, we don't want that. 
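+ // Rebuild it from the selected networks instead: the entries written back to
+ // testInputs.SelectedNetworks use the RPC provider's private (in-network) URLs, while the
+ // copies appended to env.EVMNetworks use the provider's public URLs.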
+ env.EVMNetworks = []*blockchain.EVMNetwork{} + for i, networkCfg := range selectedNetworks { + rpcProvider, err := env.GetRpcProvider(networkCfg.ChainID) + require.NoError(t, err, "Error getting rpc provider") + selectedNetworks[i].URLs = rpcProvider.PrivateWsUrsl() + selectedNetworks[i].HTTPURLs = rpcProvider.PrivateHttpUrls() + newNetwork := networkCfg + newNetwork.URLs = rpcProvider.PublicWsUrls() + newNetwork.HTTPURLs = rpcProvider.PublicHttpUrls() + env.EVMNetworks = append(env.EVMNetworks, &newNetwork) + } + testInputs.SelectedNetworks = selectedNetworks + + // a func to start the CL nodes asynchronously + deployCL := func() error { + noOfNodes := pointer.GetInt(testInputs.EnvInput.NewCLCluster.NoOfNodes) + // if individual nodes are specified, then deploy them with specified configs + if len(testInputs.EnvInput.NewCLCluster.Nodes) > 0 { + for _, clNode := range testInputs.EnvInput.NewCLCluster.Nodes { + toml, _, err := setNodeConfig( + selectedNetworks, + clNode.BaseConfigTOML, + clNode.CommonChainConfigTOML, + clNode.ChainConfigTOMLByChain, + ) + if err != nil { + return err + } + ccipNode, err := test_env.NewClNode( + []string{env.DockerNetwork.Name}, + pointer.GetString(clNode.ChainlinkImage.Image), + pointer.GetString(clNode.ChainlinkImage.Version), + toml, + env.LogStream, + test_env.WithPgDBOptions( + ctftestenv.WithPostgresImageName(clNode.DBImage), + ctftestenv.WithPostgresImageVersion(clNode.DBTag), + ), + ) + if err != nil { + return err + } + ccipNode.SetTestLogger(t) + env.ClCluster.Nodes = append(env.ClCluster.Nodes, ccipNode) + } + } else { + // if no individual nodes are specified, then deploy the number of nodes specified in the env input with common config + for i := 0; i < noOfNodes; i++ { + toml, _, err := setNodeConfig( + selectedNetworks, + testInputs.EnvInput.NewCLCluster.Common.BaseConfigTOML, + testInputs.EnvInput.NewCLCluster.Common.CommonChainConfigTOML, + testInputs.EnvInput.NewCLCluster.Common.ChainConfigTOMLByChain, + ) + if err != nil { + return err + } + ccipNode, err := test_env.NewClNode( + []string{env.DockerNetwork.Name}, + pointer.GetString(testInputs.EnvInput.NewCLCluster.Common.ChainlinkImage.Image), + pointer.GetString(testInputs.EnvInput.NewCLCluster.Common.ChainlinkImage.Version), + toml, + env.LogStream, + test_env.WithPgDBOptions( + ctftestenv.WithPostgresImageName(testInputs.EnvInput.NewCLCluster.Common.DBImage), + ctftestenv.WithPostgresImageVersion(testInputs.EnvInput.NewCLCluster.Common.DBTag), + ), + ) + if err != nil { + return err + } + ccipNode.SetTestLogger(t) + env.ClCluster.Nodes = append(env.ClCluster.Nodes, ccipNode) + } + } + return env.ClCluster.Start() + } + return env, deployCL +} + +// UpgradeNodes restarts chainlink nodes in the given range with upgrade image +// startIndex and endIndex are inclusive +func UpgradeNodes( + t *testing.T, + lggr *zerolog.Logger, + testInputs *CCIPTestConfig, + ccipEnv *actions.CCIPTestEnv, +) error { + lggr.Info(). 
+ Msg("Upgrading node version") + // if the test is running on local docker + if pointer.GetBool(testInputs.TestGroupInput.LocalCluster) { + env := ccipEnv.LocalCluster + for i, clNode := range env.ClCluster.Nodes { + upgradeImage := pointer.GetString(testInputs.EnvInput.NewCLCluster.Common.ChainlinkUpgradeImage.Image) + upgradeTag := pointer.GetString(testInputs.EnvInput.NewCLCluster.Common.ChainlinkUpgradeImage.Version) + // if individual node upgrade image is provided, use that + if len(testInputs.EnvInput.NewCLCluster.Nodes) > 0 { + if i < len(testInputs.EnvInput.NewCLCluster.Nodes) { + upgradeImage = pointer.GetString(testInputs.EnvInput.NewCLCluster.Nodes[i].ChainlinkUpgradeImage.Image) + upgradeTag = pointer.GetString(testInputs.EnvInput.NewCLCluster.Nodes[i].ChainlinkUpgradeImage.Version) + } + } + if upgradeImage == "" || upgradeTag == "" { + continue + } + err := clNode.UpgradeVersion(upgradeImage, upgradeTag) + if err != nil { + return err + } + } + } else { + // if the test is running on k8s + k8Env := ccipEnv.K8Env + if k8Env == nil { + return errors.New("k8s environment is nil, cannot restart nodes") + } + props, noOfNodesToUpgrade := ChainlinkPropsForUpdate(t, testInputs) + chartName := ccipEnv.CLNodes[0].ChartName + // explicitly set the env var into false to allow manifest update + // if tests are run in remote runner, it might be set to true to disable manifest update + err := os.Setenv(k8config.EnvVarSkipManifestUpdate, "false") + if err != nil { + return err + } + k8Env.Cfg.SkipManifestUpdate = false + lggr.Info(). + Str("Chart Name", chartName). + Interface("Upgrade Details", props). + Msg("Upgrading Chainlink Node") + k8Env, err = k8Env.UpdateHelm(chartName, props) + if err != nil { + return err + } + err = k8Env.RunUpdated(noOfNodesToUpgrade) + // Run the new environment and wait for changes to show + if err != nil { + return err + } + } + return nil +} + +// DeployEnvironments deploys K8 env for CCIP tests. For tests running on simulated geth it deploys - +// 1. two simulated geth network in non-dev mode +// 2. mockserver ( to set mock price feed details) +// 3. chainlink nodes +func DeployEnvironments( + t *testing.T, + envconfig *environment.Config, + testInputs *CCIPTestConfig, +) *environment.Environment { + selectedNetworks := testInputs.SelectedNetworks + testEnvironment := environment.New(envconfig) + numOfTxNodes := 1 + var charts []string + for i, network := range selectedNetworks { + if testInputs.EnvInput.Network.AnvilConfigs != nil { + // if anvilconfig is specified for a network addhelm for anvil + if anvilConfig, exists := testInputs.EnvInput.Network.AnvilConfigs[strings.ToUpper(network.Name)]; exists { + charts = append(charts, foundry.ChartName) + if anvilConfig.BaseFee == nil { + anvilConfig.BaseFee = pointer.ToInt64(1000000) + } + if anvilConfig.BlockGaslimit == nil { + anvilConfig.BlockGaslimit = pointer.ToInt64(100000000) + } + testEnvironment. 
+ AddHelm(foundry.NewVersioned("0.2.1", &foundry.Props{ + NetworkName: network.Name, + Values: map[string]interface{}{ + "fullnameOverride": actions.NetworkName(network.Name), + "image": map[string]interface{}{ + "repository": "ghcr.io/foundry-rs/foundry", + "tag": "nightly-5ac78a9cd4b94dc53d1fe5e0f42372b28b5a7559", + // "tag": "nightly-ea2eff95b5c17edd3ffbdfc6daab5ce5cc80afc0", + }, + "anvil": map[string]interface{}{ + "chainId": fmt.Sprintf("%d", network.ChainID), + "blockTime": anvilConfig.BlockTime, + "forkURL": anvilConfig.URL, + "forkBlockNumber": anvilConfig.BlockNumber, + "forkRetries": anvilConfig.Retries, + "forkTimeout": anvilConfig.Timeout, + "forkComputeUnitsPerSecond": anvilConfig.ComputePerSecond, + "forkNoRateLimit": anvilConfig.RateLimitDisabled, + "blocksToKeepInMemory": anvilConfig.BlocksToKeepInMem, + "blockGasLimit": fmt.Sprintf("%d", pointer.GetInt64(anvilConfig.BlockGaslimit)), + "baseFee": fmt.Sprintf("%d", pointer.GetInt64(anvilConfig.BaseFee)), + }, + "resources": GethResourceProfile, + "cache": map[string]interface{}{ + "capacity": "150Gi", + }, + }, + })) + selectedNetworks[i].Simulated = true + actions.NetworkChart = foundry.ChartName + continue + } + } + + if !network.Simulated { + charts = append(charts, "") + continue + } + charts = append(charts, strings.ReplaceAll(strings.ToLower(network.Name), " ", "-")) + testEnvironment. + AddHelm(reorg.New(&reorg.Props{ + NetworkName: network.Name, + NetworkType: "simulated-geth-non-dev", + Values: map[string]interface{}{ + "geth": map[string]interface{}{ + "genesis": map[string]interface{}{ + "networkId": fmt.Sprint(network.ChainID), + }, + "tx": map[string]interface{}{ + "replicas": strconv.Itoa(numOfTxNodes), + "resources": testInputs.GethResourceProfile, + }, + "miner": map[string]interface{}{ + "replicas": "0", + "resources": testInputs.GethResourceProfile, + }, + }, + "bootnode": map[string]interface{}{ + "replicas": "1", + }, + }, + })) + } + if pointer.GetBool(testInputs.TestGroupInput.USDCMockDeployment) || + pointer.GetBool(testInputs.TestGroupInput.TokenConfig.WithPipeline) { + testEnvironment. + AddHelm(mockservercfg.New(nil)). + AddHelm(mockserver.New(nil)) + } + err := testEnvironment.Run() + require.NoError(t, err) + + if testEnvironment.WillUseRemoteRunner() { + return testEnvironment + } + urlFinder := func(network blockchain.EVMNetwork, chart string) ([]string, []string) { + if !network.Simulated { + return network.URLs, network.HTTPURLs + } + networkName := actions.NetworkName(network.Name) + var internalWsURLs, internalHttpURLs []string + switch chart { + case foundry.ChartName: + internalWsURLs = append(internalWsURLs, fmt.Sprintf("ws://%s:8545", networkName)) + internalHttpURLs = append(internalHttpURLs, fmt.Sprintf("http://%s:8545", networkName)) + case networkName: + for i := 0; i < numOfTxNodes; i++ { + internalWsURLs = append(internalWsURLs, fmt.Sprintf("ws://%s-ethereum-geth:8546", networkName)) + internalHttpURLs = append(internalHttpURLs, fmt.Sprintf("http://%s-ethereum-geth:8544", networkName)) + } + default: + return network.URLs, network.HTTPURLs + } + + return internalWsURLs, internalHttpURLs + } + var nets []blockchain.EVMNetwork + for i := range selectedNetworks { + nets = append(nets, selectedNetworks[i]) + nets[i].URLs, nets[i].HTTPURLs = urlFinder(selectedNetworks[i], charts[i]) + } + + err = testEnvironment. + AddHelm(ChainlinkChart(t, testInputs, nets)). 
+ Run() + require.NoError(t, err) + return testEnvironment +} diff --git a/integration-tests/ccip-tests/types/config/node/core.go b/integration-tests/ccip-tests/types/config/node/core.go new file mode 100644 index 00000000000..eb12598f948 --- /dev/null +++ b/integration-tests/ccip-tests/types/config/node/core.go @@ -0,0 +1,67 @@ +package node + +import ( + "bytes" + "fmt" + "math/big" + + "github.com/smartcontractkit/chainlink-testing-framework/blockchain" + "github.com/smartcontractkit/chainlink-testing-framework/utils/ptr" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + + "github.com/smartcontractkit/chainlink/integration-tests/types/config/node" + itutils "github.com/smartcontractkit/chainlink/integration-tests/utils" + evmcfg "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml" + ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" + "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" +) + +func NewConfigFromToml(tomlConfig []byte, opts ...node.NodeConfigOpt) (*chainlink.Config, error) { + var cfg chainlink.Config + err := config.DecodeTOML(bytes.NewReader(tomlConfig), &cfg) + if err != nil { + return nil, err + } + for _, opt := range opts { + opt(&cfg) + } + return &cfg, nil +} + +func WithPrivateEVMs(networks []blockchain.EVMNetwork, commonChainConfig *evmcfg.Chain, chainSpecificConfig map[int64]evmcfg.Chain) node.NodeConfigOpt { + var evmConfigs []*evmcfg.EVMConfig + for _, network := range networks { + var evmNodes []*evmcfg.Node + for i := range network.URLs { + evmNodes = append(evmNodes, &evmcfg.Node{ + Name: ptr.Ptr(fmt.Sprintf("%s-%d", network.Name, i)), + WSURL: itutils.MustURL(network.URLs[i]), + HTTPURL: itutils.MustURL(network.HTTPURLs[i]), + }) + } + evmConfig := &evmcfg.EVMConfig{ + ChainID: ubig.New(big.NewInt(network.ChainID)), + Nodes: evmNodes, + Chain: evmcfg.Chain{}, + } + if commonChainConfig != nil { + evmConfig.Chain = *commonChainConfig + } + if chainSpecificConfig == nil { + if overriddenChainCfg, ok := chainSpecificConfig[network.ChainID]; ok { + evmConfig.Chain = overriddenChainCfg + } + } + if evmConfig.Chain.FinalityDepth == nil && network.FinalityDepth > 0 { + evmConfig.Chain.FinalityDepth = ptr.Ptr(uint32(network.FinalityDepth)) + } + if evmConfig.Chain.FinalityTagEnabled == nil && network.FinalityTag { + evmConfig.Chain.FinalityTagEnabled = ptr.Ptr(network.FinalityTag) + } + evmConfigs = append(evmConfigs, evmConfig) + } + return func(c *chainlink.Config) { + c.EVM = evmConfigs + } +} diff --git a/integration-tests/ccip-tests/utils/common.go b/integration-tests/ccip-tests/utils/common.go new file mode 100644 index 00000000000..afa8158e450 --- /dev/null +++ b/integration-tests/ccip-tests/utils/common.go @@ -0,0 +1,32 @@ +package utils + +import ( + "path/filepath" + "runtime" + "sync" +) + +func ProjectRoot() string { + _, b, _, _ := runtime.Caller(0) + return filepath.Join(filepath.Dir(b), "/..") +} + +// DeleteNilEntriesFromMap checks for nil entry in map, store all not-nil entries to another map and deallocates previous map +// Deleting keys from a map actually does not delete the key, It just sets the corresponding value to nil. 
+func DeleteNilEntriesFromMap(inputMap *sync.Map) *sync.Map { + newMap := &sync.Map{} + foundNil := false + inputMap.Range(func(key, value any) bool { + if value != nil { + newMap.Store(key, value) + } + if value == nil { + foundNil = true + } + return true + }) + if foundNil { + runtime.GC() + } + return newMap +} diff --git a/integration-tests/ccip-tests/utils/fileutil.go b/integration-tests/ccip-tests/utils/fileutil.go new file mode 100644 index 00000000000..43e048e1f6b --- /dev/null +++ b/integration-tests/ccip-tests/utils/fileutil.go @@ -0,0 +1,63 @@ +package utils + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/rs/zerolog/log" +) + +// FilesWithRegex returns all filepaths under root folder matching with regex pattern +func FilesWithRegex(root, pattern string) ([]string, error) { + r, err := regexp.Compile(pattern) + if err != nil { + return nil, err + } + var filenames []string + err = filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Check if the file matches the regex pattern + if !info.IsDir() && r.MatchString(info.Name()) { + filenames = append(filenames, path) + } + + return nil + }) + return filenames, err +} + +func FileNameFromPath(path string) string { + if !strings.Contains(path, "/") { + return path + } + return strings.Split(path, "/")[len(strings.Split(path, "/"))-1] +} + +// FirstFileFromMatchingPath formats the given filepathWithPattern with actual file path +// if filepathWithPattern is provided with a regex expression it returns the first filepath +// matching with the regex. +// if there is no regex provided in filepathWithPattern it just returns the provided filepath +func FirstFileFromMatchingPath(filepathWithPattern string) (string, error) { + filename := FileNameFromPath(filepathWithPattern) + if strings.Contains(filepathWithPattern, "/") { + rootFolder := strings.Split(filepathWithPattern, filename)[0] + allFiles, err := FilesWithRegex(rootFolder, filename) + if err != nil { + return "", fmt.Errorf("error trying to find file %s:%w", filepathWithPattern, err) + } + if len(allFiles) == 0 { + return "", fmt.Errorf("error trying to find file %s", filepathWithPattern) + } + if len(allFiles) > 1 { + log.Warn().Str("path", filepathWithPattern).Msg("more than one contract config files found in location, using the first one") + } + return allFiles[0], nil + } + return filepathWithPattern, nil +} diff --git a/integration-tests/go.mod b/integration-tests/go.mod index a7783f7daa8..16482effa47 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -6,6 +6,9 @@ go 1.22.5 replace github.com/smartcontractkit/chainlink/v2 => ../ require ( + dario.cat/mergo v1.0.0 + github.com/AlekSi/pointer v1.1.0 + github.com/Masterminds/semver/v3 v3.2.1 github.com/avast/retry-go/v4 v4.6.0 github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df github.com/chaos-mesh/chaos-mesh/api v0.0.0-20240709130330-9f4feec7553f @@ -22,11 +25,13 @@ require ( github.com/onsi/gomega v1.33.1 github.com/pelletier/go-toml/v2 v2.2.2 github.com/pkg/errors v0.9.1 + github.com/prometheus/common v0.55.0 github.com/rs/zerolog v1.31.0 github.com/scylladb/go-reflectx v1.0.1 github.com/segmentio/ksuid v1.0.4 github.com/shopspring/decimal v1.4.0 github.com/slack-go/slack v0.12.2 + github.com/smartcontractkit/chain-selectors v1.0.21 github.com/smartcontractkit/chainlink-automation v1.0.4 github.com/smartcontractkit/chainlink-common v0.2.2-0.20240808143317-6b16fc28887d 
github.com/smartcontractkit/chainlink-testing-framework v1.34.2 @@ -41,7 +46,11 @@ require ( github.com/test-go/testify v1.1.4 github.com/testcontainers/testcontainers-go v0.28.0 github.com/umbracle/ethgo v0.1.3 + go.uber.org/atomic v1.11.0 + go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 + golang.org/x/crypto v0.25.0 + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 golang.org/x/sync v0.7.0 golang.org/x/text v0.16.0 gopkg.in/guregu/null.v4 v4.0.0 @@ -60,7 +69,6 @@ require ( cosmossdk.io/depinject v1.0.0-alpha.3 // indirect cosmossdk.io/errors v1.0.0 // indirect cosmossdk.io/math v1.0.1 // indirect - dario.cat/mergo v1.0.0 // indirect filippo.io/edwards25519 v1.1.0 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.1 // indirect @@ -78,7 +86,6 @@ require ( github.com/K-Phoen/sdk v0.12.4 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.2.1 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Microsoft/hcsshim v0.11.4 // indirect @@ -270,6 +277,7 @@ require ( github.com/hashicorp/serf v0.10.1 // indirect github.com/hashicorp/yamux v0.1.1 // indirect github.com/hdevalence/ed25519consensus v0.1.0 // indirect + github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/holiman/uint256 v1.2.4 // indirect github.com/huandu/skiplist v1.2.0 // indirect @@ -358,7 +366,6 @@ require ( github.com/prometheus/alertmanager v0.26.0 // indirect github.com/prometheus/client_golang v1.19.1 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97 // indirect github.com/prometheus/procfs v0.15.1 // indirect @@ -376,7 +383,6 @@ require ( github.com/shirou/gopsutil/v3 v3.24.3 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/smartcontractkit/chain-selectors v1.0.10 // indirect github.com/smartcontractkit/chainlink-ccip v0.0.0-20240806144315-04ac101e9c95 // indirect github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 // indirect github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f // indirect @@ -442,14 +448,10 @@ require ( go.opentelemetry.io/otel/trace v1.28.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect - go.uber.org/atomic v1.11.0 // indirect go.uber.org/goleak v1.3.0 // indirect - go.uber.org/multierr v1.11.0 // indirect go.uber.org/ratelimit v0.3.0 // indirect go4.org/netipx v0.0.0-20230125063823-8449b0a6169f // indirect golang.org/x/arch v0.8.0 // indirect - golang.org/x/crypto v0.25.0 // indirect - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/mod v0.19.0 // indirect golang.org/x/net v0.27.0 // indirect golang.org/x/oauth2 v0.21.0 // indirect diff --git a/integration-tests/go.sum b/integration-tests/go.sum index 411b3ddd46b..99de85d8775 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -1484,8 +1484,8 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod 
h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/slack-go/slack v0.12.2 h1:x3OppyMyGIbbiyFhsBmpf9pwkUzMhthJMRNmNlA4LaQ= github.com/slack-go/slack v0.12.2/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= -github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= -github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= +github.com/smartcontractkit/chain-selectors v1.0.21 h1:KCR9SA7PhOexaBzFieHoLv1WonwhVOPtOStpqTmLC4E= +github.com/smartcontractkit/chain-selectors v1.0.21/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= github.com/smartcontractkit/chainlink-automation v1.0.4 h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8= github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM= github.com/smartcontractkit/chainlink-ccip v0.0.0-20240806144315-04ac101e9c95 h1:LAgJTg9Yr/uCo2g7Krp88Dco2U45Y6sbJVl8uKoLkys= diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod index 4a15b97abfd..0a65245f439 100644 --- a/integration-tests/load/go.mod +++ b/integration-tests/load/go.mod @@ -370,7 +370,7 @@ require ( github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/shopspring/decimal v1.4.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/smartcontractkit/chain-selectors v1.0.10 // indirect + github.com/smartcontractkit/chain-selectors v1.0.21 // indirect github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f // indirect github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 // indirect github.com/smartcontractkit/chainlink-solana v1.1.1-0.20240806154405-8e5684f98564 // indirect diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum index 625da73ba03..22286e59333 100644 --- a/integration-tests/load/go.sum +++ b/integration-tests/load/go.sum @@ -1466,8 +1466,8 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/slack-go/slack v0.12.2 h1:x3OppyMyGIbbiyFhsBmpf9pwkUzMhthJMRNmNlA4LaQ= github.com/slack-go/slack v0.12.2/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= -github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= -github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= +github.com/smartcontractkit/chain-selectors v1.0.21 h1:KCR9SA7PhOexaBzFieHoLv1WonwhVOPtOStpqTmLC4E= +github.com/smartcontractkit/chain-selectors v1.0.21/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= github.com/smartcontractkit/chainlink-automation v1.0.4 h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8= github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM= github.com/smartcontractkit/chainlink-ccip v0.0.0-20240806144315-04ac101e9c95 h1:LAgJTg9Yr/uCo2g7Krp88Dco2U45Y6sbJVl8uKoLkys=