diff --git a/.github/actions/build-chainlink-image/action.yml b/.github/actions/build-chainlink-image/action.yml
new file mode 100644
index 00000000000..ac29a3d7b8d
--- /dev/null
+++ b/.github/actions/build-chainlink-image/action.yml
@@ -0,0 +1,48 @@
+name: Build Chainlink Image
+description: A composite action that allows building and publishing the Chainlink image for integration testing
+
+inputs:
+ tag_suffix:
+ description: The suffix to append to the image tag (usually blank or "-plugins")
+ default: ""
+ dockerfile:
+ description: The path to the Dockerfile to use (usually core/chainlink.Dockerfile or plugins/chainlink.Dockerfile)
+ default: core/chainlink.Dockerfile
+ git_commit_sha:
+ description: The git commit sha to use for the image tag
+ default: ${{ github.sha }}
+ GRAFANA_CLOUD_BASIC_AUTH:
+ description: "grafana cloud basic auth"
+ GRAFANA_CLOUD_HOST:
+ description: "grafana cloud hostname"
+ AWS_REGION:
+ description: "AWS region to use for ECR"
+ AWS_ROLE_TO_ASSUME:
+ description: "AWS role to assume for ECR"
+
+runs:
+ using: composite
+ steps:
+ - name: Check if image exists
+ id: check-image
+ uses: smartcontractkit/chainlink-github-actions/docker/image-exists@eccde1970eca69f079d3efb3409938a72ade8497 # v2.2.13
+ with:
+ repository: chainlink
+ tag: ${{ inputs.git_commit_sha }}${{ inputs.tag_suffix }}
+ AWS_REGION: ${{ inputs.AWS_REGION }}
+ AWS_ROLE_TO_ASSUME: ${{ inputs.AWS_ROLE_TO_ASSUME }}
+ - name: Build Image
+ if: steps.check-image.outputs.exists == 'false'
+ uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/build-image@eccde1970eca69f079d3efb3409938a72ade8497 # v2.2.13
+ with:
+ cl_repo: smartcontractkit/chainlink
+ cl_ref: ${{ inputs.git_commit_sha }}
+ cl_dockerfile: ${{ inputs.dockerfile }}
+ push_tag: ${{ env.CHAINLINK_IMAGE }}:${{ inputs.git_commit_sha }}${{ inputs.tag_suffix }}
+ QA_AWS_REGION: ${{ inputs.AWS_REGION }}
+ QA_AWS_ROLE_TO_ASSUME: ${{ inputs.AWS_ROLE_TO_ASSUME }}
+ - name: Print Chainlink Image Built
+ shell: sh
+ run: |
+ echo "### Chainlink node image tag used for this test run :link:" >>$GITHUB_STEP_SUMMARY
+ echo "\`${{ inputs.git_commit_sha }}${{ inputs.tag_suffix }}\`" >>$GITHUB_STEP_SUMMARY
diff --git a/.github/actions/build-sign-publish-chainlink/action.yml b/.github/actions/build-sign-publish-chainlink/action.yml
index 55c682bc8d9..62add53092a 100644
--- a/.github/actions/build-sign-publish-chainlink/action.yml
+++ b/.github/actions/build-sign-publish-chainlink/action.yml
@@ -13,6 +13,12 @@ inputs:
description: Path to the Dockerfile (relative to the repo root)
default: core/chainlink.Dockerfile
required: false
+ dockerhub_username:
+ description: Username for Docker Hub to avoid rate limits when pulling public images
+ required: false
+ dockerhub_password:
+ description: Password for Docker Hub to avoid rate limits when pulling public images
+ required: false
ecr-hostname:
description: The ECR registry scope
default: public.ecr.aws
@@ -126,6 +132,14 @@ runs:
type=semver,pattern={{version}},suffix=${{ inputs.ecr-tag-suffix }}-root
type=sha,format=short,suffix=${{ inputs.ecr-tag-suffix }}-root
+ # To avoid rate limiting from Docker Hub, we login with a paid user account.
+ - name: Login to Docker Hub
+ if: inputs.dockerhub_username && inputs.dockerhub_password
+ uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
+ with:
+ username: ${{ inputs.dockerhub_username }}
+ password: ${{ inputs.dockerhub_password }}
+
- name: Build and push root docker image
id: buildpush-root
uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09 # v5.0.0
@@ -159,6 +173,14 @@ runs:
images: ${{ env.shared-images }}
tags: ${{ env.shared-tag-list }}
+ # To avoid rate limiting from Docker Hub, we login with a paid user account.
+ - name: Login to Docker Hub
+ if: inputs.dockerhub_username && inputs.dockerhub_password
+ uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
+ with:
+ username: ${{ inputs.dockerhub_username }}
+ password: ${{ inputs.dockerhub_password }}
+
- name: Build and push non-root docker image
id: buildpush-nonroot
uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09 # v5.0.0
@@ -201,7 +223,7 @@ runs:
- if: inputs.sign-images == 'true'
name: Install cosign
- uses: sigstore/cosign-installer@581838fbedd492d2350a9ecd427a95d6de1e5d01 # v2.1.0
+ uses: sigstore/cosign-installer@11086d25041f77fe8fe7b9ea4e48e3b9192b8f19 # v3.1.2
with:
cosign-release: "v1.6.0"
diff --git a/.github/actions/build-test-image/action.yml b/.github/actions/build-test-image/action.yml
index c4b39b4d7af..a241f51d920 100644
--- a/.github/actions/build-test-image/action.yml
+++ b/.github/actions/build-test-image/action.yml
@@ -30,6 +30,13 @@ inputs:
runs:
using: composite
steps:
+ - name: Get CTF Version
+ id: version
+ uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/mod-version@e865e376b8c2d594028c8d645dd6c47169b72974 # v2.2.16
+ with:
+ go-project-path: ./integration-tests
+ module-name: github.com/smartcontractkit/chainlink-testing-framework
+ enforce-semantic-tag: "true" # it has to be in the form of v1.2.3 or the image won't exist
- name: Check if image exists
id: check-image
uses: smartcontractkit/chainlink-github-actions/docker/image-exists@00c6214deb10a3f374c6d3430c32c5202015d463 # v2.2.12
@@ -48,7 +55,7 @@ runs:
file: ./integration-tests/test.Dockerfile
build-args: |
BASE_IMAGE=${{ inputs.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ inputs.QA_AWS_REGION }}.amazonaws.com/test-base-image
- IMAGE_VERSION=v0.38.2
+ IMAGE_VERSION=${{ steps.version.outputs.version }}
SUITES="${{ inputs.suites }}"
AWS_REGION: ${{ inputs.QA_AWS_REGION }}
AWS_ROLE_TO_ASSUME: ${{ inputs.QA_AWS_ROLE_TO_ASSUME }}
diff --git a/.github/actions/golangci-lint/action.yml b/.github/actions/golangci-lint/action.yml
index 97755fa46ea..055960ff282 100644
--- a/.github/actions/golangci-lint/action.yml
+++ b/.github/actions/golangci-lint/action.yml
@@ -53,7 +53,7 @@ runs:
- name: golangci-lint
uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0
with:
- version: v1.55.0
+ version: v1.55.2
# We already cache these directories in setup-go
skip-pkg-cache: true
skip-build-cache: true
diff --git a/.github/actions/goreleaser-build-sign-publish/action.yml b/.github/actions/goreleaser-build-sign-publish/action.yml
index 845d2443fc1..b2d42c1234e 100644
--- a/.github/actions/goreleaser-build-sign-publish/action.yml
+++ b/.github/actions/goreleaser-build-sign-publish/action.yml
@@ -84,7 +84,7 @@ runs:
version: ${{ inputs.zig-version }}
- name: Setup cosign
if: inputs.enable-cosign == 'true'
- uses: sigstore/cosign-installer@581838fbedd492d2350a9ecd427a95d6de1e5d01 # v2.1.0
+ uses: sigstore/cosign-installer@11086d25041f77fe8fe7b9ea4e48e3b9192b8f19 # v3.1.2
with:
cosign-release: ${{ inputs.cosign-version }}
- name: Login to docker registry
diff --git a/.github/workflows/automation-benchmark-tests.yml b/.github/workflows/automation-benchmark-tests.yml
index f23102f1ee6..a4338d642bc 100644
--- a/.github/workflows/automation-benchmark-tests.yml
+++ b/.github/workflows/automation-benchmark-tests.yml
@@ -24,6 +24,8 @@ on:
- OPTIMISM_GOERLI
- MUMBAI
- SEPOLIA
+ - BASE_GOERLI
+ - ARBITRUM_SEPOLIA
TestInputs:
description: TestInputs
required: false
@@ -55,7 +57,7 @@ jobs:
id-token: write
contents: read
name: ${{ inputs.network }} Automation Benchmark Test
- runs-on: ubuntu-latest
+ runs-on: ubuntu20.04-16cores-64GB
env:
SELECTED_NETWORKS: ${{ inputs.network }}
SLACK_API_KEY: ${{ secrets.QA_SLACK_API_KEY }}
@@ -107,7 +109,7 @@ jobs:
QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }}
QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}
- name: Run Tests
- uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@eccde1970eca69f079d3efb3409938a72ade8497 # v2.2.13
+ uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@7d541cbbca52d45b8a718257af86d9cf49774d1f # v2.2.15
env:
DETACH_RUNNER: true
TEST_SUITE: benchmark
diff --git a/.github/workflows/automation-ondemand-tests.yml b/.github/workflows/automation-ondemand-tests.yml
index ac0e34e0834..88c2c126dc6 100644
--- a/.github/workflows/automation-ondemand-tests.yml
+++ b/.github/workflows/automation-ondemand-tests.yml
@@ -172,7 +172,7 @@ jobs:
echo "version=${{ inputs.chainlinkVersionUpdate }}" >>$GITHUB_OUTPUT
fi
- name: Run Tests
- uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@eccde1970eca69f079d3efb3409938a72ade8497 # v2.2.13
+ uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@7d541cbbca52d45b8a718257af86d9cf49774d1f # v2.2.15
env:
PYROSCOPE_SERVER: ${{ matrix.tests.pyroscope_env == '' && '' || !startsWith(github.ref, 'refs/tags/') && '' || secrets.QA_PYROSCOPE_INSTANCE }} # Avoid sending blank envs https://github.com/orgs/community/discussions/25725
PYROSCOPE_ENVIRONMENT: ${{ matrix.tests.pyroscope_env }}
@@ -182,7 +182,7 @@ jobs:
UPGRADE_VERSION: ${{ steps.determine-build.outputs.upgrade_version }}
UPGRADE_IMAGE: ${{ steps.determine-build.outputs.upgrade_image }}
with:
- test_command_to_run: make test_need_operator_assets && cd ./integration-tests && go test -timeout 60m -count=1 -json -test.parallel=${{ matrix.tests.nodes }} ${{ matrix.tests.command }} 2>&1 | tee /tmp/gotest.log | gotestfmt
+ test_command_to_run: cd ./integration-tests && go test -timeout 60m -count=1 -json -test.parallel=${{ matrix.tests.nodes }} ${{ matrix.tests.command }} 2>&1 | tee /tmp/gotest.log | gotestfmt
test_download_vendor_packages_command: cd ./integration-tests && go mod download
cl_repo: ${{ steps.determine-build.outputs.image }}
cl_image_tag: ${{ steps.determine-build.outputs.version }}
diff --git a/.github/workflows/build-publish-develop.yml b/.github/workflows/build-publish-develop.yml
index 076fdf817df..b8859722378 100644
--- a/.github/workflows/build-publish-develop.yml
+++ b/.github/workflows/build-publish-develop.yml
@@ -52,6 +52,8 @@ jobs:
ecr-image-name: chainlink
ecr-tag-suffix: ${{ matrix.image.tag-suffix }}
dockerfile: ${{ matrix.image.dockerfile }}
+ dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }}
+ dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }}
git-commit-sha: ${{ steps.git-ref.outputs.checked-out || github.sha }}
- name: Collect Metrics
if: always()
diff --git a/.github/workflows/build-publish.yml b/.github/workflows/build-publish.yml
index 4d5a42a369f..1bda6957a2a 100644
--- a/.github/workflows/build-publish.yml
+++ b/.github/workflows/build-publish.yml
@@ -1,17 +1,17 @@
-name: 'Build Chainlink and Publish'
+name: "Build Chainlink and Publish"
on:
# Mimics old circleci behaviour
push:
tags:
- - 'v*'
+ - "v*"
branches:
- master
- - 'release/**'
+ - "release/**"
jobs:
checks:
- name: 'Checks'
+ name: "Checks"
runs-on: ubuntu-20.04
steps:
- name: Checkout repository
@@ -42,10 +42,12 @@ jobs:
aws-role-duration-seconds: ${{ secrets.AWS_ROLE_DURATION_SECONDS }}
aws-region: ${{ secrets.AWS_REGION }}
sign-images: true
- sign-method: 'keypair'
+ sign-method: "keypair"
cosign-private-key: ${{ secrets.COSIGN_PRIVATE_KEY }}
cosign-public-key: ${{ secrets.COSIGN_PUBLIC_KEY }}
cosign-password: ${{ secrets.COSIGN_PASSWORD }}
+ dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }}
+ dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }}
verify-signature: true
- name: Collect Metrics
if: always()
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 0f9a8ea8b35..6282e2168d8 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -1,4 +1,4 @@
-name: 'Build Chainlink'
+name: "Build Chainlink"
on:
pull_request:
@@ -7,7 +7,6 @@ on:
- master
jobs:
-
build-chainlink:
runs-on: ubuntu-20.04
steps:
@@ -17,6 +16,8 @@ jobs:
- name: Build chainlink image
uses: ./.github/actions/build-sign-publish-chainlink
with:
+ dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }}
+ dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }}
publish: false
sign-images: false
- name: Collect Metrics
diff --git a/.github/workflows/ci-core.yml b/.github/workflows/ci-core.yml
index 40535855906..d0bae664801 100644
--- a/.github/workflows/ci-core.yml
+++ b/.github/workflows/ci-core.yml
@@ -166,6 +166,7 @@ jobs:
GITHUB_EVENT_PATH: ${{ github.event_path }}
GITHUB_EVENT_NAME: ${{ github.event_name }}
GITHUB_REPO: ${{ github.repository }}
+ GITHUB_RUN_ID: ${{ github.run_id }}
run: |
./runner \
-grafana_auth=$GRAFANA_CLOUD_BASIC_AUTH \
@@ -173,6 +174,7 @@ jobs:
-gh_sha=$GITHUB_SHA \
-gh_event_path=$GITHUB_EVENT_PATH \
-gh_event_name=$GITHUB_EVENT_NAME \
+ -gh_run_id=$GITHUB_RUN_ID \
-gh_repo=$GITHUB_REPO \
-command=./tools/bin/go_core_tests \
`ls -R ./artifacts/go_core_tests*/output.txt`
diff --git a/.github/workflows/integration-chaos-tests.yml b/.github/workflows/integration-chaos-tests.yml
index 648d5f9daa3..892a43e76f0 100644
--- a/.github/workflows/integration-chaos-tests.yml
+++ b/.github/workflows/integration-chaos-tests.yml
@@ -109,9 +109,9 @@ jobs:
- name: Checkout the repo
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Run Tests
- uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@eccde1970eca69f079d3efb3409938a72ade8497 # v2.2.13
+ uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@7d541cbbca52d45b8a718257af86d9cf49774d1f # v2.2.15
with:
- test_command_to_run: make test_need_operator_assets && cd integration-tests && go test -timeout 1h -count=1 -json -test.parallel 11 ./chaos 2>&1 | tee /tmp/gotest.log | gotestfmt
+ test_command_to_run: cd integration-tests && go test -timeout 1h -count=1 -json -test.parallel 11 ./chaos 2>&1 | tee /tmp/gotest.log | gotestfmt
test_download_vendor_packages_command: cd ./integration-tests && go mod download
cl_repo: ${{ env.CHAINLINK_IMAGE }}
cl_image_tag: ${{ github.sha }}
diff --git a/.github/workflows/integration-tests-publish.yml b/.github/workflows/integration-tests-publish.yml
index 60f67f03574..a66ea612281 100644
--- a/.github/workflows/integration-tests-publish.yml
+++ b/.github/workflows/integration-tests-publish.yml
@@ -37,3 +37,11 @@ jobs:
QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }}
QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}
+ - name: Notify Slack
+ if: failure()
+ uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0
+ env:
+ SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }}
+ with:
+ channel-id: "#team-test-tooling-internal"
+ slack-message: ":x: :mild-panic-intensifies: Publish Integration Test Image failed: ${{ format('https://github.com/smartcontractkit/chainlink/actions/runs/{0}', github.run_id) }}"
diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml
index 5074fc35b9a..9294dceae6d 100644
--- a/.github/workflows/integration-tests.yml
+++ b/.github/workflows/integration-tests.yml
@@ -2,19 +2,10 @@ name: Integration Tests
on:
merge_group:
pull_request:
- schedule:
- - cron: "0 0 * * *"
- # - cron: "0 * * * *" # DEBUG: Run every hour to nail down flakes
push:
tags:
- "*"
workflow_dispatch:
- inputs:
- liveNetwork:
- description: "Run Live Testnet Tests"
- required: false
- type: boolean
-
# Only run 1 of this workflow at a time per PR
concurrency:
@@ -31,6 +22,18 @@ env:
MOD_CACHE_VERSION: 2
jobs:
+ enforce-ctf-version:
+ name: Enforce CTF Version
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout the repo
+ uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+ - name: Enforce CTF Version
+ uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/mod-version@e865e376b8c2d594028c8d645dd6c47169b72974 # v2.2.16
+ with:
+ go-project-path: ./integration-tests
+ module-name: github.com/smartcontractkit/chainlink-testing-framework
+ enforce-semantic-tag: "true"
changes:
environment: integration
name: Check Paths That Require Tests To Run
@@ -60,6 +63,37 @@ jobs:
continue-on-error: true
outputs:
src: ${{ steps.changes.outputs.src }}
+
+ build-lint-integration-tests:
+ name: Build and Lint integration-tests
+ runs-on: ubuntu20.04-16cores-64GB
+ steps:
+ - name: Checkout the repo
+ uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+ - name: Setup Go
+ uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/setup-go@e865e376b8c2d594028c8d645dd6c47169b72974 # v2.2.16
+ with:
+ test_download_vendor_packages_command: cd ./integration-tests && go mod download
+ go_mod_path: ./integration-tests/go.mod
+ cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }}
+ cache_restore_only: "true"
+ - name: Build Go
+ run: |
+ cd ./integration-tests
+ go build ./...
+ SELECTED_NETWORKS=SIMULATED go test -run=^# ./...
+ - name: Lint Go
+ uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0
+ with:
+ version: v1.55.2
+ # We already cache these directories in setup-go
+ skip-pkg-cache: true
+ skip-build-cache: true
+ # only-new-issues is only applicable to PRs, otherwise it is always set to false
+ only-new-issues: false # disabled for PRs due to unreliability
+ args: --out-format colored-line-number,checkstyle:golangci-lint-report.xml
+ working-directory: ./integration-tests
+
build-chainlink:
environment: integration
permissions:
@@ -76,7 +110,7 @@ jobs:
tag-suffix: -plugins
name: Build Chainlink Image ${{ matrix.image.name }}
runs-on: ubuntu20.04-16cores-64GB
- needs: [changes]
+ needs: [changes, enforce-ctf-version]
steps:
- name: Collect Metrics
if: needs.changes.outputs.src == 'true'
@@ -91,30 +125,17 @@ jobs:
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }}
- - name: Check if image exists
+ - name: Build Chainlink Image
if: needs.changes.outputs.src == 'true'
- id: check-image
- uses: smartcontractkit/chainlink-github-actions/docker/image-exists@eccde1970eca69f079d3efb3409938a72ade8497 # v2.2.13
- with:
- repository: chainlink
- tag: ${{ github.sha }}${{ matrix.image.tag-suffix }}
+ uses: ./.github/actions/build-chainlink-image
+ with:
+ tag_suffix: ${{ matrix.image.tag-suffix }}
+ dockerfile: ${{ matrix.image.dockerfile }}
+ git_commit_sha: ${{ github.sha }}
+ GRAFANA_CLOUD_BASIC_AUTH: ${{ secrets.GRAFANA_CLOUD_BASIC_AUTH }}
+ GRAFANA_CLOUD_HOST: ${{ secrets.GRAFANA_CLOUD_HOST }}
AWS_REGION: ${{ secrets.QA_AWS_REGION }}
AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
- - name: Build Image
- if: steps.check-image.outputs.exists == 'false' && needs.changes.outputs.src == 'true'
- uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/build-image@eccde1970eca69f079d3efb3409938a72ade8497 # v2.2.13
- with:
- cl_repo: smartcontractkit/chainlink
- cl_ref: ${{ github.sha }}
- cl_dockerfile: ${{ matrix.image.dockerfile }}
- push_tag: ${{ env.CHAINLINK_IMAGE }}:${{ github.sha }}${{ matrix.image.tag-suffix }}
- QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }}
- QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
- - name: Print Chainlink Image Built
- if: needs.changes.outputs.src == 'true'
- run: |
- echo "### Chainlink node image tag used for this test run :link:" >>$GITHUB_STEP_SUMMARY
- echo "\`${GITHUB_SHA}\`" >>$GITHUB_STEP_SUMMARY
build-test-image:
if: startsWith(github.ref, 'refs/tags/') || github.event_name == 'schedule' || contains(join(github.event.pull_request.labels.*.name, ' '), 'build-test-image')
@@ -176,14 +197,14 @@ jobs:
echo "MATRIX_JSON=${COMBINED_ARRAY}" >> $GITHUB_ENV
eth-smoke-tests-matrix-automation:
- if: ${{ !contains(join(github.event.pull_request.labels.*.name, ' '), 'skip-smoke-tests') }}
+ if: ${{ !(contains(join(github.event.pull_request.labels.*.name, ' '), 'skip-smoke-tests') || (github.event_name == 'workflow_dispatch' && !inputs.simulatedNetwork)) }}
environment: integration
permissions:
checks: write
pull-requests: write
id-token: write
contents: read
- needs: [build-chainlink, changes, compare-tests]
+ needs: [build-chainlink, changes, compare-tests, build-lint-integration-tests]
env:
SELECTED_NETWORKS: SIMULATED,SIMULATED_1,SIMULATED_2
CHAINLINK_COMMIT_SHA: ${{ github.sha }}
@@ -212,13 +233,13 @@ jobs:
## Run this step when changes that require tests to be run are made
- name: Run Tests
if: needs.changes.outputs.src == 'true'
- uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@eccde1970eca69f079d3efb3409938a72ade8497 # v2.2.13
+ uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@7d541cbbca52d45b8a718257af86d9cf49774d1f # v2.2.15
env:
PYROSCOPE_SERVER: ${{ matrix.product.pyroscope_env == '' && '' || !startsWith(github.ref, 'refs/tags/') && '' || secrets.QA_PYROSCOPE_INSTANCE }} # Avoid sending blank envs https://github.com/orgs/community/discussions/25725
PYROSCOPE_ENVIRONMENT: ${{ matrix.product.pyroscope_env }}
PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }}
with:
- test_command_to_run: make test_need_operator_assets && cd ./integration-tests && go test -timeout 30m -count=1 -json -test.parallel=${{ matrix.product.nodes }} ${{ steps.build-go-test-command.outputs.run_command }} 2>&1 | tee /tmp/gotest.log | gotestfmt
+ test_command_to_run: cd ./integration-tests && go test -timeout 30m -count=1 -json -test.parallel=${{ matrix.product.nodes }} ${{ steps.build-go-test-command.outputs.run_command }} 2>&1 | tee /tmp/gotest.log | gotestfmt
test_download_vendor_packages_command: cd ./integration-tests && go mod download
cl_repo: ${{ env.CHAINLINK_IMAGE }}
cl_image_tag: ${{ github.sha }}
@@ -231,7 +252,7 @@ jobs:
cache_restore_only: "true"
QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }}
QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
- QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }}
+ QA_KUBECONFIG: ""
- name: Collect Metrics
if: always()
id: collect-gha-metrics
@@ -244,14 +265,14 @@ jobs:
continue-on-error: true
eth-smoke-tests-matrix:
- if: ${{ !contains(join(github.event.pull_request.labels.*.name, ' '), 'skip-smoke-tests') }}
+ if: ${{ !(contains(join(github.event.pull_request.labels.*.name, ' '), 'skip-smoke-tests') || (github.event_name == 'workflow_dispatch' && !inputs.simulatedNetwork)) }}
environment: integration
permissions:
checks: write
pull-requests: write
id-token: write
contents: read
- needs: [build-chainlink, changes]
+ needs: [build-chainlink, changes, build-lint-integration-tests]
env:
SELECTED_NETWORKS: SIMULATED,SIMULATED_1,SIMULATED_2
CHAINLINK_COMMIT_SHA: ${{ github.sha }}
@@ -296,7 +317,7 @@ jobs:
pyroscope_env: ci-smoke-vrf2-evm-simulated
- name: vrfv2plus
nodes: 1
- os: ubuntu-latest
+ os: ubuntu20.04-8cores-32GB
pyroscope_env: ci-smoke-vrf2plus-evm-simulated
- name: forwarder_ocr
nodes: 1
@@ -365,16 +386,16 @@ jobs:
run: |
PORT_BASE=3001
MAX_PORT=8000
-
+
# Use PR number as offset. Given GitHub PRs are incremental, this guarantees uniqueness for at least 5000 PRs.
OFFSET=$GITHUB_PR_NUMBER
echo "PR Number: $OFFSET"
-
+
# Ensure that we don't exceed the max port
if (( OFFSET > (MAX_PORT - PORT_BASE) )); then
OFFSET=$((OFFSET % (MAX_PORT - PORT_BASE)))
fi
-
+
# Map the offset to the port range
REMOTE_PORT=$((PORT_BASE + OFFSET))
echo "REMOTE_PORT=$REMOTE_PORT" >> $GITHUB_OUTPUT
@@ -385,25 +406,25 @@ jobs:
TRACING_SSH_SERVER: ${{ secrets.TRACING_SSH_SERVER }}
REMOTE_PORT: ${{ steps.generate-port.outputs.REMOTE_PORT }}
run: |
- eval $(ssh-agent)
- echo "test"
- echo "$TRACING_SSH_KEY" | wc -c
- echo "$TRACING_SSH_KEY" | tr -d '\r' | wc -c
- echo "$TRACING_SSH_KEY" | tr -d '\r' | base64 --decode | ssh-add -
- # f: background process
- # N: do not execute a remote command
- # R: remote port forwarding
- ssh -o StrictHostKeyChecking=no -f -N -R $REMOTE_PORT:127.0.0.1:3000 user-gha@$TRACING_SSH_SERVER
- echo "To view Grafana locally:"
- echo "ssh -N -L 8000:localhost:$REMOTE_PORT user-gha@$TRACING_SSH_SERVER"
- echo "Then visit http://localhost:8000 in a browser."
- echo "If you are unable to connect, check with the security team that you have access to the tracing server."
+ eval $(ssh-agent)
+ echo "test"
+ echo "$TRACING_SSH_KEY" | wc -c
+ echo "$TRACING_SSH_KEY" | tr -d '\r' | wc -c
+ echo "$TRACING_SSH_KEY" | tr -d '\r' | base64 --decode | ssh-add -
+ # f: background process
+ # N: do not execute a remote command
+ # R: remote port forwarding
+ ssh -o StrictHostKeyChecking=no -f -N -R $REMOTE_PORT:127.0.0.1:3000 user-gha@$TRACING_SSH_SERVER
+ echo "To view Grafana locally:"
+ echo "ssh -N -L 8000:localhost:$REMOTE_PORT user-gha@$TRACING_SSH_SERVER"
+ echo "Then visit http://localhost:8000 in a browser."
+ echo "If you are unable to connect, check with the security team that you have access to the tracing server."
- name: Show Grafana Logs
if: steps.check-label.outputs.trace == 'true' && matrix.product.name == 'ocr2' && matrix.product.tag_suffix == '-plugins'
run: |
- docker logs grafana
- docker logs tempo
- docker logs otel-collector
+ docker logs grafana
+ docker logs tempo
+ docker logs otel-collector
- name: Set sleep time to use in future steps
if: steps.check-label.outputs.trace == 'true' && matrix.product.name == 'ocr2' && matrix.product.tag_suffix == '-plugins'
run: |
@@ -411,13 +432,13 @@ jobs:
## Run this step when changes that require tests to be run are made
- name: Run Tests
if: needs.changes.outputs.src == 'true'
- uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@eccde1970eca69f079d3efb3409938a72ade8497 # v2.2.13
+ uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@7d541cbbca52d45b8a718257af86d9cf49774d1f # v2.2.15
env:
PYROSCOPE_SERVER: ${{ matrix.product.pyroscope_env == '' && '' || !startsWith(github.ref, 'refs/tags/') && '' || secrets.QA_PYROSCOPE_INSTANCE }} # Avoid sending blank envs https://github.com/orgs/community/discussions/25725
PYROSCOPE_ENVIRONMENT: ${{ matrix.product.pyroscope_env }}
PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }}
with:
- test_command_to_run: make test_need_operator_assets && cd ./integration-tests && go test -timeout 30m -count=1 -json -test.parallel=${{ matrix.product.nodes }} ${{ steps.build-go-test-command.outputs.run_command }} 2>&1 | tee /tmp/gotest.log | gotestfmt
+ test_command_to_run: cd ./integration-tests && go test -timeout 30m -count=1 -json -test.parallel=${{ matrix.product.nodes }} ${{ steps.build-go-test-command.outputs.run_command }} 2>&1 | tee /tmp/gotest.log | gotestfmt
test_download_vendor_packages_command: cd ./integration-tests && go mod download
cl_repo: ${{ env.CHAINLINK_IMAGE }}
cl_image_tag: ${{ github.sha }}${{ matrix.product.tag_suffix }}
@@ -431,11 +452,11 @@ jobs:
cache_restore_only: "true"
QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }}
QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
- QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }}
+ QA_KUBECONFIG: ""
## Run this step when changes that do not need the test to run are made
- name: Run Setup
if: needs.changes.outputs.src == 'false'
- uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/setup-run-tests-environment@eccde1970eca69f079d3efb3409938a72ade8497 # v2.2.13
+ uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/setup-run-tests-environment@7d541cbbca52d45b8a718257af86d9cf49774d1f # v2.2.15
with:
test_download_vendor_packages_command: cd ./integration-tests && go mod download
go_mod_path: ./integration-tests/go.mod
@@ -451,7 +472,7 @@ jobs:
with:
basic-auth: ${{ secrets.GRAFANA_CLOUD_BASIC_AUTH }}
hostname: ${{ secrets.GRAFANA_CLOUD_HOST }}
- this-job-name: ETH Smoke Tests ${{ matrix.product.name }}
+ this-job-name: ETH Smoke Tests ${{ matrix.product.name }}${{ matrix.product.tag_suffix }}
test-results-file: '{"testType":"go","filePath":"/tmp/gotest.log"}'
continue-on-error: true
- name: Keep action running to view traces
@@ -572,9 +593,9 @@ jobs:
run: |
echo "Running migration tests from version '${{ steps.get_latest_version.outputs.latest_version }}' to: '${{ github.sha }}'"
- name: Run Migration Tests
- uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@eccde1970eca69f079d3efb3409938a72ade8497 # v2.2.13
+ uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@7d541cbbca52d45b8a718257af86d9cf49774d1f # v2.2.15
with:
- test_command_to_run: make test_need_operator_assets && cd ./integration-tests && go test -timeout 30m -count=1 -json ./migration 2>&1 | tee /tmp/gotest.log | gotestfmt
+ test_command_to_run: cd ./integration-tests && go test -timeout 30m -count=1 -json ./migration 2>&1 | tee /tmp/gotest.log | gotestfmt
test_download_vendor_packages_command: cd ./integration-tests && go mod download
cl_repo: ${{ env.CHAINLINK_IMAGE }}
cl_image_tag: ${{ steps.get_latest_version.outputs.latest_version }}
@@ -817,12 +838,14 @@ jobs:
ref: ${{ needs.get_solana_sha.outputs.sha }}
- name: Run Setup
if: needs.changes.outputs.src == 'true'
- uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/setup-run-tests-environment@eccde1970eca69f079d3efb3409938a72ade8497 # v2.2.13
+ uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/setup-run-tests-environment@7d541cbbca52d45b8a718257af86d9cf49774d1f # v2.2.15
with:
go_mod_path: ./integration-tests/go.mod
cache_restore_only: true
cache_key_id: core-solana-e2e-${{ env.MOD_CACHE_VERSION }}
aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}
+ dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }}
+ dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }}
QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }}
QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }}
@@ -844,7 +867,7 @@ jobs:
docker rm "$CONTAINER_ID"
- name: Run Tests
if: needs.changes.outputs.src == 'true'
- uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@eccde1970eca69f079d3efb3409938a72ade8497 # v2.2.13
+ uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@7d541cbbca52d45b8a718257af86d9cf49774d1f # v2.2.15
with:
test_command_to_run: export ENV_JOB_IMAGE=${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink-solana-tests:${{ needs.get_solana_sha.outputs.sha }} && make test_smoke
cl_repo: ${{ env.CHAINLINK_IMAGE }}
@@ -857,7 +880,7 @@ jobs:
aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}
QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }}
QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
- QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }}
+ QA_KUBECONFIG: ""
run_setup: false
- name: Upload test log
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
@@ -889,199 +912,3 @@ jobs:
matrix-aggregator-status: ${{ needs.solana-smoke-tests-matrix.result }}
continue-on-error: true
### End Solana Section
-
- ### Start Live Testnet Section
-
- testnet-smoke-tests-matrix:
- if: ${{ github.event_name == 'schedule' || (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/')) || (github.event_name == 'workflow_dispatch' && inputs.liveNetwork) }} ## Only run live tests on new tags, schedule, or on manual request
- environment: integration
- permissions:
- checks: write
- pull-requests: write
- id-token: write
- contents: read
- needs: [build-chainlink]
- env:
- SELECTED_NETWORKS: ${{ matrix.testnet }}
- CHAINLINK_COMMIT_SHA: ${{ github.sha }}
- CHAINLINK_ENV_USER: ${{ github.actor }}
- TEST_LOG_LEVEL: debug
- EVM_KEYS: ${{ secrets.QA_EVM_KEYS }}
-
- OPTIMISM_GOERLI_URLS: ${{ secrets.QA_OPTIMISM_GOERLI_URLS }}
- OPTIMISM_GOERLI_HTTP_URLS: ${{ secrets.QA_OPTIMISM_GOERLI_HTTP_URLS }}
-
- ARBITRUM_GOERLI_URLS: ${{ secrets.QA_ARBITRUM_GOERLI_URLS }}
- ARBITRUM_GOERLI_HTTP_URLS: ${{ secrets.QA_ARBITRUM_GOERLI_HTTP_URLS }}
- strategy:
- fail-fast: false
- matrix:
- # NOTE: If changing this matrix, make sure to update the matrix in the testnet-smoke-tests-notify job to be the same
- # otherwise reporting will be broken. Getting a single matrix for multiple jobs is a pain
- # https://github.com/orgs/community/discussions/26284#discussioncomment-3251198
- testnet: [OPTIMISM_GOERLI, ARBITRUM_GOERLI]
- name: Live Testnet Smoke Tests ${{ matrix.testnet }}
- runs-on: ubuntu-latest
- steps:
- - name: Checkout the repo
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- with:
- ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }}
- ## Only run OCR smoke test for now
- - name: Run Tests
- uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@eccde1970eca69f079d3efb3409938a72ade8497 # v2.2.13
- env:
- PYROSCOPE_SERVER: ${{ secrets.QA_PYROSCOPE_INSTANCE }}
- PYROSCOPE_ENVIRONMENT: ci-smoke-ocr-evm-${{ matrix.testnet }} # TODO: Only for OCR for now
- PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }}
- with:
- test_command_to_run: make test_need_operator_assets && cd ./integration-tests && go test -timeout 30m -count=1 -json -test.parallel=1 ./smoke/ocr_test.go 2>&1 | tee /tmp/gotest.log | gotestfmt
- test_download_vendor_packages_command: cd ./integration-tests && go mod download
- cl_repo: ${{ env.CHAINLINK_IMAGE }}
- cl_image_tag: ${{ github.sha }}
- aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}
- artifacts_location: ./integration-tests/smoke/logs
- publish_check_name: ${{ matrix.testnet }} OCR Smoke Test Results
- token: ${{ secrets.GITHUB_TOKEN }}
- go_mod_path: ./integration-tests/go.mod
- cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }}
- cache_restore_only: "true"
- QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }}
- QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
- QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }}
-
- - name: Collect Metrics
- if: always()
- id: collect-gha-metrics
- uses: smartcontractkit/push-gha-metrics-action@d1618b772a97fd87e6505de97b872ee0b1f1729a # v2.0.2
- with:
- basic-auth: ${{ secrets.GRAFANA_CLOUD_BASIC_AUTH }}
- hostname: ${{ secrets.GRAFANA_CLOUD_HOST }}
- this-job-name: Live Testnet Smoke Tests ${{ matrix.testnet }}
- test-results-file: '{"testType":"go","filePath":"/tmp/gotest.log"}'
- continue-on-error: true
-
- testnet-smoke-tests-notify:
- name: Live Testnet Start Slack Thread
- if: ${{ always() && needs.testnet-smoke-tests-matrix.result != 'skipped' && needs.testnet-smoke-tests-matrix.result != 'cancelled' }}
- environment: integration
- outputs:
- thread_ts: ${{ steps.slack.outputs.thread_ts }}
- permissions:
- checks: write
- pull-requests: write
- id-token: write
- contents: read
- runs-on: ubuntu-latest
- needs: testnet-smoke-tests-matrix
- steps:
- - name: Debug Result
- run: echo ${{needs.testnet-smoke-tests-matrix.result}}
- - name: Main Slack Notification
- uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0
- id: slack
- with:
- channel-id: ${{ secrets.QA_SLACK_CHANNEL }}
- payload: |
- {
- "attachments": [
- {
- "color": "${{ needs.testnet-smoke-tests-matrix.result == 'success' && '#2E7D32' || '#C62828' }}",
- "blocks": [
- {
- "type": "header",
- "text": {
- "type": "plain_text",
- "text": "Live Smoke Test Results ${{ needs.testnet-smoke-tests-matrix.result == 'success' && ':white_check_mark:' || ':x:'}}",
- "emoji": true
- }
- },
- {
- "type": "divider"
- },
- {
- "type": "section",
- "text": {
- "type": "mrkdwn",
- "text": "<${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}|${{ github.ref_name }}> | <${{ github.server_url }}/${{ github.repository }}/commit/${{ github.sha }}|${{ github.sha }}> | <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|Run>"
- }
- }
- ]
- }
- ]
- }
- env:
- SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }}
-
- testnet-smoke-tests-results:
- name: Post Live Testnet Smoke Test Results
- if: ${{ always() && needs.testnet-smoke-tests-matrix.result != 'skipped' && needs.testnet-smoke-tests-matrix.result != 'cancelled' }}
- environment: integration
- permissions:
- checks: write
- pull-requests: write
- id-token: write
- contents: read
- runs-on: ubuntu-latest
- needs: testnet-smoke-tests-notify
- strategy:
- fail-fast: false
- matrix:
- # NOTE: If changing this matrix, make sure to update the matrix in the testnet-smoke-tests-matrix job to be the same
- # otherwise reporting will be broken. Getting a single matrix for multiple jobs is a pain
- # https://github.com/orgs/community/discussions/26284#discussioncomment-3251198
- testnet: [OPTIMISM_GOERLI, ARBITRUM_GOERLI]
- steps:
- - name: Get Results
- id: test-results
- run: |
- echo "Querying test results"
-
- echo "status=$(curl \
- -H "Authorization: Bearer ${{ github.token }}" \
- 'https://api.github.com/repos/${{github.repository}}/actions/runs/${{ github.run_id }}/jobs' \
- | jq -r '.jobs[] | select(.name == "Live Testnet Smoke Tests ${{ matrix.testnet}}").steps[] | select(.name == "Run Tests").conclusion')" >> $GITHUB_OUTPUT
-
- echo "status=$(curl \
- -H "Authorization: Bearer ${{ github.token }}" \
- 'https://api.github.com/repos/${{github.repository}}/actions/runs/${{ github.run_id }}/jobs' \
- | jq -r '.jobs[] | select(.name == "Live Testnet Smoke Tests ${{ matrix.testnet}}").steps[] | select(.name == "Run Tests").conclusion')"
- echo "thread_ts=${{ needs.testnet-smoke-tests-notify.outputs.thread_ts }}"
-
- - name: Test Details
- uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0
- with:
- channel-id: ${{ secrets.QA_SLACK_CHANNEL }}
- payload: |
- {
- "thread_ts": "${{ needs.testnet-smoke-tests-notify.outputs.thread_ts }}",
- "attachments": [
- {
- "color": "${{ steps.test-results.outputs.status == 'success' && '#2E7D32' || '#C62828' }}",
- "blocks": [
- {
- "type": "header",
- "text": {
- "type": "plain_text",
- "text": "${{ matrix.testnet }} Smoke Test Results ${{ steps.test-results.outputs.status == 'success' && ':white_check_mark:' || ':x:'}}",
- "emoji": true
- }
- },
- {
- "type": "divider"
- },
- {
- "type": "section",
- "text": {
- "type": "mrkdwn",
- "text": "OCR ${{ steps.test-results.outputs.status == 'success' && ':white_check_mark:' || ':x:'}}"
- }
- }
- ]
- }
- ]
- }
- env:
- SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }}
-
- ### End Live Testnet Section
diff --git a/.github/workflows/live-testnet-tests.yml b/.github/workflows/live-testnet-tests.yml
new file mode 100644
index 00000000000..23e9b3c04cf
--- /dev/null
+++ b/.github/workflows/live-testnet-tests.yml
@@ -0,0 +1,342 @@
+name: Live Testnet Tests
+on:
+ schedule:
+ - cron: "0 0 * * *" # Run nightly
+ push:
+ tags:
+ - "*"
+ workflow_dispatch:
+
+env:
+ CHAINLINK_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink
+ INTERNAL_DOCKER_REPO: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com
+ MOD_CACHE_VERSION: 2
+
+ CHAINLINK_COMMIT_SHA: ${{ github.sha }}
+ CHAINLINK_ENV_USER: ${{ github.actor }}
+ TEST_LOG_LEVEL: debug
+ EVM_KEYS: ${{ secrets.QA_EVM_KEYS }}
+
+ OPTIMISM_GOERLI_URLS: ${{ secrets.QA_OPTIMISM_GOERLI_URLS }}
+ OPTIMISM_GOERLI_HTTP_URLS: ${{ secrets.QA_OPTIMISM_GOERLI_HTTP_URLS }}
+
+ ARBITRUM_GOERLI_URLS: ${{ secrets.QA_ARBITRUM_GOERLI_URLS }}
+ ARBITRUM_GOERLI_HTTP_URLS: ${{ secrets.QA_ARBITRUM_GOERLI_HTTP_URLS }}
+
+jobs:
+ build-chainlink:
+ environment: integration
+ permissions:
+ id-token: write
+ contents: read
+ name: Build Chainlink Image
+ runs-on: ubuntu20.04-16cores-64GB
+ steps:
+ - name: Collect Metrics
+ id: collect-gha-metrics
+ uses: smartcontractkit/push-gha-metrics-action@d1618b772a97fd87e6505de97b872ee0b1f1729a # v2.0.2
+ with:
+ basic-auth: ${{ secrets.GRAFANA_CLOUD_BASIC_AUTH }}
+ hostname: ${{ secrets.GRAFANA_CLOUD_HOST }}
+ this-job-name: Build Chainlink Image
+ continue-on-error: true
+ - name: Checkout the repo
+ uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+ with:
+ ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }}
+ - name: Build Chainlink Image
+ uses: ./.github/actions/build-chainlink-image
+ with:
+ tag_suffix: ""
+ dockerfile: core/chainlink.Dockerfile
+ git_commit_sha: ${{ github.sha }}
+ GRAFANA_CLOUD_BASIC_AUTH: ${{ secrets.GRAFANA_CLOUD_BASIC_AUTH }}
+ GRAFANA_CLOUD_HOST: ${{ secrets.GRAFANA_CLOUD_HOST }}
+ AWS_REGION: ${{ secrets.QA_AWS_REGION }}
+ AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
+
+
+ sepolia-smoke-tests:
+ environment: integration
+ permissions:
+ checks: write
+ pull-requests: write
+ id-token: write
+ contents: read
+ needs: [build-chainlink]
+ env:
+ SELECTED_NETWORKS: SEPOLIA
+ strategy:
+ fail-fast: false
+ matrix:
+ product: [ocr, automation]
+ name: Sepolia ${{ matrix.product }} Tests
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout the repo
+ uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+ with:
+ ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }}
+ - name: Run Tests
+ uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@7d541cbbca52d45b8a718257af86d9cf49774d1f # v2.2.15
+ env:
+ PYROSCOPE_SERVER: ${{ secrets.QA_PYROSCOPE_INSTANCE }}
+ PYROSCOPE_ENVIRONMENT: ci-smoke-${{ matrix.product }}-sepolia
+ PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }}
+ with:
+ test_command_to_run: cd ./integration-tests && go test -timeout 30m -count=1 -json -test.parallel=1 ./smoke/${{ matrix.product }}_test.go 2>&1 | tee /tmp/gotest.log | gotestfmt
+ test_download_vendor_packages_command: cd ./integration-tests && go mod download
+ cl_repo: ${{ env.CHAINLINK_IMAGE }}
+ cl_image_tag: ${{ github.sha }}
+ aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}
+ dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }}
+ dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }}
+ artifacts_location: ./integration-tests/smoke/logs
+          publish_check_name: Sepolia ${{ matrix.product }} Smoke Test Results
+ token: ${{ secrets.GITHUB_TOKEN }}
+ go_mod_path: ./integration-tests/go.mod
+ cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }}
+ cache_restore_only: "true"
+ QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }}
+ QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
+ QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }}
+ - name: Collect Metrics
+ if: always()
+ id: collect-gha-metrics
+ uses: smartcontractkit/push-gha-metrics-action@d1618b772a97fd87e6505de97b872ee0b1f1729a # v2.0.2
+ with:
+ basic-auth: ${{ secrets.GRAFANA_CLOUD_BASIC_AUTH }}
+ hostname: ${{ secrets.GRAFANA_CLOUD_HOST }}
+ this-job-name: Sepolia ${{ matrix.product }} Tests
+ test-results-file: '{"testType":"go","filePath":"/tmp/gotest.log"}'
+ continue-on-error: true
+
+ optimism-goerli-smoke-tests:
+ environment: integration
+ permissions:
+ checks: write
+ pull-requests: write
+ id-token: write
+ contents: read
+ needs: [build-chainlink]
+ env:
+ SELECTED_NETWORKS: OPTIMISM_GOERLI
+ strategy:
+ fail-fast: false
+ matrix:
+ product: [ocr, automation]
+ name: Optimism Goerli ${{ matrix.product }} Tests
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout the repo
+ uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+ with:
+ ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }}
+ - name: Run Tests
+ uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@7d541cbbca52d45b8a718257af86d9cf49774d1f # v2.2.15
+ env:
+ PYROSCOPE_SERVER: ${{ secrets.QA_PYROSCOPE_INSTANCE }}
+ PYROSCOPE_ENVIRONMENT: ci-smoke-${{ matrix.product }}-optimism-goerli
+ PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }}
+ with:
+ test_command_to_run: cd ./integration-tests && go test -timeout 30m -count=1 -json -test.parallel=1 ./smoke/${{ matrix.product }}_test.go 2>&1 | tee /tmp/gotest.log | gotestfmt
+ test_download_vendor_packages_command: cd ./integration-tests && go mod download
+ cl_repo: ${{ env.CHAINLINK_IMAGE }}
+ cl_image_tag: ${{ github.sha }}
+ aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}
+ dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }}
+ dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }}
+ artifacts_location: ./integration-tests/smoke/logs
+          publish_check_name: Optimism Goerli ${{ matrix.product }} Smoke Test Results
+ token: ${{ secrets.GITHUB_TOKEN }}
+ go_mod_path: ./integration-tests/go.mod
+ cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }}
+ cache_restore_only: "true"
+ QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }}
+ QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
+ QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }}
+ - name: Collect Metrics
+ if: always()
+ id: collect-gha-metrics
+ uses: smartcontractkit/push-gha-metrics-action@d1618b772a97fd87e6505de97b872ee0b1f1729a # v2.0.2
+ with:
+ basic-auth: ${{ secrets.GRAFANA_CLOUD_BASIC_AUTH }}
+ hostname: ${{ secrets.GRAFANA_CLOUD_HOST }}
+ this-job-name: Optimism Goerli ${{ matrix.product }} Tests
+ test-results-file: '{"testType":"go","filePath":"/tmp/gotest.log"}'
+ continue-on-error: true
+
+ arbitrum-goerli-smoke-tests:
+ environment: integration
+ permissions:
+ checks: write
+ pull-requests: write
+ id-token: write
+ contents: read
+ needs: [build-chainlink]
+ env:
+ SELECTED_NETWORKS: ARBITRUM_GOERLI
+ strategy:
+ fail-fast: false
+ matrix:
+ product: [ocr, automation]
+ name: Arbitrum Goerli ${{ matrix.product }} Tests
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout the repo
+ uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+ with:
+ ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }}
+ - name: Run Tests
+ uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@7d541cbbca52d45b8a718257af86d9cf49774d1f # v2.2.15
+ env:
+ PYROSCOPE_SERVER: ${{ secrets.QA_PYROSCOPE_INSTANCE }}
+ PYROSCOPE_ENVIRONMENT: ci-smoke-${{ matrix.product }}-arbitrum-goerli
+ PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }}
+ with:
+ test_command_to_run: cd ./integration-tests && go test -timeout 30m -count=1 -json -test.parallel=1 ./smoke/${{ matrix.product }}_test.go 2>&1 | tee /tmp/gotest.log | gotestfmt
+ test_download_vendor_packages_command: cd ./integration-tests && go mod download
+ cl_repo: ${{ env.CHAINLINK_IMAGE }}
+ cl_image_tag: ${{ github.sha }}
+ aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}
+ dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }}
+ dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }}
+ artifacts_location: ./integration-tests/smoke/logs
+ publish_check_name: Arbitrum Goerli ${{ matrix.product }} Smoke Test Results
+ token: ${{ secrets.GITHUB_TOKEN }}
+ go_mod_path: ./integration-tests/go.mod
+ cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }}
+ cache_restore_only: "true"
+ QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }}
+ QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }}
+ QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }}
+ - name: Collect Metrics
+ if: always()
+ id: collect-gha-metrics
+ uses: smartcontractkit/push-gha-metrics-action@d1618b772a97fd87e6505de97b872ee0b1f1729a # v2.0.2
+ with:
+ basic-auth: ${{ secrets.GRAFANA_CLOUD_BASIC_AUTH }}
+ hostname: ${{ secrets.GRAFANA_CLOUD_HOST }}
+ this-job-name: Arbitrum Goerli ${{ matrix.product }} Tests
+ test-results-file: '{"testType":"go","filePath":"/tmp/gotest.log"}'
+ continue-on-error: true
+
+ testnet-smoke-tests-notify:
+ name: Start Slack Thread
+    if: ${{ always() && !contains(needs.*.result, 'skipped') && !contains(needs.*.result, 'cancelled') }}
+ environment: integration
+ outputs:
+ thread_ts: ${{ steps.slack.outputs.thread_ts }}
+ permissions:
+ checks: write
+ pull-requests: write
+ id-token: write
+ contents: read
+ runs-on: ubuntu-latest
+ needs: [sepolia-smoke-tests, optimism-goerli-smoke-tests, arbitrum-goerli-smoke-tests]
+ steps:
+ - name: Debug Result
+        run: echo ${{ join(needs.*.result, ',') }}
+ - name: Main Slack Notification
+ uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0
+ id: slack
+ with:
+ channel-id: ${{ secrets.QA_SLACK_CHANNEL }}
+ payload: |
+ {
+ "attachments": [
+ {
+                  "color": "${{ !contains(needs.*.result, 'failure') && '#2E7D32' || '#C62828' }}",
+ "blocks": [
+ {
+ "type": "header",
+ "text": {
+ "type": "plain_text",
+                        "text": "Live Smoke Test Results ${{ !contains(needs.*.result, 'failure') && ':white_check_mark:' || ':x:'}}",
+ "emoji": true
+ }
+ },
+ {
+ "type": "divider"
+ },
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": "<${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}|${{ github.ref_name }}> | <${{ github.server_url }}/${{ github.repository }}/commit/${{ github.sha }}|${{ github.sha }}> | <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|Run>"
+ }
+ }
+ ]
+ }
+ ]
+ }
+ env:
+ SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }}
+
+ testnet-smoke-tests-results:
+ name: Post Test Results
+    if: ${{ always() && !contains(needs.*.result, 'skipped') && !contains(needs.*.result, 'cancelled') }}
+ environment: integration
+ permissions:
+ checks: write
+ pull-requests: write
+ id-token: write
+ contents: read
+ runs-on: ubuntu-latest
+ needs: testnet-smoke-tests-notify
+ strategy:
+ fail-fast: false
+ matrix:
+ testnet: [sepolia, optimism-goerli, arbitrum-goerli]
+ steps:
+ - name: Get Results
+ id: test-results
+ run: |
+ echo "Querying test results"
+
+ echo "status=$(curl \
+ -H "Authorization: Bearer ${{ github.token }}" \
+ 'https://api.github.com/repos/${{github.repository}}/actions/runs/${{ github.run_id }}/jobs' \
+            | jq -r '[.jobs[] | select(.name | ascii_downcase | gsub(" "; "-") | startswith("${{ matrix.testnet }}")).steps[] | select(.name == "Run Tests").conclusion] | if length > 0 and all(. == "success") then "success" else "failure" end')" >> $GITHUB_OUTPUT
+
+ echo "status=$(curl \
+ -H "Authorization: Bearer ${{ github.token }}" \
+ 'https://api.github.com/repos/${{github.repository}}/actions/runs/${{ github.run_id }}/jobs' \
+            | jq -r '[.jobs[] | select(.name | ascii_downcase | gsub(" "; "-") | startswith("${{ matrix.testnet }}")).steps[] | select(.name == "Run Tests").conclusion] | if length > 0 and all(. == "success") then "success" else "failure" end')"
+ echo "thread_ts=${{ needs.testnet-smoke-tests-notify.outputs.thread_ts }}"
+
+ - name: Test Details
+ uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0
+ with:
+ channel-id: ${{ secrets.QA_SLACK_CHANNEL }}
+ payload: |
+ {
+ "thread_ts": "${{ needs.testnet-smoke-tests-notify.outputs.thread_ts }}",
+ "attachments": [
+ {
+ "color": "${{ steps.test-results.outputs.status == 'success' && '#2E7D32' || '#C62828' }}",
+ "blocks": [
+ {
+ "type": "header",
+ "text": {
+ "type": "plain_text",
+ "text": "${{ matrix.testnet }} Smoke Test Results ${{ steps.test-results.outputs.status == 'success' && ':white_check_mark:' || ':x:'}}",
+ "emoji": true
+ }
+ },
+ {
+ "type": "divider"
+ },
+ {
+ "type": "section",
+ "text": {
+ "type": "mrkdwn",
+ "text": "OCR ${{ steps.test-results.outputs.status == 'success' && ':white_check_mark:' || ':x:'}}"
+ }
+ }
+ ]
+ }
+ ]
+ }
+ env:
+ SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }}
diff --git a/.github/workflows/on-demand-log-poller.yml b/.github/workflows/on-demand-log-poller.yml
new file mode 100644
index 00000000000..42f901ec304
--- /dev/null
+++ b/.github/workflows/on-demand-log-poller.yml
@@ -0,0 +1,87 @@
+name: On Demand Log Poller Consistency Test
+on:
+ workflow_dispatch:
+ inputs:
+ contracts:
+ description: Number of test contracts
+ default: "2"
+ required: true
+ eventsPerTx:
+ description: Number of events to emit per transaction
+ default: "10"
+ required: true
+ useFinalityTag:
+ description: Use finality tag
+ default: "false"
+ required: true
+ loadDuration:
+ description: Load duration (e.g. 10s, 10m, 1h)
+ default: "10m"
+ required: true
+ chainlinkImage:
+ description: Chainlink image to use
+ default: "public.ecr.aws/chainlink/chainlink"
+ required: true
+ chainlinkVersion:
+ description: Chainlink version to use
+ default: "2.7.0-beta1"
+ required: true
+ selectedNetworks:
+ type: choice
+ options:
+ - "SIMULATED"
+ - "SEPOLIA"
+ - "MUMBAI"
+ fundingPrivateKey:
+ description: Private funding key (Skip for Simulated)
+ required: true
+ type: string
+ wsURL:
+ description: WS URL for the network (Skip for Simulated)
+ required: true
+ type: string
+ httpURL:
+ description: HTTP URL for the network (Skip for Simulated)
+ required: true
+ type: string
+
+jobs:
+ test:
+ env:
+ CONTRACTS: ${{ inputs.contracts }}
+ EVENTS_PER_TX: ${{ inputs.eventsPerTx }}
+ LOAD_DURATION: ${{ inputs.loadDuration }}
+ USE_FINALITY_TAG: ${{ inputs.useFinalityTag }}
+ CHAINLINK_IMAGE: ${{ inputs.chainlinkImage }}
+ CHAINLINK_VERSION: ${{ inputs.chainlinkVersion }}
+ SELECTED_NETWORKS: ${{ inputs.selectedNetworks }}
+ REF_NAME: ${{ github.head_ref || github.ref_name }}
+ runs-on: ubuntu20.04-8cores-32GB
+ steps:
+ - name: Get Inputs
+ run: |
+ EVM_URLS=$(jq -r '.inputs.wsURL' $GITHUB_EVENT_PATH)
+ EVM_HTTP_URLS=$(jq -r '.inputs.httpURL' $GITHUB_EVENT_PATH)
+ EVM_KEYS=$(jq -r '.inputs.fundingPrivateKey' $GITHUB_EVENT_PATH)
+
+ echo ::add-mask::$EVM_URLS
+ echo ::add-mask::$EVM_HTTP_URLS
+ echo ::add-mask::$EVM_KEYS
+
+ echo EVM_URLS=$EVM_URLS >> $GITHUB_ENV
+ echo EVM_HTTP_URLS=$EVM_HTTP_URLS >> $GITHUB_ENV
+ echo EVM_KEYS=$EVM_KEYS >> $GITHUB_ENV
+ - name: Checkout the repo
+ uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+ with:
+ ref: ${{ env.REF_NAME }}
+ - name: Setup Go
+ uses: actions/setup-go@v3
+ with:
+ go-version-file: "integration-tests/go.mod"
+ cache: true
+ - name: Run tests
+ run: |
+ cd integration-tests
+ go mod download
+          go test -v -timeout 5h -count=1 -run ^TestLogPollerFromEnv$ ./reorg/log_poller_maybe_reorg_test.go
diff --git a/.github/workflows/on-demand-ocr-soak-test.yml b/.github/workflows/on-demand-ocr-soak-test.yml
index 1fb79d8ccd4..4ea10cd4823 100644
--- a/.github/workflows/on-demand-ocr-soak-test.yml
+++ b/.github/workflows/on-demand-ocr-soak-test.yml
@@ -20,12 +20,16 @@ on:
- "BSC_TESTNET"
- "SCROLL_SEPOLIA"
- "SCROLL_MAINNET"
- - "MUMBAI"
+ - "POLYGON_MUMBAI"
- "POLYGON_MAINNET"
- "LINEA_GOERLI"
- "LINEA_MAINNET"
- "FANTOM_TESTNET"
- "FANTOM_MAINNET"
+ - "KROMA_MAINNET"
+ - "KROMA_SEPOLIA"
+ - "WEMIX_TESTNET"
+ - "WEMIX_MAINNET"
fundingPrivateKey:
description: Private funding key (Skip for Simulated)
required: false
@@ -129,7 +133,7 @@ jobs:
QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }}
QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}
- name: Run Tests
- uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@eccde1970eca69f079d3efb3409938a72ade8497 # v2.2.13
+ uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@7d541cbbca52d45b8a718257af86d9cf49774d1f # v2.2.15
env:
DETACH_RUNNER: true
TEST_SUITE: soak
diff --git a/.github/workflows/on-demand-vrfv2plus-performance-test.yml b/.github/workflows/on-demand-vrfv2plus-performance-test.yml
index deb977e43fc..b4f9f46de02 100644
--- a/.github/workflows/on-demand-vrfv2plus-performance-test.yml
+++ b/.github/workflows/on-demand-vrfv2plus-performance-test.yml
@@ -54,7 +54,7 @@ on:
useExistingEnv:
description: Set `true` to use existing environment or `false` to deploy CL node and all contracts
required: false
- default: false
+ default: "false"
configBase64:
description: TOML config in base64 (Needed when overriding config or providing contract addresses for existing env)
required: false
@@ -118,7 +118,7 @@ jobs:
with:
fetch-depth: 0
- name: Run Tests
- uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@eccde1970eca69f079d3efb3409938a72ade8497 # v2.2.13
+ uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@7d541cbbca52d45b8a718257af86d9cf49774d1f # v2.2.15
with:
test_command_to_run: cd ./integration-tests && go test -v -count=1 -timeout 6h -run TestVRFV2PlusPerformance/vrfv2plus_performance_test ./load/vrfv2plus
test_download_vendor_packages_command: cd ./integration-tests && go mod download
diff --git a/.github/workflows/operator-ui-cd.yml b/.github/workflows/operator-ui-cd.yml
index 54f423e6dc3..bd589da728f 100644
--- a/.github/workflows/operator-ui-cd.yml
+++ b/.github/workflows/operator-ui-cd.yml
@@ -39,7 +39,7 @@ jobs:
url: ${{ secrets.AWS_INFRA_RELENG_TOKEN_ISSUER_LAMBDA_URL }}
- name: Open PR
- uses: peter-evans/create-pull-request@38e0b6e68b4c852a5500a94740f0e535e0d7ba54 # v4.2.4
+ uses: peter-evans/create-pull-request@153407881ec5c347639a548ade7d8ad1d6740e38 # v5.0.2
with:
title: Update Operator UI from ${{ steps.update.outputs.current_tag }} to ${{ steps.update.outputs.latest_tag }}
token: ${{ steps.get-gh-token.outputs.access-token }}
diff --git a/.github/workflows/performance-tests.yml b/.github/workflows/performance-tests.yml
index 87fb75beca8..57907fe6c2d 100644
--- a/.github/workflows/performance-tests.yml
+++ b/.github/workflows/performance-tests.yml
@@ -57,7 +57,7 @@ jobs:
- name: Checkout the repo
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Run Tests
- uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@eccde1970eca69f079d3efb3409938a72ade8497 # v2.2.13
+ uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@7d541cbbca52d45b8a718257af86d9cf49774d1f # v2.2.15
with:
test_command_to_run: cd integration-tests && go test -timeout 1h -count=1 -json -test.parallel 10 ./performance 2>&1 | tee /tmp/gotest.log | gotestfmt
test_download_vendor_packages_command: make gomod
diff --git a/.github/workflows/solidity-foundry.yml b/.github/workflows/solidity-foundry.yml
index 19c879b09ef..90d18ecac2e 100644
--- a/.github/workflows/solidity-foundry.yml
+++ b/.github/workflows/solidity-foundry.yml
@@ -34,11 +34,12 @@ jobs:
matrix:
product: [vrf, automation, llo-feeds, functions, shared]
needs: [changes]
- if: needs.changes.outputs.changes == 'true'
- name: Tests
+ name: Foundry Tests ${{ matrix.product }} ${{ fromJSON('["(skipped)", ""]')[needs.changes.outputs.changes == 'true'] }}
# See https://github.com/foundry-rs/foundry/issues/3827
runs-on: ubuntu-22.04
+ # The if statements for steps after checkout repo is workaround for
+ # passing required check for PRs that don't have filtered changes.
steps:
- name: Checkout the repo
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
@@ -49,15 +50,18 @@ jobs:
# and not native Foundry. This is to make sure the dependencies
# stay in sync.
- name: Setup NodeJS
+ if: needs.changes.outputs.changes == 'true'
uses: ./.github/actions/setup-nodejs
- name: Install Foundry
+ if: needs.changes.outputs.changes == 'true'
uses: foundry-rs/foundry-toolchain@v1
with:
# Has to match the `make foundry` version.
- version: nightly-5be158ba6dc7c798a6f032026fe60fc01686b33b
+ version: nightly-09fe3e041369a816365a020f715ad6f94dbce9f2
- name: Run Forge build
+ if: needs.changes.outputs.changes == 'true'
run: |
forge --version
forge build
@@ -67,6 +71,7 @@ jobs:
FOUNDRY_PROFILE: ${{ matrix.product }}
- name: Run Forge tests
+ if: needs.changes.outputs.changes == 'true'
run: |
forge test -vvv
id: test
@@ -75,7 +80,7 @@ jobs:
FOUNDRY_PROFILE: ${{ matrix.product }}
- name: Run Forge snapshot
- if: ${{ !contains(fromJson('["vrf"]'), matrix.product) && !contains(fromJson('["automation"]'), matrix.product) }}
+ if: ${{ !contains(fromJson('["vrf"]'), matrix.product) && !contains(fromJson('["automation"]'), matrix.product) && needs.changes.outputs.changes == 'true' }}
run: |
forge snapshot --nmt "testFuzz_\w{1,}?" --check gas-snapshots/${{ matrix.product }}.gas-snapshot
id: snapshot
@@ -84,7 +89,7 @@ jobs:
FOUNDRY_PROFILE: ${{ matrix.product }}
- name: Collect Metrics
- if: always()
+ if: needs.changes.outputs.changes == 'true'
id: collect-gha-metrics
uses: smartcontractkit/push-gha-metrics-action@d1618b772a97fd87e6505de97b872ee0b1f1729a # v2.0.2
with:
diff --git a/.github/workflows/solidity.yml b/.github/workflows/solidity.yml
index 782dc93a0f5..5699657fa5d 100644
--- a/.github/workflows/solidity.yml
+++ b/.github/workflows/solidity.yml
@@ -20,10 +20,29 @@ jobs:
- uses: dorny/paths-filter@4512585405083f25c027a35db413c2b3b9006d50 # v2.11.1
id: changes
with:
+ list-files: "csv"
filters: |
src:
- 'contracts/**/*'
- '.github/workflows/solidity.yml'
+ - '.github/workflows/solidity-foundry.yml'
+ old_sol:
+ - 'contracts/src/v0.4/**/*'
+ - 'contracts/src/v0.5/**/*'
+ - 'contracts/src/v0.6/**/*'
+ - 'contracts/src/v0.7/**/*'
+
+
+ - name: Fail if read-only files have changed
+ if: ${{ steps.changes.outputs.old_sol == 'true' }}
+ run: |
+ echo "One or more read-only Solidity file(s) has changed."
+ for file in ${{ steps.changes.outputs.old_sol_files }}; do
+ echo "$file was changed"
+ done
+ exit 1
+
+
prepublish-test:
needs: [changes]
@@ -91,24 +110,29 @@ jobs:
this-job-name: Native Compilation
continue-on-error: true
+ # The if statements for steps after checkout repo is a workaround for
+ # passing required check for PRs that don't have filtered changes.
lint:
defaults:
run:
working-directory: contracts
needs: [changes]
- if: needs.changes.outputs.changes == 'true'
name: Lint ${{ fromJSON('["(skipped)", ""]')[needs.changes.outputs.changes == 'true'] }}
runs-on: ubuntu-latest
steps:
- name: Checkout the repo
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Setup NodeJS
+ if: needs.changes.outputs.changes == 'true'
uses: ./.github/actions/setup-nodejs
- name: Run pnpm lint
+ if: needs.changes.outputs.changes == 'true'
run: pnpm lint
- name: Run solhint
+ if: needs.changes.outputs.changes == 'true'
run: pnpm solhint
- name: Collect Metrics
+ if: needs.changes.outputs.changes == 'true'
id: collect-gha-metrics
uses: smartcontractkit/push-gha-metrics-action@d1618b772a97fd87e6505de97b872ee0b1f1729a # v2.0.2
with:
diff --git a/.gitignore b/.gitignore
index decea4a68a7..48e228eb836 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,6 +28,7 @@ tools/clroot/db.sqlite3-wal
.idea
.vscode/
*.iml
+debug.env
# codeship
*.aes
@@ -64,7 +65,7 @@ tests-*.xml
tmp-manifest-*.yaml
ztarrepo.tar.gz
**/test-ledger/*
-__debug_bin
+__debug_bin*
# goreleaser builds
cosign.*
@@ -78,6 +79,9 @@ MacOSX*
contracts/yarn.lock
-
# Ignore DevSpace cache and log folder
.devspace/
+go.work*
+
+# This sometimes shows up for some reason
+tools/flakeytests/coverage.txt
diff --git a/.tool-versions b/.tool-versions
index 87910cf6d6f..c60396ccb86 100644
--- a/.tool-versions
+++ b/.tool-versions
@@ -1,7 +1,7 @@
-golang 1.21.1
+golang 1.21.4
mockery 2.35.4
nodejs 16.16.0
postgres 13.3
helm 3.10.3
zig 0.10.1
-golangci-lint 1.55.0
+golangci-lint 1.55.2
diff --git a/GNUmakefile b/GNUmakefile
index 957df96ce45..2801f949682 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -61,6 +61,10 @@ chainlink-local-start:
install-median: ## Build & install the chainlink-median binary.
go install $(GOFLAGS) ./plugins/cmd/chainlink-median
+.PHONY: install-medianpoc
+install-medianpoc: ## Build & install the chainlink-medianpoc binary.
+ go install $(GOFLAGS) ./plugins/cmd/chainlink-medianpoc
+
.PHONY: docker ## Build the chainlink docker image
docker:
docker buildx build \
@@ -127,10 +131,6 @@ telemetry-protobuf: $(telemetry-protobuf) ## Generate telemetry protocol buffers
--go-wsrpc_opt=paths=source_relative \
./core/services/synchronization/telem/*.proto
-.PHONY: test_need_operator_assets
-test_need_operator_assets: ## Add blank file in web assets if operator ui has not been built
- [ -f "./core/web/assets/index.html" ] || mkdir ./core/web/assets && touch ./core/web/assets/index.html
-
.PHONY: config-docs
config-docs: ## Generate core node configuration documentation
go run ./core/config/docs/cmd/generate -o ./docs/
@@ -138,7 +138,7 @@ config-docs: ## Generate core node configuration documentation
.PHONY: golangci-lint
golangci-lint: ## Run golangci-lint for all issues.
[ -d "./golangci-lint" ] || mkdir ./golangci-lint && \
- docker run --rm -v $(shell pwd):/app -w /app golangci/golangci-lint:v1.55.0 golangci-lint run --max-issues-per-linter 0 --max-same-issues 0 > ./golangci-lint/$(shell date +%Y-%m-%d_%H:%M:%S).txt
+ docker run --rm -v $(shell pwd):/app -w /app golangci/golangci-lint:v1.55.2 golangci-lint run --max-issues-per-linter 0 --max-same-issues 0 > ./golangci-lint/$(shell date +%Y-%m-%d_%H:%M:%S).txt
GORELEASER_CONFIG ?= .goreleaser.yaml
diff --git a/common/client/mock_hashable_test.go b/common/client/mock_hashable_test.go
new file mode 100644
index 00000000000..d9f1670c073
--- /dev/null
+++ b/common/client/mock_hashable_test.go
@@ -0,0 +1,18 @@
+package client
+
+import "cmp"
+
+// Hashable - simple implementation of types.Hashable interface to be used as concrete type in tests
+type Hashable string
+
+func (h Hashable) Cmp(c Hashable) int {
+ return cmp.Compare(h, c)
+}
+
+func (h Hashable) String() string {
+ return string(h)
+}
+
+func (h Hashable) Bytes() []byte {
+ return []byte(h)
+}
diff --git a/common/client/mock_head_test.go b/common/client/mock_head_test.go
new file mode 100644
index 00000000000..b9cf0a5866f
--- /dev/null
+++ b/common/client/mock_head_test.go
@@ -0,0 +1,57 @@
+// Code generated by mockery v2.35.4. DO NOT EDIT.
+
+package client
+
+import (
+ utils "github.com/smartcontractkit/chainlink/v2/core/utils"
+ mock "github.com/stretchr/testify/mock"
+)
+
+// mockHead is an autogenerated mock type for the Head type
+type mockHead struct {
+ mock.Mock
+}
+
+// BlockDifficulty provides a mock function with given fields:
+func (_m *mockHead) BlockDifficulty() *utils.Big {
+ ret := _m.Called()
+
+ var r0 *utils.Big
+ if rf, ok := ret.Get(0).(func() *utils.Big); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*utils.Big)
+ }
+ }
+
+ return r0
+}
+
+// BlockNumber provides a mock function with given fields:
+func (_m *mockHead) BlockNumber() int64 {
+ ret := _m.Called()
+
+ var r0 int64
+ if rf, ok := ret.Get(0).(func() int64); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(int64)
+ }
+
+ return r0
+}
+
+// newMockHead creates a new instance of mockHead. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func newMockHead(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *mockHead {
+ mock := &mockHead{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/common/client/mock_node_client_test.go b/common/client/mock_node_client_test.go
new file mode 100644
index 00000000000..7c8eb69171f
--- /dev/null
+++ b/common/client/mock_node_client_test.go
@@ -0,0 +1,168 @@
+// Code generated by mockery v2.35.4. DO NOT EDIT.
+
+package client
+
+import (
+ context "context"
+
+ types "github.com/smartcontractkit/chainlink/v2/common/types"
+ mock "github.com/stretchr/testify/mock"
+)
+
+// mockNodeClient is an autogenerated mock type for the NodeClient type
+type mockNodeClient[CHAIN_ID types.ID, HEAD Head] struct {
+ mock.Mock
+}
+
+// ChainID provides a mock function with given fields: ctx
+func (_m *mockNodeClient[CHAIN_ID, HEAD]) ChainID(ctx context.Context) (CHAIN_ID, error) {
+ ret := _m.Called(ctx)
+
+ var r0 CHAIN_ID
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (CHAIN_ID, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) CHAIN_ID); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(CHAIN_ID)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// ClientVersion provides a mock function with given fields: _a0
+func (_m *mockNodeClient[CHAIN_ID, HEAD]) ClientVersion(_a0 context.Context) (string, error) {
+ ret := _m.Called(_a0)
+
+ var r0 string
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (string, error)); ok {
+ return rf(_a0)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) string); ok {
+ r0 = rf(_a0)
+ } else {
+ r0 = ret.Get(0).(string)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(_a0)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Close provides a mock function with given fields:
+func (_m *mockNodeClient[CHAIN_ID, HEAD]) Close() {
+ _m.Called()
+}
+
+// Dial provides a mock function with given fields: ctx
+func (_m *mockNodeClient[CHAIN_ID, HEAD]) Dial(ctx context.Context) error {
+ ret := _m.Called(ctx)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context) error); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// DialHTTP provides a mock function with given fields:
+func (_m *mockNodeClient[CHAIN_ID, HEAD]) DialHTTP() error {
+ ret := _m.Called()
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func() error); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// DisconnectAll provides a mock function with given fields:
+func (_m *mockNodeClient[CHAIN_ID, HEAD]) DisconnectAll() {
+ _m.Called()
+}
+
+// SetAliveLoopSub provides a mock function with given fields: _a0
+func (_m *mockNodeClient[CHAIN_ID, HEAD]) SetAliveLoopSub(_a0 types.Subscription) {
+ _m.Called(_a0)
+}
+
+// Subscribe provides a mock function with given fields: ctx, channel, args
+func (_m *mockNodeClient[CHAIN_ID, HEAD]) Subscribe(ctx context.Context, channel chan<- HEAD, args ...interface{}) (types.Subscription, error) {
+ var _ca []interface{}
+ _ca = append(_ca, ctx, channel)
+ _ca = append(_ca, args...)
+ ret := _m.Called(_ca...)
+
+ var r0 types.Subscription
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, chan<- HEAD, ...interface{}) (types.Subscription, error)); ok {
+ return rf(ctx, channel, args...)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, chan<- HEAD, ...interface{}) types.Subscription); ok {
+ r0 = rf(ctx, channel, args...)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(types.Subscription)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, chan<- HEAD, ...interface{}) error); ok {
+ r1 = rf(ctx, channel, args...)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// SubscribersCount provides a mock function with given fields:
+func (_m *mockNodeClient[CHAIN_ID, HEAD]) SubscribersCount() int32 {
+ ret := _m.Called()
+
+ var r0 int32
+ if rf, ok := ret.Get(0).(func() int32); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(int32)
+ }
+
+ return r0
+}
+
+// UnsubscribeAllExceptAliveLoop provides a mock function with given fields:
+func (_m *mockNodeClient[CHAIN_ID, HEAD]) UnsubscribeAllExceptAliveLoop() {
+ _m.Called()
+}
+
+// newMockNodeClient creates a new instance of mockNodeClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func newMockNodeClient[CHAIN_ID types.ID, HEAD Head](t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *mockNodeClient[CHAIN_ID, HEAD] {
+ mock := &mockNodeClient[CHAIN_ID, HEAD]{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/common/client/mock_node_selector_test.go b/common/client/mock_node_selector_test.go
new file mode 100644
index 00000000000..e7b8d9ecb8d
--- /dev/null
+++ b/common/client/mock_node_selector_test.go
@@ -0,0 +1,57 @@
+// Code generated by mockery v2.35.4. DO NOT EDIT.
+
+package client
+
+import (
+ types "github.com/smartcontractkit/chainlink/v2/common/types"
+ mock "github.com/stretchr/testify/mock"
+)
+
+// mockNodeSelector is an autogenerated mock type for the NodeSelector type
+type mockNodeSelector[CHAIN_ID types.ID, HEAD Head, RPC NodeClient[CHAIN_ID, HEAD]] struct {
+ mock.Mock
+}
+
+// Name provides a mock function with given fields:
+func (_m *mockNodeSelector[CHAIN_ID, HEAD, RPC]) Name() string {
+ ret := _m.Called()
+
+ var r0 string
+ if rf, ok := ret.Get(0).(func() string); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(string)
+ }
+
+ return r0
+}
+
+// Select provides a mock function with given fields:
+func (_m *mockNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, HEAD, RPC] {
+ ret := _m.Called()
+
+ var r0 Node[CHAIN_ID, HEAD, RPC]
+ if rf, ok := ret.Get(0).(func() Node[CHAIN_ID, HEAD, RPC]); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(Node[CHAIN_ID, HEAD, RPC])
+ }
+ }
+
+ return r0
+}
+
+// newMockNodeSelector creates a new instance of mockNodeSelector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func newMockNodeSelector[CHAIN_ID types.ID, HEAD Head, RPC NodeClient[CHAIN_ID, HEAD]](t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *mockNodeSelector[CHAIN_ID, HEAD, RPC] {
+ mock := &mockNodeSelector[CHAIN_ID, HEAD, RPC]{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/common/client/mock_node_test.go b/common/client/mock_node_test.go
new file mode 100644
index 00000000000..bd704cd2c6f
--- /dev/null
+++ b/common/client/mock_node_test.go
@@ -0,0 +1,195 @@
+// Code generated by mockery v2.35.4. DO NOT EDIT.
+
+package client
+
+import (
+ context "context"
+
+ types "github.com/smartcontractkit/chainlink/v2/common/types"
+ mock "github.com/stretchr/testify/mock"
+
+ utils "github.com/smartcontractkit/chainlink/v2/core/utils"
+)
+
+// mockNode is an autogenerated mock type for the Node type
+type mockNode[CHAIN_ID types.ID, HEAD Head, RPC NodeClient[CHAIN_ID, HEAD]] struct {
+ mock.Mock
+}
+
+// Close provides a mock function with given fields:
+func (_m *mockNode[CHAIN_ID, HEAD, RPC]) Close() error {
+ ret := _m.Called()
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func() error); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// ConfiguredChainID provides a mock function with given fields:
+func (_m *mockNode[CHAIN_ID, HEAD, RPC]) ConfiguredChainID() CHAIN_ID {
+ ret := _m.Called()
+
+ var r0 CHAIN_ID
+ if rf, ok := ret.Get(0).(func() CHAIN_ID); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(CHAIN_ID)
+ }
+
+ return r0
+}
+
+// Name provides a mock function with given fields:
+func (_m *mockNode[CHAIN_ID, HEAD, RPC]) Name() string {
+ ret := _m.Called()
+
+ var r0 string
+ if rf, ok := ret.Get(0).(func() string); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(string)
+ }
+
+ return r0
+}
+
+// Order provides a mock function with given fields:
+func (_m *mockNode[CHAIN_ID, HEAD, RPC]) Order() int32 {
+ ret := _m.Called()
+
+ var r0 int32
+ if rf, ok := ret.Get(0).(func() int32); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(int32)
+ }
+
+ return r0
+}
+
+// RPC provides a mock function with given fields:
+func (_m *mockNode[CHAIN_ID, HEAD, RPC]) RPC() RPC {
+ ret := _m.Called()
+
+ var r0 RPC
+ if rf, ok := ret.Get(0).(func() RPC); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(RPC)
+ }
+
+ return r0
+}
+
+// Start provides a mock function with given fields: _a0
+func (_m *mockNode[CHAIN_ID, HEAD, RPC]) Start(_a0 context.Context) error {
+ ret := _m.Called(_a0)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context) error); ok {
+ r0 = rf(_a0)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// State provides a mock function with given fields:
+func (_m *mockNode[CHAIN_ID, HEAD, RPC]) State() nodeState {
+ ret := _m.Called()
+
+ var r0 nodeState
+ if rf, ok := ret.Get(0).(func() nodeState); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(nodeState)
+ }
+
+ return r0
+}
+
+// StateAndLatest provides a mock function with given fields:
+func (_m *mockNode[CHAIN_ID, HEAD, RPC]) StateAndLatest() (nodeState, int64, *utils.Big) {
+ ret := _m.Called()
+
+ var r0 nodeState
+ var r1 int64
+ var r2 *utils.Big
+ if rf, ok := ret.Get(0).(func() (nodeState, int64, *utils.Big)); ok {
+ return rf()
+ }
+ if rf, ok := ret.Get(0).(func() nodeState); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(nodeState)
+ }
+
+ if rf, ok := ret.Get(1).(func() int64); ok {
+ r1 = rf()
+ } else {
+ r1 = ret.Get(1).(int64)
+ }
+
+ if rf, ok := ret.Get(2).(func() *utils.Big); ok {
+ r2 = rf()
+ } else {
+ if ret.Get(2) != nil {
+ r2 = ret.Get(2).(*utils.Big)
+ }
+ }
+
+ return r0, r1, r2
+}
+
+// String provides a mock function with given fields:
+func (_m *mockNode[CHAIN_ID, HEAD, RPC]) String() string {
+ ret := _m.Called()
+
+ var r0 string
+ if rf, ok := ret.Get(0).(func() string); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(string)
+ }
+
+ return r0
+}
+
+// SubscribersCount provides a mock function with given fields:
+func (_m *mockNode[CHAIN_ID, HEAD, RPC]) SubscribersCount() int32 {
+ ret := _m.Called()
+
+ var r0 int32
+ if rf, ok := ret.Get(0).(func() int32); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(int32)
+ }
+
+ return r0
+}
+
+// UnsubscribeAllExceptAliveLoop provides a mock function with given fields:
+func (_m *mockNode[CHAIN_ID, HEAD, RPC]) UnsubscribeAllExceptAliveLoop() {
+ _m.Called()
+}
+
+// newMockNode creates a new instance of mockNode. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func newMockNode[CHAIN_ID types.ID, HEAD Head, RPC NodeClient[CHAIN_ID, HEAD]](t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *mockNode[CHAIN_ID, HEAD, RPC] {
+ mock := &mockNode[CHAIN_ID, HEAD, RPC]{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/common/client/mock_rpc_test.go b/common/client/mock_rpc_test.go
new file mode 100644
index 00000000000..c378b9384e4
--- /dev/null
+++ b/common/client/mock_rpc_test.go
@@ -0,0 +1,608 @@
+// Code generated by mockery v2.35.4. DO NOT EDIT.
+
+package client
+
+import (
+ big "math/big"
+
+ assets "github.com/smartcontractkit/chainlink/v2/core/assets"
+
+ context "context"
+
+ feetypes "github.com/smartcontractkit/chainlink/v2/common/fee/types"
+
+ mock "github.com/stretchr/testify/mock"
+
+ types "github.com/smartcontractkit/chainlink/v2/common/types"
+)
+
+// mockRPC is an autogenerated mock type for the RPC type
+type mockRPC[CHAIN_ID types.ID, SEQ types.Sequence, ADDR types.Hashable, BLOCK_HASH types.Hashable, TX interface{}, TX_HASH types.Hashable, EVENT interface{}, EVENT_OPS interface{}, TX_RECEIPT types.Receipt[TX_HASH, BLOCK_HASH], FEE feetypes.Fee, HEAD types.Head[BLOCK_HASH]] struct {
+ mock.Mock
+}
+
+// BalanceAt provides a mock function with given fields: ctx, accountAddress, blockNumber
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) BalanceAt(ctx context.Context, accountAddress ADDR, blockNumber *big.Int) (*big.Int, error) {
+ ret := _m.Called(ctx, accountAddress, blockNumber)
+
+ var r0 *big.Int
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, ADDR, *big.Int) (*big.Int, error)); ok {
+ return rf(ctx, accountAddress, blockNumber)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, ADDR, *big.Int) *big.Int); ok {
+ r0 = rf(ctx, accountAddress, blockNumber)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*big.Int)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, ADDR, *big.Int) error); ok {
+ r1 = rf(ctx, accountAddress, blockNumber)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// BatchCallContext provides a mock function with given fields: ctx, b
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) BatchCallContext(ctx context.Context, b []interface{}) error {
+ ret := _m.Called(ctx, b)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, []interface{}) error); ok {
+ r0 = rf(ctx, b)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// BlockByHash provides a mock function with given fields: ctx, hash
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) BlockByHash(ctx context.Context, hash BLOCK_HASH) (HEAD, error) {
+ ret := _m.Called(ctx, hash)
+
+ var r0 HEAD
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, BLOCK_HASH) (HEAD, error)); ok {
+ return rf(ctx, hash)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, BLOCK_HASH) HEAD); ok {
+ r0 = rf(ctx, hash)
+ } else {
+ r0 = ret.Get(0).(HEAD)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, BLOCK_HASH) error); ok {
+ r1 = rf(ctx, hash)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// BlockByNumber provides a mock function with given fields: ctx, number
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) BlockByNumber(ctx context.Context, number *big.Int) (HEAD, error) {
+ ret := _m.Called(ctx, number)
+
+ var r0 HEAD
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (HEAD, error)); ok {
+ return rf(ctx, number)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, *big.Int) HEAD); ok {
+ r0 = rf(ctx, number)
+ } else {
+ r0 = ret.Get(0).(HEAD)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok {
+ r1 = rf(ctx, number)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CallContext provides a mock function with given fields: ctx, result, method, args
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error {
+ var _ca []interface{}
+ _ca = append(_ca, ctx, result, method)
+ _ca = append(_ca, args...)
+ ret := _m.Called(_ca...)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, interface{}, string, ...interface{}) error); ok {
+ r0 = rf(ctx, result, method, args...)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// CallContract provides a mock function with given fields: ctx, msg, blockNumber
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) CallContract(ctx context.Context, msg interface{}, blockNumber *big.Int) ([]byte, error) {
+ ret := _m.Called(ctx, msg, blockNumber)
+
+ var r0 []byte
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, interface{}, *big.Int) ([]byte, error)); ok {
+ return rf(ctx, msg, blockNumber)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, interface{}, *big.Int) []byte); ok {
+ r0 = rf(ctx, msg, blockNumber)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]byte)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, interface{}, *big.Int) error); ok {
+ r1 = rf(ctx, msg, blockNumber)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// ChainID provides a mock function with given fields: ctx
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) ChainID(ctx context.Context) (CHAIN_ID, error) {
+ ret := _m.Called(ctx)
+
+ var r0 CHAIN_ID
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (CHAIN_ID, error)); ok {
+ return rf(ctx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) CHAIN_ID); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(CHAIN_ID)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(ctx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// ClientVersion provides a mock function with given fields: _a0
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) ClientVersion(_a0 context.Context) (string, error) {
+ ret := _m.Called(_a0)
+
+ var r0 string
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (string, error)); ok {
+ return rf(_a0)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) string); ok {
+ r0 = rf(_a0)
+ } else {
+ r0 = ret.Get(0).(string)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(_a0)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Close provides a mock function with given fields:
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) Close() {
+ _m.Called()
+}
+
+// CodeAt provides a mock function with given fields: ctx, account, blockNumber
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) CodeAt(ctx context.Context, account ADDR, blockNumber *big.Int) ([]byte, error) {
+ ret := _m.Called(ctx, account, blockNumber)
+
+ var r0 []byte
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, ADDR, *big.Int) ([]byte, error)); ok {
+ return rf(ctx, account, blockNumber)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, ADDR, *big.Int) []byte); ok {
+ r0 = rf(ctx, account, blockNumber)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]byte)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, ADDR, *big.Int) error); ok {
+ r1 = rf(ctx, account, blockNumber)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Dial provides a mock function with given fields: ctx
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) Dial(ctx context.Context) error {
+ ret := _m.Called(ctx)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context) error); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// DialHTTP provides a mock function with given fields:
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) DialHTTP() error {
+ ret := _m.Called()
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func() error); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// DisconnectAll provides a mock function with given fields:
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) DisconnectAll() {
+ _m.Called()
+}
+
+// EstimateGas provides a mock function with given fields: ctx, call
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) EstimateGas(ctx context.Context, call interface{}) (uint64, error) {
+ ret := _m.Called(ctx, call)
+
+ var r0 uint64
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, interface{}) (uint64, error)); ok {
+ return rf(ctx, call)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, interface{}) uint64); ok {
+ r0 = rf(ctx, call)
+ } else {
+ r0 = ret.Get(0).(uint64)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, interface{}) error); ok {
+ r1 = rf(ctx, call)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// FilterEvents provides a mock function with given fields: ctx, query
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) FilterEvents(ctx context.Context, query EVENT_OPS) ([]EVENT, error) {
+ ret := _m.Called(ctx, query)
+
+ var r0 []EVENT
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, EVENT_OPS) ([]EVENT, error)); ok {
+ return rf(ctx, query)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, EVENT_OPS) []EVENT); ok {
+ r0 = rf(ctx, query)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]EVENT)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, EVENT_OPS) error); ok {
+ r1 = rf(ctx, query)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// LINKBalance provides a mock function with given fields: ctx, accountAddress, linkAddress
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) LINKBalance(ctx context.Context, accountAddress ADDR, linkAddress ADDR) (*assets.Link, error) {
+ ret := _m.Called(ctx, accountAddress, linkAddress)
+
+ var r0 *assets.Link
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, ADDR, ADDR) (*assets.Link, error)); ok {
+ return rf(ctx, accountAddress, linkAddress)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, ADDR, ADDR) *assets.Link); ok {
+ r0 = rf(ctx, accountAddress, linkAddress)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*assets.Link)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, ADDR, ADDR) error); ok {
+ r1 = rf(ctx, accountAddress, linkAddress)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// LatestBlockHeight provides a mock function with given fields: _a0
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) LatestBlockHeight(_a0 context.Context) (*big.Int, error) {
+ ret := _m.Called(_a0)
+
+ var r0 *big.Int
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok {
+ return rf(_a0)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok {
+ r0 = rf(_a0)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*big.Int)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(_a0)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// PendingSequenceAt provides a mock function with given fields: ctx, addr
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) PendingSequenceAt(ctx context.Context, addr ADDR) (SEQ, error) {
+ ret := _m.Called(ctx, addr)
+
+ var r0 SEQ
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, ADDR) (SEQ, error)); ok {
+ return rf(ctx, addr)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, ADDR) SEQ); ok {
+ r0 = rf(ctx, addr)
+ } else {
+ r0 = ret.Get(0).(SEQ)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, ADDR) error); ok {
+ r1 = rf(ctx, addr)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// SendEmptyTransaction provides a mock function with given fields: ctx, newTxAttempt, seq, gasLimit, fee, fromAddress
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) SendEmptyTransaction(ctx context.Context, newTxAttempt func(SEQ, uint32, FEE, ADDR) (interface{}, error), seq SEQ, gasLimit uint32, fee FEE, fromAddress ADDR) (string, error) {
+ ret := _m.Called(ctx, newTxAttempt, seq, gasLimit, fee, fromAddress)
+
+ var r0 string
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, func(SEQ, uint32, FEE, ADDR) (interface{}, error), SEQ, uint32, FEE, ADDR) (string, error)); ok {
+ return rf(ctx, newTxAttempt, seq, gasLimit, fee, fromAddress)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, func(SEQ, uint32, FEE, ADDR) (interface{}, error), SEQ, uint32, FEE, ADDR) string); ok {
+ r0 = rf(ctx, newTxAttempt, seq, gasLimit, fee, fromAddress)
+ } else {
+ r0 = ret.Get(0).(string)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, func(SEQ, uint32, FEE, ADDR) (interface{}, error), SEQ, uint32, FEE, ADDR) error); ok {
+ r1 = rf(ctx, newTxAttempt, seq, gasLimit, fee, fromAddress)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// SendTransaction provides a mock function with given fields: ctx, tx
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) SendTransaction(ctx context.Context, tx TX) error {
+ ret := _m.Called(ctx, tx)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, TX) error); ok {
+ r0 = rf(ctx, tx)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// SequenceAt provides a mock function with given fields: ctx, accountAddress, blockNumber
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) SequenceAt(ctx context.Context, accountAddress ADDR, blockNumber *big.Int) (SEQ, error) {
+ ret := _m.Called(ctx, accountAddress, blockNumber)
+
+ var r0 SEQ
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, ADDR, *big.Int) (SEQ, error)); ok {
+ return rf(ctx, accountAddress, blockNumber)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, ADDR, *big.Int) SEQ); ok {
+ r0 = rf(ctx, accountAddress, blockNumber)
+ } else {
+ r0 = ret.Get(0).(SEQ)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, ADDR, *big.Int) error); ok {
+ r1 = rf(ctx, accountAddress, blockNumber)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// SetAliveLoopSub provides a mock function with given fields: _a0
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) SetAliveLoopSub(_a0 types.Subscription) {
+ _m.Called(_a0)
+}
+
+// SimulateTransaction provides a mock function with given fields: ctx, tx
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) SimulateTransaction(ctx context.Context, tx TX) error {
+ ret := _m.Called(ctx, tx)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, TX) error); ok {
+ r0 = rf(ctx, tx)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Subscribe provides a mock function with given fields: ctx, channel, args
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) Subscribe(ctx context.Context, channel chan<- HEAD, args ...interface{}) (types.Subscription, error) {
+ var _ca []interface{}
+ _ca = append(_ca, ctx, channel)
+ _ca = append(_ca, args...)
+ ret := _m.Called(_ca...)
+
+ var r0 types.Subscription
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, chan<- HEAD, ...interface{}) (types.Subscription, error)); ok {
+ return rf(ctx, channel, args...)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, chan<- HEAD, ...interface{}) types.Subscription); ok {
+ r0 = rf(ctx, channel, args...)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(types.Subscription)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, chan<- HEAD, ...interface{}) error); ok {
+ r1 = rf(ctx, channel, args...)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// SubscribersCount provides a mock function with given fields:
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) SubscribersCount() int32 {
+ ret := _m.Called()
+
+ var r0 int32
+ if rf, ok := ret.Get(0).(func() int32); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(int32)
+ }
+
+ return r0
+}
+
+// TokenBalance provides a mock function with given fields: ctx, accountAddress, tokenAddress
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) TokenBalance(ctx context.Context, accountAddress ADDR, tokenAddress ADDR) (*big.Int, error) {
+ ret := _m.Called(ctx, accountAddress, tokenAddress)
+
+ var r0 *big.Int
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, ADDR, ADDR) (*big.Int, error)); ok {
+ return rf(ctx, accountAddress, tokenAddress)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, ADDR, ADDR) *big.Int); ok {
+ r0 = rf(ctx, accountAddress, tokenAddress)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*big.Int)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, ADDR, ADDR) error); ok {
+ r1 = rf(ctx, accountAddress, tokenAddress)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// TransactionByHash provides a mock function with given fields: ctx, txHash
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) TransactionByHash(ctx context.Context, txHash TX_HASH) (TX, error) {
+ ret := _m.Called(ctx, txHash)
+
+ var r0 TX
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, TX_HASH) (TX, error)); ok {
+ return rf(ctx, txHash)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, TX_HASH) TX); ok {
+ r0 = rf(ctx, txHash)
+ } else {
+ r0 = ret.Get(0).(TX)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, TX_HASH) error); ok {
+ r1 = rf(ctx, txHash)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// TransactionReceipt provides a mock function with given fields: ctx, txHash
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) TransactionReceipt(ctx context.Context, txHash TX_HASH) (TX_RECEIPT, error) {
+ ret := _m.Called(ctx, txHash)
+
+ var r0 TX_RECEIPT
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, TX_HASH) (TX_RECEIPT, error)); ok {
+ return rf(ctx, txHash)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, TX_HASH) TX_RECEIPT); ok {
+ r0 = rf(ctx, txHash)
+ } else {
+ r0 = ret.Get(0).(TX_RECEIPT)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, TX_HASH) error); ok {
+ r1 = rf(ctx, txHash)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// UnsubscribeAllExceptAliveLoop provides a mock function with given fields:
+func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) UnsubscribeAllExceptAliveLoop() {
+ _m.Called()
+}
+
+// newMockRPC creates a new instance of mockRPC. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func newMockRPC[CHAIN_ID types.ID, SEQ types.Sequence, ADDR types.Hashable, BLOCK_HASH types.Hashable, TX interface{}, TX_HASH types.Hashable, EVENT interface{}, EVENT_OPS interface{}, TX_RECEIPT types.Receipt[TX_HASH, BLOCK_HASH], FEE feetypes.Fee, HEAD types.Head[BLOCK_HASH]](t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD] {
+ mock := &mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/common/client/mock_send_only_client_test.go b/common/client/mock_send_only_client_test.go
new file mode 100644
index 00000000000..481b2602ea3
--- /dev/null
+++ b/common/client/mock_send_only_client_test.go
@@ -0,0 +1,72 @@
+// Code generated by mockery v2.35.4. DO NOT EDIT.
+
+package client
+
+import (
+ context "context"
+
+ types "github.com/smartcontractkit/chainlink/v2/common/types"
+ mock "github.com/stretchr/testify/mock"
+)
+
+// mockSendOnlyClient is an autogenerated mock type for the sendOnlyClient type
+type mockSendOnlyClient[CHAIN_ID types.ID] struct {
+ mock.Mock
+}
+
+// ChainID provides a mock function with given fields: _a0
+func (_m *mockSendOnlyClient[CHAIN_ID]) ChainID(_a0 context.Context) (CHAIN_ID, error) {
+ ret := _m.Called(_a0)
+
+ var r0 CHAIN_ID
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context) (CHAIN_ID, error)); ok {
+ return rf(_a0)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context) CHAIN_ID); ok {
+ r0 = rf(_a0)
+ } else {
+ r0 = ret.Get(0).(CHAIN_ID)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+ r1 = rf(_a0)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Close provides a mock function with given fields:
+func (_m *mockSendOnlyClient[CHAIN_ID]) Close() {
+ _m.Called()
+}
+
+// DialHTTP provides a mock function with given fields:
+func (_m *mockSendOnlyClient[CHAIN_ID]) DialHTTP() error {
+ ret := _m.Called()
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func() error); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// newMockSendOnlyClient creates a new instance of mockSendOnlyClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func newMockSendOnlyClient[CHAIN_ID types.ID](t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *mockSendOnlyClient[CHAIN_ID] {
+ mock := &mockSendOnlyClient[CHAIN_ID]{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/common/client/mock_send_only_node_test.go b/common/client/mock_send_only_node_test.go
new file mode 100644
index 00000000000..524d7d8a6c5
--- /dev/null
+++ b/common/client/mock_send_only_node_test.go
@@ -0,0 +1,127 @@
+// Code generated by mockery v2.35.4. DO NOT EDIT.
+
+package client
+
+import (
+ context "context"
+
+ types "github.com/smartcontractkit/chainlink/v2/common/types"
+ mock "github.com/stretchr/testify/mock"
+)
+
+// mockSendOnlyNode is an autogenerated mock type for the SendOnlyNode type
+type mockSendOnlyNode[CHAIN_ID types.ID, RPC sendOnlyClient[CHAIN_ID]] struct {
+ mock.Mock
+}
+
+// Close provides a mock function with given fields:
+func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) Close() error {
+ ret := _m.Called()
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func() error); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// ConfiguredChainID provides a mock function with given fields:
+func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) ConfiguredChainID() CHAIN_ID {
+ ret := _m.Called()
+
+ var r0 CHAIN_ID
+ if rf, ok := ret.Get(0).(func() CHAIN_ID); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(CHAIN_ID)
+ }
+
+ return r0
+}
+
+// Name provides a mock function with given fields:
+func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) Name() string {
+ ret := _m.Called()
+
+ var r0 string
+ if rf, ok := ret.Get(0).(func() string); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(string)
+ }
+
+ return r0
+}
+
+// RPC provides a mock function with given fields:
+func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) RPC() RPC {
+ ret := _m.Called()
+
+ var r0 RPC
+ if rf, ok := ret.Get(0).(func() RPC); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(RPC)
+ }
+
+ return r0
+}
+
+// Start provides a mock function with given fields: _a0
+func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) Start(_a0 context.Context) error {
+ ret := _m.Called(_a0)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context) error); ok {
+ r0 = rf(_a0)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// State provides a mock function with given fields:
+func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) State() nodeState {
+ ret := _m.Called()
+
+ var r0 nodeState
+ if rf, ok := ret.Get(0).(func() nodeState); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(nodeState)
+ }
+
+ return r0
+}
+
+// String provides a mock function with given fields:
+func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) String() string {
+ ret := _m.Called()
+
+ var r0 string
+ if rf, ok := ret.Get(0).(func() string); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(string)
+ }
+
+ return r0
+}
+
+// newMockSendOnlyNode creates a new instance of mockSendOnlyNode. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func newMockSendOnlyNode[CHAIN_ID types.ID, RPC sendOnlyClient[CHAIN_ID]](t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *mockSendOnlyNode[CHAIN_ID, RPC] {
+ mock := &mockSendOnlyNode[CHAIN_ID, RPC]{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/common/chains/client/models.go b/common/client/models.go
similarity index 84%
rename from common/chains/client/models.go
rename to common/client/models.go
index ebe7bb7576d..bd974f901fc 100644
--- a/common/chains/client/models.go
+++ b/common/client/models.go
@@ -1,5 +1,9 @@
package client
+import (
+ "fmt"
+)
+
type SendTxReturnCode int
// SendTxReturnCode is a generalized client error that dictates what should be the next action, depending on the RPC error response.
@@ -15,3 +19,21 @@ const (
ExceedsMaxFee // Attempt's fee was higher than the node's limit and got rejected.
FeeOutOfValidRange // This error is returned when we use a fee price suggested from an RPC, but the network rejects the attempt due to an invalid range(mostly used by L2 chains). Retry by requesting a new suggested fee price.
)
+
+type NodeTier int
+
+const (
+ Primary NodeTier = iota
+ Secondary
+)
+
+func (n NodeTier) String() string {
+ switch n {
+ case Primary:
+ return "primary"
+ case Secondary:
+ return "secondary"
+ default:
+ return fmt.Sprintf("NodeTier(%d)", n)
+ }
+}
diff --git a/common/client/multi_node.go b/common/client/multi_node.go
new file mode 100644
index 00000000000..c268cfb23cd
--- /dev/null
+++ b/common/client/multi_node.go
@@ -0,0 +1,638 @@
+package client
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "sync"
+ "time"
+
+ "github.com/pkg/errors"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+
+ "github.com/smartcontractkit/chainlink-relay/pkg/services"
+
+ feetypes "github.com/smartcontractkit/chainlink/v2/common/fee/types"
+ "github.com/smartcontractkit/chainlink/v2/common/types"
+ "github.com/smartcontractkit/chainlink/v2/core/assets"
+ "github.com/smartcontractkit/chainlink/v2/core/config"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
+)
+
+var (
+ // PromMultiNodeRPCNodeStates reports current RPC node state
+ PromMultiNodeRPCNodeStates = promauto.NewGaugeVec(prometheus.GaugeOpts{
+ Name: "multi_node_states",
+ Help: "The number of RPC nodes currently in the given state for the given chain",
+ }, []string{"network", "chainId", "state"})
+ ErroringNodeError = errors.New("no live nodes available")
+)
+
+// MultiNode is a generalized multi node client interface that includes methods to interact with different chains.
+// It also handles multiple node RPC connections simultaneously.
+type MultiNode[
+ CHAIN_ID types.ID,
+ SEQ types.Sequence,
+ ADDR types.Hashable,
+ BLOCK_HASH types.Hashable,
+ TX any,
+ TX_HASH types.Hashable,
+ EVENT any,
+ EVENT_OPS any,
+ TX_RECEIPT types.Receipt[TX_HASH, BLOCK_HASH],
+ FEE feetypes.Fee,
+ HEAD types.Head[BLOCK_HASH],
+ RPC_CLIENT RPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD],
+] interface {
+ clientAPI[
+ CHAIN_ID,
+ SEQ,
+ ADDR,
+ BLOCK_HASH,
+ TX,
+ TX_HASH,
+ EVENT,
+ EVENT_OPS,
+ TX_RECEIPT,
+ FEE,
+ HEAD,
+ ]
+ Close() error
+ NodeStates() map[string]string
+ SelectNodeRPC() (RPC_CLIENT, error)
+
+ BatchCallContextAll(ctx context.Context, b []any) error
+ ConfiguredChainID() CHAIN_ID
+ IsL2() bool
+}
+
+type multiNode[
+ CHAIN_ID types.ID,
+ SEQ types.Sequence,
+ ADDR types.Hashable,
+ BLOCK_HASH types.Hashable,
+ TX any,
+ TX_HASH types.Hashable,
+ EVENT any,
+ EVENT_OPS any,
+ TX_RECEIPT types.Receipt[TX_HASH, BLOCK_HASH],
+ FEE feetypes.Fee,
+ HEAD types.Head[BLOCK_HASH],
+ RPC_CLIENT RPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD],
+] struct {
+ services.StateMachine
+ nodes []Node[CHAIN_ID, HEAD, RPC_CLIENT]
+ sendonlys []SendOnlyNode[CHAIN_ID, RPC_CLIENT]
+ chainID CHAIN_ID
+ chainType config.ChainType
+ logger logger.Logger
+ selectionMode string
+ noNewHeadsThreshold time.Duration
+ nodeSelector NodeSelector[CHAIN_ID, HEAD, RPC_CLIENT]
+ leaseDuration time.Duration
+ leaseTicker *time.Ticker
+ chainFamily string
+ reportInterval time.Duration
+
+ activeMu sync.RWMutex
+ activeNode Node[CHAIN_ID, HEAD, RPC_CLIENT]
+
+ chStop utils.StopChan
+ wg sync.WaitGroup
+
+ sendOnlyErrorParser func(err error) SendTxReturnCode
+}
+
+func NewMultiNode[
+ CHAIN_ID types.ID,
+ SEQ types.Sequence,
+ ADDR types.Hashable,
+ BLOCK_HASH types.Hashable,
+ TX any,
+ TX_HASH types.Hashable,
+ EVENT any,
+ EVENT_OPS any,
+ TX_RECEIPT types.Receipt[TX_HASH, BLOCK_HASH],
+ FEE feetypes.Fee,
+ HEAD types.Head[BLOCK_HASH],
+ RPC_CLIENT RPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD],
+](
+ logger logger.Logger,
+ selectionMode string,
+ leaseDuration time.Duration,
+ noNewHeadsThreshold time.Duration,
+ nodes []Node[CHAIN_ID, HEAD, RPC_CLIENT],
+ sendonlys []SendOnlyNode[CHAIN_ID, RPC_CLIENT],
+ chainID CHAIN_ID,
+ chainType config.ChainType,
+ chainFamily string,
+ sendOnlyErrorParser func(err error) SendTxReturnCode,
+) MultiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT] {
+ nodeSelector := newNodeSelector(selectionMode, nodes)
+
+ lggr := logger.Named("MultiNode").With("chainID", chainID.String())
+
+ // Prometheus' default interval is 15s, set this to under 7.5s to avoid
+ // aliasing (see: https://en.wikipedia.org/wiki/Nyquist_frequency)
+ const reportInterval = 6500 * time.Millisecond
+ c := &multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]{
+ nodes: nodes,
+ sendonlys: sendonlys,
+ chainID: chainID,
+ chainType: chainType,
+ logger: lggr,
+ selectionMode: selectionMode,
+ noNewHeadsThreshold: noNewHeadsThreshold,
+ nodeSelector: nodeSelector,
+ chStop: make(chan struct{}),
+ leaseDuration: leaseDuration,
+ chainFamily: chainFamily,
+ sendOnlyErrorParser: sendOnlyErrorParser,
+ reportInterval: reportInterval,
+ }
+
+ c.logger.Debugf("The MultiNode is configured to use NodeSelectionMode: %s", selectionMode)
+
+ return c
+}
+
+// Dial starts every node in the pool
+//
+// Nodes handle their own redialing and runloops, so this function does not
+// return any error if the nodes aren't available
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) Dial(ctx context.Context) error {
+ return c.StartOnce("MultiNode", func() (merr error) {
+ if len(c.nodes) == 0 {
+ return errors.Errorf("no available nodes for chain %s", c.chainID.String())
+ }
+ var ms services.MultiStart
+ for _, n := range c.nodes {
+ if n.ConfiguredChainID().String() != c.chainID.String() {
+ return ms.CloseBecause(errors.Errorf("node %s has configured chain ID %s which does not match multinode configured chain ID of %s", n.String(), n.ConfiguredChainID().String(), c.chainID.String()))
+ }
+ rawNode, ok := n.(*node[CHAIN_ID, HEAD, RPC_CLIENT])
+ if ok {
+ // This is a bit hacky but it allows the node to be aware of
+ // pool state and prevent certain state transitions that might
+ // otherwise leave no nodes available. It is better to have one
+ // node in a degraded state than no nodes at all.
+ rawNode.nLiveNodes = c.nLiveNodes
+ }
+ // node will handle its own redialing and automatic recovery
+ if err := ms.Start(ctx, n); err != nil {
+ return err
+ }
+ }
+ for _, s := range c.sendonlys {
+ if s.ConfiguredChainID().String() != c.chainID.String() {
+ return ms.CloseBecause(errors.Errorf("sendonly node %s has configured chain ID %s which does not match multinode configured chain ID of %s", s.String(), s.ConfiguredChainID().String(), c.chainID.String()))
+ }
+ if err := ms.Start(ctx, s); err != nil {
+ return err
+ }
+ }
+ c.wg.Add(1)
+ go c.runLoop()
+
+ if c.leaseDuration.Seconds() > 0 && c.selectionMode != NodeSelectionModeRoundRobin {
+ c.logger.Infof("The MultiNode will switch to best node every %s", c.leaseDuration.String())
+ c.wg.Add(1)
+ go c.checkLeaseLoop()
+ } else {
+ c.logger.Info("Best node switching is disabled")
+ }
+
+ return nil
+ })
+}
+
+// Close tears down the MultiNode and closes all nodes
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) Close() error {
+ return c.StopOnce("MultiNode", func() error {
+ close(c.chStop)
+ c.wg.Wait()
+
+ return services.CloseAll(services.MultiCloser(c.nodes), services.MultiCloser(c.sendonlys))
+ })
+}
+
+// SelectNodeRPC returns an RPC of an active node. If there are no active nodes it returns an error.
+// Call this method from your chain-specific client implementation to access any chain-specific rpc calls.
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) SelectNodeRPC() (rpc RPC_CLIENT, err error) {
+ n, err := c.selectNode()
+ if err != nil {
+ return rpc, err
+ }
+ return n.RPC(), nil
+
+}
+
+// selectNode returns the active Node, if it is still nodeStateAlive, otherwise it selects a new one from the NodeSelector.
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) selectNode() (node Node[CHAIN_ID, HEAD, RPC_CLIENT], err error) {
+ c.activeMu.RLock()
+ node = c.activeNode
+ c.activeMu.RUnlock()
+ if node != nil && node.State() == nodeStateAlive {
+ return // still alive
+ }
+
+ // select a new one
+ c.activeMu.Lock()
+ defer c.activeMu.Unlock()
+ node = c.activeNode
+ if node != nil && node.State() == nodeStateAlive {
+ return // another goroutine beat us here
+ }
+
+ c.activeNode = c.nodeSelector.Select()
+
+ if c.activeNode == nil {
+ c.logger.Criticalw("No live RPC nodes available", "NodeSelectionMode", c.nodeSelector.Name())
+ errmsg := fmt.Errorf("no live nodes available for chain %s", c.chainID.String())
+ c.SvcErrBuffer.Append(errmsg)
+ err = ErroringNodeError
+ }
+
+ return c.activeNode, err
+}
+
+// nLiveNodes returns the number of currently alive nodes, as well as the highest block number and greatest total difficulty.
+// totalDifficulty will be 0 if all nodes return nil.
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) nLiveNodes() (nLiveNodes int, blockNumber int64, totalDifficulty *utils.Big) {
+ totalDifficulty = utils.NewBigI(0)
+ for _, n := range c.nodes {
+ if s, num, td := n.StateAndLatest(); s == nodeStateAlive {
+ nLiveNodes++
+ if num > blockNumber {
+ blockNumber = num
+ }
+ if td != nil && td.Cmp(totalDifficulty) > 0 {
+ totalDifficulty = td
+ }
+ }
+ }
+ return
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) checkLease() {
+ bestNode := c.nodeSelector.Select()
+ for _, n := range c.nodes {
+ // Terminate client subscriptions. Services are responsible for reconnecting, which will be routed to the new
+ // best node. Only terminate connections with more than 1 subscription to account for the aliveLoop subscription
+ if n.State() == nodeStateAlive && n != bestNode && n.SubscribersCount() > 1 {
+ c.logger.Infof("Switching to best node from %q to %q", n.String(), bestNode.String())
+ n.UnsubscribeAllExceptAliveLoop()
+ }
+ }
+
+ c.activeMu.Lock()
+ if bestNode != c.activeNode {
+ c.activeNode = bestNode
+ }
+ c.activeMu.Unlock()
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) checkLeaseLoop() {
+ defer c.wg.Done()
+ c.leaseTicker = time.NewTicker(c.leaseDuration)
+ defer c.leaseTicker.Stop()
+
+ for {
+ select {
+ case <-c.leaseTicker.C:
+ c.checkLease()
+ case <-c.chStop:
+ return
+ }
+ }
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) runLoop() {
+ defer c.wg.Done()
+
+ c.report()
+
+ monitor := time.NewTicker(utils.WithJitter(c.reportInterval))
+ defer monitor.Stop()
+
+ for {
+ select {
+ case <-monitor.C:
+ c.report()
+ case <-c.chStop:
+ return
+ }
+ }
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) report() {
+ type nodeWithState struct {
+ Node string
+ State string
+ }
+
+ var total, dead int
+ counts := make(map[nodeState]int)
+ nodeStates := make([]nodeWithState, len(c.nodes))
+ for i, n := range c.nodes {
+ state := n.State()
+ nodeStates[i] = nodeWithState{n.String(), state.String()}
+ total++
+ if state != nodeStateAlive {
+ dead++
+ }
+ counts[state]++
+ }
+ for _, state := range allNodeStates {
+ count := counts[state]
+ PromMultiNodeRPCNodeStates.WithLabelValues(c.chainFamily, c.chainID.String(), state.String()).Set(float64(count))
+ }
+
+ live := total - dead
+ c.logger.Tracew(fmt.Sprintf("MultiNode state: %d/%d nodes are alive", live, total), "nodeStates", nodeStates)
+ if total == dead {
+ rerr := fmt.Errorf("no primary nodes available: 0/%d nodes are alive", total)
+ c.logger.Criticalw(rerr.Error(), "nodeStates", nodeStates)
+ c.SvcErrBuffer.Append(rerr)
+ } else if dead > 0 {
+ c.logger.Errorw(fmt.Sprintf("At least one primary node is dead: %d/%d nodes are alive", live, total), "nodeStates", nodeStates)
+ }
+}
+
+// ClientAPI methods
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) BalanceAt(ctx context.Context, account ADDR, blockNumber *big.Int) (*big.Int, error) {
+ n, err := c.selectNode()
+ if err != nil {
+ return nil, err
+ }
+ return n.RPC().BalanceAt(ctx, account, blockNumber)
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) BatchCallContext(ctx context.Context, b []any) error {
+ n, err := c.selectNode()
+ if err != nil {
+ return err
+ }
+ return n.RPC().BatchCallContext(ctx, b)
+}
+
+// BatchCallContextAll calls BatchCallContext for every single node including
+// sendonlys.
+// CAUTION: This should only be used for mass re-transmitting transactions, it
+// might have unexpected effects to use it for anything else.
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) BatchCallContextAll(ctx context.Context, b []any) error {
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ main, selectionErr := c.selectNode()
+ var all []SendOnlyNode[CHAIN_ID, RPC_CLIENT]
+ for _, n := range c.nodes {
+ all = append(all, n)
+ }
+ all = append(all, c.sendonlys...)
+ for _, n := range all {
+ if n == main {
+ // main node is used at the end for the return value
+ continue
+ }
+ // Parallel call made to all other nodes with ignored return value
+ wg.Add(1)
+ go func(n SendOnlyNode[CHAIN_ID, RPC_CLIENT]) {
+ defer wg.Done()
+ err := n.RPC().BatchCallContext(ctx, b)
+ if err != nil {
+ c.logger.Debugw("Secondary node BatchCallContext failed", "err", err)
+ } else {
+ c.logger.Trace("Secondary node BatchCallContext success")
+ }
+ }(n)
+ }
+
+ if selectionErr != nil {
+ return selectionErr
+ }
+ return main.RPC().BatchCallContext(ctx, b)
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) BlockByHash(ctx context.Context, hash BLOCK_HASH) (h HEAD, err error) {
+ n, err := c.selectNode()
+ if err != nil {
+ return h, err
+ }
+ return n.RPC().BlockByHash(ctx, hash)
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) BlockByNumber(ctx context.Context, number *big.Int) (h HEAD, err error) {
+ n, err := c.selectNode()
+ if err != nil {
+ return h, err
+ }
+ return n.RPC().BlockByNumber(ctx, number)
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error {
+ n, err := c.selectNode()
+ if err != nil {
+ return err
+ }
+ return n.RPC().CallContext(ctx, result, method, args...)
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) CallContract(
+ ctx context.Context,
+ attempt interface{},
+ blockNumber *big.Int,
+) (rpcErr []byte, extractErr error) {
+ n, err := c.selectNode()
+ if err != nil {
+ return rpcErr, err
+ }
+ return n.RPC().CallContract(ctx, attempt, blockNumber)
+}
+
+// ChainID makes a direct RPC call. In most cases it should be better to use the configured chain id instead by
+// calling ConfiguredChainID.
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) ChainID(ctx context.Context) (id CHAIN_ID, err error) {
+ n, err := c.selectNode()
+ if err != nil {
+ return id, err
+ }
+ return n.RPC().ChainID(ctx)
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) ChainType() config.ChainType {
+ return c.chainType
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) CodeAt(ctx context.Context, account ADDR, blockNumber *big.Int) (code []byte, err error) {
+ n, err := c.selectNode()
+ if err != nil {
+ return code, err
+ }
+ return n.RPC().CodeAt(ctx, account, blockNumber)
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) ConfiguredChainID() CHAIN_ID {
+ return c.chainID
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) EstimateGas(ctx context.Context, call any) (gas uint64, err error) {
+ n, err := c.selectNode()
+ if err != nil {
+ return gas, err
+ }
+ return n.RPC().EstimateGas(ctx, call)
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) FilterEvents(ctx context.Context, query EVENT_OPS) (e []EVENT, err error) {
+ n, err := c.selectNode()
+ if err != nil {
+ return e, err
+ }
+ return n.RPC().FilterEvents(ctx, query)
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) IsL2() bool {
+ return c.ChainType().IsL2()
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) LatestBlockHeight(ctx context.Context) (h *big.Int, err error) {
+ n, err := c.selectNode()
+ if err != nil {
+ return h, err
+ }
+ return n.RPC().LatestBlockHeight(ctx)
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) LINKBalance(ctx context.Context, accountAddress ADDR, linkAddress ADDR) (b *assets.Link, err error) {
+ n, err := c.selectNode()
+ if err != nil {
+ return b, err
+ }
+ return n.RPC().LINKBalance(ctx, accountAddress, linkAddress)
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) NodeStates() (states map[string]string) {
+ states = make(map[string]string)
+ for _, n := range c.nodes {
+ states[n.Name()] = n.State().String()
+ }
+ for _, s := range c.sendonlys {
+ states[s.Name()] = s.State().String()
+ }
+ return
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) PendingSequenceAt(ctx context.Context, addr ADDR) (s SEQ, err error) {
+ n, err := c.selectNode()
+ if err != nil {
+ return s, err
+ }
+ return n.RPC().PendingSequenceAt(ctx, addr)
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) SendEmptyTransaction(
+ ctx context.Context,
+ newTxAttempt func(seq SEQ, feeLimit uint32, fee FEE, fromAddress ADDR) (attempt any, err error),
+ seq SEQ,
+ gasLimit uint32,
+ fee FEE,
+ fromAddress ADDR,
+) (txhash string, err error) {
+ n, err := c.selectNode()
+ if err != nil {
+ return txhash, err
+ }
+ return n.RPC().SendEmptyTransaction(ctx, newTxAttempt, seq, gasLimit, fee, fromAddress)
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) SendTransaction(ctx context.Context, tx TX) error {
+ main, nodeError := c.selectNode()
+ var all []SendOnlyNode[CHAIN_ID, RPC_CLIENT]
+ for _, n := range c.nodes {
+ all = append(all, n)
+ }
+ all = append(all, c.sendonlys...)
+ for _, n := range all {
+ if n == main {
+ // main node is used at the end for the return value
+ continue
+ }
+ // Parallel send to all other nodes with ignored return value
+ // Async - we do not want to block the main thread with secondary nodes
+ // in case they are unreliable/slow.
+ // It is purely a "best effort" send.
+ // Resource is not unbounded because the default context has a timeout.
+ ok := c.IfNotStopped(func() {
+ // Must wrap inside IfNotStopped to avoid waitgroup racing with Close
+ c.wg.Add(1)
+ go func(n SendOnlyNode[CHAIN_ID, RPC_CLIENT]) {
+ defer c.wg.Done()
+
+ txErr := n.RPC().SendTransaction(ctx, tx)
+ c.logger.Debugw("Sendonly node sent transaction", "name", n.String(), "tx", tx, "err", txErr)
+ sendOnlyError := c.sendOnlyErrorParser(txErr)
+ if sendOnlyError != Successful {
+ c.logger.Warnw("RPC returned error", "name", n.String(), "tx", tx, "err", txErr)
+ }
+ }(n)
+ })
+ if !ok {
+ c.logger.Debugw("Cannot send transaction on sendonly node; MultiNode is stopped", "node", n.String())
+ }
+ }
+ if nodeError != nil {
+ return nodeError
+ }
+ return main.RPC().SendTransaction(ctx, tx)
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) SequenceAt(ctx context.Context, account ADDR, blockNumber *big.Int) (s SEQ, err error) {
+ n, err := c.selectNode()
+ if err != nil {
+ return s, err
+ }
+ return n.RPC().SequenceAt(ctx, account, blockNumber)
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) SimulateTransaction(ctx context.Context, tx TX) error {
+ n, err := c.selectNode()
+ if err != nil {
+ return err
+ }
+ return n.RPC().SimulateTransaction(ctx, tx)
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) Subscribe(ctx context.Context, channel chan<- HEAD, args ...interface{}) (s types.Subscription, err error) {
+ n, err := c.selectNode()
+ if err != nil {
+ return s, err
+ }
+ return n.RPC().Subscribe(ctx, channel, args...)
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) TokenBalance(ctx context.Context, account ADDR, tokenAddr ADDR) (b *big.Int, err error) {
+ n, err := c.selectNode()
+ if err != nil {
+ return b, err
+ }
+ return n.RPC().TokenBalance(ctx, account, tokenAddr)
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) TransactionByHash(ctx context.Context, txHash TX_HASH) (tx TX, err error) {
+ n, err := c.selectNode()
+ if err != nil {
+ return tx, err
+ }
+ return n.RPC().TransactionByHash(ctx, txHash)
+}
+
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) TransactionReceipt(ctx context.Context, txHash TX_HASH) (txr TX_RECEIPT, err error) {
+ n, err := c.selectNode()
+ if err != nil {
+ return txr, err
+ }
+ return n.RPC().TransactionReceipt(ctx, txHash)
+}
diff --git a/common/client/multi_node_test.go b/common/client/multi_node_test.go
new file mode 100644
index 00000000000..1fddbc3be3c
--- /dev/null
+++ b/common/client/multi_node_test.go
@@ -0,0 +1,635 @@
+package client
+
+import (
+ "fmt"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+
+ "github.com/smartcontractkit/chainlink-relay/pkg/utils/tests"
+
+ "github.com/smartcontractkit/chainlink/v2/common/types"
+ "github.com/smartcontractkit/chainlink/v2/core/config"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
+)
+
+// multiNodeRPCClient pins one concrete instantiation of the generic RPC
+// interface so that all mocks and helpers in this file share the same type
+// arguments.
+type multiNodeRPCClient RPC[types.ID, *utils.Big, Hashable, Hashable, any, Hashable, any, any,
+ types.Receipt[Hashable, Hashable], Hashable, types.Head[Hashable]]
+
+// testMultiNode embeds the concrete multiNode instantiation so tests can
+// reach its unexported fields (nodeSelector, reportInterval, activeMu, ...).
+type testMultiNode struct {
+ *multiNode[types.ID, *utils.Big, Hashable, Hashable, any, Hashable, any, any,
+ types.Receipt[Hashable, Hashable], Hashable, types.Head[Hashable], multiNodeRPCClient]
+}
+
+// multiNodeOpts bundles the NewMultiNode constructor arguments; zero values
+// are acceptable for any field a particular test does not exercise.
+type multiNodeOpts struct {
+ logger logger.Logger
+ selectionMode string
+ leaseDuration time.Duration
+ noNewHeadsThreshold time.Duration
+ nodes []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]
+ sendonlys []SendOnlyNode[types.ID, multiNodeRPCClient]
+ chainID types.ID
+ chainType config.ChainType
+ chainFamily string
+ sendOnlyErrorParser func(err error) SendTxReturnCode
+}
+
+// newTestMultiNode constructs a multiNode from opts, defaulting the logger to
+// a test logger when none is supplied.
+func newTestMultiNode(t *testing.T, opts multiNodeOpts) testMultiNode {
+ if opts.logger == nil {
+ opts.logger = logger.TestLogger(t)
+ }
+
+ result := NewMultiNode[types.ID, *utils.Big, Hashable, Hashable, any, Hashable, any, any,
+ types.Receipt[Hashable, Hashable], Hashable, types.Head[Hashable], multiNodeRPCClient](opts.logger,
+ opts.selectionMode, opts.leaseDuration, opts.noNewHeadsThreshold, opts.nodes, opts.sendonlys,
+ opts.chainID, opts.chainType, opts.chainFamily, opts.sendOnlyErrorParser)
+ // Downcast to the concrete type so tests can poke unexported fields.
+ return testMultiNode{
+ result.(*multiNode[types.ID, *utils.Big, Hashable, Hashable, any, Hashable, any, any,
+ types.Receipt[Hashable, Hashable], Hashable, types.Head[Hashable], multiNodeRPCClient]),
+ }
+}
+
+// newMultiNodeRPCClient returns a mock RPC with the same type instantiation
+// as multiNodeRPCClient.
+func newMultiNodeRPCClient(t *testing.T) *mockRPC[types.ID, *utils.Big, Hashable, Hashable, any, Hashable, any, any,
+ types.Receipt[Hashable, Hashable], Hashable, types.Head[Hashable]] {
+ return newMockRPC[types.ID, *utils.Big, Hashable, Hashable, any, Hashable, any, any,
+ types.Receipt[Hashable, Hashable], Hashable, types.Head[Hashable]](t)
+}
+
+// newHealthyNode returns a mock node for chainID that starts, closes, and
+// reports nodeStateAlive.
+func newHealthyNode(t *testing.T, chainID types.ID) *mockNode[types.ID, types.Head[Hashable], multiNodeRPCClient] {
+ return newNodeWithState(t, chainID, nodeStateAlive)
+}
+
+// newNodeWithState returns a mock node for chainID that starts and closes
+// cleanly and reports the given state.
+// NOTE(review): String() says "healthy_node_..." regardless of state — the
+// name is only used for logging, but it is misleading for non-alive states.
+func newNodeWithState(t *testing.T, chainID types.ID, state nodeState) *mockNode[types.ID, types.Head[Hashable], multiNodeRPCClient] {
+ node := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ node.On("ConfiguredChainID").Return(chainID).Once()
+ node.On("Start", mock.Anything).Return(nil).Once()
+ node.On("Close").Return(nil).Once()
+ node.On("State").Return(state).Maybe()
+ node.On("String").Return(fmt.Sprintf("healthy_node_%d", rand.Int())).Maybe()
+ return node
+}
+// TestMultiNode_Dial covers Dial: rejection of empty pools and chain-ID
+// mismatches (primary and sendonly), propagation of node start failures,
+// cleanup of already-started nodes on failure, and the happy path.
+func TestMultiNode_Dial(t *testing.T) {
+ t.Parallel()
+
+ // Shadow the generic constructors with this file's concrete instantiation.
+ newMockNode := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient]
+ newMockSendOnlyNode := newMockSendOnlyNode[types.ID, multiNodeRPCClient]
+
+ t.Run("Fails without nodes", func(t *testing.T) {
+ t.Parallel()
+ mn := newTestMultiNode(t, multiNodeOpts{
+ selectionMode: NodeSelectionModeRoundRobin,
+ chainID: types.RandomID(),
+ })
+ err := mn.Dial(tests.Context(t))
+ assert.EqualError(t, err, fmt.Sprintf("no available nodes for chain %s", mn.chainID.String()))
+ })
+ t.Run("Fails with wrong node's chainID", func(t *testing.T) {
+ t.Parallel()
+ node := newMockNode(t)
+ multiNodeChainID := types.NewIDFromInt(10)
+ nodeChainID := types.NewIDFromInt(11)
+ node.On("ConfiguredChainID").Return(nodeChainID).Twice()
+ const nodeName = "nodeName"
+ node.On("String").Return(nodeName).Once()
+ mn := newTestMultiNode(t, multiNodeOpts{
+ selectionMode: NodeSelectionModeRoundRobin,
+ chainID: multiNodeChainID,
+ nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node},
+ })
+ err := mn.Dial(tests.Context(t))
+ assert.EqualError(t, err, fmt.Sprintf("node %s has configured chain ID %s which does not match multinode configured chain ID of %s", nodeName, nodeChainID, mn.chainID))
+ })
+ t.Run("Fails if node fails", func(t *testing.T) {
+ t.Parallel()
+ node := newMockNode(t)
+ chainID := types.RandomID()
+ node.On("ConfiguredChainID").Return(chainID).Once()
+ expectedError := errors.New("failed to start node")
+ node.On("Start", mock.Anything).Return(expectedError).Once()
+ mn := newTestMultiNode(t, multiNodeOpts{
+ selectionMode: NodeSelectionModeRoundRobin,
+ chainID: chainID,
+ nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node},
+ })
+ err := mn.Dial(tests.Context(t))
+ assert.EqualError(t, err, expectedError.Error())
+ })
+
+ t.Run("Closes started nodes on failure", func(t *testing.T) {
+ t.Parallel()
+ chainID := types.RandomID()
+ // node1's Close expectation (set by newHealthyNode) verifies cleanup.
+ node1 := newHealthyNode(t, chainID)
+ node2 := newMockNode(t)
+ node2.On("ConfiguredChainID").Return(chainID).Once()
+ expectedError := errors.New("failed to start node")
+ node2.On("Start", mock.Anything).Return(expectedError).Once()
+
+ mn := newTestMultiNode(t, multiNodeOpts{
+ selectionMode: NodeSelectionModeRoundRobin,
+ chainID: chainID,
+ nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node1, node2},
+ })
+ err := mn.Dial(tests.Context(t))
+ assert.EqualError(t, err, expectedError.Error())
+ })
+ t.Run("Fails with wrong send only node's chainID", func(t *testing.T) {
+ t.Parallel()
+ multiNodeChainID := types.NewIDFromInt(10)
+ node := newHealthyNode(t, multiNodeChainID)
+ sendOnly := newMockSendOnlyNode(t)
+ sendOnlyChainID := types.NewIDFromInt(11)
+ sendOnly.On("ConfiguredChainID").Return(sendOnlyChainID).Twice()
+ const sendOnlyName = "sendOnlyNodeName"
+ sendOnly.On("String").Return(sendOnlyName).Once()
+
+ mn := newTestMultiNode(t, multiNodeOpts{
+ selectionMode: NodeSelectionModeRoundRobin,
+ chainID: multiNodeChainID,
+ nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node},
+ sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{sendOnly},
+ })
+ err := mn.Dial(tests.Context(t))
+ assert.EqualError(t, err, fmt.Sprintf("sendonly node %s has configured chain ID %s which does not match multinode configured chain ID of %s", sendOnlyName, sendOnlyChainID, mn.chainID))
+ })
+
+ // newHealthySendOnly mirrors newHealthyNode for sendonly nodes.
+ newHealthySendOnly := func(t *testing.T, chainID types.ID) *mockSendOnlyNode[types.ID, multiNodeRPCClient] {
+ node := newMockSendOnlyNode(t)
+ node.On("ConfiguredChainID").Return(chainID).Once()
+ node.On("Start", mock.Anything).Return(nil).Once()
+ node.On("Close").Return(nil).Once()
+ return node
+ }
+ t.Run("Fails on send only node failure", func(t *testing.T) {
+ t.Parallel()
+ chainID := types.NewIDFromInt(10)
+ node := newHealthyNode(t, chainID)
+ sendOnly1 := newHealthySendOnly(t, chainID)
+ sendOnly2 := newMockSendOnlyNode(t)
+ sendOnly2.On("ConfiguredChainID").Return(chainID).Once()
+ expectedError := errors.New("failed to start send only node")
+ sendOnly2.On("Start", mock.Anything).Return(expectedError).Once()
+
+ mn := newTestMultiNode(t, multiNodeOpts{
+ selectionMode: NodeSelectionModeRoundRobin,
+ chainID: chainID,
+ nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node},
+ sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{sendOnly1, sendOnly2},
+ })
+ err := mn.Dial(tests.Context(t))
+ assert.EqualError(t, err, expectedError.Error())
+ })
+ t.Run("Starts successfully with healthy nodes", func(t *testing.T) {
+ t.Parallel()
+ chainID := types.NewIDFromInt(10)
+ node := newHealthyNode(t, chainID)
+ mn := newTestMultiNode(t, multiNodeOpts{
+ selectionMode: NodeSelectionModeRoundRobin,
+ chainID: chainID,
+ nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node},
+ sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{newHealthySendOnly(t, chainID)},
+ })
+ defer func() { assert.NoError(t, mn.Close()) }()
+ err := mn.Dial(tests.Context(t))
+ require.NoError(t, err)
+ selectedNode, err := mn.selectNode()
+ require.NoError(t, err)
+ assert.Equal(t, node, selectedNode)
+ })
+}
+
+// TestMultiNode_Report verifies the periodic liveness report started by Dial:
+// a warning when some primaries are dead and a critical error (also surfaced
+// via Healthy) when none are alive.
+func TestMultiNode_Report(t *testing.T) {
+ t.Parallel()
+ t.Run("Dial starts periodical reporting", func(t *testing.T) {
+ t.Parallel()
+ chainID := types.RandomID()
+ node1 := newHealthyNode(t, chainID)
+ node2 := newNodeWithState(t, chainID, nodeStateOutOfSync)
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.WarnLevel)
+ mn := newTestMultiNode(t, multiNodeOpts{
+ selectionMode: NodeSelectionModeRoundRobin,
+ chainID: chainID,
+ nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node1, node2},
+ logger: lggr,
+ })
+ // Shorten the report period so the test observes multiple reports.
+ mn.reportInterval = tests.TestInterval
+ defer func() { assert.NoError(t, mn.Close()) }()
+ err := mn.Dial(tests.Context(t))
+ require.NoError(t, err)
+ tests.AssertLogCountEventually(t, observedLogs, "At least one primary node is dead: 1/2 nodes are alive", 2)
+ })
+ t.Run("Report critical error on all node failure", func(t *testing.T) {
+ t.Parallel()
+ chainID := types.RandomID()
+ node := newNodeWithState(t, chainID, nodeStateOutOfSync)
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.WarnLevel)
+ mn := newTestMultiNode(t, multiNodeOpts{
+ selectionMode: NodeSelectionModeRoundRobin,
+ chainID: chainID,
+ nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node},
+ logger: lggr,
+ })
+ mn.reportInterval = tests.TestInterval
+ defer func() { assert.NoError(t, mn.Close()) }()
+ err := mn.Dial(tests.Context(t))
+ require.NoError(t, err)
+ tests.AssertLogCountEventually(t, observedLogs, "no primary nodes available: 0/1 nodes are alive", 2)
+ // The same condition is reported through the health check.
+ err = mn.Healthy()
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "no primary nodes available: 0/1 nodes are alive")
+ })
+}
+
+// TestMultiNode_CheckLease exercises the best-node lease loop: disabled for
+// round-robin mode and for a zero lease duration, and switching the active
+// node to the selector's choice when enabled.
+func TestMultiNode_CheckLease(t *testing.T) {
+ t.Parallel()
+ t.Run("Round robin disables lease check", func(t *testing.T) {
+ t.Parallel()
+ chainID := types.RandomID()
+ node := newHealthyNode(t, chainID)
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.InfoLevel)
+ mn := newTestMultiNode(t, multiNodeOpts{
+ selectionMode: NodeSelectionModeRoundRobin,
+ chainID: chainID,
+ logger: lggr,
+ nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node},
+ })
+ defer func() { assert.NoError(t, mn.Close()) }()
+ err := mn.Dial(tests.Context(t))
+ require.NoError(t, err)
+ tests.RequireLogMessage(t, observedLogs, "Best node switching is disabled")
+ })
+ t.Run("Misconfigured lease check period won't start", func(t *testing.T) {
+ t.Parallel()
+ chainID := types.RandomID()
+ node := newHealthyNode(t, chainID)
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.InfoLevel)
+ mn := newTestMultiNode(t, multiNodeOpts{
+ selectionMode: NodeSelectionModeHighestHead,
+ chainID: chainID,
+ logger: lggr,
+ nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node},
+ leaseDuration: 0,
+ })
+ defer func() { assert.NoError(t, mn.Close()) }()
+ err := mn.Dial(tests.Context(t))
+ require.NoError(t, err)
+ tests.RequireLogMessage(t, observedLogs, "Best node switching is disabled")
+ })
+ t.Run("Lease check updates active node", func(t *testing.T) {
+ t.Parallel()
+ chainID := types.RandomID()
+ node := newHealthyNode(t, chainID)
+ node.On("SubscribersCount").Return(int32(2))
+ node.On("UnsubscribeAllExceptAliveLoop")
+ bestNode := newHealthyNode(t, chainID)
+ // Force the selector to always prefer bestNode.
+ nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ nodeSelector.On("Select").Return(bestNode)
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.InfoLevel)
+ mn := newTestMultiNode(t, multiNodeOpts{
+ selectionMode: NodeSelectionModeHighestHead,
+ chainID: chainID,
+ logger: lggr,
+ nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node, bestNode},
+ leaseDuration: tests.TestInterval,
+ })
+ defer func() { assert.NoError(t, mn.Close()) }()
+ mn.nodeSelector = nodeSelector
+ err := mn.Dial(tests.Context(t))
+ require.NoError(t, err)
+ tests.AssertLogEventually(t, observedLogs, fmt.Sprintf("Switching to best node from %q to %q", node.String(), bestNode.String()))
+ tests.AssertEventually(t, func() bool {
+ mn.activeMu.RLock()
+ active := mn.activeNode
+ mn.activeMu.RUnlock()
+ return bestNode == active
+ })
+ })
+ // NOTE(review): this subtest covers NodeStates rather than lease checking;
+ // consider moving it to its own top-level test.
+ t.Run("NodeStates returns proper states", func(t *testing.T) {
+ t.Parallel()
+ chainID := types.NewIDFromInt(10)
+ nodes := map[string]nodeState{
+ "node_1": nodeStateAlive,
+ "node_2": nodeStateUnreachable,
+ "node_3": nodeStateDialed,
+ }
+
+ opts := multiNodeOpts{
+ selectionMode: NodeSelectionModeRoundRobin,
+ chainID: chainID,
+ }
+
+ expectedResult := map[string]string{}
+ for name, state := range nodes {
+ node := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ node.On("Name").Return(name).Once()
+ node.On("State").Return(state).Once()
+ opts.nodes = append(opts.nodes, node)
+
+ sendOnly := newMockSendOnlyNode[types.ID, multiNodeRPCClient](t)
+ sendOnlyName := "send_only_" + name
+ sendOnly.On("Name").Return(sendOnlyName).Once()
+ sendOnly.On("State").Return(state).Once()
+ opts.sendonlys = append(opts.sendonlys, sendOnly)
+
+ expectedResult[name] = state.String()
+ expectedResult[sendOnlyName] = state.String()
+ }
+
+ mn := newTestMultiNode(t, opts)
+ states := mn.NodeStates()
+ assert.Equal(t, expectedResult, states)
+ })
+}
+
+// TestMultiNode_selectNode verifies selectNode's caching behavior: it keeps
+// the active node while healthy, re-selects when it degrades, and returns
+// ErroringNodeError (logging critically) when the selector has no candidate.
+func TestMultiNode_selectNode(t *testing.T) {
+ t.Parallel()
+ t.Run("Returns same node, if it's still healthy", func(t *testing.T) {
+ t.Parallel()
+ chainID := types.RandomID()
+ node1 := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ node1.On("State").Return(nodeStateAlive).Once()
+ node1.On("String").Return("node1").Maybe()
+ node2 := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ node2.On("String").Return("node2").Maybe()
+ mn := newTestMultiNode(t, multiNodeOpts{
+ selectionMode: NodeSelectionModeRoundRobin,
+ chainID: chainID,
+ nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node1, node2},
+ })
+ // Select is expected exactly once: the second call reuses the cache.
+ nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ nodeSelector.On("Select").Return(node1).Once()
+ mn.nodeSelector = nodeSelector
+ prevActiveNode, err := mn.selectNode()
+ require.NoError(t, err)
+ require.Equal(t, node1.String(), prevActiveNode.String())
+ newActiveNode, err := mn.selectNode()
+ require.NoError(t, err)
+ require.Equal(t, prevActiveNode.String(), newActiveNode.String())
+
+ })
+ t.Run("Updates node if active is not healthy", func(t *testing.T) {
+ t.Parallel()
+ chainID := types.RandomID()
+ oldBest := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ oldBest.On("String").Return("oldBest").Maybe()
+ newBest := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ newBest.On("String").Return("newBest").Maybe()
+ mn := newTestMultiNode(t, multiNodeOpts{
+ selectionMode: NodeSelectionModeRoundRobin,
+ chainID: chainID,
+ nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{oldBest, newBest},
+ })
+ nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ nodeSelector.On("Select").Return(oldBest).Once()
+ mn.nodeSelector = nodeSelector
+ activeNode, err := mn.selectNode()
+ require.NoError(t, err)
+ require.Equal(t, oldBest.String(), activeNode.String())
+ // old best died, so we should replace it
+ oldBest.On("State").Return(nodeStateOutOfSync).Twice()
+ nodeSelector.On("Select").Return(newBest).Once()
+ newActiveNode, err := mn.selectNode()
+ require.NoError(t, err)
+ require.Equal(t, newBest.String(), newActiveNode.String())
+
+ })
+ t.Run("No active nodes - reports critical error", func(t *testing.T) {
+ t.Parallel()
+ chainID := types.RandomID()
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.InfoLevel)
+ mn := newTestMultiNode(t, multiNodeOpts{
+ selectionMode: NodeSelectionModeRoundRobin,
+ chainID: chainID,
+ logger: lggr,
+ })
+ // Selector returning nil models "no live candidates".
+ nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ nodeSelector.On("Select").Return(nil).Once()
+ nodeSelector.On("Name").Return("MockedNodeSelector").Once()
+ mn.nodeSelector = nodeSelector
+ node, err := mn.selectNode()
+ require.EqualError(t, err, ErroringNodeError.Error())
+ require.Nil(t, node)
+ tests.RequireLogMessage(t, observedLogs, "No live RPC nodes available")
+
+ })
+}
+
+// TestMultiNode_nLiveNodes verifies nLiveNodes: only nodes reporting
+// nodeStateAlive are counted, the highest block number and highest non-nil
+// total difficulty among them are returned, and an empty pool yields zeros.
+func TestMultiNode_nLiveNodes(t *testing.T) {
+ t.Parallel()
+ type nodeParams struct {
+ BlockNumber int64
+ TotalDifficulty *utils.Big
+ State nodeState
+ }
+ testCases := []struct {
+ Name string
+ ExpectedNLiveNodes int
+ ExpectedBlockNumber int64
+ ExpectedTotalDifficulty *utils.Big
+ NodeParams []nodeParams
+ }{
+ {
+ Name: "no nodes",
+ ExpectedTotalDifficulty: utils.NewBigI(0),
+ },
+ {
+ Name: "Best node is not healthy",
+ ExpectedTotalDifficulty: utils.NewBigI(10),
+ ExpectedBlockNumber: 20,
+ ExpectedNLiveNodes: 3,
+ NodeParams: []nodeParams{
+ {
+ State: nodeStateOutOfSync,
+ BlockNumber: 1000,
+ TotalDifficulty: utils.NewBigI(2000),
+ },
+ {
+ State: nodeStateAlive,
+ BlockNumber: 20,
+ TotalDifficulty: utils.NewBigI(9),
+ },
+ {
+ State: nodeStateAlive,
+ BlockNumber: 19,
+ TotalDifficulty: utils.NewBigI(10),
+ },
+ {
+ State: nodeStateAlive,
+ BlockNumber: 11,
+ TotalDifficulty: nil,
+ },
+ },
+ },
+ }
+
+ chainID := types.RandomID()
+ for i := range testCases {
+ tc := testCases[i]
+ t.Run(tc.Name, func(t *testing.T) {
+ // Build a fresh multiNode per case; previously a shared instance
+ // accumulated nodes across subtests, so each case silently depended
+ // on all earlier cases' mocks.
+ mn := newTestMultiNode(t, multiNodeOpts{
+ selectionMode: NodeSelectionModeRoundRobin,
+ chainID: chainID,
+ })
+ for _, params := range tc.NodeParams {
+ node := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ node.On("StateAndLatest").Return(params.State, params.BlockNumber, params.TotalDifficulty)
+ mn.nodes = append(mn.nodes, node)
+ }
+
+ nNodes, blockNum, td := mn.nLiveNodes()
+ assert.Equal(t, tc.ExpectedNLiveNodes, nNodes)
+ assert.Equal(t, tc.ExpectedTotalDifficulty, td)
+ assert.Equal(t, tc.ExpectedBlockNumber, blockNum)
+ })
+ }
+}
+
+// TestMultiNode_BatchCallContextAll verifies BatchCallContextAll: selection
+// failures and the active node's RPC error are returned, while secondary
+// node failures are only logged.
+func TestMultiNode_BatchCallContextAll(t *testing.T) {
+ t.Parallel()
+ t.Run("Fails if failed to select active node", func(t *testing.T) {
+ chainID := types.RandomID()
+ mn := newTestMultiNode(t, multiNodeOpts{
+ selectionMode: NodeSelectionModeRoundRobin,
+ chainID: chainID,
+ })
+ nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ nodeSelector.On("Select").Return(nil).Once()
+ nodeSelector.On("Name").Return("MockedNodeSelector").Once()
+ mn.nodeSelector = nodeSelector
+ err := mn.BatchCallContextAll(tests.Context(t), nil)
+ require.EqualError(t, err, ErroringNodeError.Error())
+ })
+ t.Run("Returns error if RPC call fails for active node", func(t *testing.T) {
+ chainID := types.RandomID()
+ rpc := newMultiNodeRPCClient(t)
+ expectedError := errors.New("rpc failed to do the batch call")
+ rpc.On("BatchCallContext", mock.Anything, mock.Anything).Return(expectedError).Once()
+ node := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ node.On("RPC").Return(rpc)
+ nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ nodeSelector.On("Select").Return(node).Once()
+ mn := newTestMultiNode(t, multiNodeOpts{
+ selectionMode: NodeSelectionModeRoundRobin,
+ chainID: chainID,
+ })
+ mn.nodeSelector = nodeSelector
+ err := mn.BatchCallContextAll(tests.Context(t), nil)
+ require.EqualError(t, err, expectedError.Error())
+ })
+ t.Run("Waits for all nodes to complete the call and logs results", func(t *testing.T) {
+ // setup RPCs
+ failedRPC := newMultiNodeRPCClient(t)
+ failedRPC.On("BatchCallContext", mock.Anything, mock.Anything).
+ Return(errors.New("rpc failed to do the batch call")).Once()
+ okRPC := newMultiNodeRPCClient(t)
+ okRPC.On("BatchCallContext", mock.Anything, mock.Anything).Return(nil).Twice()
+
+ // setup ok and failed auxiliary nodes
+ okNode := newMockSendOnlyNode[types.ID, multiNodeRPCClient](t)
+ okNode.On("RPC").Return(okRPC).Once()
+ failedNode := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ failedNode.On("RPC").Return(failedRPC).Once()
+
+ // setup main node
+ mainNode := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ mainNode.On("RPC").Return(okRPC)
+ nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ nodeSelector.On("Select").Return(mainNode).Once()
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.DebugLevel)
+ mn := newTestMultiNode(t, multiNodeOpts{
+ selectionMode: NodeSelectionModeRoundRobin,
+ chainID: types.RandomID(),
+ nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{failedNode, mainNode},
+ sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{okNode},
+ logger: lggr,
+ })
+ mn.nodeSelector = nodeSelector
+
+ // The failing secondary must not fail the overall call, only log.
+ err := mn.BatchCallContextAll(tests.Context(t), nil)
+ require.NoError(t, err)
+ tests.RequireLogMessage(t, observedLogs, "Secondary node BatchCallContext failed")
+ })
+}
+
+// TestMultiNode_SendTransaction verifies SendTransaction: selection failures
+// and the main node's RPC error are returned, while sendonly/secondary node
+// results are classified via sendOnlyErrorParser and logged.
+func TestMultiNode_SendTransaction(t *testing.T) {
+ t.Parallel()
+ t.Run("Fails if failed to select active node", func(t *testing.T) {
+ chainID := types.RandomID()
+ mn := newTestMultiNode(t, multiNodeOpts{
+ selectionMode: NodeSelectionModeRoundRobin,
+ chainID: chainID,
+ })
+ nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ nodeSelector.On("Select").Return(nil).Once()
+ nodeSelector.On("Name").Return("MockedNodeSelector").Once()
+ mn.nodeSelector = nodeSelector
+ err := mn.SendTransaction(tests.Context(t), nil)
+ require.EqualError(t, err, ErroringNodeError.Error())
+ })
+ t.Run("Returns error if RPC call fails for active node", func(t *testing.T) {
+ chainID := types.RandomID()
+ rpc := newMultiNodeRPCClient(t)
+ expectedError := errors.New("rpc failed to do the batch call")
+ rpc.On("SendTransaction", mock.Anything, mock.Anything).Return(expectedError).Once()
+ node := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ node.On("RPC").Return(rpc)
+ nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ nodeSelector.On("Select").Return(node).Once()
+ mn := newTestMultiNode(t, multiNodeOpts{
+ selectionMode: NodeSelectionModeRoundRobin,
+ chainID: chainID,
+ })
+ mn.nodeSelector = nodeSelector
+ err := mn.SendTransaction(tests.Context(t), nil)
+ require.EqualError(t, err, expectedError.Error())
+ })
+ t.Run("Returns result of main node and logs secondary nodes results", func(t *testing.T) {
+ // setup RPCs
+ failedRPC := newMultiNodeRPCClient(t)
+ failedRPC.On("SendTransaction", mock.Anything, mock.Anything).
+ Return(errors.New("rpc failed to do the batch call")).Once()
+ okRPC := newMultiNodeRPCClient(t)
+ okRPC.On("SendTransaction", mock.Anything, mock.Anything).Return(nil).Twice()
+
+ // setup ok and failed auxiliary nodes
+ okNode := newMockSendOnlyNode[types.ID, multiNodeRPCClient](t)
+ okNode.On("RPC").Return(okRPC).Once()
+ okNode.On("String").Return("okNode")
+ failedNode := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ failedNode.On("RPC").Return(failedRPC).Once()
+ failedNode.On("String").Return("failedNode")
+
+ // setup main node
+ mainNode := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ mainNode.On("RPC").Return(okRPC)
+ nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t)
+ nodeSelector.On("Select").Return(mainNode).Once()
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.DebugLevel)
+ mn := newTestMultiNode(t, multiNodeOpts{
+ selectionMode: NodeSelectionModeRoundRobin,
+ chainID: types.RandomID(),
+ nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{failedNode, mainNode},
+ sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{okNode},
+ logger: lggr,
+ // Classify any non-nil error as Fatal for the purposes of this test.
+ sendOnlyErrorParser: func(err error) SendTxReturnCode {
+ if err != nil {
+ return Fatal
+ }
+
+ return Successful
+ },
+ })
+ mn.nodeSelector = nodeSelector
+
+ err := mn.SendTransaction(tests.Context(t), nil)
+ require.NoError(t, err)
+ tests.AssertLogEventually(t, observedLogs, "Sendonly node sent transaction")
+ tests.AssertLogEventually(t, observedLogs, "RPC returned error")
+ })
+}
diff --git a/common/client/node.go b/common/client/node.go
new file mode 100644
index 00000000000..f28a171a558
--- /dev/null
+++ b/common/client/node.go
@@ -0,0 +1,284 @@
+package client
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "sync"
+ "time"
+
+ "github.com/pkg/errors"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+
+ "github.com/smartcontractkit/chainlink-relay/pkg/services"
+
+ "github.com/smartcontractkit/chainlink/v2/common/types"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
+)
+
+// QueryTimeout bounds individual RPC queries issued during node lifecycle checks.
+const QueryTimeout = 10 * time.Second
+
+// errInvalidChainID signals that an RPC reported a chain ID different from the configured one.
+var errInvalidChainID = errors.New("invalid chain id")
+
+// Prometheus counters tracking chain-ID verification attempts and their
+// outcomes, labelled by network (chain family), chain ID and node name.
+var (
+ promPoolRPCNodeVerifies = promauto.NewCounterVec(prometheus.CounterOpts{
+ Name: "pool_rpc_node_verifies",
+ Help: "The total number of chain ID verifications for the given RPC node",
+ }, []string{"network", "chainID", "nodeName"})
+ promPoolRPCNodeVerifiesFailed = promauto.NewCounterVec(prometheus.CounterOpts{
+ Name: "pool_rpc_node_verifies_failed",
+ Help: "The total number of failed chain ID verifications for the given RPC node",
+ }, []string{"network", "chainID", "nodeName"})
+ promPoolRPCNodeVerifiesSuccess = promauto.NewCounterVec(prometheus.CounterOpts{
+ Name: "pool_rpc_node_verifies_success",
+ Help: "The total number of successful chain ID verifications for the given RPC node",
+ }, []string{"network", "chainID", "nodeName"})
+)
+
+// NodeConfig supplies the pool-level settings a node consults for health
+// checking and selection.
+type NodeConfig interface {
+ // PollFailureThreshold returns the number of consecutive poll failures tolerated.
+ PollFailureThreshold() uint32
+ // PollInterval returns how often the node is polled for liveness.
+ PollInterval() time.Duration
+ // SelectionMode returns the configured node-selection strategy name.
+ SelectionMode() string
+ // SyncThreshold returns how far behind the highest head a node may fall.
+ SyncThreshold() uint32
+}
+
+//go:generate mockery --quiet --name Node --structname mockNode --filename "mock_node_test.go" --inpackage --case=underscore
+// Node is a member of a multi-node pool: a lifecycle-managed wrapper around
+// a single RPC client with an observable health state.
+type Node[
+ CHAIN_ID types.ID,
+ HEAD Head,
+ RPC NodeClient[CHAIN_ID, HEAD],
+] interface {
+ // State returns nodeState
+ State() nodeState
+ // StateAndLatest returns nodeState with the latest received block number & total difficulty.
+ StateAndLatest() (nodeState, int64, *utils.Big)
+ // Name is a unique identifier for this node.
+ Name() string
+ // String renders a human-readable description for logs.
+ String() string
+ // RPC returns the underlying client used to talk to this node.
+ RPC() RPC
+ // SubscribersCount reports how many subscriptions the RPC currently holds.
+ SubscribersCount() int32
+ // UnsubscribeAllExceptAliveLoop drops all RPC subscriptions except the alive-loop's.
+ UnsubscribeAllExceptAliveLoop()
+ // ConfiguredChainID returns the chain ID this node was configured for.
+ ConfiguredChainID() CHAIN_ID
+ // Order returns the node's configured selection priority.
+ Order() int32
+ Start(context.Context) error
+ Close() error
+}
+
+// node is the default Node implementation: a state machine wrapping a single
+// RPC client, tracking liveness state plus the latest observed block number
+// and total difficulty.
+type node[
+ CHAIN_ID types.ID,
+ HEAD Head,
+ RPC NodeClient[CHAIN_ID, HEAD],
+] struct {
+ services.StateMachine
+ lfcLog logger.Logger // lifecycle logger
+ name string
+ id int32
+ chainID CHAIN_ID
+ nodePoolCfg NodeConfig
+ noNewHeadsThreshold time.Duration
+ order int32
+ chainFamily string
+
+ ws url.URL // websocket endpoint
+ http *url.URL // optional http endpoint; nil when unset
+
+ rpc RPC
+
+ stateMu sync.RWMutex // protects state* fields
+ state nodeState
+ // Each node is tracking the last received head number and total difficulty
+ stateLatestBlockNumber int64
+ stateLatestTotalDifficulty *utils.Big
+
+ // nodeCtx is the node lifetime's context
+ nodeCtx context.Context
+ // cancelNodeCtx cancels nodeCtx when stopping the node
+ cancelNodeCtx context.CancelFunc
+ // wg waits for subsidiary goroutines
+ wg sync.WaitGroup
+
+ // nLiveNodes is a passed in function that allows this node to:
+ // 1. see how many live nodes there are in total, so we can prevent the last alive node in a pool from being
+ // moved to out-of-sync state. It is better to have one out-of-sync node than no nodes at all.
+ // 2. compare against the highest head (by number or difficulty) to ensure we don't fall behind too far.
+ nLiveNodes func() (count int, blockNumber int64, totalDifficulty *utils.Big)
+}
+
+// NewNode constructs a node in the undialed state; Start must be called to
+// dial and verify it. httpuri may be nil when only websocket is configured.
+func NewNode[
+ CHAIN_ID types.ID,
+ HEAD Head,
+ RPC NodeClient[CHAIN_ID, HEAD],
+](
+ nodeCfg NodeConfig,
+ noNewHeadsThreshold time.Duration,
+ lggr logger.Logger,
+ wsuri url.URL,
+ httpuri *url.URL,
+ name string,
+ id int32,
+ chainID CHAIN_ID,
+ nodeOrder int32,
+ rpc RPC,
+ chainFamily string,
+) Node[CHAIN_ID, HEAD, RPC] {
+ n := new(node[CHAIN_ID, HEAD, RPC])
+ n.name = name
+ n.id = id
+ n.chainID = chainID
+ n.nodePoolCfg = nodeCfg
+ n.noNewHeadsThreshold = noNewHeadsThreshold
+ n.ws = wsuri
+ n.order = nodeOrder
+ if httpuri != nil {
+ n.http = httpuri
+ }
+ // nodeCtx lives for the whole node lifetime; cancelled by close().
+ n.nodeCtx, n.cancelNodeCtx = context.WithCancel(context.Background())
+ lggr = lggr.Named("Node").With(
+ "nodeTier", Primary.String(),
+ "nodeName", name,
+ "node", n.String(),
+ "chainID", chainID,
+ "nodeOrder", n.order,
+ )
+ n.lfcLog = lggr.Named("Lifecycle")
+ // -1 marks "no head received yet".
+ n.stateLatestBlockNumber = -1
+ n.rpc = rpc
+ n.chainFamily = chainFamily
+ return n
+}
+
+// String renders tier, name and ws URL for logs, appending the http URL when set.
+func (n *node[CHAIN_ID, HEAD, RPC]) String() string {
+ s := fmt.Sprintf("(%s)%s:%s", Primary.String(), n.name, n.ws.String())
+ if n.http != nil {
+ s = s + fmt.Sprintf(":%s", n.http.String())
+ }
+ return s
+}
+
+// ConfiguredChainID returns the chain ID this node was constructed with.
+func (n *node[CHAIN_ID, HEAD, RPC]) ConfiguredChainID() (chainID CHAIN_ID) {
+ return n.chainID
+}
+
+// Name returns the node's unique identifier.
+func (n *node[CHAIN_ID, HEAD, RPC]) Name() string {
+ return n.name
+}
+
+// RPC returns the underlying RPC client.
+func (n *node[CHAIN_ID, HEAD, RPC]) RPC() RPC {
+ return n.rpc
+}
+
+// SubscribersCount proxies the RPC client's current subscription count.
+func (n *node[CHAIN_ID, HEAD, RPC]) SubscribersCount() int32 {
+ return n.rpc.SubscribersCount()
+}
+
+// UnsubscribeAllExceptAliveLoop proxies to the RPC client, dropping all
+// subscriptions except the alive-loop's.
+func (n *node[CHAIN_ID, HEAD, RPC]) UnsubscribeAllExceptAliveLoop() {
+ n.rpc.UnsubscribeAllExceptAliveLoop()
+}
+
+// Close shuts the node down exactly once (idempotent via StopOnce).
+func (n *node[CHAIN_ID, HEAD, RPC]) Close() error {
+ return n.StopOnce(n.name, n.close)
+}
+
+// close cancels the node context, marks the node closed, and — after
+// releasing stateMu — waits for lifecycle goroutines before tearing down the
+// RPC client.
+func (n *node[CHAIN_ID, HEAD, RPC]) close() error {
+ // Deferred so it runs after the state flip below and outside the lock:
+ // goroutines exit once nodeCtx is cancelled, then the RPC is closed.
+ defer func() {
+ n.wg.Wait()
+ n.rpc.Close()
+ }()
+
+ n.stateMu.Lock()
+ defer n.stateMu.Unlock()
+
+ n.cancelNodeCtx()
+ n.state = nodeStateClosed
+ return nil
+}
+
+// Start dials and verifies the node
+// Should only be called once in a node's lifecycle
+// Return value is necessary to conform to interface but this will never
+// actually return an error.
+func (n *node[CHAIN_ID, HEAD, RPC]) Start(startCtx context.Context) error {
+ return n.StartOnce(n.name, func() error {
+ // Dial/verify failures are recorded as state transitions, not errors.
+ n.start(startCtx)
+ return nil
+ })
+}
+
+// start initially dials the node and verifies chain ID
+// This spins off lifecycle goroutines.
+// Not thread-safe.
+// Node lifecycle is synchronous: only one goroutine should be running at a
+// time.
+func (n *node[CHAIN_ID, HEAD, RPC]) start(startCtx context.Context) {
+ // state is read without stateMu here; safe per the single-goroutine
+ // lifecycle contract stated above.
+ if n.state != nodeStateUndialed {
+ panic(fmt.Sprintf("cannot dial node with state %v", n.state))
+ }
+
+ if err := n.rpc.Dial(startCtx); err != nil {
+ n.lfcLog.Errorw("Dial failed: Node is unreachable", "err", err)
+ n.declareUnreachable()
+ return
+ }
+ n.setState(nodeStateDialed)
+
+ // A chain-ID mismatch is a distinct terminal-ish state from a generic
+ // verification failure (unreachable).
+ if err := n.verify(startCtx); errors.Is(err, errInvalidChainID) {
+ n.lfcLog.Errorw("Verify failed: Node has the wrong chain ID", "err", err)
+ n.declareInvalidChainID()
+ return
+ } else if err != nil {
+ n.lfcLog.Errorw(fmt.Sprintf("Verify failed: %v", err), "err", err)
+ n.declareUnreachable()
+ return
+ }
+
+ n.declareAlive()
+}
+
+// verify checks that all connections to eth nodes match the given chain ID
+// Not thread-safe
+// Pure verify: does not mutate node "state" field.
+func (n *node[CHAIN_ID, HEAD, RPC]) verify(callerCtx context.Context) (err error) {
+ promPoolRPCNodeVerifies.WithLabelValues(n.chainFamily, n.chainID.String(), n.name).Inc()
+ promFailed := func() {
+ promPoolRPCNodeVerifiesFailed.WithLabelValues(n.chainFamily, n.chainID.String(), n.name).Inc()
+ }
+
+ // Verification is only meaningful from these states; anything else is a
+ // lifecycle programming error.
+ st := n.State()
+ switch st {
+ case nodeStateDialed, nodeStateOutOfSync, nodeStateInvalidChainID:
+ default:
+ panic(fmt.Sprintf("cannot verify node in state %v", st))
+ }
+
+ var chainID CHAIN_ID
+ if chainID, err = n.rpc.ChainID(callerCtx); err != nil {
+ promFailed()
+ return errors.Wrapf(err, "failed to verify chain ID for node %s", n.name)
+ } else if chainID.String() != n.chainID.String() {
+ promFailed()
+ return errors.Wrapf(
+ errInvalidChainID,
+ "rpc ChainID doesn't match local chain ID: RPC ID=%s, local ID=%s, node name=%s",
+ chainID.String(),
+ n.chainID.String(),
+ n.name,
+ )
+ }
+
+ promPoolRPCNodeVerifiesSuccess.WithLabelValues(n.chainFamily, n.chainID.String(), n.name).Inc()
+
+ return nil
+}
+
+// disconnectAll disconnects all clients connected to the node
+// WARNING: NOT THREAD-SAFE
+// This must be called from within the n.stateMu lock
+func (n *node[CHAIN_ID, HEAD, RPC]) disconnectAll() {
+ n.rpc.DisconnectAll()
+}
+
+// Order returns the node's configured selection priority.
+func (n *node[CHAIN_ID, HEAD, RPC]) Order() int32 {
+ return n.order
+}
diff --git a/common/client/node_fsm.go b/common/client/node_fsm.go
new file mode 100644
index 00000000000..d4fc19140e9
--- /dev/null
+++ b/common/client/node_fsm.go
@@ -0,0 +1,266 @@
+package client
+
+import (
+ "fmt"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
+)
+
+// Counters tracking FSM state transitions, labelled by chain ID and node name.
+// NOTE(review): the verify counters used in node.go are recorded with an
+// additional chainFamily label; these transition counters are not — confirm
+// the differing label sets are intentional.
+var (
+	promPoolRPCNodeTransitionsToAlive = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name: "pool_rpc_node_num_transitions_to_alive",
+		Help: transitionString(nodeStateAlive),
+	}, []string{"chainID", "nodeName"})
+	promPoolRPCNodeTransitionsToInSync = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name: "pool_rpc_node_num_transitions_to_in_sync",
+		Help: fmt.Sprintf("%s to %s", transitionString(nodeStateOutOfSync), nodeStateAlive),
+	}, []string{"chainID", "nodeName"})
+	promPoolRPCNodeTransitionsToOutOfSync = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name: "pool_rpc_node_num_transitions_to_out_of_sync",
+		Help: transitionString(nodeStateOutOfSync),
+	}, []string{"chainID", "nodeName"})
+	promPoolRPCNodeTransitionsToUnreachable = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name: "pool_rpc_node_num_transitions_to_unreachable",
+		Help: transitionString(nodeStateUnreachable),
+	}, []string{"chainID", "nodeName"})
+	promPoolRPCNodeTransitionsToInvalidChainID = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name: "pool_rpc_node_num_transitions_to_invalid_chain_id",
+		Help: transitionString(nodeStateInvalidChainID),
+	}, []string{"chainID", "nodeName"})
+	// NOTE(review): this counter is declared but no transitionToUnusable is
+	// visible in this file — presumably incremented elsewhere; confirm.
+	promPoolRPCNodeTransitionsToUnusable = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name: "pool_rpc_node_num_transitions_to_unusable",
+		Help: transitionString(nodeStateUnusable),
+	}, []string{"chainID", "nodeName"})
+)
+
+// nodeState represents the current state of the node
+// Node is a FSM (finite state machine)
+type nodeState int
+
+// String returns a human-readable name for the state; values outside the
+// known set fall back to a numeric "nodeState(<n>)" form.
+func (s nodeState) String() string {
+	// Sparse array keyed by the state constants declared below; an unnamed
+	// or out-of-range value falls through to the numeric fallback.
+	names := [...]string{
+		nodeStateUndialed:       "Undialed",
+		nodeStateDialed:         "Dialed",
+		nodeStateInvalidChainID: "InvalidChainID",
+		nodeStateAlive:          "Alive",
+		nodeStateUnreachable:    "Unreachable",
+		nodeStateUnusable:       "Unusable",
+		nodeStateOutOfSync:      "OutOfSync",
+		nodeStateClosed:         "Closed",
+	}
+	if s >= 0 && int(s) < len(names) && names[s] != "" {
+		return names[s]
+	}
+	return fmt.Sprintf("nodeState(%d)", s)
+}
+
+// GoString prints a prettier state
+func (s nodeState) GoString() string {
+	return fmt.Sprintf("nodeState%s(%d)", s.String(), s)
+}
+
+// NOTE: the iota ordering below is load-bearing — init() builds allNodeStates
+// by iterating 0..nodeStateLen, so new states must be inserted before
+// nodeStateLen.
+const (
+	// nodeStateUndialed is the first state of a virgin node
+	nodeStateUndialed = nodeState(iota)
+	// nodeStateDialed is after a node has successfully dialed but before it has verified the correct chain ID
+	nodeStateDialed
+	// nodeStateInvalidChainID is after chain ID verification failed
+	nodeStateInvalidChainID
+	// nodeStateAlive is a healthy node after chain ID verification succeeded
+	nodeStateAlive
+	// nodeStateUnreachable is a node that cannot be dialed or has disconnected
+	nodeStateUnreachable
+	// nodeStateOutOfSync is a node that is accepting connections but exceeded
+	// the failure threshold without sending any new heads. It will be
+	// disconnected, then put into a revive loop and re-awakened after redial
+	// if a new head arrives
+	nodeStateOutOfSync
+	// nodeStateUnusable is a sendonly node that has an invalid URL that can never be reached
+	nodeStateUnusable
+	// nodeStateClosed is after the connection has been closed and the node is at the end of its lifecycle
+	nodeStateClosed
+	// nodeStateLen tracks the number of states
+	nodeStateLen
+)
+
+// allNodeStates represents all possible states a node can be in
+// (used by the FSM tests to enumerate invalid transitions)
+var allNodeStates []nodeState
+
+// init populates allNodeStates from the iota-ordered constants above.
+func init() {
+	for s := nodeState(0); s < nodeStateLen; s++ {
+		allNodeStates = append(allNodeStates, s)
+	}
+}
+
+// FSM methods
+
+// State allows reading the current state of the node.
+func (n *node[CHAIN_ID, HEAD, RPC]) State() nodeState {
+	n.stateMu.RLock()
+	defer n.stateMu.RUnlock()
+	return n.state
+}
+
+// StateAndLatest returns the current state together with the latest block
+// number and total difficulty received from this node (as recorded by
+// setLatestReceived), all read under a single lock acquisition.
+func (n *node[CHAIN_ID, HEAD, RPC]) StateAndLatest() (nodeState, int64, *utils.Big) {
+	n.stateMu.RLock()
+	defer n.stateMu.RUnlock()
+	return n.state, n.stateLatestBlockNumber, n.stateLatestTotalDifficulty
+}
+
+// setState is only used by internal state management methods.
+// This is low-level; care should be taken by the caller to ensure the new state is a valid transition.
+// State changes should always be synchronous: only one goroutine at a time should change state.
+// n.stateMu should not be locked for long periods of time because external clients expect a timely response from n.State()
+func (n *node[CHAIN_ID, HEAD, RPC]) setState(s nodeState) {
+	n.stateMu.Lock()
+	defer n.stateMu.Unlock()
+	n.state = s
+}
+
+// declareXXX methods change the state and pass control off to the new state's
+// management goroutine
+
+// declareAlive transitions the node into the Alive state and spawns aliveLoop
+// as the state's management goroutine.
+func (n *node[CHAIN_ID, HEAD, RPC]) declareAlive() {
+	n.transitionToAlive(func() {
+		n.lfcLog.Infow("RPC Node is online", "nodeState", n.state)
+		n.wg.Add(1)
+		go n.aliveLoop()
+	})
+}
+
+// transitionToAlive moves a freshly verified node (Dialed or InvalidChainID)
+// into Alive, then runs fn while still holding the state lock.
+// A Closed node is left untouched; any other source state is a programming
+// error and panics.
+func (n *node[CHAIN_ID, HEAD, RPC]) transitionToAlive(fn func()) {
+	promPoolRPCNodeTransitionsToAlive.WithLabelValues(n.chainID.String(), n.name).Inc()
+	n.stateMu.Lock()
+	defer n.stateMu.Unlock()
+	switch n.state {
+	case nodeStateClosed:
+		// no-op: node has already been shut down
+		return
+	case nodeStateDialed, nodeStateInvalidChainID:
+		n.state = nodeStateAlive
+	default:
+		panic(transitionFail(n.state, nodeStateAlive))
+	}
+	fn()
+}
+
+// declareInSync puts a node back into Alive state, allowing it to be used by
+// pool consumers again
+func (n *node[CHAIN_ID, HEAD, RPC]) declareInSync() {
+	n.transitionToInSync(func() {
+		n.lfcLog.Infow("RPC Node is back in sync", "nodeState", n.state)
+		n.wg.Add(1)
+		go n.aliveLoop()
+	})
+}
+
+// transitionToInSync moves an OutOfSync node back to Alive, recording both the
+// to-alive and to-in-sync transition metrics, then runs fn under the state
+// lock. A Closed node is left untouched; any other source state panics.
+func (n *node[CHAIN_ID, HEAD, RPC]) transitionToInSync(fn func()) {
+	promPoolRPCNodeTransitionsToAlive.WithLabelValues(n.chainID.String(), n.name).Inc()
+	promPoolRPCNodeTransitionsToInSync.WithLabelValues(n.chainID.String(), n.name).Inc()
+	n.stateMu.Lock()
+	defer n.stateMu.Unlock()
+	switch n.state {
+	case nodeStateClosed:
+		return
+	case nodeStateOutOfSync:
+		n.state = nodeStateAlive
+	default:
+		panic(transitionFail(n.state, nodeStateAlive))
+	}
+	fn()
+}
+
+// declareOutOfSync puts a node into OutOfSync state, disconnecting all current
+// clients and making it unavailable for use until back in-sync.
+func (n *node[CHAIN_ID, HEAD, RPC]) declareOutOfSync(isOutOfSync func(num int64, td *utils.Big) bool) {
+	n.transitionToOutOfSync(func() {
+		n.lfcLog.Errorw("RPC Node is out of sync", "nodeState", n.state)
+		n.wg.Add(1)
+		go n.outOfSyncLoop(isOutOfSync)
+	})
+}
+
+// transitionToOutOfSync moves an Alive node into OutOfSync, disconnecting all
+// of its clients, then runs fn under the state lock. A Closed node is left
+// untouched; any other source state panics.
+func (n *node[CHAIN_ID, HEAD, RPC]) transitionToOutOfSync(fn func()) {
+	promPoolRPCNodeTransitionsToOutOfSync.WithLabelValues(n.chainID.String(), n.name).Inc()
+	n.stateMu.Lock()
+	defer n.stateMu.Unlock()
+	switch n.state {
+	case nodeStateClosed:
+		return
+	case nodeStateAlive:
+		n.disconnectAll()
+		n.state = nodeStateOutOfSync
+	default:
+		panic(transitionFail(n.state, nodeStateOutOfSync))
+	}
+	fn()
+}
+
+// declareUnreachable puts the node into Unreachable and starts the redial loop.
+func (n *node[CHAIN_ID, HEAD, RPC]) declareUnreachable() {
+	n.transitionToUnreachable(func() {
+		n.lfcLog.Errorw("RPC Node is unreachable", "nodeState", n.state)
+		n.wg.Add(1)
+		go n.unreachableLoop()
+	})
+}
+
+// transitionToUnreachable moves the node into Unreachable from any live or
+// in-progress state, disconnecting all of its clients, then runs fn under the
+// state lock. A Closed node is left untouched; any other source state panics.
+func (n *node[CHAIN_ID, HEAD, RPC]) transitionToUnreachable(fn func()) {
+	promPoolRPCNodeTransitionsToUnreachable.WithLabelValues(n.chainID.String(), n.name).Inc()
+	n.stateMu.Lock()
+	defer n.stateMu.Unlock()
+	switch n.state {
+	case nodeStateClosed:
+		return
+	case nodeStateUndialed, nodeStateDialed, nodeStateAlive, nodeStateOutOfSync, nodeStateInvalidChainID:
+		n.disconnectAll()
+		n.state = nodeStateUnreachable
+	default:
+		panic(transitionFail(n.state, nodeStateUnreachable))
+	}
+	fn()
+}
+
+// declareInvalidChainID puts the node into InvalidChainID and starts the
+// periodic chain-ID re-check loop.
+func (n *node[CHAIN_ID, HEAD, RPC]) declareInvalidChainID() {
+	n.transitionToInvalidChainID(func() {
+		n.lfcLog.Errorw("RPC Node has the wrong chain ID", "nodeState", n.state)
+		n.wg.Add(1)
+		go n.invalidChainIDLoop()
+	})
+}
+
+// transitionToInvalidChainID moves a Dialed or OutOfSync node into
+// InvalidChainID, disconnecting all of its clients, then runs fn under the
+// state lock. A Closed node is left untouched; any other source state panics.
+func (n *node[CHAIN_ID, HEAD, RPC]) transitionToInvalidChainID(fn func()) {
+	promPoolRPCNodeTransitionsToInvalidChainID.WithLabelValues(n.chainID.String(), n.name).Inc()
+	n.stateMu.Lock()
+	defer n.stateMu.Unlock()
+	switch n.state {
+	case nodeStateClosed:
+		return
+	case nodeStateDialed, nodeStateOutOfSync:
+		n.disconnectAll()
+		n.state = nodeStateInvalidChainID
+	default:
+		panic(transitionFail(n.state, nodeStateInvalidChainID))
+	}
+	fn()
+}
+
+// transitionString builds the Help text for the transition counters above.
+func transitionString(state nodeState) string {
+	return fmt.Sprintf("Total number of times node has transitioned to %s", state)
+}
+
+// transitionFail builds the panic message for an invalid FSM transition.
+func transitionFail(from, to nodeState) string {
+	return fmt.Sprintf("cannot transition from %#v to %#v", from, to)
+}
diff --git a/common/client/node_fsm_test.go b/common/client/node_fsm_test.go
new file mode 100644
index 00000000000..87e90846699
--- /dev/null
+++ b/common/client/node_fsm_test.go
@@ -0,0 +1,108 @@
+package client
+
+import (
+ "slices"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/smartcontractkit/chainlink/v2/common/types"
+)
+
+// fnMock counts invocations so tests can assert whether a transition callback
+// was executed.
+type fnMock struct{ calls int }
+
+// Fn is the callback handed to the transition function under test.
+func (fm *fnMock) Fn() {
+	fm.calls++
+}
+
+// AssertNotCalled fails the test if the callback ran at least once.
+func (fm *fnMock) AssertNotCalled(t *testing.T) {
+	assert.Equal(t, 0, fm.calls)
+}
+
+// AssertCalled fails the test unless the callback ran at least once.
+func (fm *fnMock) AssertCalled(t *testing.T) {
+	assert.Greater(t, fm.calls, 0)
+}
+
+// newTestTransitionNode builds a minimal test node backed by the given mocked
+// RPC client. rpc may be nil when the code under test never touches it.
+func newTestTransitionNode(t *testing.T, rpc *mockNodeClient[types.ID, Head]) testNode {
+	return newTestNode(t, testNodeOpts{rpc: rpc})
+}
+
+// TestUnit_Node_StateTransitions exercises setState and each transitionToXXX
+// method: allowed source states must reach the destination and run the
+// callback; all other states must panic (verified by testTransition).
+func TestUnit_Node_StateTransitions(t *testing.T) {
+	t.Parallel()
+
+	t.Run("setState", func(t *testing.T) {
+		n := newTestTransitionNode(t, nil)
+		assert.Equal(t, nodeStateUndialed, n.State())
+		n.setState(nodeStateAlive)
+		assert.Equal(t, nodeStateAlive, n.State())
+		n.setState(nodeStateUndialed)
+		assert.Equal(t, nodeStateUndialed, n.State())
+	})
+
+	t.Run("transitionToAlive", func(t *testing.T) {
+		const destinationState = nodeStateAlive
+		allowedStates := []nodeState{nodeStateDialed, nodeStateInvalidChainID}
+		rpc := newMockNodeClient[types.ID, Head](t)
+		testTransition(t, rpc, testNode.transitionToAlive, destinationState, allowedStates...)
+	})
+
+	t.Run("transitionToInSync", func(t *testing.T) {
+		const destinationState = nodeStateAlive
+		allowedStates := []nodeState{nodeStateOutOfSync}
+		rpc := newMockNodeClient[types.ID, Head](t)
+		testTransition(t, rpc, testNode.transitionToInSync, destinationState, allowedStates...)
+	})
+	t.Run("transitionToOutOfSync", func(t *testing.T) {
+		const destinationState = nodeStateOutOfSync
+		allowedStates := []nodeState{nodeStateAlive}
+		rpc := newMockNodeClient[types.ID, Head](t)
+		// transitionToOutOfSync disconnects clients once per allowed state
+		rpc.On("DisconnectAll").Once()
+		testTransition(t, rpc, testNode.transitionToOutOfSync, destinationState, allowedStates...)
+	})
+	t.Run("transitionToUnreachable", func(t *testing.T) {
+		const destinationState = nodeStateUnreachable
+		allowedStates := []nodeState{nodeStateUndialed, nodeStateDialed, nodeStateAlive, nodeStateOutOfSync, nodeStateInvalidChainID}
+		rpc := newMockNodeClient[types.ID, Head](t)
+		rpc.On("DisconnectAll").Times(len(allowedStates))
+		testTransition(t, rpc, testNode.transitionToUnreachable, destinationState, allowedStates...)
+	})
+	t.Run("transitionToInvalidChain", func(t *testing.T) {
+		const destinationState = nodeStateInvalidChainID
+		allowedStates := []nodeState{nodeStateDialed, nodeStateOutOfSync}
+		rpc := newMockNodeClient[types.ID, Head](t)
+		rpc.On("DisconnectAll").Times(len(allowedStates))
+		testTransition(t, rpc, testNode.transitionToInvalidChainID, destinationState, allowedStates...)
+	})
+}
+
+// testTransition drives a single transitionToXXX method through three phases:
+//  1. every allowed source state must reach destinationState and invoke fn,
+//  2. transitioning from Closed must be a silent no-op,
+//  3. every remaining state must panic and leave the state unchanged.
+func testTransition(t *testing.T, rpc *mockNodeClient[types.ID, Head], transition func(node testNode, fn func()), destinationState nodeState, allowedStates ...nodeState) {
+	node := newTestTransitionNode(t, rpc)
+	for _, allowedState := range allowedStates {
+		m := new(fnMock)
+		node.setState(allowedState)
+		transition(node, m.Fn)
+		assert.Equal(t, destinationState, node.State(), "Expected node to successfully transition from %s to %s state", allowedState, destinationState)
+		m.AssertCalled(t)
+	}
+	// noop on attempt to transition from Closed state
+	m := new(fnMock)
+	node.setState(nodeStateClosed)
+	transition(node, m.Fn)
+	m.AssertNotCalled(t)
+	assert.Equal(t, nodeStateClosed, node.State(), "Expected node to remain in closed state on transition attempt")
+
+	// loop variable renamed from `nodeState`, which shadowed the type of the
+	// same name inside the loop body
+	for _, state := range allNodeStates {
+		if slices.Contains(allowedStates, state) || state == nodeStateClosed {
+			continue
+		}
+
+		m := new(fnMock)
+		node.setState(state)
+		assert.Panics(t, func() {
+			transition(node, m.Fn)
+		}, "Expected transition from `%s` to `%s` to panic", state, destinationState)
+		m.AssertNotCalled(t)
+		assert.Equal(t, state, node.State(), "Expected node to remain in initial state on invalid transition")
+	}
+}
diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go
new file mode 100644
index 00000000000..4193560e296
--- /dev/null
+++ b/common/client/node_lifecycle.go
@@ -0,0 +1,435 @@
+package client
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "time"
+
+ "github.com/pkg/errors"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
+)
+
+// Gauges/counters recording head and poll activity per RPC node, labelled by
+// chain ID and node name.
+var (
+	promPoolRPCNodeHighestSeenBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{
+		Name: "pool_rpc_node_highest_seen_block",
+		Help: "The highest seen block for the given RPC node",
+	}, []string{"chainID", "nodeName"})
+	promPoolRPCNodeNumSeenBlocks = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name: "pool_rpc_node_num_seen_blocks",
+		Help: "The total number of new blocks seen by the given RPC node",
+	}, []string{"chainID", "nodeName"})
+	promPoolRPCNodePolls = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name: "pool_rpc_node_polls_total",
+		Help: "The total number of poll checks for the given RPC node",
+	}, []string{"chainID", "nodeName"})
+	promPoolRPCNodePollsFailed = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name: "pool_rpc_node_polls_failed",
+		Help: "The total number of failed poll checks for the given RPC node",
+	}, []string{"chainID", "nodeName"})
+	promPoolRPCNodePollsSuccess = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name: "pool_rpc_node_polls_success",
+		Help: "The total number of successful poll checks for the given RPC node",
+	}, []string{"chainID", "nodeName"})
+)
+
+// zombieNodeCheckInterval controls how often to re-check to see if we need to
+// state change in case we have to force a state transition due to no available
+// nodes.
+// NOTE: This only applies to out-of-sync nodes if they are the last available node
+func zombieNodeCheckInterval(noNewHeadsThreshold time.Duration) time.Duration {
+	// Use the configured threshold when it is enabled (> 0) and no longer
+	// than QueryTimeout; otherwise fall back to QueryTimeout. Jitter avoids
+	// synchronized re-checks across nodes.
+	if noNewHeadsThreshold > 0 && noNewHeadsThreshold <= QueryTimeout {
+		return utils.WithJitter(noNewHeadsThreshold)
+	}
+	return utils.WithJitter(QueryTimeout)
+}
+
+// setLatestReceived records the latest block number and total difficulty seen
+// from this node; the values are read back via StateAndLatest.
+func (n *node[CHAIN_ID, HEAD, RPC]) setLatestReceived(blockNumber int64, totalDifficulty *utils.Big) {
+	n.stateMu.Lock()
+	defer n.stateMu.Unlock()
+	n.stateLatestBlockNumber = blockNumber
+	n.stateLatestTotalDifficulty = totalDifficulty
+}
+
+// Messages appended to critical logs when disabling this node would leave the
+// pool without any live RPC endpoints.
+const (
+	msgCannotDisable = "but cannot disable this connection because there are no other RPC endpoints, or all other RPC endpoints are dead."
+	msgDegradedState = "Chainlink is now operating in a degraded state and urgent action is required to resolve the issue"
+)
+
+// rpcSubscriptionMethodNewHeads is the subscription method used for head tracking.
+const rpcSubscriptionMethodNewHeads = "newHeads"
+
+// Node is a FSM
+// Each state has a loop that goes with it, which monitors the node and moves it into another state as necessary.
+// Only one loop must run at a time.
+// Each loop passes control onto the next loop as it exits, except when the node is Closed which terminates the loop permanently.
+
+// aliveLoop handles node lifecycle for the ALIVE state.
+// Should only be run ONCE per node, after a successful Dial.
+// It subscribes to new heads and (optionally) polls the RPC client version,
+// transitioning the node to Unreachable or OutOfSync when liveness checks fail.
+func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() {
+	defer n.wg.Done()
+
+	{
+		// sanity check
+		state := n.State()
+		switch state {
+		case nodeStateAlive:
+		case nodeStateClosed:
+			return
+		default:
+			panic(fmt.Sprintf("aliveLoop can only run for node in Alive state, got: %s", state))
+		}
+	}
+
+	noNewHeadsTimeoutThreshold := n.noNewHeadsThreshold
+	pollFailureThreshold := n.nodePoolCfg.PollFailureThreshold()
+	pollInterval := n.nodePoolCfg.PollInterval()
+
+	lggr := n.lfcLog.Named("Alive").With("noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold, "pollInterval", pollInterval, "pollFailureThreshold", pollFailureThreshold)
+	lggr.Tracew("Alive loop starting", "nodeState", n.State())
+
+	headsC := make(chan HEAD)
+	sub, err := n.rpc.Subscribe(n.nodeCtx, headsC, rpcSubscriptionMethodNewHeads)
+	if err != nil {
+		// FIX: include the subscription error itself; previously only the
+		// node state was logged here.
+		lggr.Errorw("Initial subscribe for heads failed", "nodeState", n.State(), "err", err)
+		n.declareUnreachable()
+		return
+	}
+	// TODO: nit fix. If multinode switches primary node before we set sub as AliveSub, sub will be closed and we'll
+	// falsely transition this node to unreachable state
+	n.rpc.SetAliveLoopSub(sub)
+	defer sub.Unsubscribe()
+
+	// Optional head-liveness check: if no new head arrives within the
+	// threshold, the node is declared out of sync.
+	var outOfSyncT *time.Ticker
+	var outOfSyncTC <-chan time.Time
+	if noNewHeadsTimeoutThreshold > 0 {
+		lggr.Debugw("Head liveness checking enabled", "nodeState", n.State())
+		outOfSyncT = time.NewTicker(noNewHeadsTimeoutThreshold)
+		defer outOfSyncT.Stop()
+		outOfSyncTC = outOfSyncT.C
+	} else {
+		lggr.Debug("Head liveness checking disabled")
+	}
+
+	// Optional poll check: periodically request the client version as a ping.
+	var pollCh <-chan time.Time
+	if pollInterval > 0 {
+		lggr.Debug("Polling enabled")
+		pollT := time.NewTicker(pollInterval)
+		defer pollT.Stop()
+		pollCh = pollT.C
+		if pollFailureThreshold > 0 {
+			// polling can be enabled with no threshold to enable polling but
+			// the node will not be marked offline regardless of the number of
+			// poll failures
+			lggr.Debug("Polling liveness checking enabled")
+		}
+	} else {
+		lggr.Debug("Polling disabled")
+	}
+
+	_, highestReceivedBlockNumber, _ := n.StateAndLatest()
+	var pollFailures uint32
+
+	for {
+		select {
+		case <-n.nodeCtx.Done():
+			return
+		case <-pollCh:
+			promPoolRPCNodePolls.WithLabelValues(n.chainID.String(), n.name).Inc()
+			lggr.Tracew("Polling for version", "nodeState", n.State(), "pollFailures", pollFailures)
+			ctx, cancel := context.WithTimeout(n.nodeCtx, pollInterval)
+			// FIX: dropped the redundant `var version string` declaration that
+			// this := previously shadow-assigned.
+			version, err := n.RPC().ClientVersion(ctx)
+			cancel()
+			if err != nil {
+				// prevent overflow
+				if pollFailures < math.MaxUint32 {
+					promPoolRPCNodePollsFailed.WithLabelValues(n.chainID.String(), n.name).Inc()
+					pollFailures++
+				}
+				lggr.Warnw(fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", n.String()), "err", err, "pollFailures", pollFailures, "nodeState", n.State())
+			} else {
+				lggr.Debugw("Version poll successful", "nodeState", n.State(), "clientVersion", version)
+				promPoolRPCNodePollsSuccess.WithLabelValues(n.chainID.String(), n.name).Inc()
+				pollFailures = 0
+			}
+			if pollFailureThreshold > 0 && pollFailures >= pollFailureThreshold {
+				lggr.Errorw(fmt.Sprintf("RPC endpoint failed to respond to %d consecutive polls", pollFailures), "pollFailures", pollFailures, "nodeState", n.State())
+				if n.nLiveNodes != nil {
+					if l, _, _ := n.nLiveNodes(); l < 2 {
+						// never disable the last live node; log noisily instead
+						lggr.Criticalf("RPC endpoint failed to respond to polls; %s %s", msgCannotDisable, msgDegradedState)
+						continue
+					}
+				}
+				n.declareUnreachable()
+				return
+			}
+			_, num, td := n.StateAndLatest()
+			if outOfSync, liveNodes := n.syncStatus(num, td); outOfSync {
+				// note: there must be another live node for us to be out of sync
+				lggr.Errorw("RPC endpoint has fallen behind", "blockNumber", num, "totalDifficulty", td, "nodeState", n.State())
+				if liveNodes < 2 {
+					lggr.Criticalf("RPC endpoint has fallen behind; %s %s", msgCannotDisable, msgDegradedState)
+					continue
+				}
+				n.declareOutOfSync(n.isOutOfSync)
+				return
+			}
+		case bh, open := <-headsC:
+			if !open {
+				lggr.Errorw("Subscription channel unexpectedly closed", "nodeState", n.State())
+				n.declareUnreachable()
+				return
+			}
+			promPoolRPCNodeNumSeenBlocks.WithLabelValues(n.chainID.String(), n.name).Inc()
+			lggr.Tracew("Got head", "head", bh)
+			if bh.BlockNumber() > highestReceivedBlockNumber {
+				promPoolRPCNodeHighestSeenBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(bh.BlockNumber()))
+				lggr.Tracew("Got higher block number, resetting timer", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.State())
+				highestReceivedBlockNumber = bh.BlockNumber()
+			} else {
+				lggr.Tracew("Ignoring previously seen block number", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.State())
+			}
+			if outOfSyncT != nil {
+				outOfSyncT.Reset(noNewHeadsTimeoutThreshold)
+			}
+			n.setLatestReceived(bh.BlockNumber(), bh.BlockDifficulty())
+		case err := <-sub.Err():
+			lggr.Errorw("Subscription was terminated", "err", err, "nodeState", n.State())
+			n.declareUnreachable()
+			return
+		case <-outOfSyncTC:
+			// We haven't received a head on the channel for at least the
+			// threshold amount of time, mark it broken
+			lggr.Errorw(fmt.Sprintf("RPC endpoint detected out of sync; no new heads received for %s (last head received was %v)", noNewHeadsTimeoutThreshold, highestReceivedBlockNumber), "nodeState", n.State(), "latestReceivedBlockNumber", highestReceivedBlockNumber, "noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold)
+			if n.nLiveNodes != nil {
+				if l, _, _ := n.nLiveNodes(); l < 2 {
+					lggr.Criticalf("RPC endpoint detected out of sync; %s %s", msgCannotDisable, msgDegradedState)
+					// We don't necessarily want to wait the full timeout to check again, we should
+					// check regularly and log noisily in this state
+					outOfSyncT.Reset(zombieNodeCheckInterval(n.noNewHeadsThreshold))
+					continue
+				}
+			}
+			n.declareOutOfSync(func(num int64, td *utils.Big) bool { return num < highestReceivedBlockNumber })
+			return
+		}
+	}
+}
+
+// isOutOfSync adapts syncStatus to the single-bool callback shape consumed by
+// declareOutOfSync / outOfSyncLoop.
+func (n *node[CHAIN_ID, HEAD, RPC]) isOutOfSync(num int64, td *utils.Big) (outOfSync bool) {
+	outOfSync, _ = n.syncStatus(num, td)
+	return
+}
+
+// syncStatus returns outOfSync true if num or td is more than SyncThreshold behind the best node.
+// Always returns outOfSync false for SyncThreshold 0.
+// liveNodes is only included when outOfSync is true.
+func (n *node[CHAIN_ID, HEAD, RPC]) syncStatus(num int64, td *utils.Big) (outOfSync bool, liveNodes int) {
+	if n.nLiveNodes == nil {
+		return // skip for tests
+	}
+	threshold := n.nodePoolCfg.SyncThreshold()
+	if threshold == 0 {
+		return // disabled
+	}
+	// Check against best node
+	ln, highest, greatest := n.nLiveNodes()
+	mode := n.nodePoolCfg.SelectionMode()
+	switch mode {
+	case NodeSelectionModeHighestHead, NodeSelectionModeRoundRobin, NodeSelectionModePriorityLevel:
+		// head-number based: compare against the highest block among live nodes
+		return num < highest-int64(threshold), ln
+	case NodeSelectionModeTotalDifficulty:
+		// difficulty based: compare against the greatest total difficulty
+		bigThreshold := utils.NewBigI(int64(threshold))
+		return td.Cmp(greatest.Sub(bigThreshold)) < 0, ln
+	default:
+		panic("unrecognized NodeSelectionMode: " + mode)
+	}
+}
+
+const (
+	msgReceivedBlock = "Received block for RPC node, waiting until back in-sync to mark as live again"
+	msgInSync        = "RPC node back in sync"
+)
+
+// outOfSyncLoop takes an OutOfSync node and waits until isOutOfSync returns
+// false to go back to live status. It redials, re-verifies the chain ID, then
+// watches incoming heads until the node catches up (or is force-revived as the
+// last available node).
+func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td *utils.Big) bool) {
+	defer n.wg.Done()
+
+	{
+		// sanity check
+		state := n.State()
+		switch state {
+		case nodeStateOutOfSync:
+		case nodeStateClosed:
+			return
+		default:
+			panic(fmt.Sprintf("outOfSyncLoop can only run for node in OutOfSync state, got: %s", state))
+		}
+	}
+
+	outOfSyncAt := time.Now()
+
+	lggr := n.lfcLog.Named("OutOfSync")
+	lggr.Debugw("Trying to revive out-of-sync RPC node", "nodeState", n.State())
+
+	// Need to redial since out-of-sync nodes are automatically disconnected
+	if err := n.rpc.Dial(n.nodeCtx); err != nil {
+		// FIX: include the dial error itself; previously only the state was logged.
+		lggr.Errorw("Failed to dial out-of-sync RPC node", "nodeState", n.State(), "err", err)
+		n.declareUnreachable()
+		return
+	}
+
+	// Manually re-verify since out-of-sync nodes are automatically disconnected
+	// NOTE(review): unlike the start/unreachable flows, ANY verify error here
+	// (not just errInvalidChainID) transitions to InvalidChainID — confirm
+	// this asymmetry is intended.
+	if err := n.verify(n.nodeCtx); err != nil {
+		lggr.Errorw(fmt.Sprintf("Failed to verify out-of-sync RPC node: %v", err), "err", err)
+		n.declareInvalidChainID()
+		return
+	}
+
+	ch := make(chan HEAD)
+	sub, err := n.rpc.Subscribe(n.nodeCtx, ch, rpcSubscriptionMethodNewHeads)
+	if err != nil {
+		lggr.Errorw("Failed to subscribe heads on out-of-sync RPC node", "nodeState", n.State(), "err", err)
+		n.declareUnreachable()
+		return
+	}
+	defer sub.Unsubscribe()
+
+	// FIX: success is logged only after the subscription is actually
+	// established; it was previously logged before Subscribe was called.
+	lggr.Tracew("Successfully subscribed to heads feed on out-of-sync RPC node", "nodeState", n.State())
+
+	for {
+		select {
+		case <-n.nodeCtx.Done():
+			return
+		case head, open := <-ch:
+			if !open {
+				// FIX: was lggr.Error with key-value args; Errorw is the
+				// structured variant that accepts them.
+				lggr.Errorw("Subscription channel unexpectedly closed", "nodeState", n.State())
+				n.declareUnreachable()
+				return
+			}
+			n.setLatestReceived(head.BlockNumber(), head.BlockDifficulty())
+			if !isOutOfSync(head.BlockNumber(), head.BlockDifficulty()) {
+				// back in-sync! flip back into alive loop
+				lggr.Infow(fmt.Sprintf("%s: %s. Node was out-of-sync for %s", msgInSync, n.String(), time.Since(outOfSyncAt)), "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "nodeState", n.State())
+				n.declareInSync()
+				return
+			}
+			lggr.Debugw(msgReceivedBlock, "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "nodeState", n.State())
+		case <-time.After(zombieNodeCheckInterval(n.noNewHeadsThreshold)):
+			// Periodic zombie check: if this is the last node, force it back
+			// into the live pool rather than leaving the pool empty.
+			if n.nLiveNodes != nil {
+				if l, _, _ := n.nLiveNodes(); l < 1 {
+					lggr.Critical("RPC endpoint is still out of sync, but there are no other available nodes. This RPC node will be forcibly moved back into the live pool in a degraded state")
+					n.declareInSync()
+					return
+				}
+			}
+		case err := <-sub.Err():
+			lggr.Errorw("Subscription was terminated", "nodeState", n.State(), "err", err)
+			n.declareUnreachable()
+			return
+		}
+	}
+}
+
+// unreachableLoop periodically re-dials an Unreachable node with exponential
+// backoff; on a successful dial it re-verifies the chain ID and transitions to
+// Alive, InvalidChainID, or back to Unreachable accordingly.
+func (n *node[CHAIN_ID, HEAD, RPC]) unreachableLoop() {
+	defer n.wg.Done()
+
+	{
+		// sanity check
+		state := n.State()
+		switch state {
+		case nodeStateUnreachable:
+		case nodeStateClosed:
+			return
+		default:
+			panic(fmt.Sprintf("unreachableLoop can only run for node in Unreachable state, got: %s", state))
+		}
+	}
+
+	unreachableAt := time.Now()
+
+	lggr := n.lfcLog.Named("Unreachable")
+	lggr.Debugw("Trying to revive unreachable RPC node", "nodeState", n.State())
+
+	dialRetryBackoff := utils.NewRedialBackoff()
+
+	for {
+		select {
+		case <-n.nodeCtx.Done():
+			return
+		case <-time.After(dialRetryBackoff.Duration()):
+			lggr.Tracew("Trying to re-dial RPC node", "nodeState", n.State())
+
+			err := n.rpc.Dial(n.nodeCtx)
+			if err != nil {
+				lggr.Errorw(fmt.Sprintf("Failed to redial RPC node; still unreachable: %v", err), "err", err, "nodeState", n.State())
+				continue
+			}
+
+			// Dial succeeded: move to Dialed before verifying, since verify
+			// panics unless the node is Dialed/OutOfSync/InvalidChainID.
+			n.setState(nodeStateDialed)
+
+			err = n.verify(n.nodeCtx)
+
+			if errors.Is(err, errInvalidChainID) {
+				lggr.Errorw("Failed to redial RPC node; remote endpoint returned the wrong chain ID", "err", err)
+				n.declareInvalidChainID()
+				return
+			} else if err != nil {
+				lggr.Errorw(fmt.Sprintf("Failed to redial RPC node; verify failed: %v", err), "err", err)
+				n.declareUnreachable()
+				return
+			}
+
+			lggr.Infow(fmt.Sprintf("Successfully redialled and verified RPC node %s. Node was offline for %s", n.String(), time.Since(unreachableAt)), "nodeState", n.State())
+			n.declareAlive()
+			return
+		}
+	}
+}
+
+// invalidChainIDLoop periodically re-verifies the chain ID of an
+// InvalidChainID node with backoff, transitioning to Alive once the endpoint
+// reports the correct chain ID, or to Unreachable on any other verify error.
+func (n *node[CHAIN_ID, HEAD, RPC]) invalidChainIDLoop() {
+	defer n.wg.Done()
+
+	{
+		// sanity check
+		state := n.State()
+		switch state {
+		case nodeStateInvalidChainID:
+		case nodeStateClosed:
+			return
+		default:
+			panic(fmt.Sprintf("invalidChainIDLoop can only run for node in InvalidChainID state, got: %s", state))
+		}
+	}
+
+	invalidAt := time.Now()
+
+	lggr := n.lfcLog.Named("InvalidChainID")
+	lggr.Debugw(fmt.Sprintf("Periodically re-checking RPC node %s with invalid chain ID", n.String()), "nodeState", n.State())
+
+	chainIDRecheckBackoff := utils.NewRedialBackoff()
+
+	for {
+		select {
+		case <-n.nodeCtx.Done():
+			return
+		case <-time.After(chainIDRecheckBackoff.Duration()):
+			err := n.verify(n.nodeCtx)
+			if errors.Is(err, errInvalidChainID) {
+				// still the wrong chain ID: stay in this state and retry
+				lggr.Errorw("Failed to verify RPC node; remote endpoint returned the wrong chain ID", "err", err)
+				continue
+			} else if err != nil {
+				lggr.Errorw(fmt.Sprintf("Unexpected error while verifying RPC node chain ID; %v", err), "err", err)
+				n.declareUnreachable()
+				return
+			}
+			lggr.Infow(fmt.Sprintf("Successfully verified RPC node. Node was offline for %s", time.Since(invalidAt)), "nodeState", n.State())
+			n.declareAlive()
+			return
+		}
+	}
+}
diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go
new file mode 100644
index 00000000000..564c08bbdcc
--- /dev/null
+++ b/common/client/node_lifecycle_test.go
@@ -0,0 +1,1070 @@
+package client
+
+import (
+ "fmt"
+ "sync/atomic"
+ "testing"
+
+ "github.com/cometbft/cometbft/libs/rand"
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "go.uber.org/zap"
+
+ "github.com/smartcontractkit/chainlink-relay/pkg/utils/tests"
+
+ "github.com/smartcontractkit/chainlink/v2/common/types"
+ "github.com/smartcontractkit/chainlink/v2/common/types/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
+)
+
+func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) {
+ t.Parallel()
+
+ newDialedNode := func(t *testing.T, opts testNodeOpts) testNode {
+ node := newTestNode(t, opts)
+ opts.rpc.On("Close").Return(nil).Once()
+
+ node.setState(nodeStateDialed)
+ return node
+ }
+
+ t.Run("returns on closed", func(t *testing.T) {
+ node := newTestNode(t, testNodeOpts{})
+ node.setState(nodeStateClosed)
+ node.wg.Add(1)
+ node.aliveLoop()
+
+ })
+ t.Run("if initial subscribe fails, transitions to unreachable", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ node := newDialedNode(t, testNodeOpts{
+ rpc: rpc,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+
+ expectedError := errors.New("failed to subscribe to rpc")
+ rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(nil, expectedError).Once()
+ rpc.On("DisconnectAll").Once()
+ // might be called in unreachable loop
+ rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe()
+ node.declareAlive()
+ tests.AssertEventually(t, func() bool {
+ return node.State() == nodeStateUnreachable
+ })
+
+ })
+ t.Run("if remote RPC connection is closed transitions to unreachable", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.WarnLevel)
+ node := newDialedNode(t, testNodeOpts{
+ rpc: rpc,
+ lggr: lggr,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+
+ sub := mocks.NewSubscription(t)
+ errChan := make(chan error)
+ close(errChan)
+ sub.On("Err").Return((<-chan error)(errChan)).Once()
+ sub.On("Unsubscribe").Once()
+ rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(sub, nil).Once()
+ rpc.On("SetAliveLoopSub", sub).Once()
+ // disconnects all on transfer to unreachable
+ rpc.On("DisconnectAll").Once()
+ // might be called in unreachable loop
+ rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe()
+ node.declareAlive()
+ tests.AssertLogEventually(t, observedLogs, "Subscription was terminated")
+ assert.Equal(t, nodeStateUnreachable, node.State())
+ })
+
+ newSubscribedNode := func(t *testing.T, opts testNodeOpts) testNode {
+ sub := mocks.NewSubscription(t)
+ sub.On("Err").Return((<-chan error)(nil))
+ sub.On("Unsubscribe").Once()
+ opts.rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(sub, nil).Once()
+ opts.rpc.On("SetAliveLoopSub", sub).Once()
+ return newDialedNode(t, opts)
+ }
+ t.Run("Stays alive and waits for signal", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.DebugLevel)
+ node := newSubscribedNode(t, testNodeOpts{
+ config: testNodeConfig{},
+ rpc: rpc,
+ lggr: lggr,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+ node.declareAlive()
+ tests.AssertLogEventually(t, observedLogs, "Head liveness checking disabled")
+ tests.AssertLogEventually(t, observedLogs, "Polling disabled")
+ assert.Equal(t, nodeStateAlive, node.State())
+ })
+ t.Run("stays alive while below pollFailureThreshold and resets counter on success", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.DebugLevel)
+ const pollFailureThreshold = 3
+ node := newSubscribedNode(t, testNodeOpts{
+ config: testNodeConfig{
+ pollFailureThreshold: pollFailureThreshold,
+ pollInterval: tests.TestInterval,
+ },
+ rpc: rpc,
+ lggr: lggr,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+
+ pollError := errors.New("failed to get ClientVersion")
+ // 1. Return error several times, but below threshold
+ rpc.On("ClientVersion", mock.Anything).Return("", pollError).Run(func(_ mock.Arguments) {
+ // stays healthy while below threshold
+ assert.Equal(t, nodeStateAlive, node.State())
+ }).Times(pollFailureThreshold - 1)
+ // 2. Successful call that is expected to reset counter
+ rpc.On("ClientVersion", mock.Anything).Return("client_version", nil).Once()
+ // 3. Return error. If we have not reset the timer, we'll transition to nonAliveState
+ rpc.On("ClientVersion", mock.Anything).Return("", pollError).Once()
+ // 4. Once during the call, check if node is alive
+ var ensuredAlive atomic.Bool
+ rpc.On("ClientVersion", mock.Anything).Return("client_version", nil).Run(func(_ mock.Arguments) {
+ if ensuredAlive.Load() {
+ return
+ }
+ ensuredAlive.Store(true)
+ assert.Equal(t, nodeStateAlive, node.State())
+ }).Once()
+ // redundant call to stay in alive state
+ rpc.On("ClientVersion", mock.Anything).Return("client_version", nil)
+ node.declareAlive()
+ tests.AssertLogCountEventually(t, observedLogs, fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", node.String()), pollFailureThreshold)
+ tests.AssertLogCountEventually(t, observedLogs, "Version poll successful", 2)
+ assert.True(t, ensuredAlive.Load(), "expected to ensure that node was alive")
+
+ })
+ t.Run("with threshold poll failures, transitions to unreachable", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.DebugLevel)
+ const pollFailureThreshold = 3
+ node := newSubscribedNode(t, testNodeOpts{
+ config: testNodeConfig{
+ pollFailureThreshold: pollFailureThreshold,
+ pollInterval: tests.TestInterval,
+ },
+ rpc: rpc,
+ lggr: lggr,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+ pollError := errors.New("failed to get ClientVersion")
+ rpc.On("ClientVersion", mock.Anything).Return("", pollError)
+ // disconnects all on transfer to unreachable
+ rpc.On("DisconnectAll").Once()
+ // might be called in unreachable loop
+ rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe()
+ node.declareAlive()
+ tests.AssertLogCountEventually(t, observedLogs, fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", node.String()), pollFailureThreshold)
+ tests.AssertEventually(t, func() bool {
+ return nodeStateUnreachable == node.State()
+ })
+ })
+ t.Run("with threshold poll failures, but we are the last node alive, forcibly keeps it alive", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.DebugLevel)
+ const pollFailureThreshold = 3
+ node := newSubscribedNode(t, testNodeOpts{
+ config: testNodeConfig{
+ pollFailureThreshold: pollFailureThreshold,
+ pollInterval: tests.TestInterval,
+ },
+ rpc: rpc,
+ lggr: lggr,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+ node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *utils.Big) {
+ return 1, 20, utils.NewBigI(10)
+ }
+ pollError := errors.New("failed to get ClientVersion")
+ rpc.On("ClientVersion", mock.Anything).Return("", pollError)
+ node.declareAlive()
+ tests.AssertLogEventually(t, observedLogs, fmt.Sprintf("RPC endpoint failed to respond to %d consecutive polls", pollFailureThreshold))
+ assert.Equal(t, nodeStateAlive, node.State())
+ })
+ t.Run("when behind more than SyncThreshold, transitions to out of sync", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.DebugLevel)
+ const syncThreshold = 10
+ node := newSubscribedNode(t, testNodeOpts{
+ config: testNodeConfig{
+ pollInterval: tests.TestInterval,
+ syncThreshold: syncThreshold,
+ selectionMode: NodeSelectionModeRoundRobin,
+ },
+ rpc: rpc,
+ lggr: lggr,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+ node.stateLatestBlockNumber = 20
+ node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *utils.Big) {
+ return 10, syncThreshold + node.stateLatestBlockNumber + 1, utils.NewBigI(10)
+ }
+ rpc.On("ClientVersion", mock.Anything).Return("", nil)
+ // tries to redial in outOfSync
+ rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Run(func(_ mock.Arguments) {
+ assert.Equal(t, nodeStateOutOfSync, node.State())
+ }).Once()
+ // disconnects all on transfer to unreachable or outOfSync
+ rpc.On("DisconnectAll").Maybe()
+ // might be called in unreachable loop
+ rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe()
+ node.declareAlive()
+ tests.AssertLogEventually(t, observedLogs, "Failed to dial out-of-sync RPC node")
+ })
+ t.Run("when behind more than SyncThreshold but we are the last live node, forcibly stays alive", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.DebugLevel)
+ const syncThreshold = 10
+ node := newSubscribedNode(t, testNodeOpts{
+ config: testNodeConfig{
+ pollInterval: tests.TestInterval,
+ syncThreshold: syncThreshold,
+ selectionMode: NodeSelectionModeRoundRobin,
+ },
+ rpc: rpc,
+ lggr: lggr,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+ node.stateLatestBlockNumber = 20
+ node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *utils.Big) {
+ return 1, syncThreshold + node.stateLatestBlockNumber + 1, utils.NewBigI(10)
+ }
+ rpc.On("ClientVersion", mock.Anything).Return("", nil)
+ node.declareAlive()
+ tests.AssertLogEventually(t, observedLogs, fmt.Sprintf("RPC endpoint has fallen behind; %s %s", msgCannotDisable, msgDegradedState))
+ })
+ t.Run("when behind but SyncThreshold=0, stay alive", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.DebugLevel)
+ node := newSubscribedNode(t, testNodeOpts{
+ config: testNodeConfig{
+ pollInterval: tests.TestInterval,
+ syncThreshold: 0,
+ selectionMode: NodeSelectionModeRoundRobin,
+ },
+ rpc: rpc,
+ lggr: lggr,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+ node.stateLatestBlockNumber = 20
+ node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *utils.Big) {
+ return 1, node.stateLatestBlockNumber + 100, utils.NewBigI(10)
+ }
+ rpc.On("ClientVersion", mock.Anything).Return("", nil)
+ node.declareAlive()
+ tests.AssertLogCountEventually(t, observedLogs, "Version poll successful", 2)
+ assert.Equal(t, nodeStateAlive, node.State())
+ })
+
+ t.Run("when no new heads received for threshold, transitions to out of sync", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ node := newSubscribedNode(t, testNodeOpts{
+ config: testNodeConfig{},
+ noNewHeadsThreshold: tests.TestInterval,
+ rpc: rpc,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+ // tries to redial in outOfSync
+ rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Run(func(_ mock.Arguments) {
+ assert.Equal(t, nodeStateOutOfSync, node.State())
+ }).Once()
+ // disconnects all on transfer to unreachable or outOfSync
+ rpc.On("DisconnectAll").Maybe()
+ // might be called in unreachable loop
+ rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe()
+ node.declareAlive()
+ tests.AssertEventually(t, func() bool {
+ // right after outOfSync we'll transfer to unreachable due to returned error on Dial
+ // we check that we were in out of sync state on first Dial call
+ return node.State() == nodeStateUnreachable
+ })
+ })
+ t.Run("when no new heads received for threshold but we are the last live node, forcibly stays alive", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.DebugLevel)
+ node := newSubscribedNode(t, testNodeOpts{
+ config: testNodeConfig{},
+ lggr: lggr,
+ noNewHeadsThreshold: tests.TestInterval,
+ rpc: rpc,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+ node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *utils.Big) {
+ return 1, 20, utils.NewBigI(10)
+ }
+ node.declareAlive()
+ tests.AssertLogEventually(t, observedLogs, fmt.Sprintf("RPC endpoint detected out of sync; %s %s", msgCannotDisable, msgDegradedState))
+ assert.Equal(t, nodeStateAlive, node.State())
+ })
+
+ t.Run("rpc closed head channel", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ sub := mocks.NewSubscription(t)
+ sub.On("Err").Return((<-chan error)(nil))
+ sub.On("Unsubscribe").Once()
+ rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) {
+ ch := args.Get(1).(chan<- Head)
+ close(ch)
+ }).Return(sub, nil).Once()
+ rpc.On("SetAliveLoopSub", sub).Once()
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.ErrorLevel)
+ node := newDialedNode(t, testNodeOpts{
+ lggr: lggr,
+ config: testNodeConfig{},
+ noNewHeadsThreshold: tests.TestInterval,
+ rpc: rpc,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+ // disconnects all on transfer to unreachable or outOfSync
+ rpc.On("DisconnectAll").Once()
+ // might be called in unreachable loop
+ rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe()
+ node.declareAlive()
+ tests.AssertLogEventually(t, observedLogs, "Subscription channel unexpectedly closed")
+ assert.Equal(t, nodeStateUnreachable, node.State())
+
+ })
+	t.Run("updates block number and difficulty on new head", func(t *testing.T) {
+		t.Parallel()
+		rpc := newMockNodeClient[types.ID, Head](t)
+		sub := mocks.NewSubscription(t)
+		sub.On("Err").Return((<-chan error)(nil))
+		sub.On("Unsubscribe").Once()
+		expectedBlockNumber := rand.Int64()
+		expectedDiff := utils.NewBigI(rand.Int64())
+		rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) {
+			ch := args.Get(1).(chan<- Head)
+			go writeHeads(t, ch, head{BlockNumber: expectedBlockNumber, BlockDifficulty: expectedDiff})
+		}).Return(sub, nil).Once()
+		rpc.On("SetAliveLoopSub", sub).Once()
+		node := newDialedNode(t, testNodeOpts{
+			config: testNodeConfig{},
+			rpc: rpc,
+		})
+		defer func() { assert.NoError(t, node.close()) }()
+		node.declareAlive()
+		tests.AssertEventually(t, func() bool {
+			state, block, diff := node.StateAndLatest()
+			// all three conditions must hold; the previous `==` chaining compared two
+			// booleans and also evaluated true when both conditions were false
+			return state == nodeStateAlive && block == expectedBlockNumber && diff.Equal(expectedDiff)
+		})
+	})
+}
+
+// head holds the block number and total difficulty that a mock Head created
+// by writeHeads will report.
+type head struct {
+	BlockNumber int64
+	BlockDifficulty *utils.Big
+}
+
+// writeHeads sends one mock Head per entry in heads to ch. Each mock reports
+// the corresponding BlockNumber and BlockDifficulty. Sending stops early if
+// the test context is done before a send completes.
+func writeHeads(t *testing.T, ch chan<- Head, heads ...head) {
+	done := tests.Context(t).Done()
+	for i := range heads {
+		mockHead := newMockHead(t)
+		mockHead.On("BlockNumber").Return(heads[i].BlockNumber)
+		mockHead.On("BlockDifficulty").Return(heads[i].BlockDifficulty)
+		select {
+		case ch <- mockHead:
+		case <-done:
+			return
+		}
+	}
+}
+
+// setupRPCForAliveLoop registers the optional mock expectations (dial,
+// subscribe, alive-loop subscription) needed for a node to transition into and
+// remain in the alive state. All expectations are Maybe, so tests that never
+// reach the alive loop still pass mock verification.
+func setupRPCForAliveLoop(t *testing.T, rpc *mockNodeClient[types.ID, Head]) {
+	rpc.On("Dial", mock.Anything).Return(nil).Maybe()
+	aliveSubscription := mocks.NewSubscription(t)
+	aliveSubscription.On("Err").Return((<-chan error)(nil)).Maybe()
+	aliveSubscription.On("Unsubscribe").Maybe()
+	rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(aliveSubscription, nil).Maybe()
+	rpc.On("SetAliveLoopSub", mock.Anything).Maybe()
+}
+
+func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) {
+ t.Parallel()
+
+ newAliveNode := func(t *testing.T, opts testNodeOpts) testNode {
+ node := newTestNode(t, opts)
+ opts.rpc.On("Close").Return(nil).Once()
+ // disconnects all on transfer to unreachable or outOfSync
+ opts.rpc.On("DisconnectAll")
+ node.setState(nodeStateAlive)
+ return node
+ }
+
+ stubIsOutOfSync := func(num int64, td *utils.Big) bool {
+ return false
+ }
+
+ t.Run("returns on closed", func(t *testing.T) {
+ t.Parallel()
+ node := newTestNode(t, testNodeOpts{})
+ node.setState(nodeStateClosed)
+ node.wg.Add(1)
+ node.outOfSyncLoop(stubIsOutOfSync)
+ })
+	t.Run("on old blocks stays outOfSync and returns on close", func(t *testing.T) {
+		t.Parallel()
+		rpc := newMockNodeClient[types.ID, Head](t)
+		nodeChainID := types.RandomID()
+		lggr, observedLogs := logger.TestLoggerObserved(t, zap.DebugLevel)
+		node := newAliveNode(t, testNodeOpts{
+			rpc: rpc,
+			chainID: nodeChainID,
+			lggr: lggr,
+		})
+		defer func() { assert.NoError(t, node.close()) }()
+
+		rpc.On("Dial", mock.Anything).Return(nil).Once()
+		// chain ID is verified exactly once as part of the out-of-sync redial flow
+		rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once()
+
+		outOfSyncSubscription := mocks.NewSubscription(t)
+		outOfSyncSubscription.On("Err").Return((<-chan error)(nil))
+		outOfSyncSubscription.On("Unsubscribe").Once()
+		heads := []head{{BlockNumber: 7}, {BlockNumber: 11}, {BlockNumber: 13}}
+		rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) {
+			ch := args.Get(1).(chan<- Head)
+			go writeHeads(t, ch, heads...)
+		}).Return(outOfSyncSubscription, nil).Once()
+		rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe()
+
+		// isOutOfSync always reports true, so every received head keeps the node out of sync
+		node.declareOutOfSync(func(num int64, td *utils.Big) bool {
+			return true
+		})
+		tests.AssertLogCountEventually(t, observedLogs, msgReceivedBlock, len(heads))
+		assert.Equal(t, nodeStateOutOfSync, node.State())
+	})
+ t.Run("if initial dial fails, transitions to unreachable", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ node := newAliveNode(t, testNodeOpts{
+ rpc: rpc,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+
+ expectedError := errors.New("failed to dial rpc")
+ // might be called again in unreachable loop, so no need to set once
+ rpc.On("Dial", mock.Anything).Return(expectedError)
+ node.declareOutOfSync(stubIsOutOfSync)
+ tests.AssertEventually(t, func() bool {
+ return node.State() == nodeStateUnreachable
+ })
+ })
+ t.Run("if fail to get chainID, transitions to invalidChainID", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ node := newAliveNode(t, testNodeOpts{
+ rpc: rpc,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+
+ rpc.On("Dial", mock.Anything).Return(nil).Once()
+ expectedError := errors.New("failed to get chain ID")
+ // might be called multiple times
+ rpc.On("ChainID", mock.Anything).Return(types.NewIDFromInt(0), expectedError)
+ node.declareOutOfSync(stubIsOutOfSync)
+ tests.AssertEventually(t, func() bool {
+ return node.State() == nodeStateInvalidChainID
+ })
+ })
+ t.Run("if chainID does not match, transitions to invalidChainID", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ nodeChainID := types.NewIDFromInt(10)
+ rpcChainID := types.NewIDFromInt(11)
+ node := newAliveNode(t, testNodeOpts{
+ rpc: rpc,
+ chainID: nodeChainID,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+
+ rpc.On("Dial", mock.Anything).Return(nil).Once()
+ // might be called multiple times
+ rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil)
+ node.declareOutOfSync(stubIsOutOfSync)
+ tests.AssertEventually(t, func() bool {
+ return node.State() == nodeStateInvalidChainID
+ })
+ })
+ t.Run("if fails to subscribe, becomes unreachable", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ nodeChainID := types.RandomID()
+ node := newAliveNode(t, testNodeOpts{
+ rpc: rpc,
+ chainID: nodeChainID,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+
+ rpc.On("Dial", mock.Anything).Return(nil).Once()
+ // might be called multiple times
+ rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once()
+ expectedError := errors.New("failed to subscribe")
+ rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(nil, expectedError)
+ rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe()
+ node.declareOutOfSync(stubIsOutOfSync)
+ tests.AssertEventually(t, func() bool {
+ return node.State() == nodeStateUnreachable
+ })
+ })
+	t.Run("on subscription termination becomes unreachable", func(t *testing.T) {
+		t.Parallel()
+		rpc := newMockNodeClient[types.ID, Head](t)
+		nodeChainID := types.RandomID()
+		lggr, observedLogs := logger.TestLoggerObserved(t, zap.ErrorLevel)
+		node := newAliveNode(t, testNodeOpts{
+			rpc: rpc,
+			chainID: nodeChainID,
+			lggr: lggr,
+		})
+		defer func() { assert.NoError(t, node.close()) }()
+
+		rpc.On("Dial", mock.Anything).Return(nil).Once()
+		// chain ID is verified exactly once as part of the out-of-sync redial flow
+		rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once()
+
+		sub := mocks.NewSubscription(t)
+		errChan := make(chan error, 1)
+		// fixed typo in the fixture error ("terminate" -> "terminated")
+		errChan <- errors.New("subscription was terminated")
+		sub.On("Err").Return((<-chan error)(errChan))
+		sub.On("Unsubscribe").Once()
+		rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(sub, nil).Once()
+		rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe()
+		node.declareOutOfSync(stubIsOutOfSync)
+		tests.AssertLogEventually(t, observedLogs, "Subscription was terminated")
+		tests.AssertEventually(t, func() bool {
+			return node.State() == nodeStateUnreachable
+		})
+	})
+ t.Run("becomes unreachable if head channel is closed", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ nodeChainID := types.RandomID()
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.ErrorLevel)
+ node := newAliveNode(t, testNodeOpts{
+ rpc: rpc,
+ chainID: nodeChainID,
+ lggr: lggr,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+
+ rpc.On("Dial", mock.Anything).Return(nil).Once()
+ // might be called multiple times
+ rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once()
+
+ sub := mocks.NewSubscription(t)
+ sub.On("Err").Return((<-chan error)(nil))
+ sub.On("Unsubscribe").Once()
+ rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) {
+ ch := args.Get(1).(chan<- Head)
+ close(ch)
+ }).Return(sub, nil).Once()
+ rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe()
+ node.declareOutOfSync(stubIsOutOfSync)
+ tests.AssertLogEventually(t, observedLogs, "Subscription channel unexpectedly closed")
+ tests.AssertEventually(t, func() bool {
+ return node.State() == nodeStateUnreachable
+ })
+ })
+
+ t.Run("becomes alive if it receives a newer head", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ nodeChainID := types.RandomID()
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.DebugLevel)
+ node := newAliveNode(t, testNodeOpts{
+ rpc: rpc,
+ chainID: nodeChainID,
+ lggr: lggr,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+
+ rpc.On("Dial", mock.Anything).Return(nil).Once()
+ // might be called multiple times
+ rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once()
+
+ outOfSyncSubscription := mocks.NewSubscription(t)
+ outOfSyncSubscription.On("Err").Return((<-chan error)(nil))
+ outOfSyncSubscription.On("Unsubscribe").Once()
+ const highestBlock = 1000
+ rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) {
+ ch := args.Get(1).(chan<- Head)
+ go writeHeads(t, ch, head{BlockNumber: highestBlock - 1}, head{BlockNumber: highestBlock})
+ }).Return(outOfSyncSubscription, nil).Once()
+
+ setupRPCForAliveLoop(t, rpc)
+
+ node.declareOutOfSync(func(num int64, td *utils.Big) bool {
+ return num < highestBlock
+ })
+ tests.AssertLogEventually(t, observedLogs, msgReceivedBlock)
+ tests.AssertLogEventually(t, observedLogs, msgInSync)
+ tests.AssertEventually(t, func() bool {
+ return node.State() == nodeStateAlive
+ })
+ })
+ t.Run("becomes alive if there is no other nodes", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ nodeChainID := types.RandomID()
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.DebugLevel)
+ node := newAliveNode(t, testNodeOpts{
+ noNewHeadsThreshold: tests.TestInterval,
+ rpc: rpc,
+ chainID: nodeChainID,
+ lggr: lggr,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+ node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *utils.Big) {
+ return 0, 100, utils.NewBigI(200)
+ }
+
+ rpc.On("Dial", mock.Anything).Return(nil).Once()
+ // might be called multiple times
+ rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once()
+
+ outOfSyncSubscription := mocks.NewSubscription(t)
+ outOfSyncSubscription.On("Err").Return((<-chan error)(nil))
+ outOfSyncSubscription.On("Unsubscribe").Once()
+ rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(outOfSyncSubscription, nil).Once()
+
+ setupRPCForAliveLoop(t, rpc)
+
+ node.declareOutOfSync(stubIsOutOfSync)
+ tests.AssertLogEventually(t, observedLogs, "RPC endpoint is still out of sync, but there are no other available nodes. This RPC node will be forcibly moved back into the live pool in a degraded state")
+ tests.AssertEventually(t, func() bool {
+ return node.State() == nodeStateAlive
+ })
+ })
+}
+
+func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) {
+ t.Parallel()
+
+ newAliveNode := func(t *testing.T, opts testNodeOpts) testNode {
+ node := newTestNode(t, opts)
+ opts.rpc.On("Close").Return(nil).Once()
+ // disconnects all on transfer to unreachable
+ opts.rpc.On("DisconnectAll")
+
+ node.setState(nodeStateAlive)
+ return node
+ }
+ t.Run("returns on closed", func(t *testing.T) {
+ t.Parallel()
+ node := newTestNode(t, testNodeOpts{})
+ node.setState(nodeStateClosed)
+ node.wg.Add(1)
+ node.unreachableLoop()
+
+ })
+ t.Run("on failed redial, keeps trying", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ nodeChainID := types.RandomID()
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.DebugLevel)
+ node := newAliveNode(t, testNodeOpts{
+ rpc: rpc,
+ chainID: nodeChainID,
+ lggr: lggr,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+
+ rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial"))
+ node.declareUnreachable()
+ tests.AssertLogCountEventually(t, observedLogs, "Failed to redial RPC node; still unreachable", 2)
+ })
+ t.Run("on failed chainID verification, keep trying", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ nodeChainID := types.RandomID()
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.DebugLevel)
+ node := newAliveNode(t, testNodeOpts{
+ rpc: rpc,
+ chainID: nodeChainID,
+ lggr: lggr,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+
+ rpc.On("Dial", mock.Anything).Return(nil)
+ rpc.On("ChainID", mock.Anything).Run(func(_ mock.Arguments) {
+ assert.Equal(t, nodeStateDialed, node.State())
+ }).Return(nodeChainID, errors.New("failed to get chain id"))
+ node.declareUnreachable()
+ tests.AssertLogCountEventually(t, observedLogs, "Failed to redial RPC node; verify failed", 2)
+ })
+ t.Run("on chain ID mismatch transitions to invalidChainID", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ nodeChainID := types.NewIDFromInt(10)
+ rpcChainID := types.NewIDFromInt(11)
+ node := newAliveNode(t, testNodeOpts{
+ rpc: rpc,
+ chainID: nodeChainID,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+
+ rpc.On("Dial", mock.Anything).Return(nil)
+ rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil)
+ node.declareUnreachable()
+ tests.AssertEventually(t, func() bool {
+ return node.State() == nodeStateInvalidChainID
+ })
+ })
+ t.Run("on valid chain ID becomes alive", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ nodeChainID := types.RandomID()
+ node := newAliveNode(t, testNodeOpts{
+ rpc: rpc,
+ chainID: nodeChainID,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+
+ rpc.On("Dial", mock.Anything).Return(nil)
+ rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil)
+
+ setupRPCForAliveLoop(t, rpc)
+
+ node.declareUnreachable()
+ tests.AssertEventually(t, func() bool {
+ return node.State() == nodeStateAlive
+ })
+ })
+}
+
+func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) {
+ t.Parallel()
+ newDialedNode := func(t *testing.T, opts testNodeOpts) testNode {
+ node := newTestNode(t, opts)
+ opts.rpc.On("Close").Return(nil).Once()
+ opts.rpc.On("DisconnectAll")
+
+ node.setState(nodeStateDialed)
+ return node
+ }
+ t.Run("returns on closed", func(t *testing.T) {
+ t.Parallel()
+ node := newTestNode(t, testNodeOpts{})
+ node.setState(nodeStateClosed)
+ node.wg.Add(1)
+ node.invalidChainIDLoop()
+
+ })
+ t.Run("on failed chainID call becomes unreachable", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ nodeChainID := types.RandomID()
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.DebugLevel)
+ node := newDialedNode(t, testNodeOpts{
+ rpc: rpc,
+ chainID: nodeChainID,
+ lggr: lggr,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+
+ rpc.On("ChainID", mock.Anything).Return(nodeChainID, errors.New("failed to get chain id"))
+ // for unreachable loop
+ rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe()
+ node.declareInvalidChainID()
+ tests.AssertLogEventually(t, observedLogs, "Unexpected error while verifying RPC node chain ID")
+ tests.AssertEventually(t, func() bool {
+ return node.State() == nodeStateUnreachable
+ })
+ })
+ t.Run("on chainID mismatch keeps trying", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ nodeChainID := types.NewIDFromInt(10)
+ rpcChainID := types.NewIDFromInt(11)
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.DebugLevel)
+ node := newDialedNode(t, testNodeOpts{
+ rpc: rpc,
+ chainID: nodeChainID,
+ lggr: lggr,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+
+ rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil)
+ node.declareInvalidChainID()
+ tests.AssertLogCountEventually(t, observedLogs, "Failed to verify RPC node; remote endpoint returned the wrong chain ID", 2)
+ tests.AssertEventually(t, func() bool {
+ return node.State() == nodeStateInvalidChainID
+ })
+ })
+ t.Run("on valid chainID becomes alive", func(t *testing.T) {
+ t.Parallel()
+ rpc := newMockNodeClient[types.ID, Head](t)
+ nodeChainID := types.RandomID()
+ node := newDialedNode(t, testNodeOpts{
+ rpc: rpc,
+ chainID: nodeChainID,
+ })
+ defer func() { assert.NoError(t, node.close()) }()
+
+ rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil)
+
+ setupRPCForAliveLoop(t, rpc)
+
+ node.declareInvalidChainID()
+ tests.AssertEventually(t, func() bool {
+ return node.State() == nodeStateAlive
+ })
+ })
+}
+
+// TestUnit_NodeLifecycle_start covers node.Start: initial dial failure, chain
+// ID verification failure, chain ID mismatch, and the happy path to alive.
+func TestUnit_NodeLifecycle_start(t *testing.T) {
+	t.Parallel()
+
+	// newNode also registers the Close expectation fired on node shutdown.
+	newNode := func(t *testing.T, opts testNodeOpts) testNode {
+		node := newTestNode(t, opts)
+		opts.rpc.On("Close").Return(nil).Once()
+
+		return node
+	}
+	t.Run("if fails on initial dial, becomes unreachable", func(t *testing.T) {
+		t.Parallel()
+		rpc := newMockNodeClient[types.ID, Head](t)
+		nodeChainID := types.RandomID()
+		lggr, observedLogs := logger.TestLoggerObserved(t, zap.DebugLevel)
+		node := newNode(t, testNodeOpts{
+			rpc:     rpc,
+			chainID: nodeChainID,
+			lggr:    lggr,
+		})
+		defer func() { assert.NoError(t, node.close()) }()
+
+		rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial"))
+		// disconnects all on transfer to unreachable
+		rpc.On("DisconnectAll")
+		err := node.Start(tests.Context(t))
+		assert.NoError(t, err)
+		tests.AssertLogEventually(t, observedLogs, "Dial failed: Node is unreachable")
+		tests.AssertEventually(t, func() bool {
+			return node.State() == nodeStateUnreachable
+		})
+	})
+	t.Run("if chainID verification fails, becomes unreachable", func(t *testing.T) {
+		t.Parallel()
+		rpc := newMockNodeClient[types.ID, Head](t)
+		nodeChainID := types.RandomID()
+		lggr, observedLogs := logger.TestLoggerObserved(t, zap.DebugLevel)
+		node := newNode(t, testNodeOpts{
+			rpc:     rpc,
+			chainID: nodeChainID,
+			lggr:    lggr,
+		})
+		defer func() { assert.NoError(t, node.close()) }()
+
+		rpc.On("Dial", mock.Anything).Return(nil)
+		// the ChainID call must happen while the node is still in the dialed state
+		rpc.On("ChainID", mock.Anything).Run(func(_ mock.Arguments) {
+			assert.Equal(t, nodeStateDialed, node.State())
+		}).Return(nodeChainID, errors.New("failed to get chain id"))
+		// disconnects all on transfer to unreachable
+		rpc.On("DisconnectAll")
+		err := node.Start(tests.Context(t))
+		assert.NoError(t, err)
+		tests.AssertLogEventually(t, observedLogs, "Verify failed")
+		tests.AssertEventually(t, func() bool {
+			return node.State() == nodeStateUnreachable
+		})
+	})
+	t.Run("on chain ID mismatch transitions to invalidChainID", func(t *testing.T) {
+		t.Parallel()
+		rpc := newMockNodeClient[types.ID, Head](t)
+		nodeChainID := types.NewIDFromInt(10)
+		rpcChainID := types.NewIDFromInt(11)
+		node := newNode(t, testNodeOpts{
+			rpc:     rpc,
+			chainID: nodeChainID,
+		})
+		defer func() { assert.NoError(t, node.close()) }()
+
+		rpc.On("Dial", mock.Anything).Return(nil)
+		rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil)
+		// disconnects all on transfer to unreachable
+		rpc.On("DisconnectAll")
+		err := node.Start(tests.Context(t))
+		assert.NoError(t, err)
+		tests.AssertEventually(t, func() bool {
+			return node.State() == nodeStateInvalidChainID
+		})
+	})
+	t.Run("on valid chain ID becomes alive", func(t *testing.T) {
+		t.Parallel()
+		rpc := newMockNodeClient[types.ID, Head](t)
+		nodeChainID := types.RandomID()
+		node := newNode(t, testNodeOpts{
+			rpc:     rpc,
+			chainID: nodeChainID,
+		})
+		defer func() { assert.NoError(t, node.close()) }()
+
+		rpc.On("Dial", mock.Anything).Return(nil)
+		rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil)
+
+		setupRPCForAliveLoop(t, rpc)
+
+		err := node.Start(tests.Context(t))
+		assert.NoError(t, err)
+		tests.AssertEventually(t, func() bool {
+			return node.State() == nodeStateAlive
+		})
+	})
+}
+
+// TestUnit_NodeLifecycle_syncStatus exercises syncStatus boundary behavior:
+// which combinations of block number and total difficulty mark a node as out
+// of sync, for every selection mode.
+func TestUnit_NodeLifecycle_syncStatus(t *testing.T) {
+	t.Parallel()
+	t.Run("skip if nLiveNodes is not configured", func(t *testing.T) {
+		node := newTestNode(t, testNodeOpts{})
+		outOfSync, liveNodes := node.syncStatus(0, nil)
+		assert.Equal(t, false, outOfSync)
+		assert.Equal(t, 0, liveNodes)
+	})
+	t.Run("skip if syncThreshold is not configured", func(t *testing.T) {
+		node := newTestNode(t, testNodeOpts{})
+		node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *utils.Big) {
+			return
+		}
+		outOfSync, liveNodes := node.syncStatus(0, nil)
+		assert.Equal(t, false, outOfSync)
+		assert.Equal(t, 0, liveNodes)
+	})
+	t.Run("panics on invalid selection mode", func(t *testing.T) {
+		node := newTestNode(t, testNodeOpts{
+			config: testNodeConfig{syncThreshold: 1},
+		})
+		node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *utils.Big) {
+			return
+		}
+		assert.Panics(t, func() {
+			_, _ = node.syncStatus(0, nil)
+		})
+	})
+	t.Run("block height selection mode", func(t *testing.T) {
+		const syncThreshold = 10
+		const highestBlock = 1000
+		const nodesNum = 20
+		const totalDifficulty = 3000
+		testCases := []struct {
+			name        string
+			blockNumber int64
+			outOfSync   bool
+		}{
+			{
+				name:        "below threshold",
+				blockNumber: highestBlock - syncThreshold - 1,
+				outOfSync:   true,
+			},
+			{
+				name:        "equal to threshold",
+				blockNumber: highestBlock - syncThreshold,
+				outOfSync:   false,
+			},
+			{
+				name:        "equal to highest block",
+				blockNumber: highestBlock,
+				outOfSync:   false,
+			},
+			{
+				// fix: previously duplicated the "equal to highest block" case
+				// (blockNumber was highestBlock); a node ahead of the highest
+				// known block must be considered in sync.
+				name:        "higher than highest block",
+				blockNumber: highestBlock + 1,
+				outOfSync:   false,
+			},
+		}
+
+		for _, selectionMode := range []string{NodeSelectionModeHighestHead, NodeSelectionModeRoundRobin, NodeSelectionModePriorityLevel} {
+			node := newTestNode(t, testNodeOpts{
+				config: testNodeConfig{
+					syncThreshold: syncThreshold,
+					selectionMode: selectionMode,
+				},
+			})
+			node.nLiveNodes = func() (int, int64, *utils.Big) {
+				return nodesNum, highestBlock, utils.NewBigI(totalDifficulty)
+			}
+			// total difficulty must not affect the outcome in block-height modes
+			for _, td := range []int64{totalDifficulty - syncThreshold - 1, totalDifficulty - syncThreshold, totalDifficulty, totalDifficulty + 1} {
+				for _, testCase := range testCases {
+					t.Run(fmt.Sprintf("%s: selectionMode: %s: total difficulty: %d", testCase.name, selectionMode, td), func(t *testing.T) {
+						outOfSync, liveNodes := node.syncStatus(testCase.blockNumber, utils.NewBigI(td))
+						assert.Equal(t, nodesNum, liveNodes)
+						assert.Equal(t, testCase.outOfSync, outOfSync)
+					})
+				}
+			}
+		}
+
+	})
+	t.Run("total difficulty selection mode", func(t *testing.T) {
+		const syncThreshold = 10
+		const highestBlock = 1000
+		const nodesNum = 20
+		const totalDifficulty = 3000
+		testCases := []struct {
+			name            string
+			totalDifficulty int64
+			outOfSync       bool
+		}{
+			{
+				name:            "below threshold",
+				totalDifficulty: totalDifficulty - syncThreshold - 1,
+				outOfSync:       true,
+			},
+			{
+				name:            "equal to threshold",
+				totalDifficulty: totalDifficulty - syncThreshold,
+				outOfSync:       false,
+			},
+			{
+				// fix: case names said "block" but this table varies total difficulty
+				name:            "equal to highest total difficulty",
+				totalDifficulty: totalDifficulty,
+				outOfSync:       false,
+			},
+			{
+				// fix: previously duplicated the previous case (value was
+				// totalDifficulty); a node ahead of the highest known total
+				// difficulty must be considered in sync.
+				name:            "higher than highest total difficulty",
+				totalDifficulty: totalDifficulty + 1,
+				outOfSync:       false,
+			},
+		}
+
+		node := newTestNode(t, testNodeOpts{
+			config: testNodeConfig{
+				syncThreshold: syncThreshold,
+				selectionMode: NodeSelectionModeTotalDifficulty,
+			},
+		})
+		node.nLiveNodes = func() (int, int64, *utils.Big) {
+			return nodesNum, highestBlock, utils.NewBigI(totalDifficulty)
+		}
+		// block height must not affect the outcome in total-difficulty mode
+		for _, hb := range []int64{highestBlock - syncThreshold - 1, highestBlock - syncThreshold, highestBlock, highestBlock + 1} {
+			for _, testCase := range testCases {
+				t.Run(fmt.Sprintf("%s: selectionMode: %s: highest block: %d", testCase.name, NodeSelectionModeTotalDifficulty, hb), func(t *testing.T) {
+					outOfSync, liveNodes := node.syncStatus(hb, utils.NewBigI(testCase.totalDifficulty))
+					assert.Equal(t, nodesNum, liveNodes)
+					assert.Equal(t, testCase.outOfSync, outOfSync)
+				})
+			}
+		}
+
+	})
+}
diff --git a/common/client/node_selector.go b/common/client/node_selector.go
new file mode 100644
index 00000000000..45604ebe8d9
--- /dev/null
+++ b/common/client/node_selector.go
@@ -0,0 +1,46 @@
+package client
+
+import (
+ "fmt"
+
+ "github.com/smartcontractkit/chainlink/v2/common/types"
+)
+
+// Node selection mode names, as configured per chain.
+const (
+	NodeSelectionModeHighestHead     = "HighestHead"
+	NodeSelectionModeRoundRobin      = "RoundRobin"
+	NodeSelectionModeTotalDifficulty = "TotalDifficulty"
+	NodeSelectionModePriorityLevel   = "PriorityLevel"
+)
+
+//go:generate mockery --quiet --name NodeSelector --structname mockNodeSelector --filename "mock_node_selector_test.go" --inpackage --case=underscore
+// NodeSelector picks the Node that the next RPC call should be routed to.
+type NodeSelector[
+	CHAIN_ID types.ID,
+	HEAD Head,
+	RPC NodeClient[CHAIN_ID, HEAD],
+] interface {
+	// Select returns a Node, or nil if none can be selected.
+	// Implementation must be thread-safe.
+	Select() Node[CHAIN_ID, HEAD, RPC]
+	// Name returns the strategy name, e.g. "HighestHead" or "RoundRobin"
+	Name() string
+}
+
+// newNodeSelector maps a selection-mode string to its NodeSelector
+// implementation; panics on an unsupported mode.
+func newNodeSelector[
+	CHAIN_ID types.ID,
+	HEAD Head,
+	RPC NodeClient[CHAIN_ID, HEAD],
+](selectionMode string, nodes []Node[CHAIN_ID, HEAD, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] {
+	switch selectionMode {
+	case NodeSelectionModeHighestHead:
+		return NewHighestHeadNodeSelector[CHAIN_ID, HEAD, RPC](nodes)
+	case NodeSelectionModeRoundRobin:
+		return NewRoundRobinSelector[CHAIN_ID, HEAD, RPC](nodes)
+	case NodeSelectionModeTotalDifficulty:
+		return NewTotalDifficultyNodeSelector[CHAIN_ID, HEAD, RPC](nodes)
+	case NodeSelectionModePriorityLevel:
+		return NewPriorityLevelNodeSelector[CHAIN_ID, HEAD, RPC](nodes)
+	default:
+		panic(fmt.Sprintf("unsupported NodeSelectionMode: %s", selectionMode))
+	}
+}
diff --git a/common/client/node_selector_highest_head.go b/common/client/node_selector_highest_head.go
new file mode 100644
index 00000000000..99a130004a9
--- /dev/null
+++ b/common/client/node_selector_highest_head.go
@@ -0,0 +1,41 @@
+package client
+
+import (
+ "math"
+
+ "github.com/smartcontractkit/chainlink/v2/common/types"
+)
+
+// highestHeadNodeSelector prefers the alive node that has reported the highest
+// head number; ties are broken by node priority (lowest Order wins).
+type highestHeadNodeSelector[
+	CHAIN_ID types.ID,
+	HEAD Head,
+	RPC NodeClient[CHAIN_ID, HEAD],
+] []Node[CHAIN_ID, HEAD, RPC]
+
+// NewHighestHeadNodeSelector returns a NodeSelector implementing the
+// "HighestHead" strategy.
+func NewHighestHeadNodeSelector[
+	CHAIN_ID types.ID,
+	HEAD Head,
+	RPC NodeClient[CHAIN_ID, HEAD],
+](nodes []Node[CHAIN_ID, HEAD, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] {
+	return highestHeadNodeSelector[CHAIN_ID, HEAD, RPC](nodes)
+}
+
+// Select returns an alive node with the highest known head number, or nil if
+// no node is alive.
+func (s highestHeadNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, HEAD, RPC] {
+	// start from MinInt64 so that alive nodes reporting a negative head
+	// number are still eligible
+	var highestHeadNumber int64 = math.MinInt64
+	var highestHeadNodes []Node[CHAIN_ID, HEAD, RPC]
+	for _, n := range s {
+		state, currentHeadNumber, _ := n.StateAndLatest()
+		if state == nodeStateAlive && currentHeadNumber >= highestHeadNumber {
+			// strictly higher head: drop the previous candidate set
+			if highestHeadNumber < currentHeadNumber {
+				highestHeadNumber = currentHeadNumber
+				highestHeadNodes = nil
+			}
+			highestHeadNodes = append(highestHeadNodes, n)
+		}
+	}
+	return firstOrHighestPriority(highestHeadNodes)
+}
+
+// Name returns the selection-mode string for this strategy.
+func (s highestHeadNodeSelector[CHAIN_ID, HEAD, RPC]) Name() string {
+	return NodeSelectionModeHighestHead
+}
diff --git a/common/client/node_selector_highest_head_test.go b/common/client/node_selector_highest_head_test.go
new file mode 100644
index 00000000000..6e47bbedcae
--- /dev/null
+++ b/common/client/node_selector_highest_head_test.go
@@ -0,0 +1,176 @@
+package client
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/smartcontractkit/chainlink/v2/common/types"
+)
+
+// TestHighestHeadNodeSelectorName checks the strategy reports its mode string.
+func TestHighestHeadNodeSelectorName(t *testing.T) {
+	selector := newNodeSelector[types.ID, Head, NodeClient[types.ID, Head]](NodeSelectionModeHighestHead, nil)
+	assert.Equal(t, selector.Name(), NodeSelectionModeHighestHead)
+}
+
+// TestHighestHeadNodeSelector verifies selection by highest head among alive
+// nodes, including ties and nodes that never received a head.
+func TestHighestHeadNodeSelector(t *testing.T) {
+	t.Parallel()
+
+	type nodeClient NodeClient[types.ID, Head]
+
+	var nodes []Node[types.ID, Head, nodeClient]
+
+	for i := 0; i < 3; i++ {
+		node := newMockNode[types.ID, Head, nodeClient](t)
+		if i == 0 {
+			// first node is out of sync
+			node.On("StateAndLatest").Return(nodeStateOutOfSync, int64(-1), nil)
+		} else if i == 1 {
+			// second node is alive, LatestReceivedBlockNumber = 1
+			node.On("StateAndLatest").Return(nodeStateAlive, int64(1), nil)
+		} else {
+			// third node is alive, LatestReceivedBlockNumber = 2 (best node)
+			node.On("StateAndLatest").Return(nodeStateAlive, int64(2), nil)
+		}
+		node.On("Order").Maybe().Return(int32(1))
+		nodes = append(nodes, node)
+	}
+
+	selector := newNodeSelector[types.ID, Head, nodeClient](NodeSelectionModeHighestHead, nodes)
+	assert.Same(t, nodes[2], selector.Select())
+
+	t.Run("stick to the same node", func(t *testing.T) {
+		node := newMockNode[types.ID, Head, nodeClient](t)
+		// fourth node is alive, LatestReceivedBlockNumber = 2 (same as 3rd)
+		node.On("StateAndLatest").Return(nodeStateAlive, int64(2), nil)
+		node.On("Order").Return(int32(1))
+		nodes = append(nodes, node)
+
+		selector := newNodeSelector(NodeSelectionModeHighestHead, nodes)
+		assert.Same(t, nodes[2], selector.Select())
+	})
+
+	t.Run("another best node", func(t *testing.T) {
+		node := newMockNode[types.ID, Head, nodeClient](t)
+		// fifth node is alive, LatestReceivedBlockNumber = 3 (better than 3rd and 4th)
+		node.On("StateAndLatest").Return(nodeStateAlive, int64(3), nil)
+		node.On("Order").Return(int32(1))
+		nodes = append(nodes, node)
+
+		selector := newNodeSelector(NodeSelectionModeHighestHead, nodes)
+		assert.Same(t, nodes[4], selector.Select())
+	})
+
+	t.Run("nodes never update latest block number", func(t *testing.T) {
+		node1 := newMockNode[types.ID, Head, nodeClient](t)
+		node1.On("StateAndLatest").Return(nodeStateAlive, int64(-1), nil)
+		node1.On("Order").Return(int32(1))
+		node2 := newMockNode[types.ID, Head, nodeClient](t)
+		node2.On("StateAndLatest").Return(nodeStateAlive, int64(-1), nil)
+		node2.On("Order").Return(int32(1))
+		selector := newNodeSelector(NodeSelectionModeHighestHead, []Node[types.ID, Head, nodeClient]{node1, node2})
+		assert.Same(t, node1, selector.Select())
+	})
+}
+
+// TestHighestHeadNodeSelector_None verifies nil is returned when no node is alive.
+func TestHighestHeadNodeSelector_None(t *testing.T) {
+	t.Parallel()
+
+	type nodeClient NodeClient[types.ID, Head]
+	var nodes []Node[types.ID, Head, nodeClient]
+
+	for i := 0; i < 3; i++ {
+		node := newMockNode[types.ID, Head, nodeClient](t)
+		if i == 0 {
+			// first node is out of sync
+			node.On("StateAndLatest").Return(nodeStateOutOfSync, int64(-1), nil)
+		} else {
+			// others are unreachable
+			node.On("StateAndLatest").Return(nodeStateUnreachable, int64(1), nil)
+		}
+		nodes = append(nodes, node)
+	}
+
+	selector := newNodeSelector(NodeSelectionModeHighestHead, nodes)
+	assert.Nil(t, selector.Select())
+}
+
+// TestHighestHeadNodeSelectorWithOrder verifies the priority (Order) tie-breaker
+// across same/different head and same/different order combinations.
+func TestHighestHeadNodeSelectorWithOrder(t *testing.T) {
+	t.Parallel()
+
+	type nodeClient NodeClient[types.ID, Head]
+	var nodes []Node[types.ID, Head, nodeClient]
+
+	t.Run("same head and order", func(t *testing.T) {
+		for i := 0; i < 3; i++ {
+			node := newMockNode[types.ID, Head, nodeClient](t)
+			node.On("StateAndLatest").Return(nodeStateAlive, int64(1), nil)
+			node.On("Order").Return(int32(2))
+			nodes = append(nodes, node)
+		}
+		selector := newNodeSelector(NodeSelectionModeHighestHead, nodes)
+		//Should select the first node because all things are equal
+		assert.Same(t, nodes[0], selector.Select())
+	})
+
+	t.Run("same head but different order", func(t *testing.T) {
+		node1 := newMockNode[types.ID, Head, nodeClient](t)
+		node1.On("StateAndLatest").Return(nodeStateAlive, int64(3), nil)
+		node1.On("Order").Return(int32(3))
+
+		node2 := newMockNode[types.ID, Head, nodeClient](t)
+		node2.On("StateAndLatest").Return(nodeStateAlive, int64(3), nil)
+		node2.On("Order").Return(int32(1))
+
+		node3 := newMockNode[types.ID, Head, nodeClient](t)
+		node3.On("StateAndLatest").Return(nodeStateAlive, int64(3), nil)
+		node3.On("Order").Return(int32(2))
+
+		nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3}
+		selector := newNodeSelector(NodeSelectionModeHighestHead, nodes)
+		//Should select the second node as it has the highest priority
+		assert.Same(t, nodes[1], selector.Select())
+	})
+
+	t.Run("different head but same order", func(t *testing.T) {
+		node1 := newMockNode[types.ID, Head, nodeClient](t)
+		node1.On("StateAndLatest").Return(nodeStateAlive, int64(1), nil)
+		node1.On("Order").Maybe().Return(int32(3))
+
+		node2 := newMockNode[types.ID, Head, nodeClient](t)
+		node2.On("StateAndLatest").Return(nodeStateAlive, int64(2), nil)
+		node2.On("Order").Maybe().Return(int32(3))
+
+		node3 := newMockNode[types.ID, Head, nodeClient](t)
+		node3.On("StateAndLatest").Return(nodeStateAlive, int64(3), nil)
+		node3.On("Order").Return(int32(3))
+
+		nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3}
+		selector := newNodeSelector(NodeSelectionModeHighestHead, nodes)
+		//Should select the third node as it has the highest head
+		assert.Same(t, nodes[2], selector.Select())
+	})
+
+	t.Run("different head and different order", func(t *testing.T) {
+		node1 := newMockNode[types.ID, Head, nodeClient](t)
+		node1.On("StateAndLatest").Return(nodeStateAlive, int64(10), nil)
+		node1.On("Order").Maybe().Return(int32(3))
+
+		node2 := newMockNode[types.ID, Head, nodeClient](t)
+		node2.On("StateAndLatest").Return(nodeStateAlive, int64(11), nil)
+		node2.On("Order").Maybe().Return(int32(4))
+
+		node3 := newMockNode[types.ID, Head, nodeClient](t)
+		node3.On("StateAndLatest").Return(nodeStateAlive, int64(11), nil)
+		node3.On("Order").Maybe().Return(int32(3))
+
+		node4 := newMockNode[types.ID, Head, nodeClient](t)
+		node4.On("StateAndLatest").Return(nodeStateAlive, int64(10), nil)
+		node4.On("Order").Maybe().Return(int32(1))
+
+		nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3, node4}
+		selector := newNodeSelector(NodeSelectionModeHighestHead, nodes)
+		//Should select the third node as it has the highest head and will win the priority tie-breaker
+		assert.Same(t, nodes[2], selector.Select())
+	})
+}
diff --git a/common/client/node_selector_priority_level.go b/common/client/node_selector_priority_level.go
new file mode 100644
index 00000000000..45cc62de077
--- /dev/null
+++ b/common/client/node_selector_priority_level.go
@@ -0,0 +1,129 @@
+package client
+
+import (
+ "math"
+ "sort"
+ "sync/atomic"
+
+ "github.com/smartcontractkit/chainlink/v2/common/types"
+)
+
+// priorityLevelNodeSelector does round-robin among the alive nodes of the
+// highest-priority tier (the lowest Order value), keeping one round-robin
+// counter per tier.
+type priorityLevelNodeSelector[
+	CHAIN_ID types.ID,
+	HEAD Head,
+	RPC NodeClient[CHAIN_ID, HEAD],
+] struct {
+	nodes           []Node[CHAIN_ID, HEAD, RPC]
+	roundRobinCount []atomic.Uint32
+}
+
+// nodeWithPriority pairs a node with its Order() so a tier can be sorted once.
+type nodeWithPriority[
+	CHAIN_ID types.ID,
+	HEAD Head,
+	RPC NodeClient[CHAIN_ID, HEAD],
+] struct {
+	node     Node[CHAIN_ID, HEAD, RPC]
+	priority int32
+}
+
+// NewPriorityLevelNodeSelector returns a NodeSelector implementing the
+// "PriorityLevel" strategy.
+func NewPriorityLevelNodeSelector[
+	CHAIN_ID types.ID,
+	HEAD Head,
+	RPC NodeClient[CHAIN_ID, HEAD],
+](nodes []Node[CHAIN_ID, HEAD, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] {
+	return &priorityLevelNodeSelector[CHAIN_ID, HEAD, RPC]{
+		nodes:           nodes,
+		roundRobinCount: make([]atomic.Uint32, nrOfPriorityTiers(nodes)),
+	}
+}
+
+// Select returns the next node (round-robin) from the highest-priority tier of
+// alive nodes, or nil if no node is alive.
+func (s priorityLevelNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, HEAD, RPC] {
+	nodes := s.getHighestPriorityAliveTier()
+
+	if len(nodes) == 0 {
+		return nil
+	}
+	// all returned nodes share the same priority; it indexes the tier's counter
+	priorityLevel := nodes[len(nodes)-1].priority
+
+	// NOTE: Inc returns the number after addition, so we must -1 to get the "current" counter
+	count := s.roundRobinCount[priorityLevel].Add(1) - 1
+	idx := int(count % uint32(len(nodes)))
+
+	return nodes[idx].node
+}
+
+// Name returns the selection-mode string for this strategy.
+func (s priorityLevelNodeSelector[CHAIN_ID, HEAD, RPC]) Name() string {
+	return NodeSelectionModePriorityLevel
+}
+
+// getHighestPriorityAliveTier filters nodes that are not in state nodeStateAlive and
+// returns only the highest tier of alive nodes
+func (s priorityLevelNodeSelector[CHAIN_ID, HEAD, RPC]) getHighestPriorityAliveTier() []nodeWithPriority[CHAIN_ID, HEAD, RPC] {
+	var nodes []nodeWithPriority[CHAIN_ID, HEAD, RPC]
+	for _, n := range s.nodes {
+		if n.State() == nodeStateAlive {
+			nodes = append(nodes, nodeWithPriority[CHAIN_ID, HEAD, RPC]{n, n.Order()})
+		}
+	}
+
+	if len(nodes) == 0 {
+		return nil
+	}
+
+	return removeLowerTiers(nodes)
+}
+
+// removeLowerTiers takes a slice of nodeWithPriority[CHAIN_ID, HEAD, RPC] and keeps only the highest tier
+func removeLowerTiers[
+	CHAIN_ID types.ID,
+	HEAD Head,
+	RPC NodeClient[CHAIN_ID, HEAD],
+](nodes []nodeWithPriority[CHAIN_ID, HEAD, RPC]) []nodeWithPriority[CHAIN_ID, HEAD, RPC] {
+	// sort descending by priority number so the last element belongs to the
+	// highest-priority tier (lowest Order value)
+	sort.SliceStable(nodes, func(i, j int) bool {
+		return nodes[i].priority > nodes[j].priority
+	})
+
+	var nodes2 []nodeWithPriority[CHAIN_ID, HEAD, RPC]
+	currentPriority := nodes[len(nodes)-1].priority
+
+	for _, n := range nodes {
+		if n.priority == currentPriority {
+			nodes2 = append(nodes2, n)
+		}
+	}
+
+	return nodes2
+}
+
+// nrOfPriorityTiers calculates the total number of priority tiers
+func nrOfPriorityTiers[
+	CHAIN_ID types.ID,
+	HEAD Head,
+	RPC NodeClient[CHAIN_ID, HEAD],
+](nodes []Node[CHAIN_ID, HEAD, RPC]) int32 {
+	highestPriority := int32(0)
+	for _, n := range nodes {
+		priority := n.Order()
+		if highestPriority < priority {
+			highestPriority = priority
+		}
+	}
+	return highestPriority + 1
+}
+
+// firstOrHighestPriority takes a list of nodes and returns the first one with the highest priority
+// (i.e. the lowest Order value); returns nil for an empty list.
+func firstOrHighestPriority[
+	CHAIN_ID types.ID,
+	HEAD Head,
+	RPC NodeClient[CHAIN_ID, HEAD],
+](nodes []Node[CHAIN_ID, HEAD, RPC]) Node[CHAIN_ID, HEAD, RPC] {
+	hp := int32(math.MaxInt32)
+	var node Node[CHAIN_ID, HEAD, RPC]
+	for _, n := range nodes {
+		if n.Order() < hp {
+			hp = n.Order()
+			node = n
+		}
+	}
+	return node
+}
diff --git a/common/client/node_selector_priority_level_test.go b/common/client/node_selector_priority_level_test.go
new file mode 100644
index 00000000000..ac84645e91c
--- /dev/null
+++ b/common/client/node_selector_priority_level_test.go
@@ -0,0 +1,88 @@
+package client
+
+import (
+ "testing"
+
+ "github.com/smartcontractkit/chainlink/v2/common/types"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// TestPriorityLevelNodeSelectorName checks the strategy reports its mode string.
+func TestPriorityLevelNodeSelectorName(t *testing.T) {
+	selector := newNodeSelector[types.ID, Head, NodeClient[types.ID, Head]](NodeSelectionModePriorityLevel, nil)
+	assert.Equal(t, selector.Name(), NodeSelectionModePriorityLevel)
+}
+
+// TestPriorityLevelNodeSelector verifies round-robin rotation within a single
+// priority tier of alive nodes.
+func TestPriorityLevelNodeSelector(t *testing.T) {
+	t.Parallel()
+
+	type nodeClient NodeClient[types.ID, Head]
+	var nodes []Node[types.ID, Head, nodeClient]
+	n1 := newMockNode[types.ID, Head, nodeClient](t)
+	n1.On("State").Return(nodeStateAlive)
+	n1.On("Order").Return(int32(1))
+
+	n2 := newMockNode[types.ID, Head, nodeClient](t)
+	n2.On("State").Return(nodeStateAlive)
+	n2.On("Order").Return(int32(1))
+
+	n3 := newMockNode[types.ID, Head, nodeClient](t)
+	n3.On("State").Return(nodeStateAlive)
+	n3.On("Order").Return(int32(1))
+
+	nodes = append(nodes, n1, n2, n3)
+	selector := newNodeSelector(NodeSelectionModePriorityLevel, nodes)
+	// two full rotations over the three same-priority nodes
+	assert.Same(t, nodes[0], selector.Select())
+	assert.Same(t, nodes[1], selector.Select())
+	assert.Same(t, nodes[2], selector.Select())
+	assert.Same(t, nodes[0], selector.Select())
+	assert.Same(t, nodes[1], selector.Select())
+	assert.Same(t, nodes[2], selector.Select())
+}
+
+// TestPriorityLevelNodeSelector_None verifies nil is returned when no node is alive.
+func TestPriorityLevelNodeSelector_None(t *testing.T) {
+	t.Parallel()
+
+	type nodeClient NodeClient[types.ID, Head]
+	var nodes []Node[types.ID, Head, nodeClient]
+
+	for i := 0; i < 3; i++ {
+		node := newMockNode[types.ID, Head, nodeClient](t)
+		if i == 0 {
+			// first node is out of sync
+			node.On("State").Return(nodeStateOutOfSync)
+			node.On("Order").Return(int32(1))
+		} else {
+			// others are unreachable
+			node.On("State").Return(nodeStateUnreachable)
+			node.On("Order").Return(int32(1))
+		}
+		nodes = append(nodes, node)
+	}
+
+	selector := newNodeSelector(NodeSelectionModePriorityLevel, nodes)
+	assert.Nil(t, selector.Select())
+}
+
+// TestPriorityLevelNodeSelector_DifferentOrder verifies only the highest-priority
+// tier (lowest Order) is ever selected.
+func TestPriorityLevelNodeSelector_DifferentOrder(t *testing.T) {
+	t.Parallel()
+
+	type nodeClient NodeClient[types.ID, Head]
+	var nodes []Node[types.ID, Head, nodeClient]
+	n1 := newMockNode[types.ID, Head, nodeClient](t)
+	n1.On("State").Return(nodeStateAlive)
+	n1.On("Order").Return(int32(1))
+
+	n2 := newMockNode[types.ID, Head, nodeClient](t)
+	n2.On("State").Return(nodeStateAlive)
+	n2.On("Order").Return(int32(2))
+
+	n3 := newMockNode[types.ID, Head, nodeClient](t)
+	n3.On("State").Return(nodeStateAlive)
+	n3.On("Order").Return(int32(3))
+
+	nodes = append(nodes, n1, n2, n3)
+	selector := newNodeSelector(NodeSelectionModePriorityLevel, nodes)
+	assert.Same(t, nodes[0], selector.Select())
+	assert.Same(t, nodes[0], selector.Select())
+}
diff --git a/common/client/node_selector_round_robin.go b/common/client/node_selector_round_robin.go
new file mode 100644
index 00000000000..5cdad7f52ee
--- /dev/null
+++ b/common/client/node_selector_round_robin.go
@@ -0,0 +1,50 @@
+package client
+
+import (
+ "sync/atomic"
+
+ "github.com/smartcontractkit/chainlink/v2/common/types"
+)
+
+// roundRobinSelector cycles through all alive nodes in order, regardless of
+// head number or priority.
+type roundRobinSelector[
+	CHAIN_ID types.ID,
+	HEAD Head,
+	RPC NodeClient[CHAIN_ID, HEAD],
+] struct {
+	nodes           []Node[CHAIN_ID, HEAD, RPC]
+	roundRobinCount atomic.Uint32
+}
+
+// NewRoundRobinSelector returns a NodeSelector implementing the "RoundRobin"
+// strategy.
+func NewRoundRobinSelector[
+	CHAIN_ID types.ID,
+	HEAD Head,
+	RPC NodeClient[CHAIN_ID, HEAD],
+](nodes []Node[CHAIN_ID, HEAD, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] {
+	return &roundRobinSelector[CHAIN_ID, HEAD, RPC]{
+		nodes: nodes,
+	}
+}
+
+// Select returns the next alive node in round-robin order, or nil if no node
+// is alive.
+func (s *roundRobinSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, HEAD, RPC] {
+	var liveNodes []Node[CHAIN_ID, HEAD, RPC]
+	for _, n := range s.nodes {
+		if n.State() == nodeStateAlive {
+			liveNodes = append(liveNodes, n)
+		}
+	}
+
+	nNodes := len(liveNodes)
+	if nNodes == 0 {
+		return nil
+	}
+
+	// NOTE: Inc returns the number after addition, so we must -1 to get the "current" counter
+	count := s.roundRobinCount.Add(1) - 1
+	idx := int(count % uint32(nNodes))
+
+	return liveNodes[idx]
+}
+
+// Name returns the selection-mode string for this strategy.
+func (s *roundRobinSelector[CHAIN_ID, HEAD, RPC]) Name() string {
+	return NodeSelectionModeRoundRobin
+}
diff --git a/common/client/node_selector_round_robin_test.go b/common/client/node_selector_round_robin_test.go
new file mode 100644
index 00000000000..e5078d858f1
--- /dev/null
+++ b/common/client/node_selector_round_robin_test.go
@@ -0,0 +1,61 @@
+package client
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/smartcontractkit/chainlink/v2/common/types"
+)
+
+// TestRoundRobinNodeSelectorName checks the strategy reports its mode string.
+func TestRoundRobinNodeSelectorName(t *testing.T) {
+	selector := newNodeSelector[types.ID, Head, NodeClient[types.ID, Head]](NodeSelectionModeRoundRobin, nil)
+	assert.Equal(t, selector.Name(), NodeSelectionModeRoundRobin)
+}
+
+// TestRoundRobinNodeSelector verifies rotation over the alive subset of nodes.
+func TestRoundRobinNodeSelector(t *testing.T) {
+	t.Parallel()
+
+	type nodeClient NodeClient[types.ID, Head]
+	var nodes []Node[types.ID, Head, nodeClient]
+
+	for i := 0; i < 3; i++ {
+		node := newMockNode[types.ID, Head, nodeClient](t)
+		if i == 0 {
+			// first node is out of sync
+			node.On("State").Return(nodeStateOutOfSync)
+		} else {
+			// second & third nodes are alive
+			node.On("State").Return(nodeStateAlive)
+		}
+		nodes = append(nodes, node)
+	}
+
+	selector := newNodeSelector(NodeSelectionModeRoundRobin, nodes)
+	// the out-of-sync node is skipped; the two alive nodes alternate
+	assert.Same(t, nodes[1], selector.Select())
+	assert.Same(t, nodes[2], selector.Select())
+	assert.Same(t, nodes[1], selector.Select())
+	assert.Same(t, nodes[2], selector.Select())
+}
+
+// TestRoundRobinNodeSelector_None verifies nil is returned when no node is alive.
+func TestRoundRobinNodeSelector_None(t *testing.T) {
+	t.Parallel()
+
+	type nodeClient NodeClient[types.ID, Head]
+	var nodes []Node[types.ID, Head, nodeClient]
+
+	for i := 0; i < 3; i++ {
+		node := newMockNode[types.ID, Head, nodeClient](t)
+		if i == 0 {
+			// first node is out of sync
+			node.On("State").Return(nodeStateOutOfSync)
+		} else {
+			// others are unreachable
+			node.On("State").Return(nodeStateUnreachable)
+		}
+		nodes = append(nodes, node)
+	}
+
+	selector := newNodeSelector(NodeSelectionModeRoundRobin, nodes)
+	assert.Nil(t, selector.Select())
+}
diff --git a/common/client/node_selector_test.go b/common/client/node_selector_test.go
new file mode 100644
index 00000000000..226cb67168d
--- /dev/null
+++ b/common/client/node_selector_test.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/smartcontractkit/chainlink/v2/common/types"
+)
+
+// TestNodeSelector verifies newNodeSelector panics on an unknown selection mode.
+func TestNodeSelector(t *testing.T) {
+	// rest of the tests are located in specific node selectors tests
+	t.Run("panics on unknown type", func(t *testing.T) {
+		assert.Panics(t, func() {
+			_ = newNodeSelector[types.ID, Head, NodeClient[types.ID, Head]]("unknown", nil)
+		})
+	})
+}
diff --git a/common/client/node_selector_total_difficulty.go b/common/client/node_selector_total_difficulty.go
new file mode 100644
index 00000000000..9b29642d033
--- /dev/null
+++ b/common/client/node_selector_total_difficulty.go
@@ -0,0 +1,54 @@
+package client
+
+import (
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
+
+ "github.com/smartcontractkit/chainlink/v2/common/types"
+)
+
+// totalDifficultyNodeSelector prefers the alive node with the highest reported
+// total difficulty; ties are broken by node priority (lowest Order wins).
+type totalDifficultyNodeSelector[
+	CHAIN_ID types.ID,
+	HEAD Head,
+	RPC NodeClient[CHAIN_ID, HEAD],
+] []Node[CHAIN_ID, HEAD, RPC]
+
+// NewTotalDifficultyNodeSelector returns a NodeSelector implementing the
+// "TotalDifficulty" strategy.
+func NewTotalDifficultyNodeSelector[
+	CHAIN_ID types.ID,
+	HEAD Head,
+	RPC NodeClient[CHAIN_ID, HEAD],
+](nodes []Node[CHAIN_ID, HEAD, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] {
+	return totalDifficultyNodeSelector[CHAIN_ID, HEAD, RPC](nodes)
+}
+
+// Select returns an alive node with the highest total difficulty. If no alive
+// node reports a total difficulty, it falls back to any alive node; returns
+// nil if no node is alive.
+func (s totalDifficultyNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, HEAD, RPC] {
+	// NodeNoNewHeadsThreshold may not be enabled, in this case all nodes have td == nil
+	var highestTD *utils.Big
+	var nodes []Node[CHAIN_ID, HEAD, RPC]
+	var aliveNodes []Node[CHAIN_ID, HEAD, RPC]
+
+	for _, n := range s {
+		state, _, currentTD := n.StateAndLatest()
+		if state != nodeStateAlive {
+			continue
+		}
+
+		aliveNodes = append(aliveNodes, n)
+		if currentTD != nil && (highestTD == nil || currentTD.Cmp(highestTD) >= 0) {
+			// strictly higher difficulty: drop the previous candidate set
+			if highestTD == nil || currentTD.Cmp(highestTD) > 0 {
+				highestTD = currentTD
+				nodes = nil
+			}
+			nodes = append(nodes, n)
+		}
+	}
+
+	//If all nodes have td == nil pick one from the nodes that are alive
+	if len(nodes) == 0 {
+		return firstOrHighestPriority(aliveNodes)
+	}
+	return firstOrHighestPriority(nodes)
+}
+
+// Name returns the selection-mode string for this strategy.
+func (s totalDifficultyNodeSelector[CHAIN_ID, HEAD, RPC]) Name() string {
+	return NodeSelectionModeTotalDifficulty
+}
diff --git a/common/client/node_selector_total_difficulty_test.go b/common/client/node_selector_total_difficulty_test.go
new file mode 100644
index 00000000000..4eecb859db9
--- /dev/null
+++ b/common/client/node_selector_total_difficulty_test.go
@@ -0,0 +1,178 @@
+package client
+
+import (
+ "testing"
+
+ "github.com/smartcontractkit/chainlink/v2/common/types"
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// TestTotalDifficultyNodeSelectorName checks the strategy reports its mode string.
+func TestTotalDifficultyNodeSelectorName(t *testing.T) {
+	selector := newNodeSelector[types.ID, Head, NodeClient[types.ID, Head]](NodeSelectionModeTotalDifficulty, nil)
+	assert.Equal(t, selector.Name(), NodeSelectionModeTotalDifficulty)
+}
+
+// TestTotalDifficultyNodeSelector verifies selection by highest total
+// difficulty, including ties and nodes reporting no difficulty at all.
+func TestTotalDifficultyNodeSelector(t *testing.T) {
+	t.Parallel()
+
+	type nodeClient NodeClient[types.ID, Head]
+	var nodes []Node[types.ID, Head, nodeClient]
+
+	for i := 0; i < 3; i++ {
+		node := newMockNode[types.ID, Head, nodeClient](t)
+		if i == 0 {
+			// first node is out of sync
+			node.On("StateAndLatest").Return(nodeStateOutOfSync, int64(-1), nil)
+		} else if i == 1 {
+			// second node is alive
+			node.On("StateAndLatest").Return(nodeStateAlive, int64(1), utils.NewBigI(7))
+		} else {
+			// third node is alive and best
+			node.On("StateAndLatest").Return(nodeStateAlive, int64(2), utils.NewBigI(8))
+		}
+		node.On("Order").Maybe().Return(int32(1))
+		nodes = append(nodes, node)
+	}
+
+	selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes)
+	assert.Same(t, nodes[2], selector.Select())
+
+	t.Run("stick to the same node", func(t *testing.T) {
+		node := newMockNode[types.ID, Head, nodeClient](t)
+		// fourth node is alive (same as 3rd)
+		node.On("StateAndLatest").Return(nodeStateAlive, int64(2), utils.NewBigI(8))
+		node.On("Order").Maybe().Return(int32(1))
+		nodes = append(nodes, node)
+
+		selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes)
+		assert.Same(t, nodes[2], selector.Select())
+	})
+
+	t.Run("another best node", func(t *testing.T) {
+		node := newMockNode[types.ID, Head, nodeClient](t)
+		// fifth node is alive (better than 3rd and 4th)
+		node.On("StateAndLatest").Return(nodeStateAlive, int64(3), utils.NewBigI(11))
+		node.On("Order").Maybe().Return(int32(1))
+		nodes = append(nodes, node)
+
+		selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes)
+		assert.Same(t, nodes[4], selector.Select())
+	})
+
+	t.Run("nodes never update latest block number", func(t *testing.T) {
+		node1 := newMockNode[types.ID, Head, nodeClient](t)
+		node1.On("StateAndLatest").Return(nodeStateAlive, int64(-1), nil)
+		node1.On("Order").Maybe().Return(int32(1))
+		node2 := newMockNode[types.ID, Head, nodeClient](t)
+		node2.On("StateAndLatest").Return(nodeStateAlive, int64(-1), nil)
+		node2.On("Order").Maybe().Return(int32(1))
+		nodes := []Node[types.ID, Head, nodeClient]{node1, node2}
+
+		selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes)
+		assert.Same(t, node1, selector.Select())
+	})
+}
+
+// TestTotalDifficultyNodeSelector_None verifies nil is returned when no node is alive.
+func TestTotalDifficultyNodeSelector_None(t *testing.T) {
+	t.Parallel()
+
+	type nodeClient NodeClient[types.ID, Head]
+	var nodes []Node[types.ID, Head, nodeClient]
+
+	for i := 0; i < 3; i++ {
+		node := newMockNode[types.ID, Head, nodeClient](t)
+		if i == 0 {
+			// first node is out of sync
+			node.On("StateAndLatest").Return(nodeStateOutOfSync, int64(-1), nil)
+		} else {
+			// others are unreachable
+			node.On("StateAndLatest").Return(nodeStateUnreachable, int64(1), utils.NewBigI(7))
+		}
+		nodes = append(nodes, node)
+	}
+
+	selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes)
+	assert.Nil(t, selector.Select())
+}
+
+// TestTotalDifficultyNodeSelectorWithOrder verifies the priority (Order)
+// tie-breaker across same/different difficulty and order combinations.
+func TestTotalDifficultyNodeSelectorWithOrder(t *testing.T) {
+	t.Parallel()
+
+	type nodeClient NodeClient[types.ID, Head]
+	var nodes []Node[types.ID, Head, nodeClient]
+
+	t.Run("same td and order", func(t *testing.T) {
+		for i := 0; i < 3; i++ {
+			node := newMockNode[types.ID, Head, nodeClient](t)
+			node.On("StateAndLatest").Return(nodeStateAlive, int64(1), utils.NewBigI(10))
+			node.On("Order").Return(int32(2))
+			nodes = append(nodes, node)
+		}
+		selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes)
+		//Should select the first node because all things are equal
+		assert.Same(t, nodes[0], selector.Select())
+	})
+
+	t.Run("same td but different order", func(t *testing.T) {
+		node1 := newMockNode[types.ID, Head, nodeClient](t)
+		node1.On("StateAndLatest").Return(nodeStateAlive, int64(3), utils.NewBigI(10))
+		node1.On("Order").Return(int32(3))
+
+		node2 := newMockNode[types.ID, Head, nodeClient](t)
+		node2.On("StateAndLatest").Return(nodeStateAlive, int64(3), utils.NewBigI(10))
+		node2.On("Order").Return(int32(1))
+
+		node3 := newMockNode[types.ID, Head, nodeClient](t)
+		node3.On("StateAndLatest").Return(nodeStateAlive, int64(3), utils.NewBigI(10))
+		node3.On("Order").Return(int32(2))
+
+		nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3}
+		selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes)
+		//Should select the second node as it has the highest priority
+		assert.Same(t, nodes[1], selector.Select())
+	})
+
+	t.Run("different td but same order", func(t *testing.T) {
+		node1 := newMockNode[types.ID, Head, nodeClient](t)
+		node1.On("StateAndLatest").Return(nodeStateAlive, int64(1), utils.NewBigI(10))
+		node1.On("Order").Maybe().Return(int32(3))
+
+		node2 := newMockNode[types.ID, Head, nodeClient](t)
+		node2.On("StateAndLatest").Return(nodeStateAlive, int64(1), utils.NewBigI(11))
+		node2.On("Order").Maybe().Return(int32(3))
+
+		node3 := newMockNode[types.ID, Head, nodeClient](t)
+		node3.On("StateAndLatest").Return(nodeStateAlive, int64(1), utils.NewBigI(12))
+		node3.On("Order").Return(int32(3))
+
+		nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3}
+		selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes)
+		//Should select the third node as it has the highest td
+		assert.Same(t, nodes[2], selector.Select())
+	})
+
+	t.Run("different head and different order", func(t *testing.T) {
+		node1 := newMockNode[types.ID, Head, nodeClient](t)
+		node1.On("StateAndLatest").Return(nodeStateAlive, int64(1), utils.NewBigI(100))
+		node1.On("Order").Maybe().Return(int32(4))
+
+		node2 := newMockNode[types.ID, Head, nodeClient](t)
+		node2.On("StateAndLatest").Return(nodeStateAlive, int64(1), utils.NewBigI(110))
+		node2.On("Order").Maybe().Return(int32(5))
+
+		node3 := newMockNode[types.ID, Head, nodeClient](t)
+		node3.On("StateAndLatest").Return(nodeStateAlive, int64(1), utils.NewBigI(110))
+		node3.On("Order").Maybe().Return(int32(1))
+
+		node4 := newMockNode[types.ID, Head, nodeClient](t)
+		node4.On("StateAndLatest").Return(nodeStateAlive, int64(1), utils.NewBigI(105))
+		node4.On("Order").Maybe().Return(int32(2))
+
+		nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3, node4}
+		selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes)
+		//Should select the third node as it has the highest td and will win the priority tie-breaker
+		assert.Same(t, nodes[2], selector.Select())
+	})
+}
diff --git a/common/client/node_test.go b/common/client/node_test.go
new file mode 100644
index 00000000000..0438e11e612
--- /dev/null
+++ b/common/client/node_test.go
@@ -0,0 +1,80 @@
+package client
+
+import (
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/smartcontractkit/chainlink/v2/common/types"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+)
+
+// testNodeConfig is a static, value-type implementation of the node config
+// interface used by tests; each getter simply returns the matching field.
+type testNodeConfig struct {
+ pollFailureThreshold uint32
+ pollInterval time.Duration
+ selectionMode string
+ syncThreshold uint32
+}
+
+// PollFailureThreshold returns the configured poll failure threshold.
+func (n testNodeConfig) PollFailureThreshold() uint32 {
+ return n.pollFailureThreshold
+}
+
+// PollInterval returns the configured health-check poll interval.
+func (n testNodeConfig) PollInterval() time.Duration {
+ return n.pollInterval
+}
+
+// SelectionMode returns the configured node selection mode string.
+func (n testNodeConfig) SelectionMode() string {
+ return n.selectionMode
+}
+
+// SyncThreshold returns the configured sync threshold.
+func (n testNodeConfig) SyncThreshold() uint32 {
+ return n.syncThreshold
+}
+
+// testNode embeds the concrete node implementation so tests can reach
+// unexported fields/methods directly.
+type testNode struct {
+ *node[types.ID, Head, NodeClient[types.ID, Head]]
+}
+
+// testNodeOpts carries the constructor arguments for newTestNode; zero-valued
+// fields are replaced with defaults by newTestNode.
+type testNodeOpts struct {
+ config testNodeConfig
+ noNewHeadsThreshold time.Duration
+ lggr logger.Logger
+ wsuri url.URL
+ httpuri *url.URL
+ name string
+ id int32
+ chainID types.ID
+ nodeOrder int32
+ rpc *mockNodeClient[types.ID, Head]
+ chainFamily string
+}
+
+// newTestNode constructs a testNode for use in tests, filling in sensible
+// defaults for any zero-valued optional fields: logger, name, chain family,
+// chain ID and node id. The returned testNode wraps the concrete *node so
+// tests can access its internals.
+func newTestNode(t *testing.T, opts testNodeOpts) testNode {
+ if opts.lggr == nil {
+ opts.lggr = logger.TestLogger(t)
+ }
+
+ if opts.name == "" {
+ // Fixed typo: was "tes node".
+ opts.name = "test node"
+ }
+
+ if opts.chainFamily == "" {
+ opts.chainFamily = "test node chain family"
+ }
+
+ if opts.chainID == nil {
+ opts.chainID = types.RandomID()
+ }
+
+ if opts.id == 0 {
+ opts.id = 42
+ }
+
+ nodeI := NewNode[types.ID, Head, NodeClient[types.ID, Head]](opts.config, opts.noNewHeadsThreshold, opts.lggr,
+ opts.wsuri, opts.httpuri, opts.name, opts.id, opts.chainID, opts.nodeOrder, opts.rpc, opts.chainFamily)
+
+ // NewNode returns the Node interface; unwrap to the concrete type.
+ return testNode{
+ nodeI.(*node[types.ID, Head, NodeClient[types.ID, Head]]),
+ }
+}
diff --git a/common/client/send_only_node.go b/common/client/send_only_node.go
new file mode 100644
index 00000000000..fa793a826a6
--- /dev/null
+++ b/common/client/send_only_node.go
@@ -0,0 +1,186 @@
+package client
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "sync"
+
+ "github.com/smartcontractkit/chainlink-relay/pkg/services"
+
+ "github.com/smartcontractkit/chainlink/v2/common/types"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
+)
+
+// sendOnlyClient is the minimal RPC surface a sendonly node needs: dialing,
+// chain ID verification, and shutdown.
+//
+//go:generate mockery --quiet --name sendOnlyClient --structname mockSendOnlyClient --filename "mock_send_only_client_test.go" --inpackage --case=underscore
+type sendOnlyClient[
+ CHAIN_ID types.ID,
+] interface {
+ // Close releases the client's resources.
+ Close()
+ // ChainID returns the chain ID reported by the remote RPC.
+ ChainID(context.Context) (CHAIN_ID, error)
+ // DialHTTP establishes the HTTP connection.
+ DialHTTP() error
+}
+
+// SendOnlyNode represents one node used as a sendonly
+//
+//go:generate mockery --quiet --name SendOnlyNode --structname mockSendOnlyNode --filename "mock_send_only_node_test.go" --inpackage --case=underscore
+type SendOnlyNode[
+ CHAIN_ID types.ID,
+ RPC sendOnlyClient[CHAIN_ID],
+] interface {
+ // Start may attempt to connect to the node, but should only return error for misconfiguration - never for temporary errors.
+ Start(context.Context) error
+ // Close shuts the node down and waits for background goroutines to exit.
+ Close() error
+
+ // ConfiguredChainID returns the locally configured chain ID (not the one
+ // reported by the remote RPC).
+ ConfiguredChainID() CHAIN_ID
+ // RPC returns the underlying RPC client.
+ RPC() RPC
+
+ // String returns a human-readable identifier (tier, name, redacted URL).
+ String() string
+ // State returns nodeState
+ State() nodeState
+ // Name is a unique identifier for this node.
+ Name() string
+}
+
+// sendOnlyNode is the concrete SendOnlyNode implementation.
+// It only supports sending transactions.
+// It must use an http(s) url.
+type sendOnlyNode[
+ CHAIN_ID types.ID,
+ RPC sendOnlyClient[CHAIN_ID],
+] struct {
+ services.StateMachine
+
+ stateMu sync.RWMutex // protects state* fields
+ state nodeState
+
+ rpc RPC
+ uri url.URL
+ log logger.Logger
+ name string
+ chainID CHAIN_ID
+ chStop utils.StopChan // closed on Close to stop background goroutines
+ wg sync.WaitGroup // tracks verifyLoop; waited on in Close
+}
+
+// NewSendOnlyNode returns a new sendonly node.
+// lggr is wrapped with the node name and a "sendonly" tier tag; httpuri is the
+// node's HTTP(S) endpoint; chainID is the locally configured chain ID the
+// remote RPC will be verified against on Start.
+func NewSendOnlyNode[
+ CHAIN_ID types.ID,
+ RPC sendOnlyClient[CHAIN_ID],
+](
+ lggr logger.Logger,
+ httpuri url.URL,
+ name string,
+ chainID CHAIN_ID,
+ rpc RPC,
+) SendOnlyNode[CHAIN_ID, RPC] {
+ s := new(sendOnlyNode[CHAIN_ID, RPC])
+ s.name = name
+ s.log = lggr.Named("SendOnlyNode").Named(name).With(
+ "nodeTier", "sendonly",
+ )
+ s.rpc = rpc
+ s.uri = httpuri
+ s.chainID = chainID
+ s.chStop = make(chan struct{})
+ return s
+}
+
+// Start dials and verifies the node exactly once (guarded by StartOnce).
+// It always returns nil from the inner func: dial/verify failures are
+// reflected in the node's state, not as a Start error.
+func (s *sendOnlyNode[CHAIN_ID, RPC]) Start(ctx context.Context) error {
+ return s.StartOnce(s.name, func() error {
+ s.start(ctx)
+ return nil
+ })
+}
+
+// start sets up and verifies the sendonly node.
+// Should only be called once in a node's lifecycle (via Start).
+//
+// Transitions: dial failure -> nodeStateUnusable (permanent); chain ID
+// verification failure -> nodeStateUnreachable or nodeStateInvalidChainID and
+// verifyLoop is spawned to retry until success; otherwise -> nodeStateAlive.
+func (s *sendOnlyNode[CHAIN_ID, RPC]) start(startCtx context.Context) {
+ if s.State() != nodeStateUndialed {
+ panic(fmt.Sprintf("cannot dial node with state %v", s.state))
+ }
+
+ err := s.rpc.DialHTTP()
+ if err != nil {
+ promPoolRPCNodeTransitionsToUnusable.WithLabelValues(s.chainID.String(), s.name).Inc()
+ s.log.Errorw("Dial failed: SendOnly Node is unusable", "err", err)
+ s.setState(nodeStateUnusable)
+ return
+ }
+ s.setState(nodeStateDialed)
+
+ if s.chainID.String() == "0" {
+ // Skip verification if chainID is zero
+ s.log.Warn("sendonly rpc ChainID verification skipped")
+ } else {
+ chainID, err := s.rpc.ChainID(startCtx)
+ if err != nil || chainID.String() != s.chainID.String() {
+ // Each failure path records exactly one metric transition.
+ // (Previously the Unreachable counter was also incremented
+ // unconditionally here, double-counting the err != nil path and
+ // mislabeling the chain-ID-mismatch path.)
+ if err != nil {
+ promPoolRPCNodeTransitionsToUnreachable.WithLabelValues(s.chainID.String(), s.name).Inc()
+ s.log.Errorw(fmt.Sprintf("Verify failed: %v", err), "err", err)
+ s.setState(nodeStateUnreachable)
+ } else {
+ promPoolRPCNodeTransitionsToInvalidChainID.WithLabelValues(s.chainID.String(), s.name).Inc()
+ s.log.Errorf(
+ "sendonly rpc ChainID doesn't match local chain ID: RPC ID=%s, local ID=%s, node name=%s",
+ chainID.String(),
+ s.chainID.String(),
+ s.name,
+ )
+ s.setState(nodeStateInvalidChainID)
+ }
+ // Since it has failed, spin up the verifyLoop that will keep
+ // retrying until success
+ s.wg.Add(1)
+ go s.verifyLoop()
+ return
+ }
+ }
+
+ promPoolRPCNodeTransitionsToAlive.WithLabelValues(s.chainID.String(), s.name).Inc()
+ s.setState(nodeStateAlive)
+ s.log.Infow("Sendonly RPC Node is online", "nodeState", s.state)
+}
+
+// Close shuts the node down exactly once (guarded by StopOnce): closes the
+// RPC client, signals background goroutines via chStop, waits for them to
+// exit, then marks the node closed.
+func (s *sendOnlyNode[CHAIN_ID, RPC]) Close() error {
+ return s.StopOnce(s.name, func() error {
+ s.rpc.Close()
+ close(s.chStop)
+ s.wg.Wait()
+ s.setState(nodeStateClosed)
+ return nil
+ })
+}
+
+// ConfiguredChainID returns the locally configured chain ID.
+func (s *sendOnlyNode[CHAIN_ID, RPC]) ConfiguredChainID() CHAIN_ID {
+ return s.chainID
+}
+
+// RPC returns the underlying RPC client.
+func (s *sendOnlyNode[CHAIN_ID, RPC]) RPC() RPC {
+ return s.rpc
+}
+
+// String renders as "(sendonly)<name>:<url>" with userinfo redacted.
+func (s *sendOnlyNode[CHAIN_ID, RPC]) String() string {
+ return fmt.Sprintf("(%s)%s:%s", Secondary.String(), s.name, s.uri.Redacted())
+}
+
+// setState updates the node state under the write lock and reports whether
+// the state actually changed (false when the new state equals the old one),
+// allowing callers to increment transition metrics only on real transitions.
+func (s *sendOnlyNode[CHAIN_ID, RPC]) setState(state nodeState) (changed bool) {
+ s.stateMu.Lock()
+ defer s.stateMu.Unlock()
+ if s.state == state {
+ return false
+ }
+ s.state = state
+ return true
+}
+
+// State returns the current node state under the read lock.
+func (s *sendOnlyNode[CHAIN_ID, RPC]) State() nodeState {
+ s.stateMu.RLock()
+ defer s.stateMu.RUnlock()
+ return s.state
+}
+
+// Name returns the node's unique identifier.
+func (s *sendOnlyNode[CHAIN_ID, RPC]) Name() string {
+ return s.name
+}
diff --git a/common/client/send_only_node_lifecycle.go b/common/client/send_only_node_lifecycle.go
new file mode 100644
index 00000000000..0f663eab30e
--- /dev/null
+++ b/common/client/send_only_node_lifecycle.go
@@ -0,0 +1,66 @@
+package client
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
+)
+
+// verifyLoop may only be triggered once, on Start, if initial chain ID check
+// fails.
+//
+// It will continue checking until success and then exit permanently.
+// verifyLoop may only be triggered once, on Start, if initial chain ID check
+// fails.
+//
+// It will continue checking until success and then exit permanently.
+//
+// Each iteration waits a redial-backoff interval (or exits on chStop), then
+// re-queries the RPC's chain ID. State transitions are performed inside
+// IfStarted so they cannot race past Close; if the node has already stopped,
+// the loop returns immediately. Metrics are only incremented when setState
+// reports a real state change.
+func (s *sendOnlyNode[CHAIN_ID, RPC]) verifyLoop() {
+ defer s.wg.Done()
+
+ backoff := utils.NewRedialBackoff()
+ for {
+ select {
+ case <-s.chStop:
+ return
+ case <-time.After(backoff.Duration()):
+ }
+ // NOTE(review): uses context.Background(), so an in-flight ChainID call
+ // is not cancelled by Close — consider a chStop-derived context; verify
+ // against the RPC client's timeout behavior.
+ chainID, err := s.rpc.ChainID(context.Background())
+ if err != nil {
+ ok := s.IfStarted(func() {
+ if changed := s.setState(nodeStateUnreachable); changed {
+ promPoolRPCNodeTransitionsToUnreachable.WithLabelValues(s.chainID.String(), s.name).Inc()
+ }
+ })
+ if !ok {
+ return
+ }
+ s.log.Errorw(fmt.Sprintf("Verify failed: %v", err), "err", err)
+ continue
+ } else if chainID.String() != s.chainID.String() {
+ ok := s.IfStarted(func() {
+ if changed := s.setState(nodeStateInvalidChainID); changed {
+ promPoolRPCNodeTransitionsToInvalidChainID.WithLabelValues(s.chainID.String(), s.name).Inc()
+ }
+ })
+ if !ok {
+ return
+ }
+ s.log.Errorf(
+ "sendonly rpc ChainID doesn't match local chain ID: RPC ID=%s, local ID=%s, node name=%s",
+ chainID.String(),
+ s.chainID.String(),
+ s.name,
+ )
+
+ continue
+ }
+ ok := s.IfStarted(func() {
+ if changed := s.setState(nodeStateAlive); changed {
+ promPoolRPCNodeTransitionsToAlive.WithLabelValues(s.chainID.String(), s.name).Inc()
+ }
+ })
+ if !ok {
+ return
+ }
+ // NOTE(review): reads s.state without stateMu (unlike State()); benign if
+ // no other writer is active at this point, but worth confirming.
+ s.log.Infow("Sendonly RPC Node is online", "nodeState", s.state)
+ return
+ }
+}
diff --git a/common/client/send_only_node_test.go b/common/client/send_only_node_test.go
new file mode 100644
index 00000000000..bfe55153656
--- /dev/null
+++ b/common/client/send_only_node_test.go
@@ -0,0 +1,139 @@
+package client
+
+import (
+ "fmt"
+ "net/url"
+ "testing"
+
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap"
+
+ "github.com/smartcontractkit/chainlink-relay/pkg/utils/tests"
+
+ "github.com/smartcontractkit/chainlink/v2/common/types"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+)
+
+// TestNewSendOnlyNode checks that the constructor wires name/chainID through
+// and that String() redacts the URL password.
+func TestNewSendOnlyNode(t *testing.T) {
+ t.Parallel()
+
+ urlFormat := "http://user:%s@testurl.com"
+ password := "pass"
+ u, err := url.Parse(fmt.Sprintf(urlFormat, password))
+ require.NoError(t, err)
+ // url.URL.Redacted replaces the password with "xxxxx".
+ redacted := fmt.Sprintf(urlFormat, "xxxxx")
+ lggr := logger.TestLogger(t)
+ name := "TestNewSendOnlyNode"
+ chainID := types.RandomID()
+ client := newMockSendOnlyClient[types.ID](t)
+
+ node := NewSendOnlyNode(lggr, *u, name, chainID, client)
+ assert.NotNil(t, node)
+
+ // Must contain name & url with redacted password
+ assert.Contains(t, node.String(), fmt.Sprintf("%s:%s", name, redacted))
+ assert.Equal(t, node.ConfiguredChainID(), chainID)
+}
+
+// TestStartSendOnlyNode exercises the Start state machine: dial failure,
+// skipped verification for chain ID zero, recovery from verification errors
+// and chain ID mismatches via verifyLoop, and the clean happy path.
+func TestStartSendOnlyNode(t *testing.T) {
+ t.Parallel()
+ t.Run("becomes unusable if initial dial fails", func(t *testing.T) {
+ t.Parallel()
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.WarnLevel)
+ client := newMockSendOnlyClient[types.ID](t)
+ client.On("Close").Once()
+ expectedError := errors.New("some http error")
+ client.On("DialHTTP").Return(expectedError).Once()
+ s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), types.RandomID(), client)
+
+ defer func() { assert.NoError(t, s.Close()) }()
+ err := s.Start(tests.Context(t))
+ // Start reports dial failure via state, not its error return.
+ require.NoError(t, err)
+
+ assert.Equal(t, nodeStateUnusable, s.State())
+ tests.RequireLogMessage(t, observedLogs, "Dial failed: SendOnly Node is unusable")
+ })
+ t.Run("Default ChainID(0) produces warn and skips checks", func(t *testing.T) {
+ t.Parallel()
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.WarnLevel)
+ client := newMockSendOnlyClient[types.ID](t)
+ client.On("Close").Once()
+ client.On("DialHTTP").Return(nil).Once()
+ // No "ChainID" expectation: verification must be skipped entirely.
+ s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), types.NewIDFromInt(0), client)
+
+ defer func() { assert.NoError(t, s.Close()) }()
+ err := s.Start(tests.Context(t))
+ require.NoError(t, err)
+
+ assert.Equal(t, nodeStateAlive, s.State())
+ tests.RequireLogMessage(t, observedLogs, "sendonly rpc ChainID verification skipped")
+ })
+ t.Run("Can recover from chainID verification failure", func(t *testing.T) {
+ t.Parallel()
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.WarnLevel)
+ client := newMockSendOnlyClient[types.ID](t)
+ client.On("Close").Once()
+ client.On("DialHTTP").Return(nil)
+ expectedError := errors.New("failed to get chain ID")
+ chainID := types.RandomID()
+ // First failuresCount calls error; the next call succeeds, letting
+ // verifyLoop transition the node to alive.
+ const failuresCount = 2
+ client.On("ChainID", mock.Anything).Return(types.RandomID(), expectedError).Times(failuresCount)
+ client.On("ChainID", mock.Anything).Return(chainID, nil)
+
+ s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), chainID, client)
+
+ defer func() { assert.NoError(t, s.Close()) }()
+ err := s.Start(tests.Context(t))
+ require.NoError(t, err)
+
+ assert.Equal(t, nodeStateUnreachable, s.State())
+ tests.AssertLogCountEventually(t, observedLogs, fmt.Sprintf("Verify failed: %v", expectedError), failuresCount)
+ tests.AssertEventually(t, func() bool {
+ return s.State() == nodeStateAlive
+ })
+ })
+ t.Run("Can recover from chainID mismatch", func(t *testing.T) {
+ t.Parallel()
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.WarnLevel)
+ client := newMockSendOnlyClient[types.ID](t)
+ client.On("Close").Once()
+ client.On("DialHTTP").Return(nil).Once()
+ configuredChainID := types.NewIDFromInt(11)
+ rpcChainID := types.NewIDFromInt(20)
+ // First failuresCount calls return the wrong chain ID; then the RPC
+ // starts agreeing with the configured ID and the node recovers.
+ const failuresCount = 2
+ client.On("ChainID", mock.Anything).Return(rpcChainID, nil).Times(failuresCount)
+ client.On("ChainID", mock.Anything).Return(configuredChainID, nil)
+ s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), configuredChainID, client)
+
+ defer func() { assert.NoError(t, s.Close()) }()
+ err := s.Start(tests.Context(t))
+ require.NoError(t, err)
+
+ assert.Equal(t, nodeStateInvalidChainID, s.State())
+ tests.AssertLogCountEventually(t, observedLogs, "sendonly rpc ChainID doesn't match local chain ID", failuresCount)
+ tests.AssertEventually(t, func() bool {
+ return s.State() == nodeStateAlive
+ })
+ })
+ t.Run("Start with Random ChainID", func(t *testing.T) {
+ t.Parallel()
+ lggr, observedLogs := logger.TestLoggerObserved(t, zap.WarnLevel)
+ client := newMockSendOnlyClient[types.ID](t)
+ client.On("Close").Once()
+ client.On("DialHTTP").Return(nil).Once()
+ configuredChainID := types.RandomID()
+ client.On("ChainID", mock.Anything).Return(configuredChainID, nil)
+ s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), configuredChainID, client)
+
+ defer func() { assert.NoError(t, s.Close()) }()
+ err := s.Start(tests.Context(t))
+ assert.NoError(t, err)
+ tests.AssertEventually(t, func() bool {
+ return s.State() == nodeStateAlive
+ })
+ assert.Equal(t, 0, observedLogs.Len()) // No warnings expected
+ })
+}
diff --git a/common/client/types.go b/common/client/types.go
new file mode 100644
index 00000000000..0e52f1db72c
--- /dev/null
+++ b/common/client/types.go
@@ -0,0 +1,139 @@
+package client
+
+import (
+ "context"
+ "math/big"
+
+ feetypes "github.com/smartcontractkit/chainlink/v2/common/fee/types"
+ "github.com/smartcontractkit/chainlink/v2/common/types"
+ "github.com/smartcontractkit/chainlink/v2/core/assets"
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
+)
+
+// RPC includes all the necessary methods for a multi-node client to interact directly with any RPC endpoint.
+// It is the union of NodeClient (lifecycle/health methods used by node
+// management) and clientAPI (the chain-facing RPC calls).
+//
+//go:generate mockery --quiet --name RPC --structname mockRPC --inpackage --filename "mock_rpc_test.go" --case=underscore
+type RPC[
+ CHAIN_ID types.ID,
+ SEQ types.Sequence,
+ ADDR types.Hashable,
+ BLOCK_HASH types.Hashable,
+ TX any,
+ TX_HASH types.Hashable,
+ EVENT any,
+ EVENT_OPS any,
+ TX_RECEIPT types.Receipt[TX_HASH, BLOCK_HASH],
+ FEE feetypes.Fee,
+ HEAD types.Head[BLOCK_HASH],
+
+] interface {
+ NodeClient[
+ CHAIN_ID,
+ HEAD,
+ ]
+ clientAPI[
+ CHAIN_ID,
+ SEQ,
+ ADDR,
+ BLOCK_HASH,
+ TX,
+ TX_HASH,
+ EVENT,
+ EVENT_OPS,
+ TX_RECEIPT,
+ FEE,
+ HEAD,
+ ]
+}
+
+// Head is the interface required by the NodeClient
+//
+//go:generate mockery --quiet --name Head --structname mockHead --filename "mock_head_test.go" --inpackage --case=underscore
+type Head interface {
+ // BlockNumber returns the head's block height.
+ BlockNumber() int64
+ // BlockDifficulty returns the head's difficulty (used e.g. by the
+ // total-difficulty node selection mode).
+ BlockDifficulty() *utils.Big
+}
+
+// NodeClient includes all the necessary RPC methods required by a node.
+// Beyond the shared connection methods it covers connection lifecycle
+// (DialHTTP, DisconnectAll, Close) and subscription bookkeeping used by the
+// node's alive loop.
+//
+//go:generate mockery --quiet --name NodeClient --structname mockNodeClient --filename "mock_node_client_test.go" --inpackage --case=underscore
+type NodeClient[
+ CHAIN_ID types.ID,
+ HEAD Head,
+] interface {
+ connection[CHAIN_ID, HEAD]
+
+ DialHTTP() error
+ DisconnectAll()
+ Close()
+ ClientVersion(context.Context) (string, error)
+ SubscribersCount() int32
+ SetAliveLoopSub(types.Subscription)
+ UnsubscribeAllExceptAliveLoop()
+}
+
+// clientAPI includes all the direct RPC methods required by the generalized common client to implement its own.
+// Methods are grouped by concern: accounts, transactions, blocks, events, and
+// low-level call helpers.
+type clientAPI[
+ CHAIN_ID types.ID,
+ SEQ types.Sequence,
+ ADDR types.Hashable,
+ BLOCK_HASH types.Hashable,
+ TX any,
+ TX_HASH types.Hashable,
+ EVENT any,
+ EVENT_OPS any, // event filter query options
+ TX_RECEIPT types.Receipt[TX_HASH, BLOCK_HASH],
+ FEE feetypes.Fee,
+ HEAD types.Head[BLOCK_HASH],
+] interface {
+ connection[CHAIN_ID, HEAD]
+
+ // Account
+ BalanceAt(ctx context.Context, accountAddress ADDR, blockNumber *big.Int) (*big.Int, error)
+ TokenBalance(ctx context.Context, accountAddress ADDR, tokenAddress ADDR) (*big.Int, error)
+ SequenceAt(ctx context.Context, accountAddress ADDR, blockNumber *big.Int) (SEQ, error)
+ LINKBalance(ctx context.Context, accountAddress ADDR, linkAddress ADDR) (*assets.Link, error)
+ PendingSequenceAt(ctx context.Context, addr ADDR) (SEQ, error)
+ EstimateGas(ctx context.Context, call any) (gas uint64, err error)
+
+ // Transactions
+ SendTransaction(ctx context.Context, tx TX) error
+ SimulateTransaction(ctx context.Context, tx TX) error
+ TransactionByHash(ctx context.Context, txHash TX_HASH) (TX, error)
+ TransactionReceipt(ctx context.Context, txHash TX_HASH) (TX_RECEIPT, error)
+ // SendEmptyTransaction builds (via newTxAttempt) and sends a zero-value
+ // transaction, returning the resulting tx hash as a string.
+ SendEmptyTransaction(
+ ctx context.Context,
+ newTxAttempt func(seq SEQ, feeLimit uint32, fee FEE, fromAddress ADDR) (attempt any, err error),
+ seq SEQ,
+ gasLimit uint32,
+ fee FEE,
+ fromAddress ADDR,
+ ) (txhash string, err error)
+
+ // Blocks
+ BlockByNumber(ctx context.Context, number *big.Int) (HEAD, error)
+ BlockByHash(ctx context.Context, hash BLOCK_HASH) (HEAD, error)
+ LatestBlockHeight(context.Context) (*big.Int, error)
+
+ // Events
+ FilterEvents(ctx context.Context, query EVENT_OPS) ([]EVENT, error)
+
+ // Misc
+ BatchCallContext(ctx context.Context, b []any) error
+ // CallContract executes a read-only contract call; rpcErr carries the
+ // raw RPC error payload, extractErr any error extracting it.
+ CallContract(
+ ctx context.Context,
+ msg interface{},
+ blockNumber *big.Int,
+ ) (rpcErr []byte, extractErr error)
+ CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error
+ CodeAt(ctx context.Context, account ADDR, blockNumber *big.Int) ([]byte, error)
+}
+
+// connection is the minimal shared surface between NodeClient and clientAPI:
+// dialing, chain ID lookup, and new-head subscription.
+type connection[
+ CHAIN_ID types.ID,
+ HEAD Head,
+] interface {
+ ChainID(ctx context.Context) (CHAIN_ID, error)
+ Dial(ctx context.Context) error
+ Subscribe(ctx context.Context, channel chan<- HEAD, args ...interface{}) (types.Subscription, error)
+}
diff --git a/common/headtracker/head_tracker.go b/common/headtracker/head_tracker.go
index c24dde595cf..54262dd93f7 100644
--- a/common/headtracker/head_tracker.go
+++ b/common/headtracker/head_tracker.go
@@ -197,6 +197,9 @@ func (ht *HeadTracker[HTH, S, ID, BLOCK_HASH]) handleNewHead(ctx context.Context
"blockHeight", head.BlockNumber(),
"blockHash", head.BlockHash(),
"parentHeadHash", head.GetParentHash(),
+ "blockTs", head.GetTimestamp(),
+ "blockTsUnix", head.GetTimestamp().Unix(),
+ "blockDifficulty", head.BlockDifficulty(),
)
err := ht.headSaver.Save(ctx, head)
diff --git a/common/headtracker/types/mocks/head.go b/common/headtracker/types/mocks/head.go
index edda18d57e8..1de1f78de8c 100644
--- a/common/headtracker/types/mocks/head.go
+++ b/common/headtracker/types/mocks/head.go
@@ -3,8 +3,13 @@
package mocks
import (
- types "github.com/smartcontractkit/chainlink/v2/common/types"
+ time "time"
+
mock "github.com/stretchr/testify/mock"
+
+ types "github.com/smartcontractkit/chainlink/v2/common/types"
+
+ utils "github.com/smartcontractkit/chainlink/v2/core/utils"
)
// Head is an autogenerated mock type for the Head type
@@ -12,6 +17,22 @@ type Head[BLOCK_HASH types.Hashable, CHAIN_ID types.ID] struct {
mock.Mock
}
+// BlockDifficulty provides a mock function with given fields:
+func (_m *Head[BLOCK_HASH, CHAIN_ID]) BlockDifficulty() *utils.Big {
+ ret := _m.Called()
+
+ var r0 *utils.Big
+ if rf, ok := ret.Get(0).(func() *utils.Big); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*utils.Big)
+ }
+ }
+
+ return r0
+}
+
// BlockHash provides a mock function with given fields:
func (_m *Head[BLOCK_HASH, CHAIN_ID]) BlockHash() BLOCK_HASH {
ret := _m.Called()
@@ -114,6 +135,20 @@ func (_m *Head[BLOCK_HASH, CHAIN_ID]) GetParentHash() BLOCK_HASH {
return r0
}
+// GetTimestamp provides a mock function with given fields:
+func (_m *Head[BLOCK_HASH, CHAIN_ID]) GetTimestamp() time.Time {
+ ret := _m.Called()
+
+ var r0 time.Time
+ if rf, ok := ret.Get(0).(func() time.Time); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(time.Time)
+ }
+
+ return r0
+}
+
// HasChainID provides a mock function with given fields:
func (_m *Head[BLOCK_HASH, CHAIN_ID]) HasChainID() bool {
ret := _m.Called()
diff --git a/common/txmgr/broadcaster.go b/common/txmgr/broadcaster.go
index 011866bf39d..00522abf229 100644
--- a/common/txmgr/broadcaster.go
+++ b/common/txmgr/broadcaster.go
@@ -14,14 +14,13 @@ import (
"go.uber.org/multierr"
"gopkg.in/guregu/null.v4"
+ "github.com/smartcontractkit/chainlink-relay/pkg/chains/label"
"github.com/smartcontractkit/chainlink-relay/pkg/services"
- clienttypes "github.com/smartcontractkit/chainlink/v2/common/chains/client"
+ "github.com/smartcontractkit/chainlink/v2/common/client"
feetypes "github.com/smartcontractkit/chainlink/v2/common/fee/types"
txmgrtypes "github.com/smartcontractkit/chainlink/v2/common/txmgr/types"
"github.com/smartcontractkit/chainlink/v2/common/types"
- "github.com/smartcontractkit/chainlink/v2/core/chains/evm/label"
"github.com/smartcontractkit/chainlink/v2/core/logger"
- "github.com/smartcontractkit/chainlink/v2/core/services/pg"
"github.com/smartcontractkit/chainlink/v2/core/utils"
)
@@ -123,8 +122,6 @@ type Broadcaster[
// when Start is called
autoSyncSequence bool
- txInsertListener pg.Subscription
- eventBroadcaster pg.EventBroadcaster
processUnstartedTxsImpl ProcessUnstartedTxs[ADDR]
ks txmgrtypes.KeyStore[ADDR, CHAIN_ID, SEQ]
@@ -143,8 +140,6 @@ type Broadcaster[
initSync sync.Mutex
isStarted bool
- parseAddr func(string) (ADDR, error)
-
sequenceLock sync.RWMutex
nextSequenceMap map[ADDR]SEQ
generateNextSequence types.GenerateNextSequenceFunc[SEQ]
@@ -166,13 +161,11 @@ func NewBroadcaster[
txConfig txmgrtypes.BroadcasterTransactionsConfig,
listenerConfig txmgrtypes.BroadcasterListenerConfig,
keystore txmgrtypes.KeyStore[ADDR, CHAIN_ID, SEQ],
- eventBroadcaster pg.EventBroadcaster,
txAttemptBuilder txmgrtypes.TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE],
sequenceSyncer SequenceSyncer[ADDR, TX_HASH, BLOCK_HASH, SEQ],
logger logger.Logger,
checkerFactory TransmitCheckerFactory[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE],
autoSyncSequence bool,
- parseAddress func(string) (ADDR, error),
generateNextSequence types.GenerateNextSequenceFunc[SEQ],
) *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] {
logger = logger.Named("Broadcaster")
@@ -187,11 +180,9 @@ func NewBroadcaster[
feeConfig: feeConfig,
txConfig: txConfig,
listenerConfig: listenerConfig,
- eventBroadcaster: eventBroadcaster,
ks: keystore,
checkerFactory: checkerFactory,
autoSyncSequence: autoSyncSequence,
- parseAddr: parseAddress,
}
b.processUnstartedTxsImpl = b.processUnstartedTxs
@@ -215,10 +206,6 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) star
return errors.New("Broadcaster is already started")
}
var err error
- eb.txInsertListener, err = eb.eventBroadcaster.Subscribe(pg.ChannelInsertOnTx, "")
- if err != nil {
- return errors.Wrap(err, "Broadcaster could not start")
- }
eb.enabledAddresses, err = eb.ks.EnabledAddressesForChain(eb.chainID)
if err != nil {
return errors.Wrap(err, "Broadcaster: failed to load EnabledAddressesForChain")
@@ -239,9 +226,6 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) star
go eb.monitorTxs(addr, triggerCh)
}
- eb.wg.Add(1)
- go eb.txInsertTriggerer()
-
eb.sequenceLock.Lock()
defer eb.sequenceLock.Unlock()
eb.nextSequenceMap, err = eb.loadNextSequenceMap(eb.enabledAddresses)
@@ -266,9 +250,6 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) clos
if !eb.isStarted {
return errors.Wrap(utils.ErrAlreadyStopped, "Broadcaster is not started")
}
- if eb.txInsertListener != nil {
- eb.txInsertListener.Close()
- }
close(eb.chStop)
eb.wg.Wait()
eb.isStarted = false
@@ -305,27 +286,6 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Trig
}
}
-func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) txInsertTriggerer() {
- defer eb.wg.Done()
- for {
- select {
- case ev, ok := <-eb.txInsertListener.Events():
- if !ok {
- eb.logger.Debug("txInsertListener channel closed, exiting trigger loop")
- return
- }
- addr, err := eb.parseAddr(ev.Payload)
- if err != nil {
- eb.logger.Errorw("failed to parse address in trigger", "err", err)
- continue
- }
- eb.Trigger(addr)
- case <-eb.chStop:
- return
- }
- }
-}
-
// Load the next sequence map using the tx table or on-chain (if not found in tx table)
func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) loadNextSequenceMap(addresses []ADDR) (map[ADDR]SEQ, error) {
ctx, cancel := eb.chStop.NewCtx()
@@ -593,19 +553,19 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) hand
lgr.Infow("Sending transaction", "txAttemptID", attempt.ID, "txHash", attempt.Hash, "err", err, "meta", etx.Meta, "feeLimit", etx.FeeLimit, "attempt", attempt, "etx", etx)
errType, err := eb.client.SendTransactionReturnCode(ctx, etx, attempt, lgr)
- if errType != clienttypes.Fatal {
+ if errType != client.Fatal {
etx.InitialBroadcastAt = &initialBroadcastAt
etx.BroadcastAt = &initialBroadcastAt
}
switch errType {
- case clienttypes.Fatal:
+ case client.Fatal:
eb.SvcErrBuffer.Append(err)
etx.Error = null.StringFrom(err.Error())
return eb.saveFatallyErroredTransaction(lgr, &etx), true
- case clienttypes.TransactionAlreadyKnown:
+ case client.TransactionAlreadyKnown:
fallthrough
- case clienttypes.Successful:
+ case client.Successful:
// Either the transaction was successful or one of the following four scenarios happened:
//
// SCENARIO 1
@@ -658,9 +618,9 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) hand
// Increment sequence if successfully broadcasted
eb.IncrementNextSequence(etx.FromAddress, sequence)
return err, true
- case clienttypes.Underpriced:
+ case client.Underpriced:
return eb.tryAgainBumpingGas(ctx, lgr, err, etx, attempt, initialBroadcastAt)
- case clienttypes.InsufficientFunds:
+ case client.InsufficientFunds:
// NOTE: This bails out of the entire cycle and essentially "blocks" on
// any transaction that gets insufficient_funds. This is OK if a
// transaction with a large VALUE blocks because this always comes last
@@ -670,13 +630,13 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) hand
// theoretically be sent, but will instead be blocked.
eb.SvcErrBuffer.Append(err)
fallthrough
- case clienttypes.Retryable:
+ case client.Retryable:
return err, true
- case clienttypes.FeeOutOfValidRange:
+ case client.FeeOutOfValidRange:
return eb.tryAgainWithNewEstimation(ctx, lgr, err, etx, attempt, initialBroadcastAt)
- case clienttypes.Unsupported:
+ case client.Unsupported:
return err, false
- case clienttypes.ExceedsMaxFee:
+ case client.ExceedsMaxFee:
// Broadcaster: Note that we may have broadcast to multiple nodes and had it
// accepted by one of them! It is not guaranteed that all nodes share
// the same tx fee cap. That is why we must treat this as an unknown
@@ -689,7 +649,7 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) hand
default:
// Every error that doesn't fall under one of the above categories will be treated as Unknown.
fallthrough
- case clienttypes.Unknown:
+ case client.Unknown:
eb.SvcErrBuffer.Append(err)
lgr.Criticalw(`Unknown error occurred while handling tx queue in ProcessUnstartedTxs. This chain/RPC client may not be supported. `+
`Urgent resolution required, Chainlink is currently operating in a degraded state and may miss transactions`, "err", err, "etx", etx, "attempt", attempt)
@@ -815,12 +775,17 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) save
// Now we have an errored pipeline even though the tx succeeded. This case
// is relatively benign and probably nobody will ever run into it in
// practice, but something to be aware of.
- if etx.PipelineTaskRunID.Valid && eb.resumeCallback != nil {
+ if etx.PipelineTaskRunID.Valid && eb.resumeCallback != nil && etx.SignalCallback {
err := eb.resumeCallback(etx.PipelineTaskRunID.UUID, nil, errors.Errorf("fatal error while sending transaction: %s", etx.Error.String))
if errors.Is(err, sql.ErrNoRows) {
lgr.Debugw("callback missing or already resumed", "etxID", etx.ID)
} else if err != nil {
return errors.Wrap(err, "failed to resume pipeline")
+ } else {
+ // Mark tx as having completed callback
+ if err := eb.txStore.UpdateTxCallbackCompleted(ctx, etx.PipelineTaskRunID.UUID, eb.chainID); err != nil {
+ return err
+ }
}
}
return eb.txStore.UpdateTxFatalError(ctx, etx)
diff --git a/common/txmgr/confirmer.go b/common/txmgr/confirmer.go
index c22a1594570..afb2b3003a1 100644
--- a/common/txmgr/confirmer.go
+++ b/common/txmgr/confirmer.go
@@ -14,13 +14,13 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
"go.uber.org/multierr"
+ "github.com/smartcontractkit/chainlink-relay/pkg/chains/label"
"github.com/smartcontractkit/chainlink-relay/pkg/services"
- clienttypes "github.com/smartcontractkit/chainlink/v2/common/chains/client"
+ "github.com/smartcontractkit/chainlink/v2/common/client"
commonfee "github.com/smartcontractkit/chainlink/v2/common/fee"
feetypes "github.com/smartcontractkit/chainlink/v2/common/fee/types"
txmgrtypes "github.com/smartcontractkit/chainlink/v2/common/txmgr/types"
"github.com/smartcontractkit/chainlink/v2/common/types"
- "github.com/smartcontractkit/chainlink/v2/core/chains/evm/label"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/utils"
)
@@ -362,7 +362,7 @@ func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Che
for idx, txErr := range txErrs {
// Add to Unconfirm array, all tx where error wasn't TransactionAlreadyKnown.
if txErr != nil {
- if txCodes[idx] == clienttypes.TransactionAlreadyKnown {
+ if txCodes[idx] == client.TransactionAlreadyKnown {
continue
}
}
@@ -819,7 +819,7 @@ func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) han
errType, sendError := ec.client.SendTransactionReturnCode(ctx, etx, attempt, lggr)
switch errType {
- case clienttypes.Underpriced:
+ case client.Underpriced:
// This should really not ever happen in normal operation since we
// already bumped above the required minimum in broadcaster.
ec.lggr.Warnw("Got terminally underpriced error for gas bump, this should never happen unless the remote RPC node changed its configuration on the fly, or you are using multiple RPC nodes with different minimum gas price requirements. This is not recommended", "err", sendError, "attempt", attempt)
@@ -854,12 +854,12 @@ func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) han
return errors.Wrap(err, "saveReplacementInProgressAttempt failed")
}
return ec.handleInProgressAttempt(ctx, lggr, etx, replacementAttempt, blockHeight)
- case clienttypes.ExceedsMaxFee:
+ case client.ExceedsMaxFee:
// Confirmer: The gas price was bumped too high. This transaction attempt cannot be accepted.
// Best thing we can do is to re-send the previous attempt at the old
// price and discard this bumped version.
fallthrough
- case clienttypes.Fatal:
+ case client.Fatal:
// WARNING: This should never happen!
// Should NEVER be fatal this is an invariant violation. The
// Broadcaster can never create a TxAttempt that will
@@ -874,20 +874,20 @@ func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) han
ec.SvcErrBuffer.Append(sendError)
// This will loop continuously on every new head so it must be handled manually by the node operator!
return ec.txStore.DeleteInProgressAttempt(ctx, attempt)
- case clienttypes.TransactionAlreadyKnown:
+ case client.TransactionAlreadyKnown:
// Sequence too low indicated that a transaction at this sequence was confirmed already.
// Mark confirmed_missing_receipt and wait for the next cycle to try to get a receipt
lggr.Debugw("Sequence already used", "txAttemptID", attempt.ID, "txHash", attempt.Hash.String(), "err", sendError)
timeout := ec.dbConfig.DefaultQueryTimeout()
return ec.txStore.SaveConfirmedMissingReceiptAttempt(ctx, timeout, &attempt, now)
- case clienttypes.InsufficientFunds:
+ case client.InsufficientFunds:
timeout := ec.dbConfig.DefaultQueryTimeout()
return ec.txStore.SaveInsufficientFundsAttempt(ctx, timeout, &attempt, now)
- case clienttypes.Successful:
+ case client.Successful:
lggr.Debugw("Successfully broadcast transaction", "txAttemptID", attempt.ID, "txHash", attempt.Hash.String())
timeout := ec.dbConfig.DefaultQueryTimeout()
return ec.txStore.SaveSentAttempt(ctx, timeout, &attempt, now)
- case clienttypes.Unknown:
+ case client.Unknown:
// Every error that doesn't fall under one of the above categories will be treated as Unknown.
fallthrough
default:
@@ -1058,7 +1058,7 @@ func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) For
}
attempt.Tx = *etx // for logging
ec.lggr.Debugw("Sending transaction", "txAttemptID", attempt.ID, "txHash", attempt.Hash, "err", err, "meta", etx.Meta, "feeLimit", etx.FeeLimit, "attempt", attempt)
- if errCode, err := ec.client.SendTransactionReturnCode(context.TODO(), *etx, attempt, ec.lggr); errCode != clienttypes.Successful && err != nil {
+ if errCode, err := ec.client.SendTransactionReturnCode(context.TODO(), *etx, attempt, ec.lggr); errCode != client.Successful && err != nil {
ec.lggr.Errorw(fmt.Sprintf("ForceRebroadcast: failed to rebroadcast tx %v with sequence %v and gas limit %v: %s", etx.ID, *etx.Sequence, etx.FeeLimit, err.Error()), "err", err, "fee", attempt.TxFee)
continue
}
@@ -1083,7 +1083,7 @@ func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) sen
// ResumePendingTaskRuns issues callbacks to task runs that are pending waiting for receipts
func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) ResumePendingTaskRuns(ctx context.Context, head types.Head[BLOCK_HASH]) error {
- receiptsPlus, err := ec.txStore.FindReceiptsPendingConfirmation(ctx, head.BlockNumber(), ec.chainID)
+ receiptsPlus, err := ec.txStore.FindTxesPendingCallback(ctx, head.BlockNumber(), ec.chainID)
if err != nil {
return err
@@ -1105,6 +1105,10 @@ func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Res
ec.lggr.Debugw("Callback: resuming tx with receipt", "output", output, "taskErr", taskErr, "pipelineTaskRunID", data.ID)
if err := ec.resumeCallback(data.ID, output, taskErr); err != nil {
+ return fmt.Errorf("failed to resume suspended pipeline run: %w", err)
+ }
+ // Mark tx as having completed callback
+ if err := ec.txStore.UpdateTxCallbackCompleted(ctx, data.ID, ec.chainID); err != nil {
return err
}
}
diff --git a/common/txmgr/mocks/tx_manager.go b/common/txmgr/mocks/tx_manager.go
index c01f182c9bd..89abf1dea51 100644
--- a/common/txmgr/mocks/tx_manager.go
+++ b/common/txmgr/mocks/tx_manager.go
@@ -59,6 +59,110 @@ func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Create
return r0, r1
}
+// FindTxesByMetaFieldAndStates provides a mock function with given fields: ctx, metaField, metaValue, states, chainID
+func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) FindTxesByMetaFieldAndStates(ctx context.Context, metaField string, metaValue string, states []txmgrtypes.TxState, chainID *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) {
+ ret := _m.Called(ctx, metaField, metaValue, states, chainID)
+
+ var r0 []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, []txmgrtypes.TxState, *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok {
+ return rf(ctx, metaField, metaValue, states, chainID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, []txmgrtypes.TxState, *big.Int) []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
+ r0 = rf(ctx, metaField, metaValue, states, chainID)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, string, []txmgrtypes.TxState, *big.Int) error); ok {
+ r1 = rf(ctx, metaField, metaValue, states, chainID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// FindTxesWithAttemptsAndReceiptsByIdsAndState provides a mock function with given fields: ctx, ids, states, chainID
+func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) FindTxesWithAttemptsAndReceiptsByIdsAndState(ctx context.Context, ids []big.Int, states []txmgrtypes.TxState, chainID *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) {
+ ret := _m.Called(ctx, ids, states, chainID)
+
+ var r0 []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, []big.Int, []txmgrtypes.TxState, *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok {
+ return rf(ctx, ids, states, chainID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, []big.Int, []txmgrtypes.TxState, *big.Int) []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
+ r0 = rf(ctx, ids, states, chainID)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, []big.Int, []txmgrtypes.TxState, *big.Int) error); ok {
+ r1 = rf(ctx, ids, states, chainID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// FindTxesWithMetaFieldByReceiptBlockNum provides a mock function with given fields: ctx, metaField, blockNum, chainID
+func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) FindTxesWithMetaFieldByReceiptBlockNum(ctx context.Context, metaField string, blockNum int64, chainID *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) {
+ ret := _m.Called(ctx, metaField, blockNum, chainID)
+
+ var r0 []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, int64, *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok {
+ return rf(ctx, metaField, blockNum, chainID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, int64, *big.Int) []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
+ r0 = rf(ctx, metaField, blockNum, chainID)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, int64, *big.Int) error); ok {
+ r1 = rf(ctx, metaField, blockNum, chainID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// FindTxesWithMetaFieldByStates provides a mock function with given fields: ctx, metaField, states, chainID
+func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) FindTxesWithMetaFieldByStates(ctx context.Context, metaField string, states []txmgrtypes.TxState, chainID *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) {
+ ret := _m.Called(ctx, metaField, states, chainID)
+
+ var r0 []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, []txmgrtypes.TxState, *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok {
+ return rf(ctx, metaField, states, chainID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, []txmgrtypes.TxState, *big.Int) []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
+ r0 = rf(ctx, metaField, states, chainID)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, []txmgrtypes.TxState, *big.Int) error); ok {
+ r1 = rf(ctx, metaField, states, chainID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
// GetForwarderForEOA provides a mock function with given fields: eoa
func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) GetForwarderForEOA(eoa ADDR) (ADDR, error) {
ret := _m.Called(eoa)
diff --git a/common/txmgr/resender.go b/common/txmgr/resender.go
index 655de0f1135..75781c08407 100644
--- a/common/txmgr/resender.go
+++ b/common/txmgr/resender.go
@@ -6,11 +6,11 @@ import (
"fmt"
"time"
- clienttypes "github.com/smartcontractkit/chainlink/v2/common/chains/client"
+ "github.com/smartcontractkit/chainlink-relay/pkg/chains/label"
+ "github.com/smartcontractkit/chainlink/v2/common/client"
feetypes "github.com/smartcontractkit/chainlink/v2/common/fee/types"
txmgrtypes "github.com/smartcontractkit/chainlink/v2/common/txmgr/types"
"github.com/smartcontractkit/chainlink/v2/common/types"
- "github.com/smartcontractkit/chainlink/v2/core/chains/evm/label"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/utils"
)
@@ -175,13 +175,13 @@ func (er *Resender[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) resendUnconfi
return nil
}
-func logResendResult(lggr logger.Logger, codes []clienttypes.SendTxReturnCode) {
+func logResendResult(lggr logger.Logger, codes []client.SendTxReturnCode) {
var nNew int
var nFatal int
for _, c := range codes {
- if c == clienttypes.Successful {
+ if c == client.Successful {
nNew++
- } else if c == clienttypes.Fatal {
+ } else if c == client.Fatal {
nFatal++
}
}
diff --git a/common/txmgr/txmgr.go b/common/txmgr/txmgr.go
index 0c7117afab0..5b7afd32242 100644
--- a/common/txmgr/txmgr.go
+++ b/common/txmgr/txmgr.go
@@ -10,7 +10,6 @@ import (
"time"
"github.com/google/uuid"
- pkgerrors "github.com/pkg/errors"
"github.com/smartcontractkit/chainlink-relay/pkg/services"
@@ -48,6 +47,14 @@ type TxManager[
RegisterResumeCallback(fn ResumeCallback)
SendNativeToken(ctx context.Context, chainID CHAIN_ID, from, to ADDR, value big.Int, gasLimit uint32) (etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error)
Reset(addr ADDR, abandon bool) error
+ // Find transactions by a field in the TxMeta blob and transaction states
+ FindTxesByMetaFieldAndStates(ctx context.Context, metaField string, metaValue string, states []txmgrtypes.TxState, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error)
+ // Find transactions with a non-null TxMeta field that was provided by transaction states
+ FindTxesWithMetaFieldByStates(ctx context.Context, metaField string, states []txmgrtypes.TxState, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error)
+ // Find transactions with a non-null TxMeta field that was provided and a receipt block number greater than or equal to the one provided
+ FindTxesWithMetaFieldByReceiptBlockNum(ctx context.Context, metaField string, blockNum int64, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error)
+ // Find transactions loaded with transaction attempts and receipts by transaction IDs and states
+ FindTxesWithAttemptsAndReceiptsByIdsAndState(ctx context.Context, ids []big.Int, states []txmgrtypes.TxState, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error)
}
type reset struct {
@@ -166,14 +173,14 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Start(ctx
return b.StartOnce("Txm", func() error {
var ms services.MultiStart
if err := ms.Start(ctx, b.broadcaster); err != nil {
- return pkgerrors.Wrap(err, "Txm: Broadcaster failed to start")
+ return fmt.Errorf("Txm: Broadcaster failed to start: %w", err)
}
if err := ms.Start(ctx, b.confirmer); err != nil {
- return pkgerrors.Wrap(err, "Txm: Confirmer failed to start")
+ return fmt.Errorf("Txm: Confirmer failed to start: %w", err)
}
if err := ms.Start(ctx, b.txAttemptBuilder); err != nil {
- return pkgerrors.Wrap(err, "Txm: Estimator failed to start")
+ return fmt.Errorf("Txm: Estimator failed to start: %w", err)
}
b.wg.Add(1)
@@ -190,7 +197,7 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Start(ctx
if b.fwdMgr != nil {
if err := ms.Start(ctx, b.fwdMgr); err != nil {
- return pkgerrors.Wrap(err, "Txm: ForwarderManager failed to start")
+ return fmt.Errorf("Txm: ForwarderManager failed to start: %w", err)
}
}
@@ -223,8 +230,10 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Reset(addr
func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) abandon(addr ADDR) (err error) {
ctx, cancel := utils.StopChan(b.chStop).NewCtx()
defer cancel()
- err = b.txStore.Abandon(ctx, b.chainID, addr)
- return pkgerrors.Wrapf(err, "abandon failed to update txes for key %s", addr.String())
+ if err = b.txStore.Abandon(ctx, b.chainID, addr); err != nil {
+ return fmt.Errorf("abandon failed to update txes for key %s: %w", addr.String(), err)
+ }
+ return nil
}
func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Close() (merr error) {
@@ -241,14 +250,14 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Close() (m
}
if b.fwdMgr != nil {
if err := b.fwdMgr.Close(); err != nil {
- merr = errors.Join(merr, pkgerrors.Wrap(err, "Txm: failed to stop ForwarderManager"))
+ merr = errors.Join(merr, fmt.Errorf("Txm: failed to stop ForwarderManager: %w", err))
}
}
b.wg.Wait()
if err := b.txAttemptBuilder.Close(); err != nil {
- merr = errors.Join(merr, pkgerrors.Wrap(err, "Txm: failed to close TxAttemptBuilder"))
+ merr = errors.Join(merr, fmt.Errorf("Txm: failed to close TxAttemptBuilder: %w", err))
}
return nil
@@ -444,7 +453,7 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) CreateTran
var existingTx *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
existingTx, err = b.txStore.FindTxWithIdempotencyKey(ctx, *txRequest.IdempotencyKey, b.chainID)
if err != nil && !errors.Is(err, sql.ErrNoRows) {
- return tx, pkgerrors.Wrap(err, "Failed to search for transaction with IdempotencyKey")
+ return tx, fmt.Errorf("Failed to search for transaction with IdempotencyKey: %w", err)
}
if existingTx != nil {
b.logger.Infow("Found a Tx with IdempotencyKey. Returning existing Tx without creating a new one.", "IdempotencyKey", *txRequest.IdempotencyKey)
@@ -470,31 +479,40 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) CreateTran
txRequest.ToAddress = txRequest.ForwarderAddress
txRequest.EncodedPayload = fwdPayload
} else {
- b.logger.Errorf("Failed to use forwarder set upstream: %s", fwdErr.Error())
+ b.logger.Errorf("Failed to use forwarder set upstream: %w", fwdErr.Error())
}
}
err = b.txStore.CheckTxQueueCapacity(ctx, txRequest.FromAddress, b.txConfig.MaxQueued(), b.chainID)
if err != nil {
- return tx, pkgerrors.Wrap(err, "Txm#CreateTransaction")
+ return tx, fmt.Errorf("Txm#CreateTransaction: %w", err)
}
tx, err = b.txStore.CreateTransaction(ctx, txRequest, b.chainID)
- return
+ if err != nil {
+ return tx, err
+ }
+
+ // Trigger the Broadcaster to check for new transaction
+ b.broadcaster.Trigger(txRequest.FromAddress)
+
+ return tx, nil
}
// Calls forwarderMgr to get a proper forwarder for a given EOA.
func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) GetForwarderForEOA(eoa ADDR) (forwarder ADDR, err error) {
if !b.txConfig.ForwardersEnabled() {
- return forwarder, pkgerrors.Errorf("Forwarding is not enabled, to enable set Transactions.ForwardersEnabled =true")
+ return forwarder, fmt.Errorf("forwarding is not enabled, to enable set Transactions.ForwardersEnabled =true")
}
forwarder, err = b.fwdMgr.ForwarderFor(eoa)
return
}
func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) checkEnabled(addr ADDR) error {
- err := b.keyStore.CheckEnabled(addr, b.chainID)
- return pkgerrors.Wrapf(err, "cannot send transaction from %s on chain ID %s", addr, b.chainID.String())
+ if err := b.keyStore.CheckEnabled(addr, b.chainID); err != nil {
+ return fmt.Errorf("cannot send transaction from %s on chain ID %s: %w", addr, b.chainID.String(), err)
+ }
+ return nil
}
// SendNativeToken creates a transaction that transfers the given value of native tokens
@@ -511,7 +529,33 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) SendNative
Strategy: NewSendEveryStrategy(),
}
etx, err = b.txStore.CreateTransaction(ctx, txRequest, chainID)
- return etx, pkgerrors.Wrap(err, "SendNativeToken failed to insert tx")
+ if err != nil {
+ return etx, fmt.Errorf("SendNativeToken failed to insert tx: %w", err)
+ }
+
+ // Trigger the Broadcaster to check for new transaction
+ b.broadcaster.Trigger(from)
+ return etx, nil
+}
+
+func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxesByMetaFieldAndStates(ctx context.Context, metaField string, metaValue string, states []txmgrtypes.TxState, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) {
+ txes, err = b.txStore.FindTxesByMetaFieldAndStates(ctx, metaField, metaValue, states, chainID)
+ return
+}
+
+func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxesWithMetaFieldByStates(ctx context.Context, metaField string, states []txmgrtypes.TxState, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) {
+ txes, err = b.txStore.FindTxesWithMetaFieldByStates(ctx, metaField, states, chainID)
+ return
+}
+
+func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxesWithMetaFieldByReceiptBlockNum(ctx context.Context, metaField string, blockNum int64, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) {
+ txes, err = b.txStore.FindTxesWithMetaFieldByReceiptBlockNum(ctx, metaField, blockNum, chainID)
+ return
+}
+
+func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxesWithAttemptsAndReceiptsByIdsAndState(ctx context.Context, ids []big.Int, states []txmgrtypes.TxState, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) {
+ txes, err = b.txStore.FindTxesWithAttemptsAndReceiptsByIdsAndState(ctx, ids, states, chainID)
+ return
}
type NullTxManager[
@@ -568,3 +612,15 @@ func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Hea
}
func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) RegisterResumeCallback(fn ResumeCallback) {
}
+func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) FindTxesByMetaFieldAndStates(ctx context.Context, metaField string, metaValue string, states []txmgrtypes.TxState, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) {
+ return txes, errors.New(n.ErrMsg)
+}
+func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) FindTxesWithMetaFieldByStates(ctx context.Context, metaField string, states []txmgrtypes.TxState, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) {
+ return txes, errors.New(n.ErrMsg)
+}
+func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) FindTxesWithMetaFieldByReceiptBlockNum(ctx context.Context, metaField string, blockNum int64, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) {
+ return txes, errors.New(n.ErrMsg)
+}
+func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) FindTxesWithAttemptsAndReceiptsByIdsAndState(ctx context.Context, ids []big.Int, states []txmgrtypes.TxState, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) {
+ return txes, errors.New(n.ErrMsg)
+}
diff --git a/common/txmgr/types/client.go b/common/txmgr/types/client.go
index 6d7f1c55558..58c1b6f6ad2 100644
--- a/common/txmgr/types/client.go
+++ b/common/txmgr/types/client.go
@@ -6,7 +6,7 @@ import (
"math/big"
"time"
- clienttypes "github.com/smartcontractkit/chainlink/v2/common/chains/client"
+ "github.com/smartcontractkit/chainlink/v2/common/client"
feetypes "github.com/smartcontractkit/chainlink/v2/common/fee/types"
"github.com/smartcontractkit/chainlink/v2/common/types"
"github.com/smartcontractkit/chainlink/v2/core/logger"
@@ -49,7 +49,7 @@ type TransactionClient[
bathSize int,
lggr logger.Logger,
) (
- txCodes []clienttypes.SendTxReturnCode,
+ txCodes []client.SendTxReturnCode,
txErrs []error,
broadcastTime time.Time,
successfulTxIDs []int64,
@@ -59,7 +59,7 @@ type TransactionClient[
tx Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE],
attempt TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE],
lggr logger.Logger,
- ) (clienttypes.SendTxReturnCode, error)
+ ) (client.SendTxReturnCode, error)
SendEmptyTransaction(
ctx context.Context,
newTxAttempt func(seq SEQ, feeLimit uint32, fee FEE, fromAddress ADDR) (attempt TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error),
diff --git a/common/txmgr/types/mocks/tx_store.go b/common/txmgr/types/mocks/tx_store.go
index 02388e40f40..0e344b9b6f9 100644
--- a/common/txmgr/types/mocks/tx_store.go
+++ b/common/txmgr/types/mocks/tx_store.go
@@ -4,6 +4,7 @@ package mocks
import (
context "context"
+ big "math/big"
feetypes "github.com/smartcontractkit/chainlink/v2/common/fee/types"
mock "github.com/stretchr/testify/mock"
@@ -179,32 +180,6 @@ func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindNextUns
return r0
}
-// FindReceiptsPendingConfirmation provides a mock function with given fields: ctx, blockNum, chainID
-func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindReceiptsPendingConfirmation(ctx context.Context, blockNum int64, chainID CHAIN_ID) ([]txmgrtypes.ReceiptPlus[R], error) {
- ret := _m.Called(ctx, blockNum, chainID)
-
- var r0 []txmgrtypes.ReceiptPlus[R]
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, int64, CHAIN_ID) ([]txmgrtypes.ReceiptPlus[R], error)); ok {
- return rf(ctx, blockNum, chainID)
- }
- if rf, ok := ret.Get(0).(func(context.Context, int64, CHAIN_ID) []txmgrtypes.ReceiptPlus[R]); ok {
- r0 = rf(ctx, blockNum, chainID)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]txmgrtypes.ReceiptPlus[R])
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, int64, CHAIN_ID) error); ok {
- r1 = rf(ctx, blockNum, chainID)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
-}
-
// FindTransactionsConfirmedInBlockRange provides a mock function with given fields: ctx, highBlockNumber, lowBlockNumber, chainID
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTransactionsConfirmedInBlockRange(ctx context.Context, highBlockNumber int64, lowBlockNumber int64, chainID CHAIN_ID) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) {
ret := _m.Called(ctx, highBlockNumber, lowBlockNumber, chainID)
@@ -361,6 +336,136 @@ func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxWithS
return r0, r1
}
+// FindTxesByMetaFieldAndStates provides a mock function with given fields: ctx, metaField, metaValue, states, chainID
+func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxesByMetaFieldAndStates(ctx context.Context, metaField string, metaValue string, states []txmgrtypes.TxState, chainID *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) {
+ ret := _m.Called(ctx, metaField, metaValue, states, chainID)
+
+ var r0 []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, []txmgrtypes.TxState, *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok {
+ return rf(ctx, metaField, metaValue, states, chainID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, []txmgrtypes.TxState, *big.Int) []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
+ r0 = rf(ctx, metaField, metaValue, states, chainID)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, string, []txmgrtypes.TxState, *big.Int) error); ok {
+ r1 = rf(ctx, metaField, metaValue, states, chainID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// FindTxesPendingCallback provides a mock function with given fields: ctx, blockNum, chainID
+func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxesPendingCallback(ctx context.Context, blockNum int64, chainID CHAIN_ID) ([]txmgrtypes.ReceiptPlus[R], error) {
+ ret := _m.Called(ctx, blockNum, chainID)
+
+ var r0 []txmgrtypes.ReceiptPlus[R]
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64, CHAIN_ID) ([]txmgrtypes.ReceiptPlus[R], error)); ok {
+ return rf(ctx, blockNum, chainID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, int64, CHAIN_ID) []txmgrtypes.ReceiptPlus[R]); ok {
+ r0 = rf(ctx, blockNum, chainID)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]txmgrtypes.ReceiptPlus[R])
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, int64, CHAIN_ID) error); ok {
+ r1 = rf(ctx, blockNum, chainID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// FindTxesWithAttemptsAndReceiptsByIdsAndState provides a mock function with given fields: ctx, ids, states, chainID
+func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxesWithAttemptsAndReceiptsByIdsAndState(ctx context.Context, ids []big.Int, states []txmgrtypes.TxState, chainID *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) {
+ ret := _m.Called(ctx, ids, states, chainID)
+
+ var r0 []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, []big.Int, []txmgrtypes.TxState, *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok {
+ return rf(ctx, ids, states, chainID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, []big.Int, []txmgrtypes.TxState, *big.Int) []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
+ r0 = rf(ctx, ids, states, chainID)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, []big.Int, []txmgrtypes.TxState, *big.Int) error); ok {
+ r1 = rf(ctx, ids, states, chainID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// FindTxesWithMetaFieldByReceiptBlockNum provides a mock function with given fields: ctx, metaField, blockNum, chainID
+func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxesWithMetaFieldByReceiptBlockNum(ctx context.Context, metaField string, blockNum int64, chainID *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) {
+ ret := _m.Called(ctx, metaField, blockNum, chainID)
+
+ var r0 []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, int64, *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok {
+ return rf(ctx, metaField, blockNum, chainID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, int64, *big.Int) []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
+ r0 = rf(ctx, metaField, blockNum, chainID)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, int64, *big.Int) error); ok {
+ r1 = rf(ctx, metaField, blockNum, chainID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// FindTxesWithMetaFieldByStates provides a mock function with given fields: ctx, metaField, states, chainID
+func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxesWithMetaFieldByStates(ctx context.Context, metaField string, states []txmgrtypes.TxState, chainID *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) {
+ ret := _m.Called(ctx, metaField, states, chainID)
+
+ var r0 []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, []txmgrtypes.TxState, *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok {
+ return rf(ctx, metaField, states, chainID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, []txmgrtypes.TxState, *big.Int) []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
+ r0 = rf(ctx, metaField, states, chainID)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, []txmgrtypes.TxState, *big.Int) error); ok {
+ r1 = rf(ctx, metaField, states, chainID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
// FindTxsRequiringGasBump provides a mock function with given fields: ctx, address, blockNum, gasBumpThreshold, depth, chainID
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxsRequiringGasBump(ctx context.Context, address ADDR, blockNum int64, gasBumpThreshold int64, depth int64, chainID CHAIN_ID) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) {
ret := _m.Called(ctx, address, blockNum, gasBumpThreshold, depth, chainID)
@@ -709,6 +814,20 @@ func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) UpdateTxAtt
return r0
}
+// UpdateTxCallbackCompleted provides a mock function with given fields: ctx, pipelineTaskRunRid, chainId
+func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) UpdateTxCallbackCompleted(ctx context.Context, pipelineTaskRunRid uuid.UUID, chainId CHAIN_ID) error {
+ ret := _m.Called(ctx, pipelineTaskRunRid, chainId)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, uuid.UUID, CHAIN_ID) error); ok {
+ r0 = rf(ctx, pipelineTaskRunRid, chainId)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
// UpdateTxFatalError provides a mock function with given fields: ctx, etx
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) UpdateTxFatalError(ctx context.Context, etx *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error {
ret := _m.Called(ctx, etx)
diff --git a/common/txmgr/types/tx.go b/common/txmgr/types/tx.go
index d95f07afabc..11017bd0325 100644
--- a/common/txmgr/types/tx.go
+++ b/common/txmgr/types/tx.go
@@ -91,6 +91,9 @@ type TxRequest[ADDR types.Hashable, TX_HASH types.Hashable] struct {
// Checker defines the check that should be run before a transaction is submitted on chain.
Checker TransmitCheckerSpec[ADDR]
+
+ // Mark tx requiring callback
+ SignalCallback bool
}
// TransmitCheckerSpec defines the check that should be performed before a transaction is submitted
@@ -217,6 +220,11 @@ type Tx[
// TransmitChecker defines the check that should be performed before a transaction is submitted on
// chain.
TransmitChecker *datatypes.JSON
+
+ // Marks tx requiring callback
+ SignalCallback bool
+ // Marks tx callback as signaled
+ CallbackCompleted bool
}
func (e *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) GetError() error {
diff --git a/common/txmgr/types/tx_store.go b/common/txmgr/types/tx_store.go
index 059a87d7ab2..c2dfeee4146 100644
--- a/common/txmgr/types/tx_store.go
+++ b/common/txmgr/types/tx_store.go
@@ -35,14 +35,24 @@ type TxStore[
TxHistoryReaper[CHAIN_ID]
TransactionStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, SEQ, FEE]
- // methods for saving & retreiving receipts
- FindReceiptsPendingConfirmation(ctx context.Context, blockNum int64, chainID CHAIN_ID) (receiptsPlus []ReceiptPlus[R], err error)
+ // Find confirmed txes beyond the minConfirmations param that require callback but have not yet been signaled
+ FindTxesPendingCallback(ctx context.Context, blockNum int64, chainID CHAIN_ID) (receiptsPlus []ReceiptPlus[R], err error)
+ // Update tx to mark that its callback has been signaled
+ UpdateTxCallbackCompleted(ctx context.Context, pipelineTaskRunRid uuid.UUID, chainId CHAIN_ID) error
SaveFetchedReceipts(ctx context.Context, receipts []R, chainID CHAIN_ID) (err error)
// additional methods for tx store management
CheckTxQueueCapacity(ctx context.Context, fromAddress ADDR, maxQueuedTransactions uint64, chainID CHAIN_ID) (err error)
Close()
Abandon(ctx context.Context, id CHAIN_ID, addr ADDR) error
+ // Find transactions by a field in the TxMeta blob and transaction states
+ FindTxesByMetaFieldAndStates(ctx context.Context, metaField string, metaValue string, states []TxState, chainID *big.Int) (tx []*Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error)
+ // Find transactions with a non-null TxMeta field that was provided by transaction states
+ FindTxesWithMetaFieldByStates(ctx context.Context, metaField string, states []TxState, chainID *big.Int) (tx []*Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error)
+ // Find transactions with a non-null TxMeta field that was provided and a receipt block number greater than or equal to the one provided
+ FindTxesWithMetaFieldByReceiptBlockNum(ctx context.Context, metaField string, blockNum int64, chainID *big.Int) (tx []*Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error)
+ // Find transactions loaded with transaction attempts and receipts by transaction IDs and states
+ FindTxesWithAttemptsAndReceiptsByIdsAndState(ctx context.Context, ids []big.Int, states []TxState, chainID *big.Int) (tx []*Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error)
}
// TransactionStore contains the persistence layer methods needed to manage Txs and TxAttempts
@@ -85,6 +95,8 @@ type TransactionStore[
SetBroadcastBeforeBlockNum(ctx context.Context, blockNum int64, chainID CHAIN_ID) error
UpdateBroadcastAts(ctx context.Context, now time.Time, etxIDs []int64) error
UpdateTxAttemptInProgressToBroadcast(ctx context.Context, etx *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], attempt TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], NewAttemptState TxAttemptState) error
+ // Update tx to mark that its callback has been signaled
+ UpdateTxCallbackCompleted(ctx context.Context, pipelineTaskRunRid uuid.UUID, chainId CHAIN_ID) error
UpdateTxsUnconfirmed(ctx context.Context, ids []int64) error
UpdateTxUnstartedToInProgress(ctx context.Context, etx *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], attempt *TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error
UpdateTxFatalError(ctx context.Context, etx *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error
diff --git a/common/types/head.go b/common/types/head.go
index 4d339b1cddb..000bad2390e 100644
--- a/common/types/head.go
+++ b/common/types/head.go
@@ -1,5 +1,11 @@
package types
+import (
+ "time"
+
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
+)
+
// Head provides access to a chain's head, as needed by the TxManager.
// This is a generic interface which ALL chains will implement.
//
@@ -8,6 +14,9 @@ type Head[BLOCK_HASH Hashable] interface {
// BlockNumber is the head's block number
BlockNumber() int64
+ // Timestamp the time of mining of the block
+ GetTimestamp() time.Time
+
// ChainLength returns the length of the chain followed by recursively looking up parents
ChainLength() uint32
@@ -24,4 +33,8 @@ type Head[BLOCK_HASH Hashable] interface {
// HashAtHeight returns the hash of the block at the given height, if it is in the chain.
// If not in chain, returns the zero hash
HashAtHeight(blockNum int64) BLOCK_HASH
+
+ // Returns the total difficulty of the block. For chains who do not have a concept of block
+ // difficulty, return 0.
+ BlockDifficulty() *utils.Big
}
diff --git a/common/types/mocks/head.go b/common/types/mocks/head.go
index 3cb303ef267..82fd910a08b 100644
--- a/common/types/mocks/head.go
+++ b/common/types/mocks/head.go
@@ -3,8 +3,13 @@
package mocks
import (
- types "github.com/smartcontractkit/chainlink/v2/common/types"
+ time "time"
+
mock "github.com/stretchr/testify/mock"
+
+ types "github.com/smartcontractkit/chainlink/v2/common/types"
+
+ utils "github.com/smartcontractkit/chainlink/v2/core/utils"
)
// Head is an autogenerated mock type for the Head type
@@ -12,6 +17,22 @@ type Head[BLOCK_HASH types.Hashable] struct {
mock.Mock
}
+// BlockDifficulty provides a mock function with given fields:
+func (_m *Head[BLOCK_HASH]) BlockDifficulty() *utils.Big {
+ ret := _m.Called()
+
+ var r0 *utils.Big
+ if rf, ok := ret.Get(0).(func() *utils.Big); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*utils.Big)
+ }
+ }
+
+ return r0
+}
+
// BlockHash provides a mock function with given fields:
func (_m *Head[BLOCK_HASH]) BlockHash() BLOCK_HASH {
ret := _m.Called()
@@ -100,6 +121,20 @@ func (_m *Head[BLOCK_HASH]) GetParentHash() BLOCK_HASH {
return r0
}
+// GetTimestamp provides a mock function with given fields:
+func (_m *Head[BLOCK_HASH]) GetTimestamp() time.Time {
+ ret := _m.Called()
+
+ var r0 time.Time
+ if rf, ok := ret.Get(0).(func() time.Time); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(time.Time)
+ }
+
+ return r0
+}
+
// HashAtHeight provides a mock function with given fields: blockNum
func (_m *Head[BLOCK_HASH]) HashAtHeight(blockNum int64) BLOCK_HASH {
ret := _m.Called(blockNum)
diff --git a/common/types/receipt.go b/common/types/receipt.go
new file mode 100644
index 00000000000..01d5a72def5
--- /dev/null
+++ b/common/types/receipt.go
@@ -0,0 +1,14 @@
+package types
+
+import "math/big"
+
+type Receipt[TX_HASH Hashable, BLOCK_HASH Hashable] interface {
+ GetStatus() uint64
+ GetTxHash() TX_HASH
+ GetBlockNumber() *big.Int
+ IsZero() bool
+ IsUnmined() bool
+ GetFeeUsed() uint64
+ GetTransactionIndex() uint
+ GetBlockHash() BLOCK_HASH
+}
diff --git a/common/types/test_utils.go b/common/types/test_utils.go
new file mode 100644
index 00000000000..40560f7866c
--- /dev/null
+++ b/common/types/test_utils.go
@@ -0,0 +1,16 @@
+package types
+
+import (
+ "math"
+ "math/big"
+ "math/rand"
+)
+
+func RandomID() ID {
+ id := rand.Int63n(math.MaxInt32) + 10000
+ return big.NewInt(id)
+}
+
+func NewIDFromInt(id int64) ID {
+ return big.NewInt(id)
+}
diff --git a/contracts/.solhint.json b/contracts/.solhint.json
index 3b69ca6a7f2..e66b915d679 100644
--- a/contracts/.solhint.json
+++ b/contracts/.solhint.json
@@ -37,6 +37,7 @@
"chainlink-solidity/prefix-immutable-variables-with-i": "warn",
"chainlink-solidity/all-caps-constant-storage-variables": "warn",
"chainlink-solidity/no-hardhat-imports": "warn",
- "chainlink-solidity/inherited-constructor-args-not-in-contract-definition": "warn"
+ "chainlink-solidity/inherited-constructor-args-not-in-contract-definition": "warn",
+ "chainlink-solidity/explicit-returns": "warn"
}
}
diff --git a/contracts/.solhintignore b/contracts/.solhintignore
index bc7be4fbfee..ba2aac1fb3a 100644
--- a/contracts/.solhintignore
+++ b/contracts/.solhintignore
@@ -1,4 +1,4 @@
-# 377 warnings
+# 344 warnings
#./src/v0.8/automation
# Ignore Functions v1.0.0 code that was frozen after audit
diff --git a/contracts/GNUmakefile b/contracts/GNUmakefile
index b477164a496..e8808138673 100644
--- a/contracts/GNUmakefile
+++ b/contracts/GNUmakefile
@@ -34,11 +34,11 @@ abigen: ## Build & install abigen.
.PHONY: mockery
mockery: $(mockery) ## Install mockery.
- go install github.com/vektra/mockery/v2@v2.28.1
+ go install github.com/vektra/mockery/v2@v2.35.4
.PHONY: foundry
foundry: ## Install foundry.
- foundryup --version nightly-5be158ba6dc7c798a6f032026fe60fc01686b33b
+ foundryup --version nightly-09fe3e041369a816365a020f715ad6f94dbce9f2
.PHONY: foundry-refresh
foundry-refresh: foundry
diff --git a/contracts/STYLE.md b/contracts/STYLE.md
deleted file mode 100644
index d9692a65210..00000000000
--- a/contracts/STYLE.md
+++ /dev/null
@@ -1,234 +0,0 @@
-# Solidity Style Guide
-
-## Background
-
-Our starting point is the [official Solidity Style Guide](https://solidity.readthedocs.io/en/v0.8.0/style-guide.html) and [ConsenSys's Secure Development practices](https://consensys.github.io/smart-contract-best-practices/), but we deviate in some ways. We lean heavily on [Prettier](https://github.com/smartcontractkit/chainlink/blob/develop/contracts/.prettierrc) for formatting, and if you have to set up a new Solidity project we recommend starting with [our prettier config](https://github.com/smartcontractkit/chainlink/blob/develop/.prettierrc.js). We are trying to automate as much of this styleguide with Solhint as possible.
-
-### Code Organization
-
-- Group functionality together. E.g. Declare structs, events, and helper functions near the functions that use them. This is helpful when reading code because the related pieces are localized. It is also consistent with inheritance and libraries, which are separate pieces of code designed for a specific goal.
- - Why not follow the Solidity recommendation of grouping by visibility? Visibility is clearly defined next to the method signature, making it trivial to check. However, searching can be deceiving because of inherited methods. Given this inconsistency in grouping, we find it easier to read and more consistent to organize code around functionality. Additionally, we recommend testing the public interface for any Solidity contract to ensure it only exposes expected methods.
-
-### Delineate Unaudited Code
-
-- In a large repo it is worthwhile to keep code that has not yet been audited separate from the code that has been audited. This allows you to easily keep track of which files need to be reviewed.
- - E.g. we keep unaudited code in a directory named `dev`. Only once it has been audited we move the audited files out of `dev` and only then is it considered safe to deploy.
-
-## Variables
-
-### Visibility
-
-- All contract variables should be private. Getters should be explicitly written and documented when you want to expose a variable publicly. Whether a getter function reads from storage, a constant, or calculates a value from somewhere else, that’s all implementation details that should not be exposed to the consumer by casing or other conventions.
-
-Examples:
-
-Good:
-
-```javascript
-uint256 private s_myVar;
-
-function getMyVar() external view returns(uint256){
- return s_myVar;
-}
-```
-
-Bad:
-
-```javascript
-uint256 public s_myVar;
-```
-
-### Naming and Casing
-
-- Function arguments are named like this: `argumentName`. No leading or trailing underscores necessary.
-- Storage variables prefixed with an `s_` to make it clear that they live in storage and are expensive to read and write: `s_variableName`. They should always be private, and you should write explicit getters if you want to expose a storage variable.
-- Immutable variables should be prefixed with an `i_` to make it clear that they are immutable. E.g. `i_decimalPlaces`. They should always be private, and you should write explicit getters if you want to expose an immutable variable.
-- Internal/private constants should be all caps with underscores: `FOO_BAR`. Like other contract variables, constants should not be public. Create getter methods if you want to publicly expose constants.
-- Explicitly declare variable size: `uint256` not just `uint`. In addition to being explicit, it matches the naming used to calculate function selectors.
-
-Examples:
-
-Good:
-
-```javascript
-uint256 private s_myVar;
-uint256 private immutable i_myImmutVar;
-uint256 private constant MY_CONST_VAR;
-
-function multiplyMyVar(uint256 multiplier) external view returns(uint256){
- return multiplier * s_myVar;
-}
-```
-
-Bad:
-
-```javascript
-uint private s_myVar;
-uint256 private immutable myImmutVar;
-uint256 private constant s_myConstVar;
-
-function multiplyMyVar_(uint _multiplier) external view returns(uint256){
- return _mutliplier * s_myVar;
-}
-```
-
-### Types
-
-- If you are storing an address and know/expect it to be of a type(or interface), make the variable that type. This more clearly documents the behavior of this variable than the `address` type and often leads to less casting code whenever the address is used.
-
-Examples:
-
-Good:
-
-```javascript
-import "@chainlink/contracts/src/v0.8/interfaces/AggregatorV3Interface.sol";
-// .
-// .
-// .
-AggregatorV3Interface private s_priceFeed;
-
-constructor(address priceFeed) {
- s_priceFeed = AggregatorV3Interface(priceFeed);
-}
-```
-
-Bad:
-
-```javascript
-import "@chainlink/contracts/src/v0.8/interfaces/AggregatorV3Interface.sol";
-// .
-// .
-// .
-address private s_priceFeed;
-
-constructor(address priceFeed) {
- s_priceFeed = priceFeed;
-}
-```
-
-## Functions
-
-### Visibility
-
-- Method visibility should always be explicitly declared. Contract’s [public interfaces should be tested](https://github.com/smartcontractkit/chainlink/blob/master/contracts/test/test-helpers/helpers.ts#L221) to enforce this and make sure that internal logic is not accidentally exposed.
-
-### Naming
-
-- Function names should start with imperative verbs, not nouns or other tenses.
- - `requestData` not `dataRequest`
- - `approve` not `approved`
-- Prefix private and internal methods with an underscore. There should never be a publicly callable method starting with an underscore.
- - E.g. `_setOwner(address)`
-- Prefix your public getters with `get` and your public setters with `set`.
- - `getConfig` and `setConfig`.
-
-## Modifiers
-
-- Only extract a modifier once a check is duplicated in multiple places. Modifiers arguably hurt readability, so we have found that they are not worth extracting until there is duplication.
-- Modifiers should be treated as if they are view functions. They should not change state, only read it. While it is possible to change state in a modifier, it is unconventional and surprising.
-
-### Naming
-
-There are two common classes of modifiers, and their name should be prefixed accordingly to quickly represent their behavior:
-
-- Control flow modifiers: Prefix the modifier name with `if` in the case that a modifier only enables or disables the subsequent code in the modified method, but does not revert.
-- Reverting modifiers: Prefix the modifier name with `validate` in the case that a modifier reverts if a condition is not met.
-
-### Return Values
-
-- If an address is cast as a contract type, return the type, do not cast back to the address type. This prevents the consumer of the method signature from having to cast again, but presents an equivalent API for off-chain APIs. Additionally it is a more declarative API, providing more context if we return a type.
-
-## Events
-
-- Events should only be triggered on state changes. If the value is set but not changed, we prefer avoiding a log emission indicating a change. (e.g. Either do not emit a log, or name the event `ConfigSet` instead of `ConfigUpdated`.)
-
-### Naming
-
-- When possible event names should correspond to the method they are in or the action that is being taken. Events preferably follow the format , where the action performed is the past tense of the imperative verb in the method name. e.g. calling `setConfig` should emit an event called `ConfigSet`, not `ConfigUpdated` in a method named `setConfig`.
-
-## Errors
-
-### Use Custom Errors
-
-Whenever possible (Solidity v0.8+) use [custom errors](https://blog.soliditylang.org/2021/04/21/custom-errors/) instead of emitting strings. This saves contract code size and simultaneously provides more informative error messages.
-
-### Expose Errors
-
-It is common to call a contract and then check the call succeeded:
-
-```javascript
-(bool success, ) = to.call(data);
-require(success, "Contract call failed");
-```
-
-While this may look descriptive it swallows the error. Instead bubble up the error:
-
-```javascript
-error YourError(bytes response);
-
-(bool success, bytes memory response) = to.call(data);
-if (!success) { revert YourError(response); }
-```
-
-This will cost slightly more gas to copy the response into memory, but will ultimately make contract usage more understandable and easier to debug. Whether it is worth the extra gas is a judgement call you’ll have to make based on your needs.
-
-The original error will not be human readable in an off-chain explorer because it is RLP hex encoded but is easily decoded with standard Solidity ABI decoding tools, or a hex to UTF-8 converter and some basic ABI knowledge.
-
-## Control Flow
-
-### `if` Statements
-
-Always wrap the result statement of your `if` conditions in a closure, even if it is only one line.
-
-Bad:
-
-```javascript
- if (condition) statement;
-```
-
-Good:
-
-```javascript
- if (condition) { statement; }
-```
-
-## Interfaces
-
-### Scope
-
-- Interfaces should be as concise as reasonably possible. Break it up into smaller composable interfaces when that is sensible.
-
-### Naming
-
-- Up through Solidity version 0.8: Interfaces should be named `FooInterface`, this follows our historical naming pattern.
-- Starting in Solidity v0.9: Interfaces should be named `IFoo` instead of `FooInterface`. This follows the patterns of popular [libraries like OpenZeppelin’s](https://github.com/OpenZeppelin/openzeppelin-contracts/blob/master/contracts/token/ERC20/IERC20.sol#L9).
-
-## Vendor Dependencies
-
-- That’s it, vendor your Solidity dependencies. Supply chain attacks are all the rage these days. There is not yet a silver bullet for best way to vendor, it depends on the size of your project and your needs. You should be as explicit as possible about where the code comes from and make sure that this is enforced in some way; e.g. reference a hash. Some options:
- - NPM packages work for repos already in the JavaScript ecosystem. If you go this route you should lock to a hash of the repo or use a proxy registry like GitHub Packages.
- - Git submodules are great if you don’t mind git submodules.
- - Copy and paste the code into a `vendor` directory. Record attribution of the code and license in the repo along with the commit or version that you pulled the code from.
-
-## Common Behaviors
-
-### Transferring Ownership
-
-- When transferring control, whether it is of a token or a role in a contract, prefer "safe ownership" transfer patterns where the recipient must accept ownership. This avoids accidentally burning the control. This is also inline with the secure pattern of [prefer pull over push](https://consensys.github.io/smart-contract-best-practices/recommendations/#favor-pull-over-push-for-external-calls).
-
-### Use Factories
-
-- If you expect multiple instances of a contract to be deployed, it is probably best to [build a factory](https://www.quicknode.com/guides/solidity/how-to-create-a-smart-contract-factory-in-solidity-using-hardhat) as this allows for simpler deployments later. Additionally it reduces the burden of verifying the correctness of the contract deployment. If many people have to deploy an instance of a contract then doing so with a contract makes it much easier for verification because instead of checking the code hash and/or the compiler and maybe the source code, you only have to check that the contract was deployed through a factory.
-- Factories can add some pain when deploying with immutable variables. In general it is difficult to parse those out immutable variables from internal transactions. There is nothing inherently wrong with contracts deployed in this manner but at the time of writing they may not easily verify on Etherscan.
-
-### Call with Exact Gas
-
-- `call` accepts a gas parameter, but that parameter is a ceiling on gas usage. If a transaction does not have enough gas, `call` will simply provide as much gas as it safely can. This is unintuitive and can lead to transactions failing for unexpected reasons. We have [an implementation of `callWithExactGas`](https://github.com/smartcontractkit/chainlink/blob/075f3e2caf61b8685d2dc78714f1ee39764fda17/contracts/src/v0.8/KeeperRegistry.sol#L792) to ensure the precise gas amount requested is provided.
-
-## Picking a Pragma
-
-- If a contract or library is expected to be imported by outside parties then the pragma should be kept as loose as possible without sacrificing safety. We publish versions for every minor semver version of Solidity, and maintain a corresponding set of tests for each published version.
- - Examples: libraries, interfaces, abstract contracts, and contracts expected to be inherited from
-- Otherwise, Solidity contracts should have a pragma which is locked to a specific version.
- - Example: Most concrete contracts.
-- Avoid changing pragmas after audit. Unless there is a bug that has affects your contract, then you should try to stick to a known good pragma. In practice this means we typically only support one (occasionally two) pragma for any “major”(minor by semver naming) Solidity version.
diff --git a/contracts/STYLE_GUIDE.md b/contracts/STYLE_GUIDE.md
new file mode 100644
index 00000000000..3868117d4b9
--- /dev/null
+++ b/contracts/STYLE_GUIDE.md
@@ -0,0 +1,391 @@
+# Structure
+
+This guide is split into two sections: [Guidelines](#guidelines) and [Rules](#rules).
+Guidelines are recommendations that should be followed but are hard to enforce in an automated way.
+Rules are all enforced through CI, this can be through Solhint rules or other tools.
+
+## Background
+
+Our starting point is the [official Solidity Style Guide](https://docs.soliditylang.org/en/v0.8.21/style-guide.html) and [ConsenSys's Secure Development practices](https://consensys.github.io/smart-contract-best-practices/), but we deviate in some ways. We lean heavily on [Prettier](https://github.com/smartcontractkit/chainlink/blob/develop/contracts/.prettierrc) for formatting, and if you have to set up a new Solidity project we recommend starting with [our prettier config](https://github.com/smartcontractkit/chainlink/blob/develop/contracts/.prettierrc). We are trying to automate as much of this styleguide with Solhint as possible.
+
+This guide is not meant to be applied retroactively. There is no need to rewrite existing code to adhere to this guide, and when making (small) changes in existing files, it is not required to do so in accordance with this guide if it would conflict with other practices in that existing file. Consistency is preferred.
+
+We will be looking into `forge fmt`, but for now we still use `prettier`.
+
+
+# Guidelines
+
+## Code Organization
+- Group functionality together. E.g. Declare structs, events, and helper functions near the functions that use them. This is helpful when reading code because the related pieces are localized. It is also consistent with inheritance and libraries, which are separate pieces of code designed for a specific goal.
+- 🤔 Why not follow the Solidity recommendation of grouping by visibility? Visibility is clearly defined next to the method signature, making it trivial to check. However, searching can be deceiving because of inherited methods. Given this inconsistency in grouping, we find it easier to read and more consistent to organize code around functionality. Additionally, we recommend testing the public interface for any Solidity contract to ensure it only exposes expected methods.
+- Follow the [Solidity folder structure CLIP](https://github.com/smartcontractkit/CLIPs/tree/main/clips/2023-04-13-solidity-folder-structure)
+
+### Delineate Unaudited Code
+
+- In a large repo it is worthwhile to keep code that has not yet been audited separate from the code that has been audited. This allows you to easily keep track of which files need to be reviewed.
+ - E.g. we keep unaudited code in a directory named `dev` that exists within each projects folder. Only once it has been audited we move the audited files out of `dev` and only then is it considered safe to deploy.
+  - This `dev` folder also has implications for when code is valid for bug bounties, so be extra careful when moving functionality out of a `dev` folder.
+
+
+## Comments
+- Besides comments above functions/structs, comments should live everywhere a reader might be confused.
+ Don’t overestimate the reader of your contract, expect confusion in many places and document accordingly.
+ This will help massively during audits and onboarding new team members.
+- Headers should be used to group functionality, the following header style and length is recommended.
+ - Don’t use headers for a single function, or to say “getters”. Group by functionality e.g. the `Tokens and pools` , or `fees` logic within the CCIP OnRamp.
+
+```solidity
+ // ================================================================
+ // │ Tokens and pools │
+ // ================================================================
+
+....
+
+ // ================================================================
+ // │ Fees │
+ // ================================================================
+```
+
+## Variables
+
+- Function arguments are named like this: `argumentName`. No leading or trailing underscores necessary.
+- Names should be explicit on the unit it contains, e.g. a network fee that is charged in USD cents
+
+```solidity
+uint256 fee; // bad
+uint256 networkFee; // bad
+uint256 networkFeeUSD; // bad
+uint256 networkFeeUSDCents; // good
+```
+
+### Types
+
+- If you are storing an address and know/expect it to be of a type(or interface), make the variable that type. This more clearly documents the behavior of this variable than the `address` type and often leads to less casting code whenever the address is used.
+
+### Structs
+
+- All structs should be packed to have the lowest memory footprint to reduce gas usage. Even structs that will never be written to storage should be packed.
+ - A contract can be considered a struct; it should also be packed to reduce gas cost.
+- Structs should contain struct packing comments to clearly indicate the storage slot layout
+ - Using the exact characters from the example below will ensure visually appealing struct packing comments.
+ - Notice there is no line on the unpacked last `fee` item.
+- Struct should contain comments, clearly indicating the denomination of values e.g. 0.01 USD if the variable name doesn’t already do that (which it should).
+ - Simple tool that could help packing structs and adding comments: https://github.com/RensR/Spack
+
+```solidity
+/// @dev Struct to hold the fee configuration for a fee token, same as the FeeTokenConfig but with
+/// token included so that an array of these can be passed in to setFeeTokenConfig to set the mapping
+struct FeeTokenConfigArgs {
+ address token; // ────────────╮ Token address
+ uint32 networkFeeUSD; // │ Flat network fee to charge for messages, multiples of 0.01 USD
+ // │ multiline comments should work like this. More fee info
+ uint64 gasMultiplier; // ─────╯ Price multiplier for gas costs, 1e18 based so 11e17 = 10% extra cost
+ uint64 premiumMultiplier; // ─╮ Multiplier for fee-token-specific premiums
+ bool enabled; // ─────────────╯ Whether this fee token is enabled
+ uint256 fee; // The flat fee the user pays in juels
+}
+```
+## Functions
+
+### Naming
+
+- Function names should start with imperative verbs, not nouns or other tenses.
+ - `requestData` not `dataRequest`
+ - `approve` not `approved`
+ - `getFeeParameters` not `feeParameters`
+
+- Prefix your public getters with `get` and your public setters with `set`.
+ - `getConfig` and `setConfig`.
+
+### Return Values
+
+- If an address is cast as a contract type, return the type, do not cast back to the address type.
+ This prevents the consumer of the method signature from having to cast again, but presents an equivalent API for off-chain APIs.
+ Additionally, it is a more declarative API, providing more context if we return a type.
+
+## Modifiers
+
+- Only extract a modifier once a check is duplicated in multiple places. Modifiers arguably hurt readability, so we have found that they are not worth extracting until there is duplication.
+- Modifiers should be treated as if they are view functions. They should not change state, only read it. While it is possible to change state in a modifier, it is unconventional and surprising.
+- Modifiers tend to bloat contract size because the code is duplicated wherever the modifier is used.
+
+## Events
+
+- Events should only be triggered on state changes. If the value is set but not changed, we prefer avoiding a log emission indicating a change. (e.g. Either do not emit a log, or name the event `ConfigSet` instead of `ConfigUpdated`.)
+- Events should be emitted for all state changes, not emitting should be an exception
+- When possible event names should correspond to the method they are in or the action that is being taken. Events preferably follow the format where the action performed is the past tense of the imperative verb in the method name. e.g. calling `setConfig` should emit an event called `ConfigSet`, not `ConfigUpdated` in a method named `setConfig`.
+
+
+### Expose Errors
+
+It is common to call a contract and then check the call succeeded:
+
+```solidity
+(bool success, ) = to.call(data);
+require(success, "Contract call failed");
+```
+
+While this may look descriptive it swallows the error. Instead, bubble up the error:
+
+```solidity
+bool success;
+retData = new bytes(maxReturnBytes);
+assembly {
+ // call and return whether we succeeded. ignore return data
+ // call(gas,addr,value,argsOffset,argsLength,retOffset,retLength)
+ success := call(gasLimit, target, 0, add(payload, 0x20), mload(payload), 0, 0)
+
+ // limit our copy to maxReturnBytes bytes
+ let toCopy := returndatasize()
+ if gt(toCopy, maxReturnBytes) {
+ toCopy := maxReturnBytes
+ }
+ // Store the length of the copied bytes
+ mstore(retData, toCopy)
+ // copy the bytes from retData[0:_toCopy]
+ returndatacopy(add(retData, 0x20), 0, toCopy)
+}
+return (success, retData);
+```
+
+This will cost slightly more gas to copy the response into memory, but will ultimately make contract usage more understandable and easier to debug. Whether it is worth the extra gas is a judgement call you’ll have to make based on your needs.
+
+The original error will not be human-readable in an off-chain explorer because it is RLP hex encoded but is easily decoded with standard Solidity ABI decoding tools, or a hex to UTF-8 converter and some basic ABI knowledge.
+
+
+## Interfaces
+
+- Interfaces should be as concise as reasonably possible. Break it up into smaller composable interfaces when that is sensible.
+
+## Dependencies
+
+- Prefer not reinventing the wheel, especially if there is an Openzeppelin wheel.
+- The `shared` folder can be treated as a first party dependency and it is recommended to check if some functionality might already be in there before either writing it yourself or adding a third party dependency.
+- When we have reinvented the wheel already (like with ownership), it is OK to keep using these contracts. If there are clear benefits of using another standard like OZ, we can deprecate the custom implementation and start using the new standard in all new projects. Migration will not be required unless there are serious issues with the old implementation.
+- When the decision is made to use a new standard, it is no longer allowed to use the old standard for new projects.
+
+### Vendor dependencies
+
+- That’s it, vendor your Solidity dependencies. Supply chain attacks are all the rage these days. There is not yet a silver bullet for best way to vendor, it depends on the size of your project and your needs. You should be as explicit as possible about where the code comes from and make sure that this is enforced in some way; e.g. reference a hash. Some options:
+ - NPM packages work for repos already in the JavaScript ecosystem. If you go this route you should lock to a hash of the repo or use a proxy registry like GitHub Packages.
+ - Copy and paste the code into a `vendor` directory. Record attribution of the code and license in the repo along with the commit or version that you pulled the code from.
+ - Foundry uses git submodules for its dependencies. We only use the `forge-std` lib through submodules, we don’t import any non-Foundry-testing code through this method.
+
+
+## Common Behaviors
+
+### Transferring Ownership
+
+- When transferring control, whether it is of a token or a role in a contract, prefer "safe ownership" transfer patterns where the recipient must accept ownership. This avoids accidentally burning the control. This is also in line with the secure pattern of [prefer pull over push](https://consensys.github.io/smart-contract-best-practices/recommendations/#favor-pull-over-push-for-external-calls).
+
+### Call with Exact Gas
+
+- `call` accepts a gas parameter, but that parameter is a ceiling on gas usage. If a transaction does not have enough gas, `call` will simply provide as much gas as it safely can. This is unintuitive and can lead to transactions failing for unexpected reasons. We have [an implementation of `callWithExactGas`](https://github.com/smartcontractkit/chainlink/blob/075f3e2caf61b8685d2dc78714f1ee39764fda17/contracts/src/v0.8/KeeperRegistry.sol#L792) to ensure the precise gas amount requested is provided.
+
+### Sending tokens
+
+- Prefer [ERC20.safeTransfer](https://docs.openzeppelin.com/contracts/2.x/api/token/erc20#SafeERC20) over ERC20.transfer
+
+### Gas golfing
+
+- Golf your code. Make it cheap, within reason.
+ - Focus on the hot path
+- Most of the cost of executing Solidity is related to reading/writing storage
+- Calling other contracts will also be costly
+- Common types to safely use are
+ - uint40 for timestamps (or uint32 if you really need the space)
+ - uint96 for link, as there are only 1b link tokens
+- prefer `++i` over `i++`
+- If you’re unsure about golfing, ask in the #tech-solidity channel
+
+## Testing
+
+- Test using Foundry.
+- Aim for at least 90% *useful* coverage as a baseline, but (near) 100% is achievable in Solidity. Always 100% test the critical path.
+ - Make sure to test for each event emitted
+ - Test each reverting path
+- Consider fuzzing, start with stateless (very easy in Foundry) and if that works, try stateful fuzzing.
+- Consider fork testing if applicable
+
+### Foundry
+
+- Create a Foundry profile for each project folder in `foundry.toml`
+- Foundry tests live in the project folder in `src`, not in the `contracts/test/` folder
+- Set the block number and timestamp. It is preferred to set these values to some reasonable value close to reality.
+- There should be no code between `vm.expectEmit`/`vm.expectRevert` and the function call
+
+## Picking a Pragma
+
+- If a contract or library is expected to be imported by outside parties then the pragma should be kept as loose as possible without sacrificing safety. We publish versions for every minor semver version of Solidity, and maintain a corresponding set of tests for each published version.
+ - Examples: libraries, interfaces, abstract contracts, and contracts expected to be inherited from
+- Otherwise, Solidity contracts should have a pragma which is locked to a specific version.
+ - Example: Most concrete contracts.
+- Avoid changing pragmas after audit. Unless there is a bug that affects your contract, you should try to stick to a known good pragma. In practice this means we typically only support one (occasionally two) pragma for any “major”(minor by semver naming) Solidity version.
+- The current advised pragma is `0.8.19` or higher, lower versions should be avoided when starting a new project. Newer versions can be considered.
+- All contracts should have a SPDX license identifier. If unsure about which one to pick, please consult with legal. Most older contracts have been MIT, but some of the newer products have been using BUSL-1.1
+
+
+## Versioning
+
+Contracts should implement the following interface
+
+```solidity
+interface ITypeAndVersion {
+ function typeAndVersion() external pure returns (string memory);
+}
+```
+
+Here are some examples of what this should look like:
+
+```solidity
+contract AccessControlledFoo is Foo {
+ // solhint-disable-next-line chainlink-solidity/all-caps-constant-storage-variables
+ string public constant override typeAndVersion = "AccessControlledFoo 1.0.0";
+}
+
+contract OffchainAggregator is ITypeAndVersion {
+ // solhint-disable-next-line chainlink-solidity/all-caps-constant-storage-variables
+ string public constant override typeAndVersion = "OffchainAggregator 1.0.0";
+
+ function getData() public returns(uint256) {
+ return 4;
+ }
+}
+
+// Next version of Aggregator contract
+contract SuperDuperAggregator is ITypeAndVersion {
+ /// This is a new contract that has not been released yet, so we
+ /// add a `-dev` suffix to the typeAndVersion.
+
+ // solhint-disable-next-line chainlink-solidity/all-caps-constant-storage-variables
+ string public constant override typeAndVersion = "SuperDuperAggregator 1.1.0-dev";
+
+ function getData() public returns(uint256) {
+ return 5;
+ }
+}
+```
+
+All contracts will expose a `typeAndVersion` constant.
+The string has the following format: `<contract name> <semver>` with the `-dev` suffix only being applicable to contracts that have not been fully released.
+Try to fit it into 32 bytes to keep impact on contract sizes minimal.
+Solhint will complain about a public constant variable that isn’t FULL_CAPS without the solhint-disable comment.
+
+
+
+
+
+
+
+
+
+
+# Rules
+
+All rules have a `rule` tag which indicates how the rule is enforced.
+
+
+## Comments
+
+- Comments should be in the `//` (default) or `///` (natspec) format, not the `/* */` format.
+ - rule: `tbd`
+- Comments should follow [NatSpec](https://docs.soliditylang.org/en/latest/natspec-format.html)
+ - rule: `tbd`
+
+## Imports
+
+- Imports should always be explicit
+ - rule: `no-global-import`
+- Imports should follow the following format:
+ - rule: `tbd`
+
+```solidity
+import {IInterface} from "../interfaces/IInterface.sol";
+
+import {AnythingElse} from "../code/AnythingElse.sol";
+
+import {ThirdPartyCode} from "../../vendor/ThirdPartyCode.sol";
+```
+
+- An example would be
+
+```solidity
+import {ITypeAndVersion} from "../../shared/interfaces/ITypeAndVersion.sol";
+import {IPool} from "../interfaces/pools/IPool.sol";
+
+import {AggregateRateLimiter} from "../AggregateRateLimiter.sol";
+import {Client} from "../libraries/Client.sol";
+
+import {SafeERC20} from "../../vendor/openzeppelin-solidity/v4.8.0/contracts/token/ERC20/utils/SafeERC20.sol";
+import {IERC20} from "../../vendor/openzeppelin-solidity/v4.8.0/contracts/token/ERC20/IERC20.sol";
+```
+
+## Variables
+
+### Visibility
+
+All contract variables should be private by default. Getters should be explicitly written and documented when you want to expose a variable publicly.
+Whether a getter function reads from storage, a constant, or calculates a value from somewhere else, that’s all implementation details that should not be exposed to the consumer by casing or other conventions.
+
+rule: tbd
+
+### Naming and Casing
+
+- Storage variables prefixed with an `s_` to make it clear that they live in storage and are expensive to read and write: `s_variableName`. They should always be private, and you should write explicit getters if you want to expose a storage variable.
+ - rule: `chainlink-solidity/prefix-storage-variables-with-s-underscore`
+- Immutable variables should be prefixed with an `i_` to make it clear that they are immutable. E.g. `i_decimalPlaces`. They should always be private, and you should write explicit getters if you want to expose an immutable variable.
+ - rule: `chainlink-solidity/prefix-immutable-variables-with-i`
+- Internal/private constants should be all caps with underscores: `FOO_BAR`. Like other contract variables, constants should not be public. Create getter methods if you want to publicly expose constants.
+ - rule: `chainlink-solidity/all-caps-constant-storage-variables`
+- Explicitly declare variable size: `uint256` not just `uint`. In addition to being explicit, it matches the naming used to calculate function selectors.
+ - rule: `explicit-types`
+- Mappings should always be named if Solidity allows it (≥0.8.18)
+ - rule: `tbd`
+
+
+## Functions
+
+### Visibility
+
+- Method visibility should always be explicitly declared.
+ - rule: `state-visibility`
+
+- Prefix private and internal methods with an underscore. There should never be a publicly callable method starting with an underscore.
+ - E.g. `_setOwner(address)`
+ - rule: `chainlink-solidity/prefix-internal-functions-with-underscore`
+
+### Return values
+
+- Returned values should always be explicit. Using named return values and then returning with an empty return should be avoided
+ - rule: `chainlink-solidity/explicit-returns`
+
+```solidity
+// Bad
+function getNum() external view returns (uint64 num) {
+ num = 4;
+ return;
+}
+
+// Good
+function getNum() external view returns (uint64 num) {
+ num = 4;
+ return num;
+}
+
+// Good
+function getNum() external view returns (uint64 num) {
+ return 4;
+}
+```
+
+## Errors
+
+Use [custom errors](https://blog.soliditylang.org/2021/04/21/custom-errors/) instead of emitting strings. This saves contract code size and simultaneously provides more informative error messages.
+
+rule: `custom-errors`
+
+## Interfaces
+
+Interfaces should be named `IFoo` instead of `FooInterface`. This follows the patterns of popular [libraries like OpenZeppelin’s](https://github.com/OpenZeppelin/openzeppelin-contracts/blob/master/contracts/token/ERC20/IERC20.sol#L9).
+
+rule: `tbd`
\ No newline at end of file
diff --git a/contracts/ci.json b/contracts/ci.json
index dd1fbb0a88f..f1eff76513c 100644
--- a/contracts/ci.json
+++ b/contracts/ci.json
@@ -6,23 +6,19 @@
"dir": "cross-version",
"numOfSplits": 1
},
- {
- "dir": "v0.6",
- "numOfSplits": 1
- },
- {
- "dir": "v0.7",
- "numOfSplits": 1
- },
{
"dir": "v0.8",
- "numOfSplits": 8,
+ "numOfSplits": 6,
"slowTests": [
- "Keeper",
- "Cron.test",
- "CronUpkeep.test",
+ "Cron",
+ "CronUpkeep",
+ "VRFSubscriptionBalanceMonitor",
"EthBalanceMonitor",
- "CanaryUpkeep"
+ "KeeperRegistrar",
+ "KeeperRegistry1_2",
+ "KeeperRegistry1_3",
+ "KeeperRegistry2_0",
+ "KeeperRegistry2_1"
]
}
]
diff --git a/contracts/gas-snapshots/functions.gas-snapshot b/contracts/gas-snapshots/functions.gas-snapshot
index d575c8ca196..82b5b494a74 100644
--- a/contracts/gas-snapshots/functions.gas-snapshot
+++ b/contracts/gas-snapshots/functions.gas-snapshot
@@ -1,56 +1,67 @@
+ChainSpecificUtil__getCurrentTxL1GasFees_Arbitrum:test__getCurrentTxL1GasFees_SuccessWhenArbitrumGoerli() (gas: 14577815)
+ChainSpecificUtil__getCurrentTxL1GasFees_Arbitrum:test__getCurrentTxL1GasFees_SuccessWhenArbitrumMainnet() (gas: 14577793)
+ChainSpecificUtil__getCurrentTxL1GasFees_Arbitrum:test__getCurrentTxL1GasFees_SuccessWhenArbitrumSepolia() (gas: 14577809)
+ChainSpecificUtil__getCurrentTxL1GasFees_Base:test__getCurrentTxL1GasFees_SuccessWhenBaseGoerli() (gas: 14589229)
+ChainSpecificUtil__getCurrentTxL1GasFees_Base:test__getCurrentTxL1GasFees_SuccessWhenBaseMainnet() (gas: 14589206)
+ChainSpecificUtil__getCurrentTxL1GasFees_Base:test__getCurrentTxL1GasFees_SuccessWhenBaseSepolia() (gas: 14589178)
+ChainSpecificUtil__getCurrentTxL1GasFees_Optimism:test__getCurrentTxL1GasFees_SuccessWhenOptimismGoerli() (gas: 14589129)
+ChainSpecificUtil__getCurrentTxL1GasFees_Optimism:test__getCurrentTxL1GasFees_SuccessWhenOptimismMainnet() (gas: 14589118)
+ChainSpecificUtil__getCurrentTxL1GasFees_Optimism:test__getCurrentTxL1GasFees_SuccessWhenOptimismSepolia() (gas: 14589162)
FunctionsBilling_Constructor:test_Constructor_Success() (gas: 14812)
FunctionsBilling_DeleteCommitment:test_DeleteCommitment_RevertIfNotRouter() (gas: 13282)
FunctionsBilling_DeleteCommitment:test_DeleteCommitment_Success() (gas: 15897)
FunctionsBilling_EstimateCost:test_EstimateCost_RevertsIfGasPriceAboveCeiling() (gas: 32458)
-FunctionsBilling_EstimateCost:test_EstimateCost_Success() (gas: 53227)
-FunctionsBilling_EstimateCost:test_EstimateCost_SuccessLowGasPrice() (gas: 53330)
+FunctionsBilling_EstimateCost:test_EstimateCost_Success() (gas: 53807)
+FunctionsBilling_EstimateCost:test_EstimateCost_SuccessLowGasPrice() (gas: 53910)
FunctionsBilling_GetAdminFee:test_GetAdminFee_Success() (gas: 18226)
FunctionsBilling_GetConfig:test_GetConfig_Success() (gas: 23671)
FunctionsBilling_GetDONFee:test_GetDONFee_Success() (gas: 15792)
FunctionsBilling_GetWeiPerUnitLink:test_GetWeiPerUnitLink_Success() (gas: 31773)
-FunctionsBilling_OracleWithdraw:test_OracleWithdraw_RevertIfInsufficientBalance() (gas: 70138)
-FunctionsBilling_OracleWithdraw:test_OracleWithdraw_RevertWithNoBalance() (gas: 106295)
-FunctionsBilling_OracleWithdraw:test_OracleWithdraw_SuccessTransmitterWithBalanceNoAmountGiven() (gas: 140174)
-FunctionsBilling_OracleWithdraw:test_OracleWithdraw_SuccessTransmitterWithBalanceValidAmountGiven() (gas: 142502)
+FunctionsBilling_OracleWithdraw:test_OracleWithdraw_RevertIfInsufficientBalance() (gas: 70128)
+FunctionsBilling_OracleWithdraw:test_OracleWithdraw_RevertWithNoBalance() (gas: 106285)
+FunctionsBilling_OracleWithdraw:test_OracleWithdraw_SuccessTransmitterWithBalanceNoAmountGiven() (gas: 140164)
+FunctionsBilling_OracleWithdraw:test_OracleWithdraw_SuccessTransmitterWithBalanceValidAmountGiven() (gas: 142492)
FunctionsBilling_OracleWithdrawAll:test_OracleWithdrawAll_RevertIfNotOwner() (gas: 13296)
FunctionsBilling_OracleWithdrawAll:test_OracleWithdrawAll_SuccessPaysTransmittersWithBalance() (gas: 147278)
FunctionsBilling_UpdateConfig:test_UpdateConfig_RevertIfNotOwner() (gas: 18974)
FunctionsBilling_UpdateConfig:test_UpdateConfig_Success() (gas: 38251)
-FunctionsBilling__DisperseFeePool:test__DisperseFeePool_RevertIfNotSet() (gas: 8801)
+FunctionsBilling__DisperseFeePool:test__DisperseFeePool_RevertIfNotSet() (gas: 8810)
+FunctionsBilling__FulfillAndBill:test__FulfillAndBill_RevertIfInvalidCommitment() (gas: 13302)
+FunctionsBilling__FulfillAndBill:test__FulfillAndBill_Success() (gas: 180763)
FunctionsClient_Constructor:test_Constructor_Success() (gas: 7573)
-FunctionsClient_FulfillRequest:test_FulfillRequest_MaximumGas() (gas: 498114)
-FunctionsClient_FulfillRequest:test_FulfillRequest_MinimumGas() (gas: 199285)
+FunctionsClient_FulfillRequest:test_FulfillRequest_MaximumGas() (gas: 497786)
+FunctionsClient_FulfillRequest:test_FulfillRequest_MinimumGas() (gas: 198990)
FunctionsClient_HandleOracleFulfillment:test_HandleOracleFulfillment_RevertIfNotRouter() (gas: 14623)
FunctionsClient_HandleOracleFulfillment:test_HandleOracleFulfillment_Success() (gas: 22923)
FunctionsClient__SendRequest:test__SendRequest_RevertIfInvalidCallbackGasLimit() (gas: 55059)
-FunctionsCoordinator_Constructor:test_Constructor_Success() (gas: 11984)
+FunctionsCoordinator_Constructor:test_Constructor_Success() (gas: 12006)
FunctionsCoordinator_GetDONPublicKey:test_GetDONPublicKey_RevertIfEmpty() (gas: 15334)
-FunctionsCoordinator_GetDONPublicKey:test_GetDONPublicKey_Success() (gas: 106496)
+FunctionsCoordinator_GetDONPublicKey:test_GetDONPublicKey_Success() (gas: 106506)
FunctionsCoordinator_GetThresholdPublicKey:test_GetThresholdPublicKey_RevertIfEmpty() (gas: 15313)
-FunctionsCoordinator_GetThresholdPublicKey:test_GetThresholdPublicKey_Success() (gas: 656556)
+FunctionsCoordinator_GetThresholdPublicKey:test_GetThresholdPublicKey_Success() (gas: 656362)
FunctionsCoordinator_SetDONPublicKey:test_SetDONPublicKey_RevertNotOwner() (gas: 20364)
-FunctionsCoordinator_SetDONPublicKey:test_SetDONPublicKey_Success() (gas: 101275)
+FunctionsCoordinator_SetDONPublicKey:test_SetDONPublicKey_Success() (gas: 101285)
FunctionsCoordinator_SetThresholdPublicKey:test_SetThresholdPublicKey_RevertNotOwner() (gas: 13892)
-FunctionsCoordinator_SetThresholdPublicKey:test_SetThresholdPublicKey_Success() (gas: 651248)
+FunctionsCoordinator_SetThresholdPublicKey:test_SetThresholdPublicKey_Success() (gas: 651054)
FunctionsCoordinator_StartRequest:test_StartRequest_RevertIfNotRouter() (gas: 22703)
-FunctionsCoordinator_StartRequest:test_StartRequest_Success() (gas: 107681)
+FunctionsCoordinator_StartRequest:test_StartRequest_Success() (gas: 108848)
FunctionsCoordinator__IsTransmitter:test__IsTransmitter_SuccessFound() (gas: 18957)
FunctionsCoordinator__IsTransmitter:test__IsTransmitter_SuccessNotFound() (gas: 19690)
FunctionsRequest_DEFAULT_BUFFER_SIZE:test_DEFAULT_BUFFER_SIZE() (gas: 246)
FunctionsRequest_EncodeCBOR:test_EncodeCBOR_Success() (gas: 223)
FunctionsRequest_REQUEST_DATA_VERSION:test_REQUEST_DATA_VERSION() (gas: 225)
FunctionsRouter_Constructor:test_Constructor_Success() (gas: 12007)
-FunctionsRouter_Fulfill:test_Fulfill_RequestNotProcessedCostExceedsCommitment() (gas: 169900)
-FunctionsRouter_Fulfill:test_Fulfill_RequestNotProcessedInsufficientGas() (gas: 160227)
+FunctionsRouter_Fulfill:test_Fulfill_RequestNotProcessedCostExceedsCommitment() (gas: 167459)
+FunctionsRouter_Fulfill:test_Fulfill_RequestNotProcessedInsufficientGas() (gas: 157790)
FunctionsRouter_Fulfill:test_Fulfill_RequestNotProcessedInvalidCommitment() (gas: 38115)
FunctionsRouter_Fulfill:test_Fulfill_RequestNotProcessedInvalidRequestId() (gas: 35238)
-FunctionsRouter_Fulfill:test_Fulfill_RequestNotProcessedSubscriptionBalanceInvariant() (gas: 178373)
+FunctionsRouter_Fulfill:test_Fulfill_RequestNotProcessedSubscriptionBalanceInvariant() (gas: 175935)
FunctionsRouter_Fulfill:test_Fulfill_RevertIfNotCommittedCoordinator() (gas: 28086)
-FunctionsRouter_Fulfill:test_Fulfill_RevertIfPaused() (gas: 153924)
-FunctionsRouter_Fulfill:test_Fulfill_SuccessClientNoLongerExists() (gas: 296712)
-FunctionsRouter_Fulfill:test_Fulfill_SuccessFulfilled() (gas: 310327)
-FunctionsRouter_Fulfill:test_Fulfill_SuccessUserCallbackReverts() (gas: 2484946)
-FunctionsRouter_Fulfill:test_Fulfill_SuccessUserCallbackRunsOutOfGas() (gas: 515433)
+FunctionsRouter_Fulfill:test_Fulfill_RevertIfPaused() (gas: 151478)
+FunctionsRouter_Fulfill:test_Fulfill_SuccessClientNoLongerExists() (gas: 321037)
+FunctionsRouter_Fulfill:test_Fulfill_SuccessFulfilled() (gas: 334658)
+FunctionsRouter_Fulfill:test_Fulfill_SuccessUserCallbackReverts() (gas: 2509939)
+FunctionsRouter_Fulfill:test_Fulfill_SuccessUserCallbackRunsOutOfGas() (gas: 540418)
FunctionsRouter_GetAdminFee:test_GetAdminFee_Success() (gas: 17983)
FunctionsRouter_GetAllowListId:test_GetAllowListId_Success() (gas: 12904)
FunctionsRouter_GetConfig:test_GetConfig_Success() (gas: 37159)
@@ -71,15 +82,15 @@ FunctionsRouter_ProposeContractsUpdate:test_ProposeContractsUpdate_RevertIfNotNe
FunctionsRouter_ProposeContractsUpdate:test_ProposeContractsUpdate_RevertIfNotOwner() (gas: 23392)
FunctionsRouter_ProposeContractsUpdate:test_ProposeContractsUpdate_Success() (gas: 118479)
FunctionsRouter_SendRequest:test_SendRequest_RevertIfConsumerNotAllowed() (gas: 59347)
-FunctionsRouter_SendRequest:test_SendRequest_RevertIfDuplicateRequestId() (gas: 192799)
+FunctionsRouter_SendRequest:test_SendRequest_RevertIfDuplicateRequestId() (gas: 193436)
FunctionsRouter_SendRequest:test_SendRequest_RevertIfEmptyData() (gas: 29426)
FunctionsRouter_SendRequest:test_SendRequest_RevertIfIncorrectDonId() (gas: 57925)
-FunctionsRouter_SendRequest:test_SendRequest_RevertIfInsufficientSubscriptionBalance() (gas: 186299)
+FunctionsRouter_SendRequest:test_SendRequest_RevertIfInsufficientSubscriptionBalance() (gas: 186932)
FunctionsRouter_SendRequest:test_SendRequest_RevertIfInvalidCallbackGasLimit() (gas: 50947)
FunctionsRouter_SendRequest:test_SendRequest_RevertIfInvalidDonId() (gas: 25082)
FunctionsRouter_SendRequest:test_SendRequest_RevertIfNoSubscription() (gas: 29132)
FunctionsRouter_SendRequest:test_SendRequest_RevertIfPaused() (gas: 34291)
-FunctionsRouter_SendRequest:test_SendRequest_Success() (gas: 285026)
+FunctionsRouter_SendRequest:test_SendRequest_Success() (gas: 286243)
FunctionsRouter_SendRequestToProposed:test_SendRequestToProposed_RevertIfConsumerNotAllowed() (gas: 65843)
FunctionsRouter_SendRequestToProposed:test_SendRequestToProposed_RevertIfEmptyData() (gas: 36012)
FunctionsRouter_SendRequestToProposed:test_SendRequestToProposed_RevertIfIncorrectDonId() (gas: 29896)
@@ -87,8 +98,8 @@ FunctionsRouter_SendRequestToProposed:test_SendRequestToProposed_RevertIfInvalid
FunctionsRouter_SendRequestToProposed:test_SendRequestToProposed_RevertIfInvalidDonId() (gas: 27503)
FunctionsRouter_SendRequestToProposed:test_SendRequestToProposed_RevertIfNoSubscription() (gas: 35717)
FunctionsRouter_SendRequestToProposed:test_SendRequestToProposed_RevertIfPaused() (gas: 40810)
-FunctionsRouter_SendRequestToProposed:test_SendRequestToProposed_Success() (gas: 291595)
-FunctionsRouter_SendRequestToProposed:test_SendRequest_RevertIfInsufficientSubscriptionBalance() (gas: 192791)
+FunctionsRouter_SendRequestToProposed:test_SendRequestToProposed_Success() (gas: 292812)
+FunctionsRouter_SendRequestToProposed:test_SendRequest_RevertIfInsufficientSubscriptionBalance() (gas: 193424)
FunctionsRouter_SetAllowListId:test_SetAllowListId_Success() (gas: 30688)
FunctionsRouter_SetAllowListId:test_UpdateConfig_RevertIfNotOwner() (gas: 13403)
FunctionsRouter_Unpause:test_Unpause_RevertIfNotOwner() (gas: 13293)
@@ -101,7 +112,7 @@ FunctionsSubscriptions_AcceptSubscriptionOwnerTransfer:test_AcceptSubscriptionOw
FunctionsSubscriptions_AcceptSubscriptionOwnerTransfer:test_AcceptSubscriptionOwnerTransfer_RevertIfPaused() (gas: 60987)
FunctionsSubscriptions_AcceptSubscriptionOwnerTransfer:test_AcceptSubscriptionOwnerTransfer_RevertIfSenderBecomesBlocked() (gas: 94677)
FunctionsSubscriptions_AcceptSubscriptionOwnerTransfer:test_AcceptSubscriptionOwnerTransfer_RevertIfSenderIsNotNewOwner() (gas: 62693)
-FunctionsSubscriptions_AcceptSubscriptionOwnerTransfer:test_AcceptSubscriptionOwnerTransfer_Success() (gas: 214560)
+FunctionsSubscriptions_AcceptSubscriptionOwnerTransfer:test_AcceptSubscriptionOwnerTransfer_Success() (gas: 215197)
FunctionsSubscriptions_AddConsumer:test_AddConsumer_RevertIfMaximumConsumers() (gas: 137893)
FunctionsSubscriptions_AddConsumer:test_AddConsumer_RevertIfMaximumConsumersAfterConfigUpdate() (gas: 164837)
FunctionsSubscriptions_AddConsumer:test_AddConsumer_RevertIfNoSubscription() (gas: 12946)
@@ -113,7 +124,7 @@ FunctionsSubscriptions_CancelSubscription:test_CancelSubscription_RevertIfNoSubs
FunctionsSubscriptions_CancelSubscription:test_CancelSubscription_RevertIfNotAllowedSender() (gas: 57885)
FunctionsSubscriptions_CancelSubscription:test_CancelSubscription_RevertIfNotSubscriptionOwner() (gas: 89272)
FunctionsSubscriptions_CancelSubscription:test_CancelSubscription_RevertIfPaused() (gas: 20148)
-FunctionsSubscriptions_CancelSubscription:test_CancelSubscription_RevertIfPendingRequests() (gas: 193688)
+FunctionsSubscriptions_CancelSubscription:test_CancelSubscription_RevertIfPendingRequests() (gas: 194325)
FunctionsSubscriptions_CancelSubscription:test_CancelSubscription_SuccessForfeitAllBalanceAsDeposit() (gas: 114506)
FunctionsSubscriptions_CancelSubscription:test_CancelSubscription_SuccessForfeitSomeBalanceAsDeposit() (gas: 125832)
FunctionsSubscriptions_CancelSubscription_ReceiveDeposit:test_CancelSubscription_SuccessRecieveDeposit() (gas: 74973)
@@ -130,11 +141,11 @@ FunctionsSubscriptions_GetSubscriptionsInRange:test_GetSubscriptionsInRange_Reve
FunctionsSubscriptions_GetSubscriptionsInRange:test_GetSubscriptionsInRange_RevertIfStartIsAfterEnd() (gas: 13459)
FunctionsSubscriptions_GetSubscriptionsInRange:test_GetSubscriptionsInRange_Success() (gas: 59592)
FunctionsSubscriptions_GetTotalBalance:test_GetTotalBalance_Success() (gas: 15010)
-FunctionsSubscriptions_OnTokenTransfer:test_OnTokenTransfer_RevertIfCallerIsNoCalldata(uint96) (runs: 256, μ: 28446, ~: 28446)
-FunctionsSubscriptions_OnTokenTransfer:test_OnTokenTransfer_RevertIfCallerIsNoSubscription(uint96) (runs: 256, μ: 30958, ~: 30958)
-FunctionsSubscriptions_OnTokenTransfer:test_OnTokenTransfer_RevertIfCallerIsNotLink(uint96) (runs: 256, μ: 14293, ~: 14293)
-FunctionsSubscriptions_OnTokenTransfer:test_OnTokenTransfer_RevertIfPaused(uint96) (runs: 256, μ: 35938, ~: 35938)
-FunctionsSubscriptions_OnTokenTransfer:test_OnTokenTransfer_Success(uint96) (runs: 256, μ: 59686, ~: 59686)
+FunctionsSubscriptions_OnTokenTransfer:test_OnTokenTransfer_RevertIfCallerIsNoCalldata(uint96) (runs: 256, μ: 43508, ~: 45548)
+FunctionsSubscriptions_OnTokenTransfer:test_OnTokenTransfer_RevertIfCallerIsNoSubscription(uint96) (runs: 256, μ: 46020, ~: 48060)
+FunctionsSubscriptions_OnTokenTransfer:test_OnTokenTransfer_RevertIfCallerIsNotLink(uint96) (runs: 256, μ: 14295, ~: 14295)
+FunctionsSubscriptions_OnTokenTransfer:test_OnTokenTransfer_RevertIfPaused(uint96) (runs: 256, μ: 51089, ~: 53040)
+FunctionsSubscriptions_OnTokenTransfer:test_OnTokenTransfer_Success(uint96) (runs: 256, μ: 86057, ~: 89604)
FunctionsSubscriptions_OracleWithdraw:test_OracleWithdraw_RevertIfAmountMoreThanBalance() (gas: 20745)
FunctionsSubscriptions_OracleWithdraw:test_OracleWithdraw_RevertIfBalanceInvariant() (gas: 189)
FunctionsSubscriptions_OracleWithdraw:test_OracleWithdraw_RevertIfNoAmount() (gas: 15638)
@@ -146,7 +157,7 @@ FunctionsSubscriptions_OwnerCancelSubscription:test_OwnerCancelSubscription_Reve
FunctionsSubscriptions_OwnerCancelSubscription:test_OwnerCancelSubscription_Success() (gas: 54867)
FunctionsSubscriptions_OwnerCancelSubscription:test_OwnerCancelSubscription_SuccessDeletesSubscription() (gas: 49607)
FunctionsSubscriptions_OwnerCancelSubscription:test_OwnerCancelSubscription_SuccessSubOwnerRefunded() (gas: 50896)
-FunctionsSubscriptions_OwnerCancelSubscription:test_OwnerCancelSubscription_SuccessWhenRequestInFlight() (gas: 164303)
+FunctionsSubscriptions_OwnerCancelSubscription:test_OwnerCancelSubscription_SuccessWhenRequestInFlight() (gas: 164812)
FunctionsSubscriptions_OwnerWithdraw:test_OwnerWithdraw_RevertIfAmountMoreThanBalance() (gas: 17924)
FunctionsSubscriptions_OwnerWithdraw:test_OwnerWithdraw_RevertIfBalanceInvariant() (gas: 210)
FunctionsSubscriptions_OwnerWithdraw:test_OwnerWithdraw_RevertIfNotOwner() (gas: 15555)
@@ -155,7 +166,7 @@ FunctionsSubscriptions_OwnerWithdraw:test_OwnerWithdraw_SuccessIfRecipientAddres
FunctionsSubscriptions_OwnerWithdraw:test_OwnerWithdraw_SuccessPaysRecipient() (gas: 54413)
FunctionsSubscriptions_OwnerWithdraw:test_OwnerWithdraw_SuccessSetsBalanceToZero() (gas: 37790)
FunctionsSubscriptions_PendingRequestExists:test_PendingRequestExists_SuccessFalseIfNoPendingRequests() (gas: 14981)
-FunctionsSubscriptions_PendingRequestExists:test_PendingRequestExists_SuccessTrueIfPendingRequests() (gas: 175857)
+FunctionsSubscriptions_PendingRequestExists:test_PendingRequestExists_SuccessTrueIfPendingRequests() (gas: 176494)
FunctionsSubscriptions_ProposeSubscriptionOwnerTransfer:test_ProposeSubscriptionOwnerTransfer_RevertIfEmptyNewOwner() (gas: 27611)
FunctionsSubscriptions_ProposeSubscriptionOwnerTransfer:test_ProposeSubscriptionOwnerTransfer_RevertIfInvalidNewOwner() (gas: 57709)
FunctionsSubscriptions_ProposeSubscriptionOwnerTransfer:test_ProposeSubscriptionOwnerTransfer_RevertIfNoSubscription() (gas: 15001)
@@ -171,7 +182,7 @@ FunctionsSubscriptions_RemoveConsumer:test_RemoveConsumer_RevertIfNoSubscription
FunctionsSubscriptions_RemoveConsumer:test_RemoveConsumer_RevertIfNotAllowedSender() (gas: 57800)
FunctionsSubscriptions_RemoveConsumer:test_RemoveConsumer_RevertIfNotSubscriptionOwner() (gas: 87208)
FunctionsSubscriptions_RemoveConsumer:test_RemoveConsumer_RevertIfPaused() (gas: 18049)
-FunctionsSubscriptions_RemoveConsumer:test_RemoveConsumer_RevertIfPendingRequests() (gas: 191221)
+FunctionsSubscriptions_RemoveConsumer:test_RemoveConsumer_RevertIfPendingRequests() (gas: 191858)
FunctionsSubscriptions_RemoveConsumer:test_RemoveConsumer_Success() (gas: 41979)
FunctionsSubscriptions_SetFlags:test_SetFlags_RevertIfNoSubscription() (gas: 12891)
FunctionsSubscriptions_SetFlags:test_SetFlags_RevertIfNotOwner() (gas: 15684)
@@ -209,5 +220,5 @@ Gas_AcceptTermsOfService:test_AcceptTermsOfService_Gas() (gas: 84675)
Gas_AddConsumer:test_AddConsumer_Gas() (gas: 79087)
Gas_CreateSubscription:test_CreateSubscription_Gas() (gas: 73375)
Gas_FundSubscription:test_FundSubscription_Gas() (gas: 38546)
-Gas_SendRequest:test_SendRequest_MaximumGas() (gas: 964214)
-Gas_SendRequest:test_SendRequest_MinimumGas() (gas: 156934)
\ No newline at end of file
+Gas_SendRequest:test_SendRequest_MaximumGas() (gas: 979631)
+Gas_SendRequest:test_SendRequest_MinimumGas() (gas: 157578)
\ No newline at end of file
diff --git a/contracts/gas-snapshots/llo-feeds.gas-snapshot b/contracts/gas-snapshots/llo-feeds.gas-snapshot
index a9877fbe33c..ad9339a3410 100644
--- a/contracts/gas-snapshots/llo-feeds.gas-snapshot
+++ b/contracts/gas-snapshots/llo-feeds.gas-snapshot
@@ -1,3 +1,23 @@
+ByteUtilTest:test_readAddress() (gas: 542)
+ByteUtilTest:test_readAddressMultiWord() (gas: 540)
+ByteUtilTest:test_readAddressWithEmptyArray() (gas: 3274)
+ByteUtilTest:test_readAddressWithNotEnoughBytes() (gas: 3314)
+ByteUtilTest:test_readUint192Max() (gas: 485)
+ByteUtilTest:test_readUint192Min() (gas: 508)
+ByteUtilTest:test_readUint192MultiWord() (gas: 486)
+ByteUtilTest:test_readUint192WithEmptyArray() (gas: 3274)
+ByteUtilTest:test_readUint192WithNotEnoughBytes() (gas: 3314)
+ByteUtilTest:test_readUint256Max() (gas: 502)
+ByteUtilTest:test_readUint256Min() (gas: 546)
+ByteUtilTest:test_readUint256MultiWord() (gas: 500)
+ByteUtilTest:test_readUint256WithEmptyArray() (gas: 3296)
+ByteUtilTest:test_readUint256WithNotEnoughBytes() (gas: 3293)
+ByteUtilTest:test_readUint32Max() (gas: 507)
+ByteUtilTest:test_readUint32Min() (gas: 487)
+ByteUtilTest:test_readUint32MultiWord() (gas: 552)
+ByteUtilTest:test_readUint32WithEmptyArray() (gas: 3253)
+ByteUtilTest:test_readUint32WithNotEnoughBytes() (gas: 3272)
+ByteUtilTest:test_readZeroAddress() (gas: 519)
FeeManagerProcessFeeTest:test_DiscountIsAppliedForNative() (gas: 52282)
FeeManagerProcessFeeTest:test_DiscountIsReturnedForNative() (gas: 52235)
FeeManagerProcessFeeTest:test_DiscountIsReturnedForNativeWithSurcharge() (gas: 78440)
diff --git a/contracts/hardhat.config.ts b/contracts/hardhat.config.ts
index 521345ffc9e..5306827b8e3 100644
--- a/contracts/hardhat.config.ts
+++ b/contracts/hardhat.config.ts
@@ -105,6 +105,18 @@ let config = {
},
},
},
+ 'src/v0.8/vrf/dev/VRFCoordinatorV2_5.sol': {
+ version: '0.8.6',
+ settings: {
+ optimizer: {
+ enabled: true,
+ runs: 50, // see native_solc_compile_all_vrf
+ },
+ metadata: {
+ bytecodeHash: 'none',
+ },
+ },
+ },
},
},
contractSizer: {
diff --git a/contracts/package.json b/contracts/package.json
index 05a5293e48d..6d0b7af6ccd 100644
--- a/contracts/package.json
+++ b/contracts/package.json
@@ -18,7 +18,7 @@
"prepublishOnly": "pnpm compile && ./scripts/prepublish_generate_abi_folder",
"publish-beta": "pnpm publish --tag beta",
"publish-prod": "npm dist-tag add @chainlink/contracts@0.8.0 latest",
- "solhint": "solhint --max-warnings 350 \"./src/v0.8/**/*.sol\""
+ "solhint": "solhint --max-warnings 371 \"./src/v0.8/**/*.sol\""
},
"files": [
"src/v0.8",
@@ -72,11 +72,11 @@
"istanbul": "^0.4.5",
"moment": "^2.29.4",
"prettier": "^3.0.3",
- "prettier-plugin-solidity": "1.1.3",
+ "prettier-plugin-solidity": "1.1.4-dev",
"rlp": "^2.2.7",
- "solhint": "^3.6.2",
- "solhint-plugin-chainlink-solidity": "git+https://github.com/smartcontractkit/chainlink-solhint-rules.git",
- "solhint-plugin-prettier": "^0.0.5",
+ "solhint": "^4.0.0",
+ "solhint-plugin-chainlink-solidity": "git+https://github.com/smartcontractkit/chainlink-solhint-rules.git#v1.2.0",
+ "solhint-plugin-prettier": "^0.1.0",
"solidity-coverage": "^0.8.5",
"ts-node": "^10.9.1",
"tslib": "^2.6.2",
diff --git a/contracts/pnpm-lock.yaml b/contracts/pnpm-lock.yaml
index 2b6082d656a..ac4efd5f59f 100644
--- a/contracts/pnpm-lock.yaml
+++ b/contracts/pnpm-lock.yaml
@@ -140,20 +140,20 @@ devDependencies:
specifier: ^3.0.3
version: 3.0.3
prettier-plugin-solidity:
- specifier: 1.1.3
- version: 1.1.3(prettier@3.0.3)
+ specifier: 1.1.4-dev
+ version: 1.1.4-dev(prettier@3.0.3)
rlp:
specifier: ^2.2.7
version: 2.2.7
solhint:
- specifier: ^3.6.2
- version: 3.6.2
+ specifier: ^4.0.0
+ version: 4.0.0
solhint-plugin-chainlink-solidity:
- specifier: git+https://github.com/smartcontractkit/chainlink-solhint-rules.git
- version: github.com/smartcontractkit/chainlink-solhint-rules/6229ce5d3cc3e4a2454411bebc887c5ca240dcf2
+ specifier: git+https://github.com/smartcontractkit/chainlink-solhint-rules.git#v1.2.0
+ version: github.com/smartcontractkit/chainlink-solhint-rules/cfc50b32f95b730304a50deb2e27e88d87115874
solhint-plugin-prettier:
- specifier: ^0.0.5
- version: 0.0.5(prettier-plugin-solidity@1.1.3)(prettier@3.0.3)
+ specifier: ^0.1.0
+ version: 0.1.0(prettier-plugin-solidity@1.1.4-dev)(prettier@3.0.3)
solidity-coverage:
specifier: ^0.8.5
version: 0.8.5(hardhat@2.18.1)
@@ -1317,6 +1317,35 @@ packages:
tslib: 2.6.2
dev: true
+ /@pnpm/config.env-replace@1.1.0:
+ resolution: {integrity: sha512-htyl8TWnKL7K/ESFa1oW2UB5lVDxuF5DpM7tBi6Hu2LNL3mWkIzNLG6N4zoCUP1lCKNxWy/3iu8mS8MvToGd6w==}
+ engines: {node: '>=12.22.0'}
+ dev: true
+
+ /@pnpm/network.ca-file@1.0.2:
+ resolution: {integrity: sha512-YcPQ8a0jwYU9bTdJDpXjMi7Brhkr1mXsXrUJvjqM2mQDgkRiz8jFaQGOdaLxgjtUfQgZhKy/O3cG/YwmgKaxLA==}
+ engines: {node: '>=12.22.0'}
+ dependencies:
+ graceful-fs: 4.2.10
+ dev: true
+
+ /@pnpm/npm-conf@2.2.2:
+ resolution: {integrity: sha512-UA91GwWPhFExt3IizW6bOeY/pQ0BkuNwKjk9iQW9KqxluGCrg4VenZ0/L+2Y0+ZOtme72EVvg6v0zo3AMQRCeA==}
+ engines: {node: '>=12'}
+ dependencies:
+ '@pnpm/config.env-replace': 1.1.0
+ '@pnpm/network.ca-file': 1.0.2
+ config-chain: 1.1.13
+ dev: true
+
+ /@prettier/sync@0.3.0(prettier@3.0.3):
+ resolution: {integrity: sha512-3dcmCyAxIcxy036h1I7MQU/uEEBq8oLwf1CE3xeze+MPlgkdlb/+w6rGR/1dhp6Hqi17fRS6nvwnOzkESxEkOw==}
+ peerDependencies:
+ prettier: ^3.0.0
+ dependencies:
+ prettier: 3.0.3
+ dev: true
+
/@resolver-engine/core@0.3.3:
resolution: {integrity: sha512-eB8nEbKDJJBi5p5SrvrvILn4a0h42bKtbCTri3ZxCGt6UvoQyp7HnGOfki944bUjBSHKK3RvgfViHn+kqdXtnQ==}
dependencies:
@@ -1477,6 +1506,12 @@ packages:
antlr4ts: 0.5.0-alpha.4
dev: true
+ /@solidity-parser/parser@0.16.1:
+ resolution: {integrity: sha512-PdhRFNhbTtu3x8Axm0uYpqOy/lODYQK+MlYSgqIsq2L8SFYEHJPHNUiOTAJbDGzNjjr1/n9AcIayxafR/fWmYw==}
+ dependencies:
+ antlr4ts: 0.5.0-alpha.4
+ dev: true
+
/@szmarczak/http-timer@1.1.2:
resolution: {integrity: sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA==}
engines: {node: '>=6'}
@@ -3833,6 +3868,13 @@ packages:
typedarray: 0.0.6
dev: true
+ /config-chain@1.1.13:
+ resolution: {integrity: sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==}
+ dependencies:
+ ini: 1.3.8
+ proto-list: 1.2.4
+ dev: true
+
/constant-case@2.0.0:
resolution: {integrity: sha512-eS0N9WwmjTqrOmR3o83F5vW8Z+9R1HnVz3xmzT2PMFug9ly+Au/fxRWlEBSb6LcZwspSsEn9Xs1uw9YgzAg1EQ==}
dependencies:
@@ -7326,6 +7368,13 @@ packages:
graceful-fs: 4.2.10
dev: true
+ /latest-version@7.0.0:
+ resolution: {integrity: sha512-KvNT4XqAMzdcL6ka6Tl3i2lYeFDgXNCuIX+xNx6ZMVR1dFq+idXd9FLKNMOIx0t9mJ9/HudyX4oZWXZQ0UJHeg==}
+ engines: {node: '>=14.16'}
+ dependencies:
+ package-json: 8.1.1
+ dev: true
+
/lcid@1.0.0:
resolution: {integrity: sha512-YiGkH6EnGrDGqLMITnGjXtGmNtjoXw9SVUzcaos8RBi7Ps0VBylkq+vOcY9QE5poLasPCR849ucFUkl0UzUyOw==}
engines: {node: '>=0.10.0'}
@@ -8523,6 +8572,16 @@ packages:
engines: {node: '>=6'}
dev: true
+ /package-json@8.1.1:
+ resolution: {integrity: sha512-cbH9IAIJHNj9uXi196JVsRlt7cHKak6u/e6AkL/bkRelZ7rlL3X1YKxsZwa36xipOEKAsdtmaG6aAJoM1fx2zA==}
+ engines: {node: '>=14.16'}
+ dependencies:
+ got: 12.1.0
+ registry-auth-token: 5.0.2
+ registry-url: 6.0.1
+ semver: 7.5.4
+ dev: true
+
/param-case@2.1.1:
resolution: {integrity: sha512-eQE845L6ot89sk2N8liD8HAuH4ca6Vvr7VWAWwt7+kvvG5aBcPmmphQ68JsEG2qa9n1TykS2DLeMt363AAH8/w==}
dependencies:
@@ -8822,6 +8881,7 @@ packages:
/prettier-plugin-solidity@1.1.3(prettier@2.8.8):
resolution: {integrity: sha512-fQ9yucPi2sBbA2U2Xjh6m4isUTJ7S7QLc/XDDsktqqxYfTwdYKJ0EnnywXHwCGAaYbQNK+HIYPL1OemxuMsgeg==}
engines: {node: '>=12'}
+ requiresBuild: true
peerDependencies:
prettier: '>=2.3.0 || >=3.0.0-alpha.0'
dependencies:
@@ -8832,15 +8892,15 @@ packages:
dev: true
optional: true
- /prettier-plugin-solidity@1.1.3(prettier@3.0.3):
- resolution: {integrity: sha512-fQ9yucPi2sBbA2U2Xjh6m4isUTJ7S7QLc/XDDsktqqxYfTwdYKJ0EnnywXHwCGAaYbQNK+HIYPL1OemxuMsgeg==}
- engines: {node: '>=12'}
+ /prettier-plugin-solidity@1.1.4-dev(prettier@3.0.3):
+ resolution: {integrity: sha512-SIDnHIPLN/Pod/dZoyJL07ViEcDxrXoT47ROQshpA/WFgyq/rRzLIc3oWkKfWiicHOD493Y/L1n9ds1GbwPoKQ==}
+ engines: {node: '>=16'}
peerDependencies:
- prettier: '>=2.3.0 || >=3.0.0-alpha.0'
+ prettier: '>=2.3.0'
dependencies:
- '@solidity-parser/parser': 0.16.0
+ '@solidity-parser/parser': 0.16.1
prettier: 3.0.3
- semver: 7.5.0
+ semver: 7.5.4
solidity-comments-extractor: 0.0.7
dev: true
@@ -8892,6 +8952,10 @@ packages:
signal-exit: 3.0.7
dev: true
+ /proto-list@1.2.4:
+ resolution: {integrity: sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==}
+ dev: true
+
/proxy-addr@2.0.5:
resolution: {integrity: sha512-t/7RxHXPH6cJtP0pRG6smSr9QJidhB+3kXu0KgXnbGYMgzEnUxRQ4/LDdfOwZEMyIh3/xHb8PX3t+lfL9z+YVQ==}
engines: {node: '>= 0.10'}
@@ -9080,6 +9144,16 @@ packages:
unpipe: 1.0.0
dev: true
+ /rc@1.2.8:
+ resolution: {integrity: sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==}
+ hasBin: true
+ dependencies:
+ deep-extend: 0.6.0
+ ini: 1.3.8
+ minimist: 1.2.8
+ strip-json-comments: 2.0.1
+ dev: true
+
/read-pkg-up@1.0.1:
resolution: {integrity: sha512-WD9MTlNtI55IwYUS27iHh9tK3YoIVhxis8yKhLpTqWtml739uXc9NWTpxoHkfZf3+DkCCsXox94/VWZniuZm6A==}
engines: {node: '>=0.10.0'}
@@ -9216,6 +9290,20 @@ packages:
regjsparser: 0.1.5
dev: true
+ /registry-auth-token@5.0.2:
+ resolution: {integrity: sha512-o/3ikDxtXaA59BmZuZrJZDJv8NMDGSj+6j6XaeBmHw8eY1i1qd9+6H+LjVvQXx3HN6aRCGa1cUdJ9RaJZUugnQ==}
+ engines: {node: '>=14'}
+ dependencies:
+ '@pnpm/npm-conf': 2.2.2
+ dev: true
+
+ /registry-url@6.0.1:
+ resolution: {integrity: sha512-+crtS5QjFRqFCoQmvGduwYWEBng99ZvmFvF+cUJkGYF1L1BfU8C6Zp9T7f5vPAwyLkUExpvK+ANVZmGU49qi4Q==}
+ engines: {node: '>=12'}
+ dependencies:
+ rc: 1.2.8
+ dev: true
+
/regjsgen@0.2.0:
resolution: {integrity: sha512-x+Y3yA24uF68m5GA+tBjbGYo64xXVJpbToBaWCoSNSc1hdk6dfctaRWrNFTVJZIIhL5GxW8zwjoixbnifnK59g==}
dev: true
@@ -9553,9 +9641,11 @@ packages:
resolution: {integrity: sha512-+XC0AD/R7Q2mPSRuy2Id0+CGTZ98+8f+KvwirxOKIEyid+XSx6HbC63p+O4IndTHuX5Z+JxQ0TghCkO5Cg/2HA==}
engines: {node: '>=10'}
hasBin: true
+ requiresBuild: true
dependencies:
lru-cache: 6.0.0
dev: true
+ optional: true
/semver@7.5.4:
resolution: {integrity: sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==}
@@ -9863,19 +9953,20 @@ packages:
- debug
dev: true
- /solhint-plugin-prettier@0.0.5(prettier-plugin-solidity@1.1.3)(prettier@3.0.3):
- resolution: {integrity: sha512-7jmWcnVshIrO2FFinIvDQmhQpfpS2rRRn3RejiYgnjIE68xO2bvrYvjqVNfrio4xH9ghOqn83tKuTzLjEbmGIA==}
+ /solhint-plugin-prettier@0.1.0(prettier-plugin-solidity@1.1.4-dev)(prettier@3.0.3):
+ resolution: {integrity: sha512-SDOTSM6tZxZ6hamrzl3GUgzF77FM6jZplgL2plFBclj/OjKP8Z3eIPojKU73gRr0MvOS8ACZILn8a5g0VTz/Gw==}
peerDependencies:
- prettier: ^1.15.0 || ^2.0.0
- prettier-plugin-solidity: ^1.0.0-alpha.14
+ prettier: ^3.0.0
+ prettier-plugin-solidity: ^1.0.0
dependencies:
+ '@prettier/sync': 0.3.0(prettier@3.0.3)
prettier: 3.0.3
prettier-linter-helpers: 1.0.0
- prettier-plugin-solidity: 1.1.3(prettier@3.0.3)
+ prettier-plugin-solidity: 1.1.4-dev(prettier@3.0.3)
dev: true
- /solhint@3.6.2:
- resolution: {integrity: sha512-85EeLbmkcPwD+3JR7aEMKsVC9YrRSxd4qkXuMzrlf7+z2Eqdfm1wHWq1ffTuo5aDhoZxp2I9yF3QkxZOxOL7aQ==}
+ /solhint@4.0.0:
+ resolution: {integrity: sha512-bFViMcFvhqVd/HK3Roo7xZXX5nbujS7Bxeg5vnZc9QvH0yCWCrQ38Yrn1pbAY9tlKROc6wFr+rK1mxYgYrjZgA==}
hasBin: true
dependencies:
'@solidity-parser/parser': 0.16.0
@@ -9889,6 +9980,7 @@ packages:
glob: 8.1.0
ignore: 5.2.4
js-yaml: 4.1.0
+ latest-version: 7.0.0
lodash: 4.17.21
pluralize: 8.0.0
semver: 7.5.4
@@ -10321,6 +10413,11 @@ packages:
engines: {node: '>=4'}
dev: true
+ /strip-json-comments@2.0.1:
+ resolution: {integrity: sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==}
+ engines: {node: '>=0.10.0'}
+ dev: true
+
/strip-json-comments@3.1.1:
resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==}
engines: {node: '>=8'}
@@ -12349,8 +12446,8 @@ packages:
ethereumjs-util: 6.2.1
dev: true
- github.com/smartcontractkit/chainlink-solhint-rules/6229ce5d3cc3e4a2454411bebc887c5ca240dcf2:
- resolution: {tarball: https://codeload.github.com/smartcontractkit/chainlink-solhint-rules/tar.gz/6229ce5d3cc3e4a2454411bebc887c5ca240dcf2}
+ github.com/smartcontractkit/chainlink-solhint-rules/cfc50b32f95b730304a50deb2e27e88d87115874:
+ resolution: {tarball: https://codeload.github.com/smartcontractkit/chainlink-solhint-rules/tar.gz/cfc50b32f95b730304a50deb2e27e88d87115874}
name: '@chainlink/solhint-plugin-chainlink-solidity'
- version: 1.0.1
+ version: 1.2.0
dev: true
diff --git a/contracts/scripts/native_solc_compile_all_6 b/contracts/scripts/native_solc_compile_all_6
index 7f8f4fa6957..f7bd60d6781 100755
--- a/contracts/scripts/native_solc_compile_all_6
+++ b/contracts/scripts/native_solc_compile_all_6
@@ -12,7 +12,7 @@ OPTIMIZE_RUNS=1000000
SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )"
-python3 -m pip install --require-hashes -r $SCRIPTPATH/requirements.txt
+python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt
solc-select install $SOLC_VERSION
solc-select use $SOLC_VERSION
@@ -20,10 +20,13 @@ export SOLC_VERSION=$SOLC_VERSION
compileContract () {
+ local contract
+ contract=$(basename "$1" ".sol")
+
solc --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \
- -o $ROOT/contracts/solc/v0.6 \
- --abi --bin --allow-paths $ROOT/contracts/src/v0.6 \
- $ROOT/contracts/src/v0.6/$1
+ -o "$ROOT"/contracts/solc/v0.6/"$contract" \
+ --abi --bin --allow-paths "$ROOT"/contracts/src/v0.6 \
+ "$ROOT"/contracts/src/v0.6/"$1"
}
compileContract Flags.sol
diff --git a/contracts/scripts/native_solc_compile_all_7 b/contracts/scripts/native_solc_compile_all_7
index b2d76b3cb5f..fd64d9ffce7 100755
--- a/contracts/scripts/native_solc_compile_all_7
+++ b/contracts/scripts/native_solc_compile_all_7
@@ -12,7 +12,7 @@ OPTIMIZE_RUNS=1000000
SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )"
-python3 -m pip install --require-hashes -r $SCRIPTPATH/requirements.txt
+python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt
solc-select install $SOLC_VERSION
solc-select use $SOLC_VERSION
@@ -20,10 +20,13 @@ export SOLC_VERSION=$SOLC_VERSION
compileContract () {
+ local contract
+ contract=$(basename "$1" ".sol")
+
solc --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \
- -o $ROOT/contracts/solc/v0.7 \
- --abi --bin --allow-paths $ROOT/contracts/src/v0.7 \
- $ROOT/contracts/src/v0.7/$1
+ -o "$ROOT"/contracts/solc/v0.7/"$contract" \
+ --abi --bin --allow-paths "$ROOT"/contracts/src/v0.7 \
+ "$ROOT"/contracts/src/v0.7/"$1"
}
compileContract tests/MultiWordConsumer.sol
diff --git a/contracts/scripts/native_solc_compile_all_automation b/contracts/scripts/native_solc_compile_all_automation
index 414453c8482..1c54d677135 100755
--- a/contracts/scripts/native_solc_compile_all_automation
+++ b/contracts/scripts/native_solc_compile_all_automation
@@ -12,7 +12,7 @@ OPTIMIZE_RUNS=1000000
SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )"
-python3 -m pip install --require-hashes -r $SCRIPTPATH/requirements.txt
+python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt
solc-select install $SOLC_VERSION
solc-select use $SOLC_VERSION
@@ -20,10 +20,13 @@ export SOLC_VERSION=$SOLC_VERSION
compileContract () {
- solc @openzeppelin/=$ROOT/contracts/node_modules/@openzeppelin/ --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \
- -o $ROOT/contracts/solc/v$SOLC_VERSION \
- --abi --bin --allow-paths $ROOT/contracts/src/v0.8,$ROOT/contracts/node_modules\
- $ROOT/contracts/src/v0.8/$1
+ local contract
+ contract=$(basename "$1" ".sol")
+
+ solc @openzeppelin/="$ROOT"/contracts/node_modules/@openzeppelin/ --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \
+ -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \
+ --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8,"$ROOT"/contracts/node_modules\
+ "$ROOT"/contracts/src/v0.8/"$1"
}
compileContract automation/upkeeps/CronUpkeepFactory.sol
@@ -60,8 +63,6 @@ compileContract automation/v2_1/AutomationForwarderLogic.sol
compileContract automation/testhelpers/LogTriggeredStreamsLookup.sol
compileContract automation/testhelpers/DummyProtocol.sol
-compileContract automation/testhelpers/KeeperBase.sol
-compileContract automation/testhelpers/KeeperCompatibleInterface.sol
compileContract automation/testhelpers/KeeperConsumer.sol
compileContract automation/testhelpers/KeeperConsumerPerformance.sol
compileContract automation/testhelpers/PerformDataChecker.sol
diff --git a/contracts/scripts/native_solc_compile_all_events_mock b/contracts/scripts/native_solc_compile_all_events_mock
index 993530e2fa1..68e8bdfa6a9 100755
--- a/contracts/scripts/native_solc_compile_all_events_mock
+++ b/contracts/scripts/native_solc_compile_all_events_mock
@@ -12,17 +12,20 @@ OPTIMIZE_RUNS=1000000
SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )"
-python3 -m pip install --require-hashes -r $SCRIPTPATH/requirements.txt
+python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt
solc-select install $SOLC_VERSION
solc-select use $SOLC_VERSION
export SOLC_VERSION=$SOLC_VERSION
compileContract () {
+ local contract
+ contract=$(basename "$1" ".sol")
+
solc --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \
- -o $ROOT/contracts/solc/v$SOLC_VERSION \
- --abi --bin --allow-paths $ROOT/contracts/src/v0.8\
- $ROOT/contracts/src/v0.8/$1
+ -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \
+ --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8\
+ "$ROOT"/contracts/src/v0.8/"$1"
}
# This script is used to compile the contracts for the Events Mocks used in the tests.
diff --git a/contracts/scripts/native_solc_compile_all_feeds b/contracts/scripts/native_solc_compile_all_feeds
index 2bbd9fe869c..2c5808d4663 100755
--- a/contracts/scripts/native_solc_compile_all_feeds
+++ b/contracts/scripts/native_solc_compile_all_feeds
@@ -12,7 +12,7 @@ OPTIMIZE_RUNS=1000000
SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )"
-python3 -m pip install --require-hashes -r $SCRIPTPATH/requirements.txt
+python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt
solc-select install $SOLC_VERSION
solc-select use $SOLC_VERSION
@@ -20,13 +20,16 @@ export SOLC_VERSION=$SOLC_VERSION
compileContract () {
+ local contract
+ contract=$(basename "$1" ".sol")
+
solc --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \
- -o $ROOT/contracts/solc/v$SOLC_VERSION \
- --abi --bin --allow-paths $ROOT/contracts/src/v0.8\
- $ROOT/contracts/src/v0.8/$1
+ -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \
+ --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8\
+ "$ROOT"/contracts/src/v0.8/"$1"
}
# Aggregators
-compileContract interfaces/AggregatorV2V3Interface.sol
+compileContract shared/interfaces/AggregatorV2V3Interface.sol
compileContract Chainlink.sol
compileContract ChainlinkClient.sol
diff --git a/contracts/scripts/native_solc_compile_all_llo-feeds b/contracts/scripts/native_solc_compile_all_llo-feeds
index 27ef714ec1c..2caa6fb98de 100755
--- a/contracts/scripts/native_solc_compile_all_llo-feeds
+++ b/contracts/scripts/native_solc_compile_all_llo-feeds
@@ -11,7 +11,7 @@ OPTIMIZE_RUNS=1000000
SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
-python3 -m pip install --require-hashes -r $SCRIPTPATH/requirements.txt
+python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt
solc-select install $SOLC_VERSION
solc-select use $SOLC_VERSION
export SOLC_VERSION=$SOLC_VERSION
@@ -19,10 +19,13 @@ export SOLC_VERSION=$SOLC_VERSION
ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )"
compileContract () {
+ local contract
+ contract=$(basename "$1" ".sol")
+
solc --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \
- -o $ROOT/contracts/solc/v$SOLC_VERSION \
- --abi --bin --allow-paths $ROOT/contracts/src/v0.8\
- $ROOT/contracts/src/v0.8/$1
+ -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \
+ --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8\
+ "$ROOT"/contracts/src/v0.8/"$1"
}
compileContract llo-feeds/Verifier.sol
diff --git a/contracts/scripts/native_solc_compile_all_logpoller b/contracts/scripts/native_solc_compile_all_logpoller
index 91a0606dba8..b6ac51ecedb 100755
--- a/contracts/scripts/native_solc_compile_all_logpoller
+++ b/contracts/scripts/native_solc_compile_all_logpoller
@@ -11,7 +11,7 @@ OPTIMIZE_RUNS=1000000
SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
-python3 -m pip install --require-hashes -r $SCRIPTPATH/requirements.txt
+python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt
solc-select install $SOLC_VERSION
solc-select use $SOLC_VERSION
export SOLC_VERSION=$SOLC_VERSION
@@ -19,10 +19,13 @@ export SOLC_VERSION=$SOLC_VERSION
ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )"
compileContract () {
+ local contract
+ contract=$(basename "$1" ".sol")
+
solc --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \
- -o $ROOT/contracts/solc/v$SOLC_VERSION \
- --abi --bin --allow-paths $ROOT/contracts/src/v0.8\
- $ROOT/contracts/src/v0.8/$1
+ -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \
+ --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8\
+ "$ROOT"/contracts/src/v0.8/"$1"
}
diff --git a/contracts/scripts/native_solc_compile_all_ocr2vrf b/contracts/scripts/native_solc_compile_all_ocr2vrf
index 42478d7ebc2..755edd34f56 100755
--- a/contracts/scripts/native_solc_compile_all_ocr2vrf
+++ b/contracts/scripts/native_solc_compile_all_ocr2vrf
@@ -16,7 +16,7 @@ echo "Compiling OCR2VRF contracts..."
SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1; cd ../../ && pwd -P )"
-python3 -m pip install --require-hashes -r $SCRIPTPATH/requirements.txt
+python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt
solc-select install $SOLC_VERSION
solc-select use $SOLC_VERSION
@@ -24,11 +24,14 @@ export SOLC_VERSION=$SOLC_VERSION
compileContract () {
- solc --overwrite --optimize --optimize-runs $2 --metadata-hash none \
- -o $ROOT/contracts/solc/v0.8.19 \
+ local contract
+ contract=$(basename "$1" ".sol")
+
+ solc --overwrite --optimize --optimize-runs "$2" --metadata-hash none \
+ -o "$ROOT"/contracts/solc/v0.8.19/"$contract" \
--abi --bin \
- --allow-paths $ROOT/../$FOLDER/contracts \
- $ROOT/$1
+ --allow-paths "$ROOT"/../$FOLDER/contracts \
+ "$ROOT"/"$1"
}
# OCR2VRF
diff --git a/contracts/scripts/native_solc_compile_all_operatorforwarder b/contracts/scripts/native_solc_compile_all_operatorforwarder
index 3bc5cb9249f..2d455994813 100755
--- a/contracts/scripts/native_solc_compile_all_operatorforwarder
+++ b/contracts/scripts/native_solc_compile_all_operatorforwarder
@@ -11,17 +11,20 @@ OPTIMIZE_RUNS=1000000
SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )"
-python3 -m pip install --require-hashes -r $SCRIPTPATH/requirements.txt
+python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt
solc-select install $SOLC_VERSION
solc-select use $SOLC_VERSION
export SOLC_VERSION=$SOLC_VERSION
compileContract () {
- solc @openzeppelin/=$ROOT/contracts/node_modules/@openzeppelin/ --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \
- -o $ROOT/contracts/solc/v$SOLC_VERSION \
- --abi --bin --allow-paths $ROOT/contracts/src/v0.8,$ROOT/contracts/node_modules\
- $ROOT/contracts/src/v0.8/$1
+ local contract
+ contract=$(basename "$1" ".sol")
+
+ solc @openzeppelin/="$ROOT"/contracts/node_modules/@openzeppelin/ --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \
+ -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \
+ --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8,"$ROOT"/contracts/node_modules\
+ "$ROOT"/contracts/src/v0.8/"$1"
}
# Contracts
diff --git a/contracts/scripts/native_solc_compile_all_shared b/contracts/scripts/native_solc_compile_all_shared
index db421f45e04..9178237b8a5 100755
--- a/contracts/scripts/native_solc_compile_all_shared
+++ b/contracts/scripts/native_solc_compile_all_shared
@@ -11,7 +11,7 @@ OPTIMIZE_RUNS=1000000
SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
-python3 -m pip install --require-hashes -r $SCRIPTPATH/requirements.txt
+python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt
solc-select install $SOLC_VERSION
solc-select use $SOLC_VERSION
export SOLC_VERSION=$SOLC_VERSION
@@ -19,10 +19,13 @@ export SOLC_VERSION=$SOLC_VERSION
ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )"
compileContract () {
+ local contract
+ contract=$(basename "$1" ".sol")
+
solc --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \
- -o $ROOT/contracts/solc/v$SOLC_VERSION \
- --abi --bin --allow-paths $ROOT/contracts/src/v0.8\
- $ROOT/contracts/src/v0.8/$1
+ -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \
+ --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8\
+ "$ROOT"/contracts/src/v0.8/"$1"
}
compileContract shared/token/ERC677/BurnMintERC677.sol
diff --git a/contracts/scripts/native_solc_compile_all_transmission b/contracts/scripts/native_solc_compile_all_transmission
index e08f38e2bac..281fa7aea73 100755
--- a/contracts/scripts/native_solc_compile_all_transmission
+++ b/contracts/scripts/native_solc_compile_all_transmission
@@ -11,17 +11,20 @@ OPTIMIZE_RUNS=1000000
SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )"
-python3 -m pip install --require-hashes -r $SCRIPTPATH/requirements.txt
+python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt
solc-select install $SOLC_VERSION
solc-select use $SOLC_VERSION
export SOLC_VERSION=$SOLC_VERSION
compileContract () {
+ local contract
+ contract=$(basename "$1" ".sol")
+
solc --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \
- -o $ROOT/contracts/solc/v$SOLC_VERSION \
- --abi --bin --allow-paths $ROOT/contracts/src/v0.8\
- $ROOT/contracts/src/v0.8/$1
+ -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \
+ --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8\
+ "$ROOT"/contracts/src/v0.8/"$1"
}
# Contracts
diff --git a/contracts/scripts/native_solc_compile_all_vrf b/contracts/scripts/native_solc_compile_all_vrf
index 80adba8e6f7..4eed35cf5bc 100755
--- a/contracts/scripts/native_solc_compile_all_vrf
+++ b/contracts/scripts/native_solc_compile_all_vrf
@@ -11,24 +11,30 @@ OPTIMIZE_RUNS=1000000
SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )"
-python3 -m pip install --require-hashes -r $SCRIPTPATH/requirements.txt
+python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt
solc-select install $SOLC_VERSION
solc-select use $SOLC_VERSION
export SOLC_VERSION=$SOLC_VERSION
compileContract () {
- solc @openzeppelin/=$ROOT/contracts/node_modules/@openzeppelin/ --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \
- -o $ROOT/contracts/solc/v$SOLC_VERSION \
- --abi --bin --allow-paths $ROOT/contracts/src/v0.8,$ROOT/contracts/node_modules\
- $ROOT/contracts/src/v0.8/$1
+ local contract
+ contract=$(basename "$1" ".sol")
+
+ solc @openzeppelin/="$ROOT"/contracts/node_modules/@openzeppelin/ --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \
+ -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \
+ --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8,"$ROOT"/contracts/node_modules\
+ "$ROOT"/contracts/src/v0.8/"$1"
}
compileContractAltOpts () {
- solc @openzeppelin/=$ROOT/contracts/node_modules/@openzeppelin/ --overwrite --optimize --optimize-runs $2 --metadata-hash none \
- -o $ROOT/contracts/solc/v$SOLC_VERSION \
- --abi --bin --allow-paths $ROOT/contracts/src/v0.8,$ROOT/contracts/node_modules\
- $ROOT/contracts/src/v0.8/$1
+ local contract
+ contract=$(basename "$1" ".sol")
+
+ solc @openzeppelin/="$ROOT"/contracts/node_modules/@openzeppelin/ --overwrite --optimize --optimize-runs "$2" --metadata-hash none \
+ -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \
+ --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8,"$ROOT"/contracts/node_modules\
+ "$ROOT"/contracts/src/v0.8/"$1"
}
# VRF
@@ -73,6 +79,7 @@ compileContract vrf/dev/testhelpers/VRFConsumerV2PlusUpgradeableExample.sol
compileContract vrf/dev/testhelpers/VRFV2PlusMaliciousMigrator.sol
compileContract vrf/dev/libraries/VRFV2PlusClient.sol
compileContract vrf/dev/testhelpers/VRFCoordinatorV2Plus_V2Example.sol
+compileContract vrf/dev/BlockhashStore.sol
compileContract vrf/dev/TrustedBlockhashStore.sol
compileContract vrf/dev/testhelpers/VRFV2PlusLoadTestWithMetrics.sol
compileContractAltOpts vrf/dev/testhelpers/VRFCoordinatorV2PlusUpgradedVersion.sol 5
diff --git a/contracts/src/v0.8/ValidatorProxy.sol b/contracts/src/v0.8/ValidatorProxy.sol
index 35909ad87de..627af73b395 100644
--- a/contracts/src/v0.8/ValidatorProxy.sol
+++ b/contracts/src/v0.8/ValidatorProxy.sol
@@ -2,7 +2,7 @@
pragma solidity ^0.8.0;
import {ConfirmedOwner} from "./shared/access/ConfirmedOwner.sol";
-import {AggregatorValidatorInterface} from "./interfaces/AggregatorValidatorInterface.sol";
+import {AggregatorValidatorInterface} from "./shared/interfaces/AggregatorValidatorInterface.sol";
import {TypeAndVersionInterface} from "./interfaces/TypeAndVersionInterface.sol";
// solhint-disable custom-errors
diff --git a/contracts/src/v0.8/automation/testhelpers/KeeperBase.sol b/contracts/src/v0.8/automation/testhelpers/KeeperBase.sol
deleted file mode 100644
index 6fe41607f75..00000000000
--- a/contracts/src/v0.8/automation/testhelpers/KeeperBase.sol
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: MIT
-pragma solidity 0.8.16;
-
-contract KeeperBase {
- /**
- * @notice method that allows it to be simulated via eth_call by checking that
- * the sender is the zero address.
- */
- function preventExecution() internal view {
- require(tx.origin == address(0), "only for simulated backend");
- }
-
- /**
- * @notice modifier that allows it to be simulated via eth_call by checking
- * that the sender is the zero address.
- */
- modifier cannotExecute() {
- preventExecution();
- _;
- }
-}
diff --git a/contracts/src/v0.8/automation/testhelpers/KeeperCompatibleInterface.sol b/contracts/src/v0.8/automation/testhelpers/KeeperCompatibleInterface.sol
deleted file mode 100644
index 113f5ef6a55..00000000000
--- a/contracts/src/v0.8/automation/testhelpers/KeeperCompatibleInterface.sol
+++ /dev/null
@@ -1,42 +0,0 @@
-// SPDX-License-Identifier: MIT
-
-pragma solidity 0.8.16;
-
-interface KeeperCompatibleInterface {
- /**
- * @notice method that is simulated by the keepers to see if any work actually
- * needs to be performed. This method does does not actually need to be
- * executable, and since it is only ever simulated it can consume lots of gas.
- * @dev To ensure that it is never called, you may want to add the
- * cannotExecute modifier from KeeperBase to your implementation of this
- * method.
- * @param checkData specified in the upkeep registration so it is always the
- * same for a registered upkeep. This can easily be broken down into specific
- * arguments using `abi.decode`, so multiple upkeeps can be registered on the
- * same contract and easily differentiated by the contract.
- * @return upkeepNeeded boolean to indicate whether the keeper should call
- * performUpkeep or not.
- * @return performData bytes that the keeper should call performUpkeep with, if
- * upkeep is needed. If you would like to encode data to decode later, try
- * `abi.encode`.
- */
- function checkUpkeep(bytes calldata checkData) external returns (bool upkeepNeeded, bytes memory performData);
-
- /**
- * @notice method that is actually executed by the keepers, via the registry.
- * The data returned by the checkUpkeep simulation will be passed into
- * this method to actually be executed.
- * @dev The input to this method should not be trusted, and the caller of the
- * method should not even be restricted to any single registry. Anyone should
- * be able call it, and the input should be validated, there is no guarantee
- * that the data passed in is the performData returned from checkUpkeep. This
- * could happen due to malicious keepers, racing keepers, or simply a state
- * change while the performUpkeep transaction is waiting for confirmation.
- * Always validate the data passed in.
- * @param performData is the data which was passed back from the checkData
- * simulation. If it is encoded, it can easily be decoded into other types by
- * calling `abi.decode`. This data should not be trusted, and should be
- * validated against the contract's current state.
- */
- function performUpkeep(bytes calldata performData) external;
-}
diff --git a/contracts/src/v0.8/automation/testhelpers/KeeperConsumer.sol b/contracts/src/v0.8/automation/testhelpers/KeeperConsumer.sol
index ba4694234a9..fb492f376c2 100644
--- a/contracts/src/v0.8/automation/testhelpers/KeeperConsumer.sol
+++ b/contracts/src/v0.8/automation/testhelpers/KeeperConsumer.sol
@@ -1,7 +1,7 @@
pragma solidity 0.8.16;
-import "./KeeperCompatibleInterface.sol";
-import "./KeeperBase.sol";
+import "../interfaces/KeeperCompatibleInterface.sol";
+import "../KeeperBase.sol";
contract KeeperConsumer is KeeperCompatibleInterface, KeeperBase {
uint public counter;
diff --git a/contracts/src/v0.8/automation/testhelpers/PerformDataChecker.sol b/contracts/src/v0.8/automation/testhelpers/PerformDataChecker.sol
index 03c57ea8e41..268942f931d 100644
--- a/contracts/src/v0.8/automation/testhelpers/PerformDataChecker.sol
+++ b/contracts/src/v0.8/automation/testhelpers/PerformDataChecker.sol
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: MIT
pragma solidity 0.8.16;
-import "./KeeperCompatibleInterface.sol";
+import "../interfaces/KeeperCompatibleInterface.sol";
contract PerformDataChecker is KeeperCompatibleInterface {
uint256 public counter;
diff --git a/contracts/src/v0.8/automation/upkeeps/LinkAvailableBalanceMonitor.sol b/contracts/src/v0.8/automation/upkeeps/LinkAvailableBalanceMonitor.sol
index e2d42bc0666..9b9dc2d6b7d 100644
--- a/contracts/src/v0.8/automation/upkeeps/LinkAvailableBalanceMonitor.sol
+++ b/contracts/src/v0.8/automation/upkeeps/LinkAvailableBalanceMonitor.sol
@@ -1,10 +1,9 @@
// SPDX-License-Identifier: MIT
-pragma solidity 0.8.6;
+pragma solidity 0.8.19;
import {AutomationCompatibleInterface} from "../interfaces/AutomationCompatibleInterface.sol";
import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol";
-import {EnumerableMap} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/structs/EnumerableMap.sol";
import {IERC20} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol";
import {Pausable} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/security/Pausable.sol";
@@ -35,94 +34,87 @@ interface ILinkAvailable {
/// we could save a fair amount of gas and re-write this upkeep for use with Automation v2.0+,
/// which has significantly different trust assumptions
contract LinkAvailableBalanceMonitor is ConfirmedOwner, Pausable, AutomationCompatibleInterface {
- using EnumerableMap for EnumerableMap.AddressToUintMap;
-
+ event BalanceUpdated(address indexed addr, uint256 oldBalance, uint256 newBalance);
event FundsWithdrawn(uint256 amountWithdrawn, address payee);
- event TopUpSucceeded(address indexed topUpAddress);
+ event UpkeepIntervalSet(uint256 oldUpkeepInterval, uint256 newUpkeepInterval);
+ event MaxCheckSet(uint256 oldMaxCheck, uint256 newMaxCheck);
+ event MaxPerformSet(uint256 oldMaxPerform, uint256 newMaxPerform);
+ event MinWaitPeriodSet(uint256 oldMinWaitPeriodSeconds, uint256 newMinWaitPeriodSeconds);
event TopUpBlocked(address indexed topUpAddress);
+ event TopUpFailed(address indexed recipient);
+ event TopUpSucceeded(address indexed topUpAddress);
+ event TopUpUpdated(address indexed addr, uint256 oldTopUpAmount, uint256 newTopUpAmount);
event WatchlistUpdated();
- event MaxPerformUpdated(uint256 oldMaxPerform, uint256 newMaxPerform);
- event MaxCheckUpdated(uint256 oldMaxCheck, uint256 newMaxCheck);
+ error InvalidAddress(address target);
+ error InvalidMaxCheck(uint16 maxCheck);
+ error InvalidMaxPerform(uint16 maxPerform);
+ error InvalidMinBalance(uint96 minBalance);
+ error InvalidTopUpAmount(uint96 topUpAmount);
+ error InvalidUpkeepInterval(uint8 upkeepInterval);
+ error InvalidLinkTokenAddress(address lt);
error InvalidWatchList();
error DuplicateAddress(address duplicate);
+ struct MonitoredAddress {
+ uint96 minBalance;
+ uint96 topUpAmount;
+ uint56 lastTopUpTimestamp;
+ bool isActive;
+ }
+
IERC20 private immutable LINK_TOKEN;
- EnumerableMap.AddressToUintMap private s_watchList;
- uint256 private s_topUpAmount;
- uint32 private s_minWaitPeriodSeconds;
+ uint256 private s_minWaitPeriodSeconds;
uint16 private s_maxPerform;
uint16 private s_maxCheck;
+ uint8 private s_upkeepInterval;
+ address[] private s_watchList;
+ mapping(address targetAddress => MonitoredAddress targetProperties) internal s_targets;
/// @param linkTokenAddress the LINK token address
- /// @param topUpAmount the amount of LINK to top up an aggregator with at once
constructor(
address linkTokenAddress,
- uint256 topUpAmount,
+ uint256 minWaitPeriodSeconds,
uint16 maxPerform,
- uint16 maxCheck
+ uint16 maxCheck,
+ uint8 upkeepInterval
) ConfirmedOwner(msg.sender) {
- require(linkTokenAddress != address(0), "LinkAvailableBalanceMonitor: invalid linkTokenAddress");
- require(topUpAmount > 0, "LinkAvailableBalanceMonitor: invalid topUpAmount");
+ if (linkTokenAddress == address(0)) revert InvalidLinkTokenAddress(linkTokenAddress);
LINK_TOKEN = IERC20(linkTokenAddress);
- s_topUpAmount = topUpAmount;
- s_maxPerform = maxPerform;
- s_maxCheck = maxCheck;
+ setMinWaitPeriodSeconds(minWaitPeriodSeconds);
+ setMaxPerform(maxPerform);
+ setMaxCheck(maxCheck);
+ setUpkeepInterval(upkeepInterval);
}
/// @notice Sets the list of subscriptions to watch and their funding parameters
/// @param addresses the list of target addresses to watch (could be direct target or IAggregatorProxy)
/// @param minBalances the list of corresponding minBalance for the target address
- function setWatchList(address[] calldata addresses, uint256[] calldata minBalances) external onlyOwner {
- if (addresses.length != minBalances.length) {
- revert InvalidWatchList();
- }
- // first, remove all existing addresses from list
- for (uint256 idx = s_watchList.length(); idx > 0; idx--) {
- (address target, ) = s_watchList.at(idx - 1);
- require(s_watchList.remove(target), "LinkAvailableBalanceMonitor: unable to setWatchlist");
- }
- // then set new addresses
- for (uint256 idx = 0; idx < addresses.length; idx++) {
- if (s_watchList.contains(addresses[idx])) {
- revert DuplicateAddress(addresses[idx]);
- }
- if (addresses[idx] == address(0)) {
- revert InvalidWatchList();
- }
- s_watchList.set(addresses[idx], minBalances[idx]);
- }
- emit WatchlistUpdated();
- }
-
- /// @notice Adds addresses to the watchlist without overwriting existing members
- /// @param addresses the list of target addresses to watch (could be direct target or IAggregatorProxy)
- /// @param minBalances the list of corresponding minBalance for the target address
- function addToWatchList(address[] calldata addresses, uint256[] calldata minBalances) external onlyOwner {
- if (addresses.length != minBalances.length) {
+ /// @param topUpAmounts the list of corresponding minTopUp for the target address
+ function setWatchList(
+ address[] calldata addresses,
+ uint96[] calldata minBalances,
+ uint96[] calldata topUpAmounts
+ ) external onlyOwner {
+ if (addresses.length != minBalances.length || addresses.length != topUpAmounts.length) {
revert InvalidWatchList();
}
- for (uint256 idx = 0; idx < addresses.length; idx++) {
- if (s_watchList.contains(addresses[idx])) {
- revert DuplicateAddress(addresses[idx]);
- }
- if (addresses[idx] == address(0)) {
- revert InvalidWatchList();
- }
- s_watchList.set(addresses[idx], minBalances[idx]);
+ for (uint256 idx = 0; idx < s_watchList.length; idx++) {
+ delete s_targets[s_watchList[idx]];
}
- emit WatchlistUpdated();
- }
-
- /// @notice Removes addresses from the watchlist
- /// @param addresses the list of target addresses to remove from the watchlist
- function removeFromWatchlist(address[] calldata addresses) external onlyOwner {
for (uint256 idx = 0; idx < addresses.length; idx++) {
- if (!s_watchList.contains(addresses[idx])) {
- revert InvalidWatchList();
- }
- s_watchList.remove(addresses[idx]);
+ address targetAddress = addresses[idx];
+ if (s_targets[targetAddress].isActive) revert DuplicateAddress(addresses[idx]);
+ if (addresses[idx] == address(0)) revert InvalidWatchList();
+ if (topUpAmounts[idx] == 0) revert InvalidWatchList();
+ s_targets[targetAddress] = MonitoredAddress({
+ isActive: true,
+ minBalance: minBalances[idx],
+ topUpAmount: topUpAmounts[idx],
+ lastTopUpTimestamp: 0
+ });
}
+ s_watchList = addresses;
emit WatchlistUpdated();
}
@@ -135,20 +127,21 @@ contract LinkAvailableBalanceMonitor is ConfirmedOwner, Pausable, AutomationComp
function sampleUnderfundedAddresses() public view returns (address[] memory) {
uint16 maxPerform = s_maxPerform;
uint16 maxCheck = s_maxCheck;
- uint256 numTargets = s_watchList.length();
- uint256 idx = uint256(blockhash(block.number - 1)) % numTargets; // start at random index, to distribute load
+ uint256 numTargets = s_watchList.length;
+ uint256 idx = uint256(blockhash(block.number - (block.number % s_upkeepInterval) - 1)) % numTargets;
uint256 numToCheck = numTargets < maxCheck ? numTargets : maxCheck;
uint256 numFound = 0;
address[] memory targetsToFund = new address[](maxPerform);
+ MonitoredAddress memory target;
for (
uint256 numChecked = 0;
numChecked < numToCheck;
(idx, numChecked) = ((idx + 1) % numTargets, numChecked + 1)
) {
- (address target, uint256 minBalance) = s_watchList.at(idx);
- (bool needsFunding, ) = _needsFunding(target, minBalance);
- if (needsFunding) {
- targetsToFund[numFound] = target;
+ address targetAddress = s_watchList[idx];
+ target = s_targets[targetAddress];
+ if (_needsFunding(targetAddress, target.minBalance)) {
+ targetsToFund[numFound] = targetAddress;
numFound++;
if (numFound == maxPerform) {
break; // max number of addresses in batch reached
@@ -163,29 +156,59 @@ contract LinkAvailableBalanceMonitor is ConfirmedOwner, Pausable, AutomationComp
return targetsToFund;
}
- /// @notice Send funds to the targets provided.
- /// @param targetAddresses the list of targets to fund
function topUp(address[] memory targetAddresses) public whenNotPaused {
- uint256 topUpAmount = s_topUpAmount;
- uint256 stopIdx = targetAddresses.length;
- uint256 numCanFund = LINK_TOKEN.balanceOf(address(this)) / topUpAmount;
- stopIdx = numCanFund < stopIdx ? numCanFund : stopIdx;
- for (uint256 idx = 0; idx < stopIdx; idx++) {
- (bool exists, uint256 minBalance) = s_watchList.tryGet(targetAddresses[idx]);
- if (!exists) {
- emit TopUpBlocked(targetAddresses[idx]);
- continue;
- }
- (bool needsFunding, address target) = _needsFunding(targetAddresses[idx], minBalance);
- if (!needsFunding) {
- emit TopUpBlocked(targetAddresses[idx]);
- continue;
+ MonitoredAddress memory target;
+ uint256 localBalance = LINK_TOKEN.balanceOf(address(this));
+ for (uint256 idx = 0; idx < targetAddresses.length; idx++) {
+ address targetAddress = targetAddresses[idx];
+ target = s_targets[targetAddress];
+ if (localBalance >= target.topUpAmount && _needsFunding(targetAddress, target.minBalance)) {
+ bool success = LINK_TOKEN.transfer(targetAddress, target.topUpAmount);
+ if (success) {
+ localBalance -= target.topUpAmount;
+ s_targets[targetAddress].lastTopUpTimestamp = uint56(block.timestamp);
+ emit TopUpSucceeded(targetAddress);
+ } else {
+ emit TopUpFailed(targetAddress);
+ }
+ } else {
+ emit TopUpBlocked(targetAddress);
}
- LINK_TOKEN.transfer(target, topUpAmount);
- emit TopUpSucceeded(targetAddresses[idx]);
}
}
+ /// @notice checks the target (could be direct target or IAggregatorProxy), and determines
+ /// if it is eligible for funding
+ /// @param targetAddress the target to check
+ /// @param minBalance minimum balance required for the target
+ /// @return bool whether the target needs funding or not
+ function _needsFunding(address targetAddress, uint256 minBalance) private view returns (bool) {
+ // Explicitly check if the targetAddress is the zero address
+ // or if it's not a contract. In both cases return with false,
+ // to prevent target.linkAvailableForPayment from running,
+ // which would revert the operation.
+ if (targetAddress == address(0) || targetAddress.code.length == 0) {
+ return false;
+ }
+ MonitoredAddress memory addressToCheck = s_targets[targetAddress];
+ ILinkAvailable target;
+ IAggregatorProxy proxy = IAggregatorProxy(targetAddress);
+ try proxy.aggregator() returns (address aggregatorAddress) {
+ if (aggregatorAddress == address(0)) return false;
+ target = ILinkAvailable(aggregatorAddress);
+ } catch {
+ target = ILinkAvailable(targetAddress);
+ }
+ try target.linkAvailableForPayment() returns (int256 balance) {
+ if (
+ balance < int256(minBalance) && addressToCheck.lastTopUpTimestamp + s_minWaitPeriodSeconds <= block.timestamp
+ ) {
+ return true;
+ }
+ } catch {}
+ return false;
+ }
+
/// @notice Gets list of subscription ids that are underfunded and returns a keeper-compatible payload.
/// @return upkeepNeeded signals if upkeep is needed
/// @return performData is an abi encoded list of subscription ids that need funds
@@ -193,12 +216,6 @@ contract LinkAvailableBalanceMonitor is ConfirmedOwner, Pausable, AutomationComp
bytes calldata
) external view override whenNotPaused returns (bool upkeepNeeded, bytes memory performData) {
address[] memory needsFunding = sampleUnderfundedAddresses();
- uint256 numCanFund = LINK_TOKEN.balanceOf(address(this)) / s_topUpAmount;
- if (numCanFund < needsFunding.length) {
- assembly {
- mstore(needsFunding, numCanFund) // resize
- }
- }
upkeepNeeded = needsFunding.length > 0;
performData = abi.encode(needsFunding);
return (upkeepNeeded, performData);
@@ -215,38 +232,54 @@ contract LinkAvailableBalanceMonitor is ConfirmedOwner, Pausable, AutomationComp
/// @param amount the amount of the LINK to withdraw
/// @param payee the address to pay
function withdraw(uint256 amount, address payable payee) external onlyOwner {
- require(payee != address(0), "LinkAvailableBalanceMonitor: invalid payee address");
+ if (payee == address(0)) revert InvalidAddress(payee);
LINK_TOKEN.transfer(payee, amount);
emit FundsWithdrawn(amount, payee);
}
- /// @notice Sets the top up amount
- function setTopUpAmount(uint256 topUpAmount) external onlyOwner returns (uint256) {
- require(topUpAmount > 0, "LinkAvailableBalanceMonitor: invalid linkTokenAddress");
- return s_topUpAmount = topUpAmount;
+ /// @notice Sets the minimum balance for the given target address
+ function setMinBalance(address target, uint96 minBalance) external onlyOwner {
+ if (target == address(0)) revert InvalidAddress(target);
+ if (minBalance == 0) revert InvalidMinBalance(minBalance);
+ if (!s_targets[target].isActive) revert InvalidWatchList();
+ uint256 oldBalance = s_targets[target].minBalance;
+ s_targets[target].minBalance = minBalance;
+ emit BalanceUpdated(target, oldBalance, minBalance);
}
/// @notice Sets the minimum balance for the given target address
- function setMinBalance(address target, uint256 minBalance) external onlyOwner returns (uint256) {
- require(minBalance > 0, "LinkAvailableBalanceMonitor: invalid minBalance");
- (bool exists, uint256 prevMinBalance) = s_watchList.tryGet(target);
- if (!exists) {
- revert InvalidWatchList();
- }
- s_watchList.set(target, minBalance);
- return prevMinBalance;
+ function setTopUpAmount(address target, uint96 topUpAmount) external onlyOwner {
+ if (target == address(0)) revert InvalidAddress(target);
+ if (topUpAmount == 0) revert InvalidTopUpAmount(topUpAmount);
+ if (!s_targets[target].isActive) revert InvalidWatchList();
+ uint256 oldTopUpAmount = s_targets[target].topUpAmount;
+ s_targets[target].topUpAmount = topUpAmount;
+ emit TopUpUpdated(target, oldTopUpAmount, topUpAmount);
}
/// @notice Update s_maxPerform
- function setMaxPerform(uint16 maxPerform) external onlyOwner {
- emit MaxPerformUpdated(s_maxPerform, maxPerform);
+ function setMaxPerform(uint16 maxPerform) public onlyOwner {
s_maxPerform = maxPerform;
+ emit MaxPerformSet(s_maxPerform, maxPerform);
}
/// @notice Update s_maxCheck
- function setMaxCheck(uint16 maxCheck) external onlyOwner {
- emit MaxCheckUpdated(s_maxCheck, maxCheck);
+ function setMaxCheck(uint16 maxCheck) public onlyOwner {
s_maxCheck = maxCheck;
+ emit MaxCheckSet(s_maxCheck, maxCheck);
+ }
+
+ /// @notice Sets the minimum wait period (in seconds) for addresses between funding
+ function setMinWaitPeriodSeconds(uint256 minWaitPeriodSeconds) public onlyOwner {
+ emit MinWaitPeriodSet(s_minWaitPeriodSeconds, minWaitPeriodSeconds);
+ s_minWaitPeriodSeconds = minWaitPeriodSeconds;
+ }
+
+ /// @notice Update s_upkeepInterval
+ function setUpkeepInterval(uint8 upkeepInterval) public onlyOwner {
+ // NOTE(review): upkeepInterval is uint8, so a "> 255" bound check is unreachable and was removed; emit before the write so the old value is logged
+ emit UpkeepIntervalSet(s_upkeepInterval, upkeepInterval);
+ s_upkeepInterval = upkeepInterval;
}
/// @notice Gets maxPerform
@@ -259,31 +292,27 @@ contract LinkAvailableBalanceMonitor is ConfirmedOwner, Pausable, AutomationComp
return s_maxCheck;
}
- /// @notice Gets the list of subscription ids being watched
- function getWatchList() external view returns (address[] memory, uint256[] memory) {
- uint256 len = s_watchList.length();
- address[] memory targets = new address[](len);
- uint256[] memory minBalances = new uint256[](len);
-
- for (uint256 idx = 0; idx < len; idx++) {
- (targets[idx], minBalances[idx]) = s_watchList.at(idx);
- }
+ /// @notice Gets the minimum wait period
+ function getMinWaitPeriodSeconds() external view returns (uint256) {
+ return s_minWaitPeriodSeconds;
+ }
- return (targets, minBalances);
+ /// @notice Gets upkeepInterval
+ function getUpkeepInterval() external view returns (uint8) {
+ return s_upkeepInterval;
}
- /// @notice Gets the configured top up amount
- function getTopUpAmount() external view returns (uint256) {
- return s_topUpAmount;
+ /// @notice Gets the list of subscription ids being watched
+ function getWatchList() external view returns (address[] memory) {
+ return s_watchList;
}
- /// @notice Gets the configured minimum balance for the given target
- function getMinBalance(address target) external view returns (uint256) {
- (bool exists, uint256 minBalance) = s_watchList.tryGet(target);
- if (!exists) {
- revert InvalidWatchList();
- }
- return minBalance;
+ /// @notice Gets configuration information for an address on the watchlist
+ function getAccountInfo(
+ address targetAddress
+ ) external view returns (bool isActive, uint256 minBalance, uint256 topUpAmount) {
+ MonitoredAddress memory target = s_targets[targetAddress];
+ return (target.isActive, target.minBalance, target.topUpAmount);
}
/// @notice Pause the contract, which prevents executing performUpkeep
@@ -295,26 +324,4 @@ contract LinkAvailableBalanceMonitor is ConfirmedOwner, Pausable, AutomationComp
function unpause() external onlyOwner {
_unpause();
}
-
- /// @notice checks the target (could be direct target or IAggregatorProxy), and determines
- /// if it is elligible for funding
- /// @param targetAddress the target to check
- /// @param minBalance minimum balance required for the target
- /// @return bool whether the target needs funding or not
- /// @return address the address of the contract needing funding
- function _needsFunding(address targetAddress, uint256 minBalance) private view returns (bool, address) {
- ILinkAvailable target;
- IAggregatorProxy proxy = IAggregatorProxy(targetAddress);
- try proxy.aggregator() returns (address aggregatorAddress) {
- target = ILinkAvailable(aggregatorAddress);
- } catch {
- target = ILinkAvailable(targetAddress);
- }
- try target.linkAvailableForPayment() returns (int256 balance) {
- if (balance < 0 || uint256(balance) < minBalance) {
- return (true, address(target));
- }
- } catch {}
- return (false, address(0));
- }
}
diff --git a/contracts/src/v0.8/automation/v1_2/KeeperRegistry1_2.sol b/contracts/src/v0.8/automation/v1_2/KeeperRegistry1_2.sol
index 262b8357f7a..2fa1ee6188b 100644
--- a/contracts/src/v0.8/automation/v1_2/KeeperRegistry1_2.sol
+++ b/contracts/src/v0.8/automation/v1_2/KeeperRegistry1_2.sol
@@ -7,7 +7,7 @@ import "@openzeppelin/contracts/security/Pausable.sol";
import "@openzeppelin/contracts/security/ReentrancyGuard.sol";
import "../KeeperBase.sol";
import "../../interfaces/TypeAndVersionInterface.sol";
-import "../../interfaces/AggregatorV3Interface.sol";
+import "../../shared/interfaces/AggregatorV3Interface.sol";
import "../interfaces/KeeperCompatibleInterface.sol";
import "../interfaces/v1_2/KeeperRegistryInterface1_2.sol";
import "../interfaces/MigratableKeeperRegistryInterface.sol";
diff --git a/contracts/src/v0.8/automation/v1_3/KeeperRegistryBase1_3.sol b/contracts/src/v0.8/automation/v1_3/KeeperRegistryBase1_3.sol
index 6328b651671..c21f3a73912 100644
--- a/contracts/src/v0.8/automation/v1_3/KeeperRegistryBase1_3.sol
+++ b/contracts/src/v0.8/automation/v1_3/KeeperRegistryBase1_3.sol
@@ -8,7 +8,7 @@ import "../../vendor/@eth-optimism/contracts/v0.8.6/contracts/L2/predeploys/OVM_
import "../ExecutionPrevention.sol";
import {Config, Upkeep} from "../interfaces/v1_3/AutomationRegistryInterface1_3.sol";
import "../../shared/access/ConfirmedOwner.sol";
-import "../../interfaces/AggregatorV3Interface.sol";
+import "../../shared/interfaces/AggregatorV3Interface.sol";
import "../../shared/interfaces/LinkTokenInterface.sol";
import "../interfaces/KeeperCompatibleInterface.sol";
import "../interfaces/UpkeepTranscoderInterface.sol";
diff --git a/contracts/src/v0.8/automation/v2_0/KeeperRegistryBase2_0.sol b/contracts/src/v0.8/automation/v2_0/KeeperRegistryBase2_0.sol
index 14e9b204475..9b78e5806ff 100644
--- a/contracts/src/v0.8/automation/v2_0/KeeperRegistryBase2_0.sol
+++ b/contracts/src/v0.8/automation/v2_0/KeeperRegistryBase2_0.sol
@@ -7,7 +7,7 @@ import "../../vendor/@eth-optimism/contracts/v0.8.6/contracts/L2/predeploys/OVM_
import {ArbSys} from "../../vendor/@arbitrum/nitro-contracts/src/precompiles/ArbSys.sol";
import "../ExecutionPrevention.sol";
import "../../shared/access/ConfirmedOwner.sol";
-import "../../interfaces/AggregatorV3Interface.sol";
+import "../../shared/interfaces/AggregatorV3Interface.sol";
import "../../shared/interfaces/LinkTokenInterface.sol";
import "../interfaces/KeeperCompatibleInterface.sol";
import "../interfaces/UpkeepTranscoderInterface.sol";
diff --git a/contracts/src/v0.8/automation/v2_1/KeeperRegistryBase2_1.sol b/contracts/src/v0.8/automation/v2_1/KeeperRegistryBase2_1.sol
index 389222668a8..c0d34d4303e 100644
--- a/contracts/src/v0.8/automation/v2_1/KeeperRegistryBase2_1.sol
+++ b/contracts/src/v0.8/automation/v2_1/KeeperRegistryBase2_1.sol
@@ -11,7 +11,7 @@ import {StreamsLookupCompatibleInterface} from "../interfaces/StreamsLookupCompa
import {ILogAutomation, Log} from "../interfaces/ILogAutomation.sol";
import {IAutomationForwarder} from "../interfaces/IAutomationForwarder.sol";
import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol";
-import {AggregatorV3Interface} from "../../interfaces/AggregatorV3Interface.sol";
+import {AggregatorV3Interface} from "../../shared/interfaces/AggregatorV3Interface.sol";
import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol";
import {KeeperCompatibleInterface} from "../interfaces/KeeperCompatibleInterface.sol";
import {UpkeepFormat} from "../interfaces/UpkeepTranscoderInterface.sol";
@@ -407,8 +407,8 @@ abstract contract KeeperRegistryBase2_1 is ConfirmedOwner, ExecutionPrevention {
struct ChainConfig {
uint256 fastGas;
uint256 linkNative;
- uint256 l1GasCost; // 0 for L1
uint256 executionL1GasCost;
+ uint256 estimatedL1GasCost;
}
event AdminPrivilegeConfigSet(address indexed admin, bytes privilegeConfig);
@@ -533,29 +533,29 @@ abstract contract KeeperRegistryBase2_1 is ConfirmedOwner, ExecutionPrevention {
* for gas it takes the min of gas price in the transaction or the fast gas
* price in order to reduce costs for the upkeep clients.
*/
- function _getFeedData(HotVars memory hotVars) internal view returns (uint256 gasWei, uint256 linkNative) {
- uint32 stalenessSeconds = hotVars.stalenessSeconds;
- bool staleFallback = stalenessSeconds > 0;
- uint256 timestamp;
- int256 feedValue;
- (, feedValue, , timestamp, ) = i_fastGasFeed.latestRoundData();
- if (
- feedValue <= 0 || block.timestamp < timestamp || (staleFallback && stalenessSeconds < block.timestamp - timestamp)
- ) {
- gasWei = s_fallbackGasPrice;
- } else {
- gasWei = uint256(feedValue);
- }
- (, feedValue, , timestamp, ) = i_linkNativeFeed.latestRoundData();
- if (
- feedValue <= 0 || block.timestamp < timestamp || (staleFallback && stalenessSeconds < block.timestamp - timestamp)
- ) {
- linkNative = s_fallbackLinkPrice;
- } else {
- linkNative = uint256(feedValue);
- }
- return (gasWei, linkNative);
- }
+// function _getFeedData(HotVars memory hotVars) internal view returns (uint256 gasWei, uint256 linkNative) {
+// uint32 stalenessSeconds = hotVars.stalenessSeconds;
+// bool staleFallback = stalenessSeconds > 0;
+// uint256 timestamp;
+// int256 feedValue;
+// (, feedValue, , timestamp, ) = i_fastGasFeed.latestRoundData();
+// if (
+// feedValue <= 0 || block.timestamp < timestamp || (staleFallback && stalenessSeconds < block.timestamp - timestamp)
+// ) {
+// gasWei = s_fallbackGasPrice;
+// } else {
+// gasWei = uint256(feedValue);
+// }
+// (, feedValue, , timestamp, ) = i_linkNativeFeed.latestRoundData();
+// if (
+// feedValue <= 0 || block.timestamp < timestamp || (staleFallback && stalenessSeconds < block.timestamp - timestamp)
+// ) {
+// linkNative = s_fallbackLinkPrice;
+// } else {
+// linkNative = uint256(feedValue);
+// }
+// return (gasWei, linkNative);
+// }
/**
* @dev calculates LINK paid for gas spent plus a configure premium percentage
@@ -581,7 +581,7 @@ abstract contract KeeperRegistryBase2_1 is ConfirmedOwner, ExecutionPrevention {
uint256 l1CostWei;
// if it's not performing upkeeps, use gas ceiling multiplier to estimate the upper bound
if (!isExecution) {
- l1CostWei = hotVars.gasCeilingMultiplier * cfg.l1CostWei;
+ l1CostWei = hotVars.gasCeilingMultiplier * cfg.estimatedL1GasCost;
} else {
l1CostWei = cfg.executionL1GasCost;
}
diff --git a/contracts/src/v0.8/automation/v2_1/KeeperRegistryLogicA2_1.sol b/contracts/src/v0.8/automation/v2_1/KeeperRegistryLogicA2_1.sol
index 3739b2a3c72..0b540f1b815 100644
--- a/contracts/src/v0.8/automation/v2_1/KeeperRegistryLogicA2_1.sol
+++ b/contracts/src/v0.8/automation/v2_1/KeeperRegistryLogicA2_1.sol
@@ -56,9 +56,10 @@ contract KeeperRegistryLogicA2_1 is KeeperRegistryBase2_1, Chainable {
bytes memory performData,
UpkeepFailureReason upkeepFailureReason,
uint256 gasUsed,
- uint256 gasLimit,
- uint256 fastGasWei,
- uint256 linkNative
+ uint256 gasLimit//,
+ // likely these 2 values don't need to be returned anymore. otherwise, we can just pass the values from chain config here.
+ // uint256 fastGasWei,
+ // uint256 linkNative
)
{
Trigger triggerType = _getTriggerType(id);
@@ -70,7 +71,7 @@ contract KeeperRegistryLogicA2_1 is KeeperRegistryBase2_1, Chainable {
return (false, bytes(""), UpkeepFailureReason.UPKEEP_CANCELLED, 0, upkeep.performGas, 0, 0);
if (upkeep.paused) return (false, bytes(""), UpkeepFailureReason.UPKEEP_PAUSED, 0, upkeep.performGas, 0, 0);
- (fastGasWei, linkNative) = _getFeedData(hotVars);
+ // (fastGasWei, linkNative) = _getFeedData(hotVars);
uint96 maxLinkPayment = _getMaxLinkPayment(
hotVars,
cfg,
@@ -99,9 +100,9 @@ contract KeeperRegistryLogicA2_1 is KeeperRegistryBase2_1, Chainable {
bytes(""),
UpkeepFailureReason.REVERT_DATA_EXCEEDS_LIMIT,
gasUsed,
- upkeep.performGas,
- fastGasWei,
- linkNative
+ upkeep.performGas//,
+ //fastGasWei,
+ //linkNative
);
}
return (
@@ -109,9 +110,9 @@ contract KeeperRegistryLogicA2_1 is KeeperRegistryBase2_1, Chainable {
result,
UpkeepFailureReason.TARGET_CHECK_REVERTED,
gasUsed,
- upkeep.performGas,
- fastGasWei,
- linkNative
+ upkeep.performGas//,
+ //fastGasWei,
+ //linkNative
);
}
@@ -122,9 +123,9 @@ contract KeeperRegistryLogicA2_1 is KeeperRegistryBase2_1, Chainable {
bytes(""),
UpkeepFailureReason.UPKEEP_NOT_NEEDED,
gasUsed,
- upkeep.performGas,
- fastGasWei,
- linkNative
+ upkeep.performGas//,
+ //fastGasWei,
+ //linkNative
);
if (performData.length > s_storage.maxPerformDataSize)
@@ -133,12 +134,12 @@ contract KeeperRegistryLogicA2_1 is KeeperRegistryBase2_1, Chainable {
bytes(""),
UpkeepFailureReason.PERFORM_DATA_EXCEEDS_LIMIT,
gasUsed,
- upkeep.performGas,
- fastGasWei,
- linkNative
+ upkeep.performGas//,
+ //fastGasWei,
+ //linkNative
);
- return (upkeepNeeded, performData, upkeepFailureReason, gasUsed, upkeep.performGas, fastGasWei, linkNative);
+ return (upkeepNeeded, performData, upkeepFailureReason, gasUsed, upkeep.performGas/*, fastGasWei, linkNative*/);
}
/**
@@ -155,9 +156,9 @@ contract KeeperRegistryLogicA2_1 is KeeperRegistryBase2_1, Chainable {
bytes memory performData,
UpkeepFailureReason upkeepFailureReason,
uint256 gasUsed,
- uint256 gasLimit,
- uint256 fastGasWei,
- uint256 linkNative
+ uint256 gasLimit//,
+ //uint256 fastGasWei,
+ //uint256 linkNative
)
{
return checkUpkeep(id, bytes(""), cfg);
diff --git a/contracts/src/v0.8/dev/shared/interfaces/OwnableInterface.sol b/contracts/src/v0.8/dev/shared/interfaces/OwnableInterface.sol
deleted file mode 100644
index a24cbee504c..00000000000
--- a/contracts/src/v0.8/dev/shared/interfaces/OwnableInterface.sol
+++ /dev/null
@@ -1,10 +0,0 @@
-// SPDX-License-Identifier: MIT
-pragma solidity ^0.8.0;
-
-interface OwnableInterface {
- function owner() external returns (address);
-
- function transferOwnership(address recipient) external;
-
- function acceptOwnership() external;
-}
diff --git a/contracts/src/v0.8/functions/dev/v1_X/FunctionsBilling.sol b/contracts/src/v0.8/functions/dev/v1_X/FunctionsBilling.sol
index ed67d485431..cb4f2f45677 100644
--- a/contracts/src/v0.8/functions/dev/v1_X/FunctionsBilling.sol
+++ b/contracts/src/v0.8/functions/dev/v1_X/FunctionsBilling.sol
@@ -2,7 +2,7 @@
pragma solidity ^0.8.19;
import {IFunctionsSubscriptions} from "./interfaces/IFunctionsSubscriptions.sol";
-import {AggregatorV3Interface} from "../../../interfaces/AggregatorV3Interface.sol";
+import {AggregatorV3Interface} from "../../../shared/interfaces/AggregatorV3Interface.sol";
import {IFunctionsBilling} from "./interfaces/IFunctionsBilling.sol";
import {Routable} from "./Routable.sol";
@@ -10,6 +10,8 @@ import {FunctionsResponse} from "./libraries/FunctionsResponse.sol";
import {SafeCast} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/math/SafeCast.sol";
+import {ChainSpecificUtil} from "./libraries/ChainSpecificUtil.sol";
+
/// @title Functions Billing contract
/// @notice Contract that calculates payment from users to the nodes of the Decentralized Oracle Network (DON).
abstract contract FunctionsBilling is Routable, IFunctionsBilling {
@@ -18,6 +20,15 @@ abstract contract FunctionsBilling is Routable, IFunctionsBilling {
using FunctionsResponse for FunctionsResponse.FulfillResult;
uint256 private constant REASONABLE_GAS_PRICE_CEILING = 1_000_000_000_000_000; // 1 million gwei
+
+ event RequestBilled(
+ bytes32 indexed requestId,
+ uint96 juelsPerGas,
+ uint256 l1FeeShareWei,
+ uint96 callbackCostJuels,
+ uint96 totalCostJuels
+ );
+
// ================================================================
// | Request Commitment state |
// ================================================================
@@ -123,10 +134,10 @@ abstract contract FunctionsBilling is Routable, IFunctionsBilling {
return uint256(weiPerUnitLink);
}
- function _getJuelsPerGas(uint256 gasPriceWei) private view returns (uint96) {
- // (1e18 juels/link) * (wei/gas) / (wei/link) = juels per gas
+ function _getJuelsFromWei(uint256 amountWei) private view returns (uint96) {
+ // (1e18 juels/link) * wei / (wei/link) = juels
// There are only 1e9*1e18 = 1e27 juels in existence, should not exceed uint96 (2^96 ~ 7e28)
- return SafeCast.toUint96((1e18 * gasPriceWei) / getWeiPerUnitLink());
+ return SafeCast.toUint96((1e18 * amountWei) / getWeiPerUnitLink());
}
// ================================================================
@@ -159,8 +170,6 @@ abstract contract FunctionsBilling is Routable, IFunctionsBilling {
uint72 donFee,
uint72 adminFee
) internal view returns (uint96) {
- uint256 executionGas = s_config.gasOverheadBeforeCallback + s_config.gasOverheadAfterCallback + callbackGasLimit;
-
// If gas price is less than the minimum fulfillment gas price, override to using the minimum
if (gasPriceWei < s_config.minimumEstimateGasPriceWei) {
gasPriceWei = s_config.minimumEstimateGasPriceWei;
@@ -170,11 +179,13 @@ abstract contract FunctionsBilling is Routable, IFunctionsBilling {
((gasPriceWei * s_config.fulfillmentGasPriceOverEstimationBP) / 10_000);
/// @NOTE: Basis Points are 1/100th of 1%, divide by 10_000 to bring back to original units
- uint96 juelsPerGas = _getJuelsPerGas(gasPriceWithOverestimation);
- uint256 estimatedGasReimbursement = juelsPerGas * executionGas;
- uint96 fees = uint96(donFee) + uint96(adminFee);
+ uint256 executionGas = s_config.gasOverheadBeforeCallback + s_config.gasOverheadAfterCallback + callbackGasLimit;
+ uint256 l1FeeWei = ChainSpecificUtil._getCurrentTxL1GasFees(msg.data);
+ uint96 estimatedGasReimbursementJuels = _getJuelsFromWei((gasPriceWithOverestimation * executionGas) + l1FeeWei);
+
+ uint96 feesJuels = uint96(donFee) + uint96(adminFee);
- return SafeCast.toUint96(estimatedGasReimbursement + fees);
+ return estimatedGasReimbursementJuels + feesJuels;
}
// ================================================================
@@ -248,6 +259,7 @@ abstract contract FunctionsBilling is Routable, IFunctionsBilling {
/// @param requestId identifier for the request that was generated by the Registry in the beginBilling commitment
/// @param response response data from DON consensus
/// @param err error from DON consensus
+ /// @param reportBatchSize the number of fulfillments in the transmitter's report
/// @return result fulfillment result
/// @dev Only callable by a node that has been approved on the Coordinator
/// @dev simulated offchain to determine if sufficient balance is present to fulfill the request
@@ -256,21 +268,23 @@ abstract contract FunctionsBilling is Routable, IFunctionsBilling {
bytes memory response,
bytes memory err,
bytes memory onchainMetadata,
- bytes memory /* offchainMetadata TODO: use in getDonFee() for dynamic billing */
+ bytes memory /* offchainMetadata TODO: use in getDonFee() for dynamic billing */,
+ uint8 reportBatchSize
) internal returns (FunctionsResponse.FulfillResult) {
FunctionsResponse.Commitment memory commitment = abi.decode(onchainMetadata, (FunctionsResponse.Commitment));
- uint96 juelsPerGas = _getJuelsPerGas(tx.gasprice);
+ uint256 gasOverheadWei = (commitment.gasOverheadBeforeCallback + commitment.gasOverheadAfterCallback) * tx.gasprice;
+ uint256 l1FeeShareWei = ChainSpecificUtil._getCurrentTxL1GasFees(msg.data) / reportBatchSize;
// Gas overhead without callback
- uint96 gasOverheadJuels = juelsPerGas *
- (commitment.gasOverheadBeforeCallback + commitment.gasOverheadAfterCallback);
+ uint96 gasOverheadJuels = _getJuelsFromWei(gasOverheadWei + l1FeeShareWei);
+ uint96 juelsPerGas = _getJuelsFromWei(tx.gasprice);
// The Functions Router will perform the callback to the client contract
(FunctionsResponse.FulfillResult resultCode, uint96 callbackCostJuels) = _getRouter().fulfill(
response,
err,
juelsPerGas,
- gasOverheadJuels + commitment.donFee, // costWithoutFulfillment
+ gasOverheadJuels + commitment.donFee, // cost without callback or admin fee, those will be added by the Router
msg.sender,
commitment
);
@@ -288,6 +302,13 @@ abstract contract FunctionsBilling is Routable, IFunctionsBilling {
// Put donFee into the pool of fees, to be split later
// Saves on storage writes that would otherwise be charged to the user
s_feePool += commitment.donFee;
+ emit RequestBilled({
+ requestId: requestId,
+ juelsPerGas: juelsPerGas,
+ l1FeeShareWei: l1FeeShareWei,
+ callbackCostJuels: callbackCostJuels,
+ totalCostJuels: gasOverheadJuels + callbackCostJuels + commitment.donFee + commitment.adminFee
+ });
}
return resultCode;
@@ -353,15 +374,16 @@ abstract contract FunctionsBilling is Routable, IFunctionsBilling {
// All transmitters are assumed to also be observers
// Pay out the DON fee to all transmitters
address[] memory transmitters = _getTransmitters();
- if (transmitters.length == 0) {
+ uint256 numberOfTransmitters = transmitters.length;
+ if (numberOfTransmitters == 0) {
revert NoTransmittersSet();
}
- uint96 feePoolShare = s_feePool / uint96(transmitters.length);
+ uint96 feePoolShare = s_feePool / uint96(numberOfTransmitters);
// Bounded by "maxNumOracles" on OCR2Abstract.sol
- for (uint256 i = 0; i < transmitters.length; ++i) {
+ for (uint256 i = 0; i < numberOfTransmitters; ++i) {
s_withdrawableTokens[transmitters[i]] += feePoolShare;
}
- s_feePool -= feePoolShare * uint96(transmitters.length);
+ s_feePool -= feePoolShare * uint96(numberOfTransmitters);
}
// Overriden in FunctionsCoordinator.sol
diff --git a/contracts/src/v0.8/functions/dev/v1_X/FunctionsCoordinator.sol b/contracts/src/v0.8/functions/dev/v1_X/FunctionsCoordinator.sol
index eb0d954ae02..16e9029ce3f 100644
--- a/contracts/src/v0.8/functions/dev/v1_X/FunctionsCoordinator.sol
+++ b/contracts/src/v0.8/functions/dev/v1_X/FunctionsCoordinator.sol
@@ -44,7 +44,7 @@ contract FunctionsCoordinator is OCR2Base, IFunctionsCoordinator, FunctionsBilli
address router,
Config memory config,
address linkToNativeFeed
- ) OCR2Base(true) FunctionsBilling(router, config, linkToNativeFeed) {}
+ ) OCR2Base() FunctionsBilling(router, config, linkToNativeFeed) {}
/// @inheritdoc IFunctionsCoordinator
function getThresholdPublicKey() external view override returns (bytes memory) {
@@ -133,30 +133,36 @@ contract FunctionsCoordinator is OCR2Base, IFunctionsCoordinator, FunctionsBilli
address[MAX_NUM_ORACLES] memory /*signers*/,
bytes calldata report
) internal override {
- bytes32[] memory requestIds;
- bytes[] memory results;
- bytes[] memory errors;
- bytes[] memory onchainMetadata;
- bytes[] memory offchainMetadata;
- (requestIds, results, errors, onchainMetadata, offchainMetadata) = abi.decode(
- report,
- (bytes32[], bytes[], bytes[], bytes[], bytes[])
- );
+ (
+ bytes32[] memory requestIds,
+ bytes[] memory results,
+ bytes[] memory errors,
+ bytes[] memory onchainMetadata,
+ bytes[] memory offchainMetadata
+ ) = abi.decode(report, (bytes32[], bytes[], bytes[], bytes[], bytes[]));
+ uint256 numberOfFulfillments = uint8(requestIds.length);
if (
- requestIds.length == 0 ||
- requestIds.length != results.length ||
- requestIds.length != errors.length ||
- requestIds.length != onchainMetadata.length ||
- requestIds.length != offchainMetadata.length
+ numberOfFulfillments == 0 ||
+ numberOfFulfillments != results.length ||
+ numberOfFulfillments != errors.length ||
+ numberOfFulfillments != onchainMetadata.length ||
+ numberOfFulfillments != offchainMetadata.length
) {
- revert ReportInvalid();
+ revert ReportInvalid("Fields must be equal length");
}
// Bounded by "MaxRequestBatchSize" on the Job's ReportingPluginConfig
- for (uint256 i = 0; i < requestIds.length; ++i) {
+ for (uint256 i = 0; i < numberOfFulfillments; ++i) {
FunctionsResponse.FulfillResult result = FunctionsResponse.FulfillResult(
- _fulfillAndBill(requestIds[i], results[i], errors[i], onchainMetadata[i], offchainMetadata[i])
+ _fulfillAndBill(
+ requestIds[i],
+ results[i],
+ errors[i],
+ onchainMetadata[i],
+ offchainMetadata[i],
+ uint8(numberOfFulfillments) // will not exceed "MaxRequestBatchSize" on the Job's ReportingPluginConfig
+ )
);
// Emit on successfully processing the fulfillment
diff --git a/contracts/src/v0.8/functions/dev/v1_X/interfaces/IOwnableFunctionsRouter.sol b/contracts/src/v0.8/functions/dev/v1_X/interfaces/IOwnableFunctionsRouter.sol
index 39b84a930aa..f6d7880da3e 100644
--- a/contracts/src/v0.8/functions/dev/v1_X/interfaces/IOwnableFunctionsRouter.sol
+++ b/contracts/src/v0.8/functions/dev/v1_X/interfaces/IOwnableFunctionsRouter.sol
@@ -5,6 +5,4 @@ import {IFunctionsRouter} from "./IFunctionsRouter.sol";
import {IOwnable} from "../../../../shared/interfaces/IOwnable.sol";
/// @title Chainlink Functions Router interface with Ownability.
-interface IOwnableFunctionsRouter is IOwnable, IFunctionsRouter {
-
-}
+interface IOwnableFunctionsRouter is IOwnable, IFunctionsRouter {}
diff --git a/contracts/src/v0.8/functions/dev/v1_X/libraries/ChainSpecificUtil.sol b/contracts/src/v0.8/functions/dev/v1_X/libraries/ChainSpecificUtil.sol
new file mode 100644
index 00000000000..d6569a256bf
--- /dev/null
+++ b/contracts/src/v0.8/functions/dev/v1_X/libraries/ChainSpecificUtil.sol
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.19;
+
+import {ArbGasInfo} from "../../../../vendor/@arbitrum/nitro-contracts/src/precompiles/ArbGasInfo.sol";
+import {OVM_GasPriceOracle} from "../../../../vendor/@eth-optimism/contracts/v0.8.9/contracts/L2/predeploys/OVM_GasPriceOracle.sol";
+
+/// @dev A library that abstracts out opcodes that behave differently across chains.
+/// @dev The methods below return values that are pertinent to the given chain.
+library ChainSpecificUtil {
+ // ------------ Start Arbitrum Constants ------------
+
+ /// @dev ARBGAS_ADDR is the address of the ArbGasInfo precompile on Arbitrum.
+ /// @dev reference: https://github.com/OffchainLabs/nitro/blob/v2.0.14/contracts/src/precompiles/ArbGasInfo.sol#L10
+ address private constant ARBGAS_ADDR = address(0x000000000000000000000000000000000000006C);
+ ArbGasInfo private constant ARBGAS = ArbGasInfo(ARBGAS_ADDR);
+
+ uint256 private constant ARB_MAINNET_CHAIN_ID = 42161;
+ uint256 private constant ARB_GOERLI_TESTNET_CHAIN_ID = 421613;
+ uint256 private constant ARB_SEPOLIA_TESTNET_CHAIN_ID = 421614;
+
+ // ------------ End Arbitrum Constants ------------
+
+ // ------------ Start Optimism Constants ------------
+ /// @dev L1_FEE_DATA_PADDING includes 35 bytes for L1 data padding for Optimism
+ bytes internal constant L1_FEE_DATA_PADDING =
+ "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff";
+ /// @dev OVM_GASPRICEORACLE_ADDR is the address of the OVM_GasPriceOracle precompile on Optimism.
+ /// @dev reference: https://community.optimism.io/docs/developers/build/transaction-fees/#estimating-the-l1-data-fee
+ address private constant OVM_GASPRICEORACLE_ADDR = address(0x420000000000000000000000000000000000000F);
+ OVM_GasPriceOracle private constant OVM_GASPRICEORACLE = OVM_GasPriceOracle(OVM_GASPRICEORACLE_ADDR);
+
+ uint256 private constant OP_MAINNET_CHAIN_ID = 10;
+ uint256 private constant OP_GOERLI_CHAIN_ID = 420;
+ uint256 private constant OP_SEPOLIA_CHAIN_ID = 11155420;
+
+ /// @dev Base is an OP stack based rollup and follows the same L1 pricing logic as Optimism.
+ uint256 private constant BASE_MAINNET_CHAIN_ID = 8453;
+ uint256 private constant BASE_GOERLI_CHAIN_ID = 84531;
+ uint256 private constant BASE_SEPOLIA_CHAIN_ID = 84532;
+
+ // ------------ End Optimism Constants ------------
+
+ /// @notice Returns the L1 fees in wei that will be paid for the current transaction, given any calldata
+ /// @notice for the current transaction.
+ /// @notice When on a known Arbitrum chain, it uses ArbGas.getCurrentTxL1GasFees to get the fees.
+ /// @notice On Arbitrum, the provided calldata is not used to calculate the fees.
+ /// @notice On Optimism, the provided calldata is passed to the OVM_GasPriceOracle predeploy
+ /// @notice and getL1Fee is called to get the fees.
+ function _getCurrentTxL1GasFees(bytes memory txCallData) internal view returns (uint256 l1FeeWei) {
+ uint256 chainid = block.chainid;
+ if (_isArbitrumChainId(chainid)) {
+ return ARBGAS.getCurrentTxL1GasFees();
+ } else if (_isOptimismChainId(chainid)) {
+ return OVM_GASPRICEORACLE.getL1Fee(bytes.concat(txCallData, L1_FEE_DATA_PADDING));
+ }
+ return 0;
+ }
+
+ /// @notice Return true if and only if the provided chain ID is an Arbitrum chain ID.
+ function _isArbitrumChainId(uint256 chainId) internal pure returns (bool) {
+ return
+ chainId == ARB_MAINNET_CHAIN_ID ||
+ chainId == ARB_GOERLI_TESTNET_CHAIN_ID ||
+ chainId == ARB_SEPOLIA_TESTNET_CHAIN_ID;
+ }
+
+ /// @notice Return true if and only if the provided chain ID is an Optimism (or Base) chain ID.
+ /// @notice Note that Optimism chain IDs are also OP stack chain IDs.
+ function _isOptimismChainId(uint256 chainId) internal pure returns (bool) {
+ return
+ chainId == OP_MAINNET_CHAIN_ID ||
+ chainId == OP_GOERLI_CHAIN_ID ||
+ chainId == OP_SEPOLIA_CHAIN_ID ||
+ chainId == BASE_MAINNET_CHAIN_ID ||
+ chainId == BASE_GOERLI_CHAIN_ID ||
+ chainId == BASE_SEPOLIA_CHAIN_ID;
+ }
+}
diff --git a/contracts/src/v0.8/functions/dev/v1_X/ocr/OCR2Base.sol b/contracts/src/v0.8/functions/dev/v1_X/ocr/OCR2Base.sol
index dd9ea84a519..375159bf4c9 100644
--- a/contracts/src/v0.8/functions/dev/v1_X/ocr/OCR2Base.sol
+++ b/contracts/src/v0.8/functions/dev/v1_X/ocr/OCR2Base.sol
@@ -10,17 +10,10 @@ import {OCR2Abstract} from "./OCR2Abstract.sol";
* doc, which refers to this contract as simply the "contract".
*/
abstract contract OCR2Base is ConfirmedOwner, OCR2Abstract {
- error ReportInvalid();
+ error ReportInvalid(string message);
error InvalidConfig(string message);
- bool internal immutable i_uniqueReports;
-
- constructor(bool uniqueReports) ConfirmedOwner(msg.sender) {
- i_uniqueReports = uniqueReports;
- }
-
- // solhint-disable-next-line chainlink-solidity/all-caps-constant-storage-variables
- uint256 private constant maxUint32 = (1 << 32) - 1;
+ constructor() ConfirmedOwner(msg.sender) {}
// incremented each time a new config is posted. This count is incorporated
// into the config digest, to prevent replay attacks.
@@ -144,12 +137,12 @@ abstract contract OCR2Base is ConfirmedOwner, OCR2Abstract {
// Bounded by MAX_NUM_ORACLES in OCR2Abstract.sol
for (uint256 i = 0; i < args.signers.length; i++) {
+ if (args.signers[i] == address(0)) revert InvalidConfig("signer must not be empty");
+ if (args.transmitters[i] == address(0)) revert InvalidConfig("transmitter must not be empty");
// add new signer/transmitter addresses
- // solhint-disable-next-line custom-errors
- require(s_oracles[args.signers[i]].role == Role.Unset, "repeated signer address");
+ if (s_oracles[args.signers[i]].role != Role.Unset) revert InvalidConfig("repeated signer address");
s_oracles[args.signers[i]] = Oracle(uint8(i), Role.Signer);
- // solhint-disable-next-line custom-errors
- require(s_oracles[args.transmitters[i]].role == Role.Unset, "repeated transmitter address");
+ if (s_oracles[args.transmitters[i]].role != Role.Unset) revert InvalidConfig("repeated transmitter address");
s_oracles[args.transmitters[i]] = Oracle(uint8(i), Role.Transmitter);
s_signers.push(args.signers[i]);
s_transmitters.push(args.transmitters[i]);
@@ -287,8 +280,7 @@ abstract contract OCR2Base is ConfirmedOwner, OCR2Abstract {
ss.length *
32 + // 32 bytes per entry in _ss
0; // placeholder
- // solhint-disable-next-line custom-errors
- require(msg.data.length == expected, "calldata length mismatch");
+ if (msg.data.length != expected) revert ReportInvalid("calldata length mismatch");
}
/**
@@ -319,30 +311,20 @@ abstract contract OCR2Base is ConfirmedOwner, OCR2Abstract {
emit Transmitted(configDigest, uint32(epochAndRound >> 8));
- ConfigInfo memory configInfo = s_configInfo;
- // solhint-disable-next-line custom-errors
- require(configInfo.latestConfigDigest == configDigest, "configDigest mismatch");
+ // The following check is disabled to allow both current and proposed routes to submit reports using the same OCR config digest
+ // Chainlink Functions uses globally unique request IDs. Metadata about the request is stored and checked in the Coordinator and Router
+ // require(configInfo.latestConfigDigest == configDigest, "configDigest mismatch");
_requireExpectedMsgDataLength(report, rs, ss);
- uint256 expectedNumSignatures;
- if (i_uniqueReports) {
- expectedNumSignatures = (configInfo.n + configInfo.f) / 2 + 1;
- } else {
- expectedNumSignatures = configInfo.f + 1;
- }
+ uint256 expectedNumSignatures = (s_configInfo.n + s_configInfo.f) / 2 + 1;
- // solhint-disable-next-line custom-errors
- require(rs.length == expectedNumSignatures, "wrong number of signatures");
- // solhint-disable-next-line custom-errors
- require(rs.length == ss.length, "signatures out of registration");
+ if (rs.length != expectedNumSignatures) revert ReportInvalid("wrong number of signatures");
+ if (rs.length != ss.length) revert ReportInvalid("report rs and ss must be of equal length");
Oracle memory transmitter = s_oracles[msg.sender];
- // solhint-disable-next-line custom-errors
- require( // Check that sender is authorized to report
- transmitter.role == Role.Transmitter && msg.sender == s_transmitters[transmitter.index],
- "unauthorized transmitter"
- );
+ if (transmitter.role != Role.Transmitter && msg.sender != s_transmitters[transmitter.index])
+ revert ReportInvalid("unauthorized transmitter");
}
address[MAX_NUM_ORACLES] memory signed;
@@ -357,10 +339,8 @@ abstract contract OCR2Base is ConfirmedOwner, OCR2Abstract {
for (uint256 i = 0; i < rs.length; ++i) {
address signer = ecrecover(h, uint8(rawVs[i]) + 27, rs[i], ss[i]);
o = s_oracles[signer];
- // solhint-disable-next-line custom-errors
- require(o.role == Role.Signer, "address not authorized to sign");
- // solhint-disable-next-line custom-errors
- require(signed[o.index] == address(0), "non-unique signature");
+ if (o.role != Role.Signer) revert ReportInvalid("address not authorized to sign");
+ if (signed[o.index] != address(0)) revert ReportInvalid("non-unique signature");
signed[o.index] = signer;
signerCount += 1;
}
diff --git a/contracts/src/v0.8/functions/tests/v1_X/ChainSpecificUtil.t.sol b/contracts/src/v0.8/functions/tests/v1_X/ChainSpecificUtil.t.sol
new file mode 100644
index 00000000000..5384a66d912
--- /dev/null
+++ b/contracts/src/v0.8/functions/tests/v1_X/ChainSpecificUtil.t.sol
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.19;
+
+import {BaseTest} from "./BaseTest.t.sol";
+import {FunctionsClient} from "../../dev/v1_X/FunctionsClient.sol";
+import {FunctionsRouter} from "../../dev/v1_X/FunctionsRouter.sol";
+import {FunctionsSubscriptions} from "../../dev/v1_X/FunctionsSubscriptions.sol";
+import {FunctionsRequest} from "../../dev/v1_X/libraries/FunctionsRequest.sol";
+import {FunctionsResponse} from "../../dev/v1_X/libraries/FunctionsResponse.sol";
+
+import {FunctionsFulfillmentSetup} from "./Setup.t.sol";
+
+import {ArbGasInfo} from "../../../vendor/@arbitrum/nitro-contracts/src/precompiles/ArbGasInfo.sol";
+import {OVM_GasPriceOracle} from "../../../vendor/@eth-optimism/contracts/v0.8.9/contracts/L2/predeploys/OVM_GasPriceOracle.sol";
+
+/// @notice #_getCurrentTxL1GasFees Arbitrum
+/// @dev Arbitrum gas formula = L2 Gas Price * (Gas used on L2 + Extra Buffer for L1 cost)
+/// @dev where Extra Buffer for L1 cost = (L1 Estimated Cost / L2 Gas Price)
+contract ChainSpecificUtil__getCurrentTxL1GasFees_Arbitrum is FunctionsFulfillmentSetup {
+ address private constant ARBGAS_ADDR = address(0x000000000000000000000000000000000000006C);
+ uint256 private constant L1_FEE_WEI = 15_818_209_764_247;
+
+ uint96 l1FeeJuels = uint96((1e18 * L1_FEE_WEI) / uint256(LINK_ETH_RATE));
+
+ function setUp() public virtual override {
+ vm.mockCall(ARBGAS_ADDR, abi.encodeWithSelector(ArbGasInfo.getCurrentTxL1GasFees.selector), abi.encode(L1_FEE_WEI));
+ }
+
+ function test__getCurrentTxL1GasFees_SuccessWhenArbitrumMainnet() public {
+ // Set the chainID
+ vm.chainId(42161);
+
+ // Setup sends and fulfills request #1
+ FunctionsFulfillmentSetup.setUp();
+
+ // Check request cost estimate
+ uint96 expectedEstimatedTotalCostJuels = _getExpectedCostEstimate(s_requests[1].requestData.callbackGasLimit) +
+ l1FeeJuels;
+ assertEq(s_requests[1].commitment.estimatedTotalCostJuels, expectedEstimatedTotalCostJuels);
+
+ // Check response actual cost
+ uint96 expectedTotalCostJuels = _getExpectedCost(5416) + l1FeeJuels;
+ assertEq(s_responses[1].totalCostJuels, expectedTotalCostJuels);
+ }
+
+ function test__getCurrentTxL1GasFees_SuccessWhenArbitrumGoerli() public {
+ // Set the chainID
+ vm.chainId(421613);
+
+ // Setup sends and fulfills request #1
+ FunctionsFulfillmentSetup.setUp();
+
+ // Check request cost estimate
+ uint96 expectedEstimatedTotalCostJuels = _getExpectedCostEstimate(s_requests[1].requestData.callbackGasLimit) +
+ l1FeeJuels;
+ assertEq(s_requests[1].commitment.estimatedTotalCostJuels, expectedEstimatedTotalCostJuels);
+
+ // Check response actual cost
+ uint96 expectedTotalCostJuels = _getExpectedCost(5416) + l1FeeJuels;
+ assertEq(s_responses[1].totalCostJuels, expectedTotalCostJuels);
+ }
+
+ function test__getCurrentTxL1GasFees_SuccessWhenArbitrumSepolia() public {
+ // Set the chainID
+ vm.chainId(421614);
+
+ // Setup sends and fulfills request #1
+ FunctionsFulfillmentSetup.setUp();
+
+ // Check request cost estimate
+ uint96 expectedEstimatedTotalCostJuels = _getExpectedCostEstimate(s_requests[1].requestData.callbackGasLimit) +
+ l1FeeJuels;
+ assertEq(s_requests[1].commitment.estimatedTotalCostJuels, expectedEstimatedTotalCostJuels);
+
+ // Check response actual cost
+ uint96 expectedTotalCostJuels = _getExpectedCost(5416) + l1FeeJuels;
+ assertEq(s_responses[1].totalCostJuels, expectedTotalCostJuels);
+ }
+}
+
+/// @notice #_getCurrentTxL1GasFees Optimism
+/// @dev Optimism gas formula = ((l2_base_fee + l2_priority_fee) * l2_gas_used) + L1 data fee
+/// @dev where L1 data fee = l1_gas_price * ((count_zero_bytes(tx_data) * 4 + count_non_zero_bytes(tx_data) * 16) + fixed_overhead + noncalldata_gas) * dynamic_overhead
+contract ChainSpecificUtil__getCurrentTxL1GasFees_Optimism is FunctionsFulfillmentSetup {
+ address private constant OVM_GASPRICEORACLE_ADDR = address(0x420000000000000000000000000000000000000F);
+ uint256 private constant L1_FEE_WEI = 15_818_209_764_247;
+
+ uint96 l1FeeJuels = uint96((1e18 * L1_FEE_WEI) / uint256(LINK_ETH_RATE));
+
+ function setUp() public virtual override {
+ vm.mockCall(
+ OVM_GASPRICEORACLE_ADDR,
+ abi.encodeWithSelector(OVM_GasPriceOracle.getL1Fee.selector),
+ abi.encode(L1_FEE_WEI)
+ );
+ }
+
+ function test__getCurrentTxL1GasFees_SuccessWhenOptimismMainnet() public {
+ // Set the chainID
+ vm.chainId(10);
+
+ // Setup sends and fulfills request #1
+ FunctionsFulfillmentSetup.setUp();
+
+ // Check request cost estimate
+ uint96 expectedEstimatedTotalCostJuels = _getExpectedCostEstimate(s_requests[1].requestData.callbackGasLimit) +
+ l1FeeJuels;
+ assertEq(s_requests[1].commitment.estimatedTotalCostJuels, expectedEstimatedTotalCostJuels);
+
+ // Check response actual cost
+ uint96 expectedTotalCostJuels = _getExpectedCost(5416) + l1FeeJuels;
+ assertEq(s_responses[1].totalCostJuels, expectedTotalCostJuels);
+ }
+
+ function test__getCurrentTxL1GasFees_SuccessWhenOptimismGoerli() public {
+ // Set the chainID
+ vm.chainId(420);
+
+ // Setup sends and fulfills request #1
+ FunctionsFulfillmentSetup.setUp();
+
+ // Check request cost estimate
+ uint96 expectedEstimatedTotalCostJuels = _getExpectedCostEstimate(s_requests[1].requestData.callbackGasLimit) +
+ l1FeeJuels;
+ assertEq(s_requests[1].commitment.estimatedTotalCostJuels, expectedEstimatedTotalCostJuels);
+
+ // Check response actual cost
+ uint96 expectedTotalCostJuels = _getExpectedCost(5416) + l1FeeJuels;
+ assertEq(s_responses[1].totalCostJuels, expectedTotalCostJuels);
+ }
+
+ function test__getCurrentTxL1GasFees_SuccessWhenOptimismSepolia() public {
+ // Set the chainID
+ vm.chainId(11155420);
+
+ // Setup sends and fulfills request #1
+ FunctionsFulfillmentSetup.setUp();
+
+ // Check request cost estimate
+ uint96 expectedEstimatedTotalCostJuels = _getExpectedCostEstimate(s_requests[1].requestData.callbackGasLimit) +
+ l1FeeJuels;
+ assertEq(s_requests[1].commitment.estimatedTotalCostJuels, expectedEstimatedTotalCostJuels);
+
+ // Check response actual cost
+ uint96 expectedTotalCostJuels = _getExpectedCost(5416) + l1FeeJuels;
+ assertEq(s_responses[1].totalCostJuels, expectedTotalCostJuels);
+ }
+}
+
+/// @notice #_getCurrentTxL1GasFees Base
+/// @dev Base gas formula uses Optimism formula = ((l2_base_fee + l2_priority_fee) * l2_gas_used) + L1 data fee
+/// @dev where L1 data fee = l1_gas_price * ((count_zero_bytes(tx_data) * 4 + count_non_zero_bytes(tx_data) * 16) + fixed_overhead + noncalldata_gas) * dynamic_overhead
+contract ChainSpecificUtil__getCurrentTxL1GasFees_Base is FunctionsFulfillmentSetup {
+ address private constant OVM_GASPRICEORACLE_ADDR = address(0x420000000000000000000000000000000000000F);
+ uint256 private constant L1_FEE_WEI = 15_818_209_764_247;
+
+ uint96 l1FeeJuels = uint96((1e18 * L1_FEE_WEI) / uint256(LINK_ETH_RATE));
+
+ function setUp() public virtual override {
+ vm.mockCall(
+ OVM_GASPRICEORACLE_ADDR,
+ abi.encodeWithSelector(OVM_GasPriceOracle.getL1Fee.selector),
+ abi.encode(L1_FEE_WEI)
+ );
+ }
+
+ function test__getCurrentTxL1GasFees_SuccessWhenBaseMainnet() public {
+ // Set the chainID
+ vm.chainId(8453);
+
+ // Setup sends and fulfills request #1
+ FunctionsFulfillmentSetup.setUp();
+
+ // Check request cost estimate
+ uint96 expectedEstimatedTotalCostJuels = _getExpectedCostEstimate(s_requests[1].requestData.callbackGasLimit) +
+ l1FeeJuels;
+ assertEq(s_requests[1].commitment.estimatedTotalCostJuels, expectedEstimatedTotalCostJuels);
+
+ // Check response actual cost
+ uint96 expectedTotalCostJuels = _getExpectedCost(5416) + l1FeeJuels;
+ assertEq(s_responses[1].totalCostJuels, expectedTotalCostJuels);
+ }
+
+ function test__getCurrentTxL1GasFees_SuccessWhenBaseGoerli() public {
+ // Set the chainID
+ vm.chainId(84531);
+
+ // Setup sends and fulfills request #1
+ FunctionsFulfillmentSetup.setUp();
+
+ // Check request cost estimate
+ uint96 expectedEstimatedTotalCostJuels = _getExpectedCostEstimate(s_requests[1].requestData.callbackGasLimit) +
+ l1FeeJuels;
+ assertEq(s_requests[1].commitment.estimatedTotalCostJuels, expectedEstimatedTotalCostJuels);
+
+ // Check response actual cost
+ uint96 expectedTotalCostJuels = _getExpectedCost(5416) + l1FeeJuels;
+ assertEq(s_responses[1].totalCostJuels, expectedTotalCostJuels);
+ }
+
+ function test__getCurrentTxL1GasFees_SuccessWhenBaseSepolia() public {
+ // Set the chainID
+ vm.chainId(84532);
+
+ // Setup sends and fulfills request #1
+ FunctionsFulfillmentSetup.setUp();
+
+ // Check request cost estimate
+ uint96 expectedEstimatedTotalCostJuels = _getExpectedCostEstimate(s_requests[1].requestData.callbackGasLimit) +
+ l1FeeJuels;
+ assertEq(s_requests[1].commitment.estimatedTotalCostJuels, expectedEstimatedTotalCostJuels);
+
+ // Check response actual cost
+ uint96 expectedTotalCostJuels = _getExpectedCost(5416) + l1FeeJuels;
+ assertEq(s_responses[1].totalCostJuels, expectedTotalCostJuels);
+ }
+}
diff --git a/contracts/src/v0.8/functions/tests/v1_X/FunctionsBilling.t.sol b/contracts/src/v0.8/functions/tests/v1_X/FunctionsBilling.t.sol
index 82dea8672c8..6e94e4fc5f7 100644
--- a/contracts/src/v0.8/functions/tests/v1_X/FunctionsBilling.t.sol
+++ b/contracts/src/v0.8/functions/tests/v1_X/FunctionsBilling.t.sol
@@ -4,6 +4,7 @@ pragma solidity ^0.8.19;
import {FunctionsCoordinator} from "../../dev/v1_X/FunctionsCoordinator.sol";
import {FunctionsBilling} from "../../dev/v1_X/FunctionsBilling.sol";
import {FunctionsRequest} from "../../dev/v1_X/libraries/FunctionsRequest.sol";
+import {FunctionsResponse} from "../../dev/v1_X/libraries/FunctionsResponse.sol";
import {FunctionsSubscriptions} from "../../dev/v1_X/FunctionsSubscriptions.sol";
import {Routable} from "../../dev/v1_X/Routable.sol";
@@ -221,8 +222,55 @@ contract FunctionsBilling__StartBilling {
}
/// @notice #_fulfillAndBill
-contract FunctionsBilling__FulfillAndBill {
- // TODO: make contract internal function helper
+contract FunctionsBilling__FulfillAndBill is FunctionsClientRequestSetup {
+ function test__FulfillAndBill_RevertIfInvalidCommitment() public {
+ vm.expectRevert();
+ s_functionsCoordinator.fulfillAndBill_HARNESS(
+ s_requests[1].requestId,
+ new bytes(0),
+ new bytes(0),
+ new bytes(0), // malformed commitment data
+ new bytes(0),
+ 1
+ );
+ }
+
+ event RequestBilled(
+ bytes32 indexed requestId,
+ uint96 juelsPerGas,
+ uint256 l1FeeShareWei,
+ uint96 callbackCostJuels,
+ uint96 totalCostJuels
+ );
+
+ function test__FulfillAndBill_Success() public {
+ uint96 juelsPerGas = uint96((1e18 * TX_GASPRICE_START) / uint256(LINK_ETH_RATE));
+ uint96 callbackCostGas = 5072; // Taken manually
+ uint96 callbackCostJuels = juelsPerGas * callbackCostGas;
+ uint96 gasOverheadJuels = juelsPerGas *
+ (getCoordinatorConfig().gasOverheadBeforeCallback + getCoordinatorConfig().gasOverheadAfterCallback);
+
+ uint96 totalCostJuels = gasOverheadJuels + callbackCostJuels + s_donFee + s_adminFee;
+
+ // topic0 (function signature, always checked), check topic1 (true), NOT topic2 (false), NOT topic3 (false), and data (true).
+ bool checkTopic1 = true;
+ bool checkTopic2 = false;
+ bool checkTopic3 = false;
+ bool checkData = true;
+ vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData);
+ emit RequestBilled(s_requests[1].requestId, juelsPerGas, 0, callbackCostJuels, totalCostJuels);
+
+ FunctionsResponse.FulfillResult resultCode = s_functionsCoordinator.fulfillAndBill_HARNESS(
+ s_requests[1].requestId,
+ new bytes(0),
+ new bytes(0),
+ abi.encode(s_requests[1].commitment),
+ new bytes(0),
+ 1
+ );
+
+ assertEq(uint256(resultCode), uint256(FunctionsResponse.FulfillResult.FULFILLED));
+ }
}
/// @notice #deleteCommitment
diff --git a/contracts/src/v0.8/functions/tests/v1_X/FunctionsCoordinator.t.sol b/contracts/src/v0.8/functions/tests/v1_X/FunctionsCoordinator.t.sol
index 7166add19fe..f6d3d41e632 100644
--- a/contracts/src/v0.8/functions/tests/v1_X/FunctionsCoordinator.t.sol
+++ b/contracts/src/v0.8/functions/tests/v1_X/FunctionsCoordinator.t.sol
@@ -10,7 +10,6 @@ import {Routable} from "../../dev/v1_X/Routable.sol";
import {BaseTest} from "./BaseTest.t.sol";
import {FunctionsRouterSetup, FunctionsDONSetup, FunctionsSubscriptionSetup} from "./Setup.t.sol";
-import "forge-std/console.sol";
/// @notice #constructor
contract FunctionsCoordinator_Constructor is FunctionsRouterSetup {
diff --git a/contracts/src/v0.8/functions/tests/v1_X/FunctionsRequest.t.sol b/contracts/src/v0.8/functions/tests/v1_X/FunctionsRequest.t.sol
index 5457a221b61..e9684d9f5b3 100644
--- a/contracts/src/v0.8/functions/tests/v1_X/FunctionsRequest.t.sol
+++ b/contracts/src/v0.8/functions/tests/v1_X/FunctionsRequest.t.sol
@@ -30,31 +30,19 @@ contract FunctionsRequest_EncodeCBOR is Test {
}
/// @notice #initializeRequest
-contract FunctionsRequest_InitializeRequest is Test {
-
-}
+contract FunctionsRequest_InitializeRequest is Test {}
/// @notice #initializeRequestForInlineJavaScript
-contract FunctionsRequest_InitializeRequestForInlineJavaScript is Test {
-
-}
+contract FunctionsRequest_InitializeRequestForInlineJavaScript is Test {}
/// @notice #addSecretsReference
-contract FunctionsRequest_AddSecretsReference is Test {
-
-}
+contract FunctionsRequest_AddSecretsReference is Test {}
/// @notice #addDONHostedSecrets
-contract FunctionsRequest_AddDONHostedSecrets is Test {
-
-}
+contract FunctionsRequest_AddDONHostedSecrets is Test {}
/// @notice #setArgs
-contract FunctionsRequest_SetArgs is Test {
-
-}
+contract FunctionsRequest_SetArgs is Test {}
/// @notice #setBytesArgs
-contract FunctionsRequest_SetBytesArgs is Test {
-
-}
+contract FunctionsRequest_SetBytesArgs is Test {}
diff --git a/contracts/src/v0.8/functions/tests/v1_X/FunctionsSubscriptions.t.sol b/contracts/src/v0.8/functions/tests/v1_X/FunctionsSubscriptions.t.sol
index 8f08a6c1e86..5a54bcc84ca 100644
--- a/contracts/src/v0.8/functions/tests/v1_X/FunctionsSubscriptions.t.sol
+++ b/contracts/src/v0.8/functions/tests/v1_X/FunctionsSubscriptions.t.sol
@@ -309,11 +309,22 @@ contract FunctionsSubscriptions_OwnerWithdraw is FunctionsFulfillmentSetup {
}
/// @notice #onTokenTransfer
-contract FunctionsSubscriptions_OnTokenTransfer is FunctionsSubscriptionSetup {
+contract FunctionsSubscriptions_OnTokenTransfer is FunctionsClientSetup {
+ uint64 s_subscriptionId;
+
+ function setUp() public virtual override {
+ FunctionsClientSetup.setUp();
+
+ // Create subscription, but do not fund it
+ s_subscriptionId = s_functionsRouter.createSubscription();
+ s_functionsRouter.addConsumer(s_subscriptionId, address(s_functionsClient));
+ }
+
function test_OnTokenTransfer_RevertIfPaused(uint96 fundingAmount) public {
// Funding amount must be less than LINK total supply
- vm.assume(fundingAmount < 1_000_000_000 * 1e18);
- vm.assume(fundingAmount > 0);
+ uint256 totalSupplyJuels = 1_000_000_000 * 1e18;
+ vm.assume(fundingAmount <= totalSupplyJuels);
+ vm.assume(fundingAmount >= 0);
s_functionsRouter.pause();
vm.expectRevert("Pausable: paused");
@@ -322,8 +333,9 @@ contract FunctionsSubscriptions_OnTokenTransfer is FunctionsSubscriptionSetup {
function test_OnTokenTransfer_RevertIfCallerIsNotLink(uint96 fundingAmount) public {
// Funding amount must be less than LINK total supply
- vm.assume(fundingAmount < 1_000_000_000 * 1e18);
- vm.assume(fundingAmount > 0);
+ uint256 totalSupplyJuels = 1_000_000_000 * 1e18;
+ vm.assume(fundingAmount <= totalSupplyJuels);
+ vm.assume(fundingAmount >= 0);
vm.expectRevert(FunctionsSubscriptions.OnlyCallableFromLink.selector);
s_functionsRouter.onTokenTransfer(address(s_functionsRouter), fundingAmount, abi.encode(s_subscriptionId));
@@ -331,8 +343,9 @@ contract FunctionsSubscriptions_OnTokenTransfer is FunctionsSubscriptionSetup {
function test_OnTokenTransfer_RevertIfCallerIsNoCalldata(uint96 fundingAmount) public {
// Funding amount must be less than LINK total supply
- vm.assume(fundingAmount < 1_000_000_000 * 1e18);
- vm.assume(fundingAmount > 0);
+ uint256 totalSupplyJuels = 1_000_000_000 * 1e18;
+ vm.assume(fundingAmount <= totalSupplyJuels);
+ vm.assume(fundingAmount >= 0);
vm.expectRevert(FunctionsSubscriptions.InvalidCalldata.selector);
s_linkToken.transferAndCall(address(s_functionsRouter), fundingAmount, new bytes(0));
@@ -340,8 +353,9 @@ contract FunctionsSubscriptions_OnTokenTransfer is FunctionsSubscriptionSetup {
function test_OnTokenTransfer_RevertIfCallerIsNoSubscription(uint96 fundingAmount) public {
// Funding amount must be less than LINK total supply
- vm.assume(fundingAmount < 1_000_000_000 * 1e18);
- vm.assume(fundingAmount > 0);
+ uint256 totalSupplyJuels = 1_000_000_000 * 1e18;
+ vm.assume(fundingAmount <= totalSupplyJuels);
+ vm.assume(fundingAmount >= 0);
vm.expectRevert(FunctionsSubscriptions.InvalidSubscription.selector);
uint64 invalidSubscriptionId = 123456789;
@@ -349,17 +363,15 @@ contract FunctionsSubscriptions_OnTokenTransfer is FunctionsSubscriptionSetup {
}
function test_OnTokenTransfer_Success(uint96 fundingAmount) public {
- uint96 subscriptionBalanceBefore = s_functionsRouter.getSubscription(s_subscriptionId).balance;
-
// Funding amount must be less than LINK total supply
- uint96 TOTAL_LINK = 1_000_000_000 * 1e18;
+ uint256 totalSupplyJuels = 1_000_000_000 * 1e18;
// Some of the total supply is already in the subscription account
- vm.assume(fundingAmount < TOTAL_LINK - subscriptionBalanceBefore);
- vm.assume(fundingAmount > 0);
+ vm.assume(fundingAmount <= totalSupplyJuels);
+ vm.assume(fundingAmount >= 0);
s_linkToken.transferAndCall(address(s_functionsRouter), fundingAmount, abi.encode(s_subscriptionId));
uint96 subscriptionBalanceAfter = s_functionsRouter.getSubscription(s_subscriptionId).balance;
- assertEq(subscriptionBalanceBefore + fundingAmount, subscriptionBalanceAfter);
+ assertEq(fundingAmount, subscriptionBalanceAfter);
}
}
diff --git a/contracts/src/v0.8/functions/tests/v1_X/Gas.t.sol b/contracts/src/v0.8/functions/tests/v1_X/Gas.t.sol
index 55ab3810b41..f2d7af54e4f 100644
--- a/contracts/src/v0.8/functions/tests/v1_X/Gas.t.sol
+++ b/contracts/src/v0.8/functions/tests/v1_X/Gas.t.sol
@@ -154,14 +154,6 @@ contract Gas_SendRequest is FunctionsSubscriptionSetup {
/// @notice #fulfillRequest
contract FunctionsClient_FulfillRequest is FunctionsClientRequestSetup {
- struct Report {
- bytes32[] rs;
- bytes32[] ss;
- bytes32 vs;
- bytes report;
- bytes32[3] reportContext;
- }
-
mapping(uint256 reportNumber => Report) s_reports;
FunctionsClientTestHelper s_functionsClientWithMaximumReturnData;
diff --git a/contracts/src/v0.8/functions/tests/v1_X/OCR2.t.sol b/contracts/src/v0.8/functions/tests/v1_X/OCR2.t.sol
index 745ad4f0ae9..3dc0db85a47 100644
--- a/contracts/src/v0.8/functions/tests/v1_X/OCR2.t.sol
+++ b/contracts/src/v0.8/functions/tests/v1_X/OCR2.t.sol
@@ -6,39 +6,25 @@ pragma solidity ^0.8.19;
// ================================================================
/// @notice #constructor
-contract OCR2Base_Constructor {
-
-}
+contract OCR2Base_Constructor {}
/// @notice #checkConfigValid
-contract OCR2Base_CheckConfigValid {
-
-}
+contract OCR2Base_CheckConfigValid {}
/// @notice #latestConfigDigestAndEpoch
-contract OCR2Base_LatestConfigDigestAndEpoch {
-
-}
+contract OCR2Base_LatestConfigDigestAndEpoch {}
/// @notice #setConfig
-contract OCR2Base_SetConfig {
-
-}
+contract OCR2Base_SetConfig {}
/// @notice #configDigestFromConfigData
-contract OCR2Base_ConfigDigestFromConfigData {
-
-}
+contract OCR2Base_ConfigDigestFromConfigData {}
/// @notice #latestConfigDetails
-contract OCR2Base_LatestConfigDetails {
-
-}
+contract OCR2Base_LatestConfigDetails {}
/// @notice #transmitters
-contract OCR2Base_Transmitters {
-
-}
+contract OCR2Base_Transmitters {}
/// @notice #_report
contract OCR2Base__Report {
@@ -46,11 +32,7 @@ contract OCR2Base__Report {
}
/// @notice #requireExpectedMsgDataLength
-contract OCR2Base_RequireExpectedMsgDataLength {
-
-}
+contract OCR2Base_RequireExpectedMsgDataLength {}
/// @notice #transmit
-contract OCR2Base_Transmit {
-
-}
+contract OCR2Base_Transmit {}
diff --git a/contracts/src/v0.8/functions/tests/v1_X/Setup.t.sol b/contracts/src/v0.8/functions/tests/v1_X/Setup.t.sol
index 0c08fd20cd3..97418958bc2 100644
--- a/contracts/src/v0.8/functions/tests/v1_X/Setup.t.sol
+++ b/contracts/src/v0.8/functions/tests/v1_X/Setup.t.sol
@@ -225,6 +225,14 @@ contract FunctionsSubscriptionSetup is FunctionsClientSetup {
/// @notice Set up to initate a minimal request and store it in s_requests[1]
contract FunctionsClientRequestSetup is FunctionsSubscriptionSetup {
+ struct Report {
+ bytes32[] rs;
+ bytes32[] ss;
+ bytes32 vs;
+ bytes report;
+ bytes32[3] reportContext;
+ }
+
struct RequestData {
string sourceCode;
bytes secrets;
@@ -240,6 +248,12 @@ contract FunctionsClientRequestSetup is FunctionsSubscriptionSetup {
mapping(uint256 requestNumber => Request) s_requests;
+ struct Response {
+ uint96 totalCostJuels;
+ }
+
+ mapping(uint256 requestNumber => Response) s_responses;
+
uint96 s_fulfillmentRouterOwnerBalance = 0;
uint96 s_fulfillmentCoordinatorBalance = 0;
@@ -255,7 +269,24 @@ contract FunctionsClientRequestSetup is FunctionsSubscriptionSetup {
_sendAndStoreRequest(1, sourceCode, secrets, args, bytesArgs, callbackGasLimit);
}
- function _getExpectedCost(uint256 gasUsed) internal view returns (uint96 totalCostJuels) {
+ /// @notice Predicts the estimated cost (maximum cost) of a request
+ /// @dev Meant only for Ethereum, does not add L2 chains' L1 fee
+ function _getExpectedCostEstimate(uint256 callbackGas) internal view returns (uint96) {
+ uint256 gasPrice = TX_GASPRICE_START < getCoordinatorConfig().minimumEstimateGasPriceWei
+ ? getCoordinatorConfig().minimumEstimateGasPriceWei
+ : TX_GASPRICE_START;
+ uint256 gasPriceWithOverestimation = gasPrice +
+ ((gasPrice * getCoordinatorConfig().fulfillmentGasPriceOverEstimationBP) / 10_000);
+ uint96 juelsPerGas = uint96((1e18 * gasPriceWithOverestimation) / uint256(LINK_ETH_RATE));
+ uint96 gasOverheadJuels = juelsPerGas *
+ ((getCoordinatorConfig().gasOverheadBeforeCallback + getCoordinatorConfig().gasOverheadAfterCallback));
+ uint96 callbackGasCostJuels = uint96(juelsPerGas * callbackGas);
+ return gasOverheadJuels + s_donFee + s_adminFee + callbackGasCostJuels;
+ }
+
+ /// @notice Predicts the actual cost of a request
+ /// @dev Meant only for Ethereum, does not add L2 chains' L1 fee
+ function _getExpectedCost(uint256 gasUsed) internal view returns (uint96) {
uint96 juelsPerGas = uint96((1e18 * TX_GASPRICE_START) / uint256(LINK_ETH_RATE));
uint96 gasOverheadJuels = juelsPerGas *
(getCoordinatorConfig().gasOverheadBeforeCallback + getCoordinatorConfig().gasOverheadAfterCallback);
@@ -400,7 +431,7 @@ contract FunctionsClientRequestSetup is FunctionsSubscriptionSetup {
bytes memory report,
bytes32[3] memory reportContext,
uint256[] memory signerPrivateKeys
- ) internal pure returns (bytes32[] memory rawRs, bytes32[] memory rawSs, bytes32 rawVs) {
+ ) internal pure returns (bytes32[] memory, bytes32[] memory, bytes32) {
bytes32[] memory rs = new bytes32[](signerPrivateKeys.length);
bytes32[] memory ss = new bytes32[](signerPrivateKeys.length);
bytes memory vs = new bytes(signerPrivateKeys.length);
@@ -417,13 +448,35 @@ contract FunctionsClientRequestSetup is FunctionsSubscriptionSetup {
return (rs, ss, bytes32(vs));
}
+ function _buildAndSignReport(
+ uint256[] memory requestNumberKeys,
+ string[] memory results,
+ bytes[] memory errors
+ ) internal view returns (Report memory) {
+ (bytes memory report, bytes32[3] memory reportContext) = _buildReport(requestNumberKeys, results, errors);
+
+ // Sign the report
+ // Need at least 3 signers to fulfill minimum number of: (configInfo.n + configInfo.f) / 2 + 1
+ uint256[] memory signerPrivateKeys = new uint256[](3);
+ signerPrivateKeys[0] = NOP_SIGNER_PRIVATE_KEY_1;
+ signerPrivateKeys[1] = NOP_SIGNER_PRIVATE_KEY_2;
+ signerPrivateKeys[2] = NOP_SIGNER_PRIVATE_KEY_3;
+ (bytes32[] memory rawRs, bytes32[] memory rawSs, bytes32 rawVs) = _signReport(
+ report,
+ reportContext,
+ signerPrivateKeys
+ );
+
+ return Report({report: report, reportContext: reportContext, rs: rawRs, ss: rawSs, vs: rawVs});
+ }
+
/// @notice Provide a response from the DON to fulfill one or more requests and store the updated balances of the DON & Admin
/// @param requestNumberKeys - One or more requestNumberKeys that were used to store the request in `s_requests` of the requests, that will be added to the report
/// @param results - The result that will be sent to the consumer contract's callback. For each index, e.g. result[index] or errors[index], only one of should be filled.
/// @param errors - The error that will be sent to the consumer contract's callback. For each index, e.g. result[index] or errors[index], only one of should be filled.
/// @param transmitter - The address that will send the `.report` transaction
/// @param expectedToSucceed - Boolean representing if the report transmission is expected to produce a RequestProcessed event for every fulfillment. If not, we ignore retrieving the event log.
- /// @param requestProcessedIndex - On a successful fulfillment the Router will emit a RequestProcessed event. To grab that event we must know the order at which this event was thrown in the report transmission lifecycle. This can change depending on the test setup (e.g. the Client contract gives an extra event during its callback)
+ /// @param requestProcessedStartIndex - On a successful fulfillment the Router will emit a RequestProcessed event. To grab that event we must know the order at which this event was thrown in the report transmission lifecycle. This can change depending on the test setup (e.g. the Client contract gives an extra event during its callback)
/// @param transmitterGasToUse - Override the default amount of gas that the transmitter sends the `.report` transaction with
function _reportAndStore(
uint256[] memory requestNumberKeys,
@@ -431,7 +484,7 @@ contract FunctionsClientRequestSetup is FunctionsSubscriptionSetup {
bytes[] memory errors,
address transmitter,
bool expectedToSucceed,
- uint8 requestProcessedIndex,
+ uint8 requestProcessedStartIndex,
uint256 transmitterGasToUse
) internal {
{
@@ -440,19 +493,7 @@ contract FunctionsClientRequestSetup is FunctionsSubscriptionSetup {
}
}
- (bytes memory report, bytes32[3] memory reportContext) = _buildReport(requestNumberKeys, results, errors);
-
- // Sign the report
- // Need at least 3 signers to fulfill minimum number of: (configInfo.n + configInfo.f) / 2 + 1
- uint256[] memory signerPrivateKeys = new uint256[](3);
- signerPrivateKeys[0] = NOP_SIGNER_PRIVATE_KEY_1;
- signerPrivateKeys[1] = NOP_SIGNER_PRIVATE_KEY_2;
- signerPrivateKeys[2] = NOP_SIGNER_PRIVATE_KEY_3;
- (bytes32[] memory rawRs, bytes32[] memory rawSs, bytes32 rawVs) = _signReport(
- report,
- reportContext,
- signerPrivateKeys
- );
+ Report memory r = _buildAndSignReport(requestNumberKeys, results, errors);
// Send as transmitter
vm.stopPrank();
@@ -461,20 +502,24 @@ contract FunctionsClientRequestSetup is FunctionsSubscriptionSetup {
// Send report
vm.recordLogs();
if (transmitterGasToUse > 0) {
- s_functionsCoordinator.transmit{gas: transmitterGasToUse}(reportContext, report, rawRs, rawSs, rawVs);
+ s_functionsCoordinator.transmit{gas: transmitterGasToUse}(r.reportContext, r.report, r.rs, r.ss, r.vs);
} else {
- s_functionsCoordinator.transmit(reportContext, report, rawRs, rawSs, rawVs);
+ s_functionsCoordinator.transmit(r.reportContext, r.report, r.rs, r.ss, r.vs);
}
if (expectedToSucceed) {
// Get actual cost from RequestProcessed event log
(uint96 totalCostJuels, , , , , ) = abi.decode(
- vm.getRecordedLogs()[requestProcessedIndex].data,
+ vm.getRecordedLogs()[requestProcessedStartIndex].data,
(uint96, address, FunctionsResponse.FulfillResult, bytes, bytes, bytes)
);
+ // Store response of first request
+ // TODO: handle multiple requests
+ s_responses[requestNumberKeys[0]] = Response({totalCostJuels: totalCostJuels});
// Store profit amounts
- s_fulfillmentRouterOwnerBalance += s_adminFee;
+ s_fulfillmentRouterOwnerBalance += s_adminFee * uint96(requestNumberKeys.length);
// totalCostJuels = costWithoutCallbackJuels + adminFee + callbackGasCostJuels
+ // TODO: handle multiple requests
s_fulfillmentCoordinatorBalance += totalCostJuels - s_adminFee;
}
diff --git a/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsCoordinatorHarness.sol b/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsCoordinatorHarness.sol
index bc103fc3561..c1b6d5d0b14 100644
--- a/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsCoordinatorHarness.sol
+++ b/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsCoordinatorHarness.sol
@@ -79,9 +79,10 @@ contract FunctionsCoordinatorHarness is FunctionsCoordinator {
bytes memory response,
bytes memory err,
bytes memory onchainMetadata,
- bytes memory offchainMetadata
+ bytes memory offchainMetadata,
+ uint8 reportBatchSize
) external returns (FunctionsResponse.FulfillResult) {
- return super._fulfillAndBill(requestId, response, err, onchainMetadata, offchainMetadata);
+ return super._fulfillAndBill(requestId, response, err, onchainMetadata, offchainMetadata, reportBatchSize);
}
function disperseFeePool_HARNESS() external {
diff --git a/contracts/src/v0.8/functions/v1_0_0/FunctionsBilling.sol b/contracts/src/v0.8/functions/v1_0_0/FunctionsBilling.sol
index 5168bdc01ed..8de53dd9c04 100644
--- a/contracts/src/v0.8/functions/v1_0_0/FunctionsBilling.sol
+++ b/contracts/src/v0.8/functions/v1_0_0/FunctionsBilling.sol
@@ -2,7 +2,7 @@
pragma solidity ^0.8.19;
import {IFunctionsSubscriptions} from "./interfaces/IFunctionsSubscriptions.sol";
-import {AggregatorV3Interface} from "../../interfaces/AggregatorV3Interface.sol";
+import {AggregatorV3Interface} from "../../shared/interfaces/AggregatorV3Interface.sol";
import {IFunctionsBilling} from "./interfaces/IFunctionsBilling.sol";
import {Routable} from "./Routable.sol";
diff --git a/contracts/src/v0.8/functions/v1_0_0/interfaces/IOwnableFunctionsRouter.sol b/contracts/src/v0.8/functions/v1_0_0/interfaces/IOwnableFunctionsRouter.sol
index 89eb48022be..c5f3d82677e 100644
--- a/contracts/src/v0.8/functions/v1_0_0/interfaces/IOwnableFunctionsRouter.sol
+++ b/contracts/src/v0.8/functions/v1_0_0/interfaces/IOwnableFunctionsRouter.sol
@@ -5,6 +5,4 @@ import {IFunctionsRouter} from "./IFunctionsRouter.sol";
import {IOwnable} from "../../../shared/interfaces/IOwnable.sol";
/// @title Chainlink Functions Router interface with Ownability.
-interface IOwnableFunctionsRouter is IOwnable, IFunctionsRouter {
-
-}
+interface IOwnableFunctionsRouter is IOwnable, IFunctionsRouter {}
diff --git a/contracts/src/v0.8/interfaces/FeedRegistryInterface.sol b/contracts/src/v0.8/interfaces/FeedRegistryInterface.sol
index 1d2367d82a8..f3272174ae2 100644
--- a/contracts/src/v0.8/interfaces/FeedRegistryInterface.sol
+++ b/contracts/src/v0.8/interfaces/FeedRegistryInterface.sol
@@ -2,7 +2,7 @@
pragma solidity ^0.8.0;
pragma abicoder v2;
-import {AggregatorV2V3Interface} from "./AggregatorV2V3Interface.sol";
+import {AggregatorV2V3Interface} from "../shared/interfaces/AggregatorV2V3Interface.sol";
interface FeedRegistryInterface {
struct Phase {
diff --git a/contracts/src/v0.8/l2ep/dev/CrossDomainDelegateForwarder.sol b/contracts/src/v0.8/l2ep/dev/CrossDomainDelegateForwarder.sol
index 1eb6cba932d..5dc73619afc 100644
--- a/contracts/src/v0.8/l2ep/dev/CrossDomainDelegateForwarder.sol
+++ b/contracts/src/v0.8/l2ep/dev/CrossDomainDelegateForwarder.sol
@@ -10,6 +10,4 @@ import {DelegateForwarderInterface} from "./interfaces/DelegateForwarderInterfac
* @dev Any other L2 contract which uses this contract's address as a privileged position,
* can consider that position to be held by the `l1Owner`
*/
-abstract contract CrossDomainDelegateForwarder is DelegateForwarderInterface, CrossDomainOwnable {
-
-}
+abstract contract CrossDomainDelegateForwarder is DelegateForwarderInterface, CrossDomainOwnable {}
diff --git a/contracts/src/v0.8/l2ep/dev/CrossDomainForwarder.sol b/contracts/src/v0.8/l2ep/dev/CrossDomainForwarder.sol
index 1f9066c5054..8f218f66b80 100644
--- a/contracts/src/v0.8/l2ep/dev/CrossDomainForwarder.sol
+++ b/contracts/src/v0.8/l2ep/dev/CrossDomainForwarder.sol
@@ -10,6 +10,4 @@ import {ForwarderInterface} from "./interfaces/ForwarderInterface.sol";
* @dev Any other L2 contract which uses this contract's address as a privileged position,
* can consider that position to be held by the `l1Owner`
*/
-abstract contract CrossDomainForwarder is ForwarderInterface, CrossDomainOwnable {
-
-}
+abstract contract CrossDomainForwarder is ForwarderInterface, CrossDomainOwnable {}
diff --git a/contracts/src/v0.8/l2ep/dev/arbitrum/ArbitrumSequencerUptimeFeed.sol b/contracts/src/v0.8/l2ep/dev/arbitrum/ArbitrumSequencerUptimeFeed.sol
index 6d8d31a8085..5250fbda278 100644
--- a/contracts/src/v0.8/l2ep/dev/arbitrum/ArbitrumSequencerUptimeFeed.sol
+++ b/contracts/src/v0.8/l2ep/dev/arbitrum/ArbitrumSequencerUptimeFeed.sol
@@ -2,9 +2,9 @@
pragma solidity ^0.8.4;
import {AddressAliasHelper} from "../../../vendor/arb-bridge-eth/v0.8.0-custom/contracts/libraries/AddressAliasHelper.sol";
-import {AggregatorInterface} from "../../../interfaces/AggregatorInterface.sol";
-import {AggregatorV3Interface} from "../../../interfaces/AggregatorV3Interface.sol";
-import {AggregatorV2V3Interface} from "../../../interfaces/AggregatorV2V3Interface.sol";
+import {AggregatorInterface} from "../../../shared/interfaces/AggregatorInterface.sol";
+import {AggregatorV3Interface} from "../../../shared/interfaces/AggregatorV3Interface.sol";
+import {AggregatorV2V3Interface} from "../../../shared/interfaces/AggregatorV2V3Interface.sol";
import {TypeAndVersionInterface} from "../../../interfaces/TypeAndVersionInterface.sol";
import {FlagsInterface} from "../interfaces/FlagsInterface.sol";
import {ArbitrumSequencerUptimeFeedInterface} from "../interfaces/ArbitrumSequencerUptimeFeedInterface.sol";
diff --git a/contracts/src/v0.8/l2ep/dev/arbitrum/ArbitrumValidator.sol b/contracts/src/v0.8/l2ep/dev/arbitrum/ArbitrumValidator.sol
index 3b5fd277e56..2043ffa6287 100644
--- a/contracts/src/v0.8/l2ep/dev/arbitrum/ArbitrumValidator.sol
+++ b/contracts/src/v0.8/l2ep/dev/arbitrum/ArbitrumValidator.sol
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
-import {AggregatorValidatorInterface} from "../../../interfaces/AggregatorValidatorInterface.sol";
+import {AggregatorValidatorInterface} from "../../../shared/interfaces/AggregatorValidatorInterface.sol";
import {TypeAndVersionInterface} from "../../../interfaces/TypeAndVersionInterface.sol";
import {AccessControllerInterface} from "../../../shared/interfaces/AccessControllerInterface.sol";
import {SimpleWriteAccessController} from "../../../shared/access/SimpleWriteAccessController.sol";
diff --git a/contracts/src/v0.8/l2ep/dev/optimism/OptimismSequencerUptimeFeed.sol b/contracts/src/v0.8/l2ep/dev/optimism/OptimismSequencerUptimeFeed.sol
index b522a600bab..fcf6093e3cd 100644
--- a/contracts/src/v0.8/l2ep/dev/optimism/OptimismSequencerUptimeFeed.sol
+++ b/contracts/src/v0.8/l2ep/dev/optimism/OptimismSequencerUptimeFeed.sol
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.4;
-import {AggregatorInterface} from "../../../interfaces/AggregatorInterface.sol";
-import {AggregatorV3Interface} from "../../../interfaces/AggregatorV3Interface.sol";
-import {AggregatorV2V3Interface} from "../../../interfaces/AggregatorV2V3Interface.sol";
+import {AggregatorInterface} from "../../../shared/interfaces/AggregatorInterface.sol";
+import {AggregatorV3Interface} from "../../../shared/interfaces/AggregatorV3Interface.sol";
+import {AggregatorV2V3Interface} from "../../../shared/interfaces/AggregatorV2V3Interface.sol";
import {TypeAndVersionInterface} from "../../../interfaces/TypeAndVersionInterface.sol";
import {OptimismSequencerUptimeFeedInterface} from "./../interfaces/OptimismSequencerUptimeFeedInterface.sol";
import {SimpleReadAccessController} from "../../../shared/access/SimpleReadAccessController.sol";
diff --git a/contracts/src/v0.8/l2ep/dev/optimism/OptimismValidator.sol b/contracts/src/v0.8/l2ep/dev/optimism/OptimismValidator.sol
index a955b7e92ca..e41c61a4536 100644
--- a/contracts/src/v0.8/l2ep/dev/optimism/OptimismValidator.sol
+++ b/contracts/src/v0.8/l2ep/dev/optimism/OptimismValidator.sol
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
-import {AggregatorValidatorInterface} from "../../../interfaces/AggregatorValidatorInterface.sol";
+import {AggregatorValidatorInterface} from "../../../shared/interfaces/AggregatorValidatorInterface.sol";
import {TypeAndVersionInterface} from "../../../interfaces/TypeAndVersionInterface.sol";
import {OptimismSequencerUptimeFeedInterface} from "./../interfaces/OptimismSequencerUptimeFeedInterface.sol";
diff --git a/contracts/src/v0.8/llo-feeds/FeeManager.sol b/contracts/src/v0.8/llo-feeds/FeeManager.sol
index 397605d9b2e..c9981045a4a 100644
--- a/contracts/src/v0.8/llo-feeds/FeeManager.sol
+++ b/contracts/src/v0.8/llo-feeds/FeeManager.sol
@@ -5,7 +5,7 @@ import {ConfirmedOwner} from "../shared/access/ConfirmedOwner.sol";
import {IFeeManager} from "./interfaces/IFeeManager.sol";
import {TypeAndVersionInterface} from "../interfaces/TypeAndVersionInterface.sol";
import {IERC165} from "../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol";
-import {Common} from "../libraries/Common.sol";
+import {Common} from "./libraries/Common.sol";
import {IRewardManager} from "./interfaces/IRewardManager.sol";
import {IWERC20} from "../shared/interfaces/IWERC20.sol";
import {IERC20} from "../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC20.sol";
diff --git a/contracts/src/v0.8/llo-feeds/RewardManager.sol b/contracts/src/v0.8/llo-feeds/RewardManager.sol
index 3777b432fcc..596755142e8 100644
--- a/contracts/src/v0.8/llo-feeds/RewardManager.sol
+++ b/contracts/src/v0.8/llo-feeds/RewardManager.sol
@@ -5,7 +5,7 @@ import {ConfirmedOwner} from "../shared/access/ConfirmedOwner.sol";
import {IRewardManager} from "./interfaces/IRewardManager.sol";
import {IERC20} from "../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC20.sol";
import {TypeAndVersionInterface} from "../interfaces/TypeAndVersionInterface.sol";
-import {Common} from "../libraries/Common.sol";
+import {Common} from "./libraries/Common.sol";
import {SafeERC20} from "../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/utils/SafeERC20.sol";
/**
diff --git a/contracts/src/v0.8/llo-feeds/Verifier.sol b/contracts/src/v0.8/llo-feeds/Verifier.sol
index f7ce156a60b..3e668c09ff0 100644
--- a/contracts/src/v0.8/llo-feeds/Verifier.sol
+++ b/contracts/src/v0.8/llo-feeds/Verifier.sol
@@ -6,7 +6,7 @@ import {IVerifier} from "./interfaces/IVerifier.sol";
import {IVerifierProxy} from "./interfaces/IVerifierProxy.sol";
import {TypeAndVersionInterface} from "../interfaces/TypeAndVersionInterface.sol";
import {IERC165} from "../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol";
-import {Common} from "../libraries/Common.sol";
+import {Common} from "./libraries/Common.sol";
// OCR2 standard
uint256 constant MAX_NUM_ORACLES = 31;
diff --git a/contracts/src/v0.8/llo-feeds/VerifierProxy.sol b/contracts/src/v0.8/llo-feeds/VerifierProxy.sol
index 6abb2b78e98..a35c54573c1 100644
--- a/contracts/src/v0.8/llo-feeds/VerifierProxy.sol
+++ b/contracts/src/v0.8/llo-feeds/VerifierProxy.sol
@@ -8,7 +8,7 @@ import {TypeAndVersionInterface} from "../interfaces/TypeAndVersionInterface.sol
import {AccessControllerInterface} from "../shared/interfaces/AccessControllerInterface.sol";
import {IERC165} from "../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol";
import {IVerifierFeeManager} from "./interfaces/IVerifierFeeManager.sol";
-import {Common} from "../libraries/Common.sol";
+import {Common} from "./libraries/Common.sol";
/**
* The verifier proxy contract is the gateway for all report verification requests
diff --git a/contracts/src/v0.8/llo-feeds/interfaces/IFeeManager.sol b/contracts/src/v0.8/llo-feeds/interfaces/IFeeManager.sol
index 08373a6a5bc..e006f0254eb 100644
--- a/contracts/src/v0.8/llo-feeds/interfaces/IFeeManager.sol
+++ b/contracts/src/v0.8/llo-feeds/interfaces/IFeeManager.sol
@@ -2,7 +2,7 @@
pragma solidity 0.8.16;
import {IERC165} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol";
-import {Common} from "../../libraries/Common.sol";
+import {Common} from "../libraries/Common.sol";
import {IVerifierFeeManager} from "./IVerifierFeeManager.sol";
interface IFeeManager is IERC165, IVerifierFeeManager {
diff --git a/contracts/src/v0.8/llo-feeds/interfaces/IRewardManager.sol b/contracts/src/v0.8/llo-feeds/interfaces/IRewardManager.sol
index a76366a3eb1..7a4d4216715 100644
--- a/contracts/src/v0.8/llo-feeds/interfaces/IRewardManager.sol
+++ b/contracts/src/v0.8/llo-feeds/interfaces/IRewardManager.sol
@@ -2,7 +2,7 @@
pragma solidity 0.8.16;
import {IERC165} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol";
-import {Common} from "../../libraries/Common.sol";
+import {Common} from "../libraries/Common.sol";
interface IRewardManager is IERC165 {
/**
diff --git a/contracts/src/v0.8/llo-feeds/interfaces/IVerifier.sol b/contracts/src/v0.8/llo-feeds/interfaces/IVerifier.sol
index 9b9ba2e6570..9e1e6d314cd 100644
--- a/contracts/src/v0.8/llo-feeds/interfaces/IVerifier.sol
+++ b/contracts/src/v0.8/llo-feeds/interfaces/IVerifier.sol
@@ -2,7 +2,7 @@
pragma solidity 0.8.16;
import {IERC165} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol";
-import {Common} from "../../libraries/Common.sol";
+import {Common} from "../libraries/Common.sol";
interface IVerifier is IERC165 {
/**
diff --git a/contracts/src/v0.8/llo-feeds/interfaces/IVerifierFeeManager.sol b/contracts/src/v0.8/llo-feeds/interfaces/IVerifierFeeManager.sol
index e5a73e612cb..323b8a2cf00 100644
--- a/contracts/src/v0.8/llo-feeds/interfaces/IVerifierFeeManager.sol
+++ b/contracts/src/v0.8/llo-feeds/interfaces/IVerifierFeeManager.sol
@@ -2,7 +2,7 @@
pragma solidity 0.8.16;
import {IERC165} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol";
-import {Common} from "../../libraries/Common.sol";
+import {Common} from "../libraries/Common.sol";
interface IVerifierFeeManager is IERC165 {
/**
diff --git a/contracts/src/v0.8/llo-feeds/interfaces/IVerifierProxy.sol b/contracts/src/v0.8/llo-feeds/interfaces/IVerifierProxy.sol
index c2665261e9a..d86bb46dd9c 100644
--- a/contracts/src/v0.8/llo-feeds/interfaces/IVerifierProxy.sol
+++ b/contracts/src/v0.8/llo-feeds/interfaces/IVerifierProxy.sol
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: MIT
pragma solidity 0.8.16;
-import {Common} from "../../libraries/Common.sol";
+import {Common} from "../libraries/Common.sol";
import {AccessControllerInterface} from "../../shared/interfaces/AccessControllerInterface.sol";
import {IVerifierFeeManager} from "./IVerifierFeeManager.sol";
diff --git a/contracts/src/v0.8/libraries/ByteUtil.sol b/contracts/src/v0.8/llo-feeds/libraries/ByteUtil.sol
similarity index 100%
rename from contracts/src/v0.8/libraries/ByteUtil.sol
rename to contracts/src/v0.8/llo-feeds/libraries/ByteUtil.sol
diff --git a/contracts/src/v0.8/libraries/Common.sol b/contracts/src/v0.8/llo-feeds/libraries/Common.sol
similarity index 100%
rename from contracts/src/v0.8/libraries/Common.sol
rename to contracts/src/v0.8/llo-feeds/libraries/Common.sol
diff --git a/contracts/src/v0.8/libraries/test/ByteUtilTest.t.sol b/contracts/src/v0.8/llo-feeds/test/ByteUtilTest.t.sol
similarity index 99%
rename from contracts/src/v0.8/libraries/test/ByteUtilTest.t.sol
rename to contracts/src/v0.8/llo-feeds/test/ByteUtilTest.t.sol
index 0629d0235ee..b4e87364ac9 100644
--- a/contracts/src/v0.8/libraries/test/ByteUtilTest.t.sol
+++ b/contracts/src/v0.8/llo-feeds/test/ByteUtilTest.t.sol
@@ -2,7 +2,7 @@
pragma solidity 0.8.16;
import {Test} from "forge-std/Test.sol";
-import {ByteUtil} from "../ByteUtil.sol";
+import {ByteUtil} from "../libraries/ByteUtil.sol";
contract ByteUtilTest is Test {
using ByteUtil for bytes;
diff --git a/contracts/src/v0.8/llo-feeds/test/fee-manager/BaseFeeManager.t.sol b/contracts/src/v0.8/llo-feeds/test/fee-manager/BaseFeeManager.t.sol
index ec2611f9e46..db0b3d8b3d9 100644
--- a/contracts/src/v0.8/llo-feeds/test/fee-manager/BaseFeeManager.t.sol
+++ b/contracts/src/v0.8/llo-feeds/test/fee-manager/BaseFeeManager.t.sol
@@ -4,7 +4,7 @@ pragma solidity 0.8.16;
import {Test} from "forge-std/Test.sol";
import {FeeManager} from "../../FeeManager.sol";
import {RewardManager} from "../../RewardManager.sol";
-import {Common} from "../../../libraries/Common.sol";
+import {Common} from "../../libraries/Common.sol";
import {ERC20Mock} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/mocks/ERC20Mock.sol";
import {WERC20Mock} from "../../../shared/mocks/WERC20Mock.sol";
import {IRewardManager} from "../../interfaces/IRewardManager.sol";
diff --git a/contracts/src/v0.8/llo-feeds/test/fee-manager/FeeManager.getFeeAndReward.t.sol b/contracts/src/v0.8/llo-feeds/test/fee-manager/FeeManager.getFeeAndReward.t.sol
index 801a1e39925..6a24806353d 100644
--- a/contracts/src/v0.8/llo-feeds/test/fee-manager/FeeManager.getFeeAndReward.t.sol
+++ b/contracts/src/v0.8/llo-feeds/test/fee-manager/FeeManager.getFeeAndReward.t.sol
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity 0.8.16;
-import {Common} from "../../../libraries/Common.sol";
+import {Common} from "../../libraries/Common.sol";
import "./BaseFeeManager.t.sol";
/**
diff --git a/contracts/src/v0.8/llo-feeds/test/fee-manager/FeeManager.processFee.t.sol b/contracts/src/v0.8/llo-feeds/test/fee-manager/FeeManager.processFee.t.sol
index 9c2bd711ed5..e0093b88a4f 100644
--- a/contracts/src/v0.8/llo-feeds/test/fee-manager/FeeManager.processFee.t.sol
+++ b/contracts/src/v0.8/llo-feeds/test/fee-manager/FeeManager.processFee.t.sol
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity 0.8.16;
-import {Common} from "../../../libraries/Common.sol";
+import {Common} from "../../libraries/Common.sol";
import "./BaseFeeManager.t.sol";
import {IRewardManager} from "../../interfaces/IRewardManager.sol";
diff --git a/contracts/src/v0.8/llo-feeds/test/gas/Gas_VerifierTest.t.sol b/contracts/src/v0.8/llo-feeds/test/gas/Gas_VerifierTest.t.sol
index 3198576e8aa..6938437b013 100644
--- a/contracts/src/v0.8/llo-feeds/test/gas/Gas_VerifierTest.t.sol
+++ b/contracts/src/v0.8/llo-feeds/test/gas/Gas_VerifierTest.t.sol
@@ -3,7 +3,7 @@ pragma solidity 0.8.16;
import {BaseTest, BaseTestWithConfiguredVerifierAndFeeManager} from "../verifier/BaseVerifierTest.t.sol";
import {SimpleWriteAccessController} from "../../../shared/access/SimpleWriteAccessController.sol";
-import {Common} from "../../../libraries/Common.sol";
+import {Common} from "../../libraries/Common.sol";
import {IRewardManager} from "../../interfaces/IRewardManager.sol";
contract Verifier_setConfig is BaseTest {
diff --git a/contracts/src/v0.8/llo-feeds/test/mocks/ErroredVerifier.sol b/contracts/src/v0.8/llo-feeds/test/mocks/ErroredVerifier.sol
index a0a404d88d7..770b7b809d0 100644
--- a/contracts/src/v0.8/llo-feeds/test/mocks/ErroredVerifier.sol
+++ b/contracts/src/v0.8/llo-feeds/test/mocks/ErroredVerifier.sol
@@ -2,7 +2,7 @@
pragma solidity 0.8.16;
import {IVerifier} from "../../interfaces/IVerifier.sol";
-import {Common} from "../../../libraries/Common.sol";
+import {Common} from "../../libraries/Common.sol";
contract ErroredVerifier is IVerifier {
function supportsInterface(bytes4 interfaceId) public pure override returns (bool) {
diff --git a/contracts/src/v0.8/llo-feeds/test/reward-manager/BaseRewardManager.t.sol b/contracts/src/v0.8/llo-feeds/test/reward-manager/BaseRewardManager.t.sol
index 3e50adef95c..a9953d73c74 100644
--- a/contracts/src/v0.8/llo-feeds/test/reward-manager/BaseRewardManager.t.sol
+++ b/contracts/src/v0.8/llo-feeds/test/reward-manager/BaseRewardManager.t.sol
@@ -4,7 +4,7 @@ pragma solidity 0.8.16;
import {Test} from "forge-std/Test.sol";
import {ERC20Mock} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/mocks/ERC20Mock.sol";
import {RewardManager} from "../../RewardManager.sol";
-import {Common} from "../../../libraries/Common.sol";
+import {Common} from "../../libraries/Common.sol";
import {IRewardManager} from "../../interfaces/IRewardManager.sol";
/**
diff --git a/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.claim.t.sol b/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.claim.t.sol
index 9a3749d1dde..a6c98c03031 100644
--- a/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.claim.t.sol
+++ b/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.claim.t.sol
@@ -2,7 +2,7 @@
pragma solidity 0.8.16;
import {BaseRewardManagerTest} from "./BaseRewardManager.t.sol";
-import {Common} from "../../../libraries/Common.sol";
+import {Common} from "../../libraries/Common.sol";
/**
* @title BaseRewardManagerTest
diff --git a/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.setRecipients.t.sol b/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.setRecipients.t.sol
index 0e45ba00da4..a8cf6260f5c 100644
--- a/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.setRecipients.t.sol
+++ b/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.setRecipients.t.sol
@@ -2,7 +2,7 @@
pragma solidity 0.8.16;
import {BaseRewardManagerTest} from "./BaseRewardManager.t.sol";
-import {Common} from "../../../libraries/Common.sol";
+import {Common} from "../../libraries/Common.sol";
/**
* @title BaseRewardManagerTest
diff --git a/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.updateRewardRecipients.t.sol b/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.updateRewardRecipients.t.sol
index 4b3063ac016..b1836e0fb93 100644
--- a/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.updateRewardRecipients.t.sol
+++ b/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.updateRewardRecipients.t.sol
@@ -2,7 +2,7 @@
pragma solidity 0.8.16;
import {BaseRewardManagerTest} from "./BaseRewardManager.t.sol";
-import {Common} from "../../../libraries/Common.sol";
+import {Common} from "../../libraries/Common.sol";
/**
* @title BaseRewardManagerTest
diff --git a/contracts/src/v0.8/llo-feeds/test/verifier/BaseVerifierTest.t.sol b/contracts/src/v0.8/llo-feeds/test/verifier/BaseVerifierTest.t.sol
index 91e0f9da906..34e2090115d 100644
--- a/contracts/src/v0.8/llo-feeds/test/verifier/BaseVerifierTest.t.sol
+++ b/contracts/src/v0.8/llo-feeds/test/verifier/BaseVerifierTest.t.sol
@@ -10,7 +10,7 @@ import {Verifier} from "../../Verifier.sol";
import {Strings} from "@openzeppelin/contracts/utils/Strings.sol";
import {AccessControllerInterface} from "../../../shared/interfaces/AccessControllerInterface.sol";
import {FeeManager} from "../../FeeManager.sol";
-import {Common} from "../../../libraries/Common.sol";
+import {Common} from "../../libraries/Common.sol";
import {ERC20Mock} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/mocks/ERC20Mock.sol";
import {WERC20Mock} from "../../../shared/mocks/WERC20Mock.sol";
import {FeeManager} from "../../FeeManager.sol";
diff --git a/contracts/src/v0.8/llo-feeds/test/verifier/VerifierProxySetVerifierTest.t.sol b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierProxySetVerifierTest.t.sol
index a6b23d7e8b4..17fc49979e7 100644
--- a/contracts/src/v0.8/llo-feeds/test/verifier/VerifierProxySetVerifierTest.t.sol
+++ b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierProxySetVerifierTest.t.sol
@@ -5,7 +5,7 @@ import {BaseTestWithConfiguredVerifierAndFeeManager} from "./BaseVerifierTest.t.
import {IVerifier} from "../../interfaces/IVerifier.sol";
import {VerifierProxy} from "../../VerifierProxy.sol";
import {IERC165} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol";
-import {Common} from "../../../libraries/Common.sol";
+import {Common} from "../../libraries/Common.sol";
contract VerifierProxyInitializeVerifierTest is BaseTestWithConfiguredVerifierAndFeeManager {
function test_revertsIfNotCorrectVerifier() public {
diff --git a/contracts/src/v0.8/llo-feeds/test/verifier/VerifierSetConfigFromSourceTest.t.sol b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierSetConfigFromSourceTest.t.sol
index 6c5eac9b6dd..ba3acce0eec 100644
--- a/contracts/src/v0.8/llo-feeds/test/verifier/VerifierSetConfigFromSourceTest.t.sol
+++ b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierSetConfigFromSourceTest.t.sol
@@ -2,7 +2,7 @@
pragma solidity 0.8.16;
import {BaseTest, BaseTestWithMultipleConfiguredDigests} from "./BaseVerifierTest.t.sol";
-import {Common} from "../../../libraries/Common.sol";
+import {Common} from "../../libraries/Common.sol";
contract VerifierSetConfigFromSourceTest is BaseTest {
function setUp() public virtual override {
diff --git a/contracts/src/v0.8/llo-feeds/test/verifier/VerifierSetConfigTest.t.sol b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierSetConfigTest.t.sol
index f0b045e7f30..374a976786b 100644
--- a/contracts/src/v0.8/llo-feeds/test/verifier/VerifierSetConfigTest.t.sol
+++ b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierSetConfigTest.t.sol
@@ -3,7 +3,7 @@ pragma solidity 0.8.16;
import {BaseTest, BaseTestWithMultipleConfiguredDigests} from "./BaseVerifierTest.t.sol";
import {Verifier} from "../../Verifier.sol";
-import {Common} from "../../../libraries/Common.sol";
+import {Common} from "../../libraries/Common.sol";
contract VerifierSetConfigTest is BaseTest {
function setUp() public virtual override {
diff --git a/contracts/src/v0.8/llo-feeds/test/verifier/VerifierVerifyTest.t.sol b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierVerifyTest.t.sol
index b4fcac75d3a..34e02bcfb95 100644
--- a/contracts/src/v0.8/llo-feeds/test/verifier/VerifierVerifyTest.t.sol
+++ b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierVerifyTest.t.sol
@@ -5,7 +5,7 @@ import {BaseTestWithConfiguredVerifierAndFeeManager} from "./BaseVerifierTest.t.
import {Verifier} from "../../Verifier.sol";
import {VerifierProxy} from "../../VerifierProxy.sol";
import {AccessControllerInterface} from "../../../shared/interfaces/AccessControllerInterface.sol";
-import {Common} from "../../../libraries/Common.sol";
+import {Common} from "../../libraries/Common.sol";
contract VerifierVerifyTest is BaseTestWithConfiguredVerifierAndFeeManager {
bytes32[3] internal s_reportContext;
diff --git a/contracts/src/v0.8/mocks/MockAggregatorValidator.sol b/contracts/src/v0.8/mocks/MockAggregatorValidator.sol
index a43236de9ff..bdc935cd231 100644
--- a/contracts/src/v0.8/mocks/MockAggregatorValidator.sol
+++ b/contracts/src/v0.8/mocks/MockAggregatorValidator.sol
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
-import "../interfaces/AggregatorValidatorInterface.sol";
+import "../shared/interfaces/AggregatorValidatorInterface.sol";
contract MockAggregatorValidator is AggregatorValidatorInterface {
uint8 immutable id;
diff --git a/contracts/src/v0.8/operatorforwarder/dev/AuthorizedReceiver.sol b/contracts/src/v0.8/operatorforwarder/dev/AuthorizedReceiver.sol
index 04d2635d583..bc5f1c0e7e4 100644
--- a/contracts/src/v0.8/operatorforwarder/dev/AuthorizedReceiver.sol
+++ b/contracts/src/v0.8/operatorforwarder/dev/AuthorizedReceiver.sol
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: MIT
pragma solidity 0.8.19;
-import {AuthorizedReceiverInterface} from "../../interfaces/AuthorizedReceiverInterface.sol";
+import {AuthorizedReceiverInterface} from "./interfaces/AuthorizedReceiverInterface.sol";
// solhint-disable custom-errors
abstract contract AuthorizedReceiver is AuthorizedReceiverInterface {
diff --git a/contracts/src/v0.8/operatorforwarder/dev/Operator.sol b/contracts/src/v0.8/operatorforwarder/dev/Operator.sol
index b83996a9ed9..c8451bf03ce 100644
--- a/contracts/src/v0.8/operatorforwarder/dev/Operator.sol
+++ b/contracts/src/v0.8/operatorforwarder/dev/Operator.sol
@@ -5,7 +5,7 @@ import {AuthorizedReceiver} from "./AuthorizedReceiver.sol";
import {LinkTokenReceiver} from "./LinkTokenReceiver.sol";
import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol";
import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol";
-import {AuthorizedReceiverInterface} from "../../interfaces/AuthorizedReceiverInterface.sol";
+import {AuthorizedReceiverInterface} from "./interfaces/AuthorizedReceiverInterface.sol";
import {OperatorInterface} from "../../interfaces/OperatorInterface.sol";
import {IOwnable} from "../../shared/interfaces/IOwnable.sol";
import {WithdrawalInterface} from "./interfaces/WithdrawalInterface.sol";
diff --git a/contracts/src/v0.8/interfaces/AuthorizedReceiverInterface.sol b/contracts/src/v0.8/operatorforwarder/dev/interfaces/AuthorizedReceiverInterface.sol
similarity index 100%
rename from contracts/src/v0.8/interfaces/AuthorizedReceiverInterface.sol
rename to contracts/src/v0.8/operatorforwarder/dev/interfaces/AuthorizedReceiverInterface.sol
diff --git a/contracts/src/v0.8/interfaces/AggregatorInterface.sol b/contracts/src/v0.8/shared/interfaces/AggregatorInterface.sol
similarity index 100%
rename from contracts/src/v0.8/interfaces/AggregatorInterface.sol
rename to contracts/src/v0.8/shared/interfaces/AggregatorInterface.sol
diff --git a/contracts/src/v0.8/interfaces/AggregatorV2V3Interface.sol b/contracts/src/v0.8/shared/interfaces/AggregatorV2V3Interface.sol
similarity index 100%
rename from contracts/src/v0.8/interfaces/AggregatorV2V3Interface.sol
rename to contracts/src/v0.8/shared/interfaces/AggregatorV2V3Interface.sol
diff --git a/contracts/src/v0.8/interfaces/AggregatorV3Interface.sol b/contracts/src/v0.8/shared/interfaces/AggregatorV3Interface.sol
similarity index 100%
rename from contracts/src/v0.8/interfaces/AggregatorV3Interface.sol
rename to contracts/src/v0.8/shared/interfaces/AggregatorV3Interface.sol
diff --git a/contracts/src/v0.8/interfaces/AggregatorValidatorInterface.sol b/contracts/src/v0.8/shared/interfaces/AggregatorValidatorInterface.sol
similarity index 100%
rename from contracts/src/v0.8/interfaces/AggregatorValidatorInterface.sol
rename to contracts/src/v0.8/shared/interfaces/AggregatorValidatorInterface.sol
diff --git a/contracts/src/v0.8/shared/token/ERC677/ERC677.sol b/contracts/src/v0.8/shared/token/ERC677/ERC677.sol
index 9a68bac3a11..aa75a1170c7 100644
--- a/contracts/src/v0.8/shared/token/ERC677/ERC677.sol
+++ b/contracts/src/v0.8/shared/token/ERC677/ERC677.sol
@@ -2,7 +2,7 @@
pragma solidity ^0.8.0;
import {IERC677} from "./IERC677.sol";
-import {IERC677Receiver} from "./IERC677Receiver.sol";
+import {IERC677Receiver} from "../../interfaces/IERC677Receiver.sol";
import {Address} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/Address.sol";
import {ERC20} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/ERC20.sol";
diff --git a/contracts/src/v0.8/shared/token/ERC677/IERC677Receiver.sol b/contracts/src/v0.8/shared/token/ERC677/IERC677Receiver.sol
deleted file mode 100644
index 2e44d860a82..00000000000
--- a/contracts/src/v0.8/shared/token/ERC677/IERC677Receiver.sol
+++ /dev/null
@@ -1,6 +0,0 @@
-// SPDX-License-Identifier: MIT
-pragma solidity ^0.8.0;
-
-interface IERC677Receiver {
- function onTokenTransfer(address sender, uint256 amount, bytes calldata data) external;
-}
diff --git a/contracts/src/v0.8/tests/FeedConsumer.sol b/contracts/src/v0.8/tests/FeedConsumer.sol
index 3c9462b0ac5..c9fc62357a6 100644
--- a/contracts/src/v0.8/tests/FeedConsumer.sol
+++ b/contracts/src/v0.8/tests/FeedConsumer.sol
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
-import {AggregatorV2V3Interface} from "../interfaces/AggregatorV2V3Interface.sol";
+import {AggregatorV2V3Interface} from "../shared/interfaces/AggregatorV2V3Interface.sol";
contract FeedConsumer {
AggregatorV2V3Interface public immutable AGGREGATOR;
diff --git a/contracts/src/v0.8/tests/MockETHLINKAggregator.sol b/contracts/src/v0.8/tests/MockETHLINKAggregator.sol
index 98dd775a117..d685aac7314 100644
--- a/contracts/src/v0.8/tests/MockETHLINKAggregator.sol
+++ b/contracts/src/v0.8/tests/MockETHLINKAggregator.sol
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
-import "../interfaces/AggregatorV3Interface.sol";
+import "../shared/interfaces/AggregatorV3Interface.sol";
contract MockETHLINKAggregator is AggregatorV3Interface {
int256 public answer;
diff --git a/contracts/src/v0.8/tests/MockV3Aggregator.sol b/contracts/src/v0.8/tests/MockV3Aggregator.sol
index d261b2c4b14..9822d23e853 100644
--- a/contracts/src/v0.8/tests/MockV3Aggregator.sol
+++ b/contracts/src/v0.8/tests/MockV3Aggregator.sol
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
-import "../interfaces/AggregatorV2V3Interface.sol";
+import "../shared/interfaces/AggregatorV2V3Interface.sol";
/**
* @title MockV3Aggregator
diff --git a/contracts/src/v0.8/transmission/dev/ERC-4337/Paymaster.sol b/contracts/src/v0.8/transmission/dev/ERC-4337/Paymaster.sol
index cd84bb6a0e0..970ddf6b7e6 100644
--- a/contracts/src/v0.8/transmission/dev/ERC-4337/Paymaster.sol
+++ b/contracts/src/v0.8/transmission/dev/ERC-4337/Paymaster.sol
@@ -4,7 +4,7 @@ pragma solidity ^0.8.15;
import {IPaymaster} from "../../../vendor/entrypoint/interfaces/IPaymaster.sol";
import {SCALibrary} from "./SCALibrary.sol";
import {LinkTokenInterface} from "../../../shared/interfaces/LinkTokenInterface.sol";
-import {AggregatorV3Interface} from "../../../interfaces/AggregatorV3Interface.sol";
+import {AggregatorV3Interface} from "../../../shared/interfaces/AggregatorV3Interface.sol";
import {ConfirmedOwner} from "../../../shared/access/ConfirmedOwner.sol";
import {UserOperation} from "../../../vendor/entrypoint/interfaces/UserOperation.sol";
import {_packValidationData} from "../../../vendor/entrypoint/core/Helpers.sol";
diff --git a/contracts/src/v0.8/vrf/VRFCoordinatorV2.sol b/contracts/src/v0.8/vrf/VRFCoordinatorV2.sol
index 5150d263a8b..5acd3e74358 100644
--- a/contracts/src/v0.8/vrf/VRFCoordinatorV2.sol
+++ b/contracts/src/v0.8/vrf/VRFCoordinatorV2.sol
@@ -3,7 +3,7 @@ pragma solidity ^0.8.4;
import {LinkTokenInterface} from "../shared/interfaces/LinkTokenInterface.sol";
import {BlockhashStoreInterface} from "./interfaces/BlockhashStoreInterface.sol";
-import {AggregatorV3Interface} from "../interfaces/AggregatorV3Interface.sol";
+import {AggregatorV3Interface} from "../shared/interfaces/AggregatorV3Interface.sol";
import {VRFCoordinatorV2Interface} from "./interfaces/VRFCoordinatorV2Interface.sol";
import {TypeAndVersionInterface} from "../interfaces/TypeAndVersionInterface.sol";
import {IERC677Receiver} from "../shared/interfaces/IERC677Receiver.sol";
diff --git a/contracts/src/v0.8/vrf/VRFV2Wrapper.sol b/contracts/src/v0.8/vrf/VRFV2Wrapper.sol
index 805c8d76cb6..abe479cb20a 100644
--- a/contracts/src/v0.8/vrf/VRFV2Wrapper.sol
+++ b/contracts/src/v0.8/vrf/VRFV2Wrapper.sol
@@ -6,7 +6,7 @@ import {ConfirmedOwner} from "../shared/access/ConfirmedOwner.sol";
import {TypeAndVersionInterface} from "../interfaces/TypeAndVersionInterface.sol";
import {VRFConsumerBaseV2} from "./VRFConsumerBaseV2.sol";
import {LinkTokenInterface} from "../shared/interfaces/LinkTokenInterface.sol";
-import {AggregatorV3Interface} from "../interfaces/AggregatorV3Interface.sol";
+import {AggregatorV3Interface} from "../shared/interfaces/AggregatorV3Interface.sol";
import {VRFCoordinatorV2Interface} from "./interfaces/VRFCoordinatorV2Interface.sol";
import {VRFV2WrapperInterface} from "./interfaces/VRFV2WrapperInterface.sol";
import {VRFV2WrapperConsumerBase} from "./VRFV2WrapperConsumerBase.sol";
diff --git a/contracts/src/v0.8/vrf/dev/SubscriptionAPI.sol b/contracts/src/v0.8/vrf/dev/SubscriptionAPI.sol
index e4708bb1fcf..d7cc5b86c5a 100644
--- a/contracts/src/v0.8/vrf/dev/SubscriptionAPI.sol
+++ b/contracts/src/v0.8/vrf/dev/SubscriptionAPI.sol
@@ -4,7 +4,7 @@ pragma solidity ^0.8.0;
import {EnumerableSet} from "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol";
import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol";
import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol";
-import {AggregatorV3Interface} from "../../interfaces/AggregatorV3Interface.sol";
+import {AggregatorV3Interface} from "../../shared/interfaces/AggregatorV3Interface.sol";
import {IERC677Receiver} from "../../shared/interfaces/IERC677Receiver.sol";
import {IVRFSubscriptionV2Plus} from "./interfaces/IVRFSubscriptionV2Plus.sol";
diff --git a/contracts/src/v0.8/vrf/dev/VRFV2PlusWrapper.sol b/contracts/src/v0.8/vrf/dev/VRFV2PlusWrapper.sol
index 9c3b983e300..a724b70a3d8 100644
--- a/contracts/src/v0.8/vrf/dev/VRFV2PlusWrapper.sol
+++ b/contracts/src/v0.8/vrf/dev/VRFV2PlusWrapper.sol
@@ -6,7 +6,7 @@ import {TypeAndVersionInterface} from "../../interfaces/TypeAndVersionInterface.
import {IVRFV2PlusMigrate} from "./interfaces/IVRFV2PlusMigrate.sol";
import {VRFConsumerBaseV2Plus} from "./VRFConsumerBaseV2Plus.sol";
import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol";
-import {AggregatorV3Interface} from "../../interfaces/AggregatorV3Interface.sol";
+import {AggregatorV3Interface} from "../../shared/interfaces/AggregatorV3Interface.sol";
import {VRFV2PlusClient} from "./libraries/VRFV2PlusClient.sol";
import {IVRFV2PlusWrapper} from "./interfaces/IVRFV2PlusWrapper.sol";
import {VRFV2PlusWrapperConsumerBase} from "./VRFV2PlusWrapperConsumerBase.sol";
diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFCoordinatorV2TestHelper.sol b/contracts/src/v0.8/vrf/testhelpers/VRFCoordinatorV2TestHelper.sol
index f9385329686..c5d1d90c126 100644
--- a/contracts/src/v0.8/vrf/testhelpers/VRFCoordinatorV2TestHelper.sol
+++ b/contracts/src/v0.8/vrf/testhelpers/VRFCoordinatorV2TestHelper.sol
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
-import {AggregatorV3Interface} from "../../interfaces/AggregatorV3Interface.sol";
+import {AggregatorV3Interface} from "../../shared/interfaces/AggregatorV3Interface.sol";
// Ideally this contract should inherit from VRFCoordinatorV2 and delegate calls to VRFCoordinatorV2
// However, due to exceeding contract size limit, the logic from VRFCoordinatorV2 is ported over to this contract
diff --git a/contracts/test/v0.6/AggregatorFacade.test.ts b/contracts/test/v0.6/AggregatorFacade.test.ts
deleted file mode 100644
index f85c24ae6cd..00000000000
--- a/contracts/test/v0.6/AggregatorFacade.test.ts
+++ /dev/null
@@ -1,167 +0,0 @@
-import { ethers } from 'hardhat'
-import { numToBytes32, publicAbi } from '../test-helpers/helpers'
-import { assert } from 'chai'
-import { Contract, ContractFactory, Signer } from 'ethers'
-import { getUsers } from '../test-helpers/setup'
-import { convertFufillParams, decodeRunRequest } from '../test-helpers/oracle'
-import { bigNumEquals, evmRevert } from '../test-helpers/matchers'
-
-let defaultAccount: Signer
-
-let linkTokenFactory: ContractFactory
-let aggregatorFactory: ContractFactory
-let oracleFactory: ContractFactory
-let aggregatorFacadeFactory: ContractFactory
-
-before(async () => {
- const users = await getUsers()
-
- defaultAccount = users.roles.defaultAccount
- linkTokenFactory = await ethers.getContractFactory(
- 'src/v0.4/LinkToken.sol:LinkToken',
- defaultAccount,
- )
- aggregatorFactory = await ethers.getContractFactory(
- 'src/v0.4/Aggregator.sol:Aggregator',
- defaultAccount,
- )
- oracleFactory = await ethers.getContractFactory(
- 'src/v0.6/Oracle.sol:Oracle',
- defaultAccount,
- )
- aggregatorFacadeFactory = await ethers.getContractFactory(
- 'src/v0.6/AggregatorFacade.sol:AggregatorFacade',
- defaultAccount,
- )
-})
-
-describe('AggregatorFacade', () => {
- const jobId1 =
- '0x4c7b7ffb66b344fbaa64995af81e355a00000000000000000000000000000001'
- const previousResponse = numToBytes32(54321)
- const response = numToBytes32(67890)
- const decimals = 18
- const description = 'LINK / USD: Historic Aggregator Facade'
-
- let link: Contract
- let aggregator: Contract
- let oc1: Contract
- let facade: Contract
-
- beforeEach(async () => {
- link = await linkTokenFactory.connect(defaultAccount).deploy()
- oc1 = await oracleFactory.connect(defaultAccount).deploy(link.address)
- aggregator = await aggregatorFactory
- .connect(defaultAccount)
- .deploy(link.address, 0, 1, [oc1.address], [jobId1])
- facade = await aggregatorFacadeFactory
- .connect(defaultAccount)
- .deploy(aggregator.address, decimals, description)
-
- let requestTx = await aggregator.requestRateUpdate()
- let receipt = await requestTx.wait()
- let request = decodeRunRequest(receipt.logs?.[3])
- await oc1.fulfillOracleRequest(
- ...convertFufillParams(request, previousResponse),
- )
- requestTx = await aggregator.requestRateUpdate()
- receipt = await requestTx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
- await oc1.fulfillOracleRequest(...convertFufillParams(request, response))
- })
-
- it('has a limited public interface [ @skip-coverage ]', () => {
- publicAbi(facade, [
- 'aggregator',
- 'decimals',
- 'description',
- 'getAnswer',
- 'getRoundData',
- 'getTimestamp',
- 'latestAnswer',
- 'latestRound',
- 'latestRoundData',
- 'latestTimestamp',
- 'version',
- ])
- })
-
- describe('#constructor', () => {
- it('uses the decimals set in the constructor', async () => {
- bigNumEquals(decimals, await facade.decimals())
- })
-
- it('uses the description set in the constructor', async () => {
- assert.equal(description, await facade.description())
- })
-
- it('sets the version to 2', async () => {
- bigNumEquals(2, await facade.version())
- })
- })
-
- describe('#getAnswer/latestAnswer', () => {
- it('pulls the rate from the aggregator', async () => {
- bigNumEquals(response, await facade.latestAnswer())
- const latestRound = await facade.latestRound()
- bigNumEquals(response, await facade.getAnswer(latestRound))
- })
- })
-
- describe('#getTimestamp/latestTimestamp', () => {
- it('pulls the timestamp from the aggregator', async () => {
- const height = await aggregator.latestTimestamp()
- assert.notEqual('0', height.toString())
- bigNumEquals(height, await facade.latestTimestamp())
- const latestRound = await facade.latestRound()
- bigNumEquals(
- await aggregator.latestTimestamp(),
- await facade.getTimestamp(latestRound),
- )
- })
- })
-
- describe('#getRoundData', () => {
- it('assembles the requested round data', async () => {
- const previousId = (await facade.latestRound()).sub(1)
- const round = await facade.getRoundData(previousId)
- bigNumEquals(previousId, round.roundId)
- bigNumEquals(previousResponse, round.answer)
- bigNumEquals(await facade.getTimestamp(previousId), round.startedAt)
- bigNumEquals(await facade.getTimestamp(previousId), round.updatedAt)
- bigNumEquals(previousId, round.answeredInRound)
- })
-
- it('returns zero data for non-existing rounds', async () => {
- const roundId = 13371337
- await evmRevert(facade.getRoundData(roundId), 'No data present')
- })
- })
-
- describe('#latestRoundData', () => {
- it('assembles the requested round data', async () => {
- const latestId = await facade.latestRound()
- const round = await facade.latestRoundData()
- bigNumEquals(latestId, round.roundId)
- bigNumEquals(response, round.answer)
- bigNumEquals(await facade.getTimestamp(latestId), round.startedAt)
- bigNumEquals(await facade.getTimestamp(latestId), round.updatedAt)
- bigNumEquals(latestId, round.answeredInRound)
- })
-
- describe('when there is no latest round', () => {
- beforeEach(async () => {
- aggregator = await aggregatorFactory
- .connect(defaultAccount)
- .deploy(link.address, 0, 1, [oc1.address], [jobId1])
- facade = await aggregatorFacadeFactory
- .connect(defaultAccount)
- .deploy(aggregator.address, decimals, description)
- })
-
- it('assembles the requested round data', async () => {
- await evmRevert(facade.latestRoundData(), 'No data present')
- })
- })
- })
-})
diff --git a/contracts/test/v0.6/BasicConsumer.test.ts b/contracts/test/v0.6/BasicConsumer.test.ts
deleted file mode 100644
index ce0b7c643e2..00000000000
--- a/contracts/test/v0.6/BasicConsumer.test.ts
+++ /dev/null
@@ -1,243 +0,0 @@
-import { ethers } from 'hardhat'
-import { toWei, increaseTime5Minutes, toHex } from '../test-helpers/helpers'
-import { assert, expect } from 'chai'
-import { BigNumber, constants, Contract, ContractFactory } from 'ethers'
-import { Roles, getUsers } from '../test-helpers/setup'
-import { bigNumEquals, evmRevert } from '../test-helpers/matchers'
-import {
- convertFufillParams,
- decodeRunRequest,
- encodeOracleRequest,
- RunRequest,
-} from '../test-helpers/oracle'
-import cbor from 'cbor'
-import { makeDebug } from '../test-helpers/debug'
-
-const d = makeDebug('BasicConsumer')
-let basicConsumerFactory: ContractFactory
-let oracleFactory: ContractFactory
-let linkTokenFactory: ContractFactory
-
-let roles: Roles
-
-before(async () => {
- roles = (await getUsers()).roles
- basicConsumerFactory = await ethers.getContractFactory(
- 'src/v0.6/tests/BasicConsumer.sol:BasicConsumer',
- roles.defaultAccount,
- )
- oracleFactory = await ethers.getContractFactory(
- 'src/v0.6/Oracle.sol:Oracle',
- roles.oracleNode,
- )
- linkTokenFactory = await ethers.getContractFactory(
- 'src/v0.4/LinkToken.sol:LinkToken',
- roles.defaultAccount,
- )
-})
-
-describe('BasicConsumer', () => {
- const specId = '0x4c7b7ffb66b344fbaa64995af81e355a'.padEnd(66, '0')
- const currency = 'USD'
- const payment = toWei('1')
- let link: Contract
- let oc: Contract
- let cc: Contract
-
- beforeEach(async () => {
- link = await linkTokenFactory.connect(roles.defaultAccount).deploy()
- oc = await oracleFactory.connect(roles.oracleNode).deploy(link.address)
- cc = await basicConsumerFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, oc.address, specId)
- })
-
- it('has a predictable gas price [ @skip-coverage ]', async () => {
- const rec = await ethers.provider.getTransactionReceipt(
- cc.deployTransaction.hash ?? '',
- )
- assert.isBelow(rec.gasUsed?.toNumber() ?? -1, 1750000)
- })
-
- describe('#requestEthereumPrice', () => {
- describe('without LINK', () => {
- it('reverts', async () =>
- await expect(cc.requestEthereumPrice(currency, payment)).to.be.reverted)
- })
-
- describe('with LINK', () => {
- beforeEach(async () => {
- await link.transfer(cc.address, toWei('1'))
- })
-
- it('triggers a log event in the Oracle contract', async () => {
- const tx = await cc.requestEthereumPrice(currency, payment)
- const receipt = await tx.wait()
-
- const log = receipt?.logs?.[3]
- assert.equal(log?.address.toLowerCase(), oc.address.toLowerCase())
-
- const request = decodeRunRequest(log)
- const expected = {
- path: ['USD'],
- get: 'https://min-api.cryptocompare.com/data/price?fsym=ETH&tsyms=USD,EUR,JPY',
- }
-
- assert.equal(toHex(specId), request.specId)
- bigNumEquals(toWei('1'), request.payment)
- assert.equal(cc.address.toLowerCase(), request.requester.toLowerCase())
- assert.equal(1, request.dataVersion)
- assert.deepEqual(expected, cbor.decodeFirstSync(request.data))
- })
-
- it('has a reasonable gas cost [ @skip-coverage ]', async () => {
- const tx = await cc.requestEthereumPrice(currency, payment)
- const receipt = await tx.wait()
-
- assert.isBelow(receipt?.gasUsed?.toNumber() ?? -1, 140000)
- })
- })
- })
-
- describe('#fulfillOracleRequest', () => {
- const response = ethers.utils.formatBytes32String('1,000,000.00')
- let request: RunRequest
-
- beforeEach(async () => {
- await link.transfer(cc.address, toWei('1'))
- const tx = await cc.requestEthereumPrice(currency, payment)
- const receipt = await tx.wait()
-
- request = decodeRunRequest(receipt?.logs?.[3])
- })
-
- it('records the data given to it by the oracle', async () => {
- await oc
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...convertFufillParams(request, response))
-
- const currentPrice = await cc.currentPrice()
- assert.equal(currentPrice, response)
- })
-
- it('logs the data given to it by the oracle', async () => {
- const tx = await oc
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...convertFufillParams(request, response))
- const receipt = await tx.wait()
-
- assert.equal(2, receipt?.logs?.length)
- const log = receipt?.logs?.[1]
-
- assert.equal(log?.topics[2], response)
- })
-
- describe('when the consumer does not recognize the request ID', () => {
- let otherRequest: RunRequest
-
- beforeEach(async () => {
- // Create a request directly via the oracle, rather than through the
- // chainlink client (consumer). The client should not respond to
- // fulfillment of this request, even though the oracle will faithfully
- // forward the fulfillment to it.
- const args = encodeOracleRequest(
- toHex(specId),
- cc.address,
- basicConsumerFactory.interface.getSighash('fulfill'),
- 43,
- constants.HashZero,
- )
- const tx = await link.transferAndCall(oc.address, 0, args)
- const receipt = await tx.wait()
-
- otherRequest = decodeRunRequest(receipt?.logs?.[2])
- })
-
- it('does not accept the data provided', async () => {
- d('otherRequest %s', otherRequest)
- await oc
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...convertFufillParams(otherRequest, response))
-
- const received = await cc.currentPrice()
-
- assert.equal(ethers.utils.parseBytes32String(received), '')
- })
- })
-
- describe('when called by anyone other than the oracle contract', () => {
- it('does not accept the data provided', async () => {
- await evmRevert(
- cc.connect(roles.oracleNode).fulfill(request.requestId, response),
- )
-
- const received = await cc.currentPrice()
- assert.equal(ethers.utils.parseBytes32String(received), '')
- })
- })
- })
-
- describe('#cancelRequest', () => {
- const depositAmount = toWei('1')
- let request: RunRequest
-
- beforeEach(async () => {
- await link.transfer(cc.address, depositAmount)
- const tx = await cc.requestEthereumPrice(currency, payment)
- const receipt = await tx.wait()
-
- request = decodeRunRequest(receipt.logs?.[3])
- })
-
- describe('before 5 minutes', () => {
- it('cant cancel the request', () =>
- evmRevert(
- cc
- .connect(roles.consumer)
- .cancelRequest(
- oc.address,
- request.requestId,
- request.payment,
- request.callbackFunc,
- request.expiration,
- ),
- ))
- })
-
- describe('after 5 minutes', () => {
- it('can cancel the request', async () => {
- await increaseTime5Minutes(ethers.provider)
-
- await cc
- .connect(roles.consumer)
- .cancelRequest(
- oc.address,
- request.requestId,
- request.payment,
- request.callbackFunc,
- request.expiration,
- )
- })
- })
- })
-
- describe('#withdrawLink', () => {
- const depositAmount = toWei('1')
-
- beforeEach(async () => {
- await link.transfer(cc.address, depositAmount)
- const balance = await link.balanceOf(cc.address)
- bigNumEquals(balance, depositAmount)
- })
-
- it('transfers LINK out of the contract', async () => {
- await cc.connect(roles.consumer).withdrawLink()
- const ccBalance = await link.balanceOf(cc.address)
- const consumerBalance = BigNumber.from(
- await link.balanceOf(await roles.consumer.getAddress()),
- )
- bigNumEquals(ccBalance, 0)
- bigNumEquals(consumerBalance, depositAmount)
- })
- })
-})
diff --git a/contracts/test/v0.6/BlockhashStore.test.ts b/contracts/test/v0.6/BlockhashStore.test.ts
deleted file mode 100644
index 453b2eca3b1..00000000000
--- a/contracts/test/v0.6/BlockhashStore.test.ts
+++ /dev/null
@@ -1,285 +0,0 @@
-import { ethers } from 'hardhat'
-import { assert, expect } from 'chai'
-import { Contract, ContractFactory } from 'ethers'
-import { Personas, getUsers } from '../test-helpers/setup'
-
-let personas: Personas
-let blockhashStoreTestHelperFactory: ContractFactory
-
-type TestBlocks = {
- num: number
- rlpHeader: Uint8Array
- hash: string
-}
-
-const mainnetBlocks: TestBlocks[] = [
- {
- num: 10000467,
- rlpHeader: ethers.utils.arrayify(
- '0xf90215a058ee3c05e880cb25a3db92b9f1479c5453690ca97f9bcbb18d21965d3213578ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ea674fdde714fd979de3edf0f56aa9716b898ec8a0a448355652812a7d518b5c979a15bba02cfe4576d8eb61e8b5731ecc37f2bec6a0049f25ed97f9ed9a9c8521ab39cd2c48438d1d18c84dcab5bf494c19595bd462a0b1169f28bdbe5dd61ebc20b7a459be9d7fa898f5a3ba5fed6d502d94b9a8101bb901001000008180000210000080010001080310e004800c3040000060484000010804088050044302a500240041040010012120840002400005092000808000640012081000880010008040200208000004050800400002244044006041040040010890040504020040008004222502000800220000021800006400802036500000000400014640d00020002110000001440000001509543802080004210004100de04744a2810000000032250080810000502210c04289480800000423080800004000a020220030203000020001000000042c00420090000008003308459020e010a01000200190900040e81000040040000020000a8044001000202010000600c087086c49cadb1b57839898538398909483984b9e845eb02fbf94505059452d65746865726d696e652d6575312d34a06d0287c21536fac432714bd3f3712ff1a7e409faf1b10edac9b9547da1d4f7b188930531280477460c',
- ),
- hash: '0x4a65bcdf3466a16740b74849cc10fc57d4acb24cce148665482812699a400464',
- },
- {
- num: 10000468,
- rlpHeader: ethers.utils.arrayify(
- '0xf9020da04a65bcdf3466a16740b74849cc10fc57d4acb24cce148665482812699a400464a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d493479404668ec2f57cc15c381b461b9fedab5d451c8f7fa0bcd4ddbb7125a5c06df96d862921dc0bba8664b3f759a233fe565a615c0ab3eaa0087ab379852c83e4b660de1668fc93333201ad0d233167ea6cef8bacaf5cba2aa0d81855037b2a6b56eba0c2ed129fb4102fb831b0baa187a0f6e0c155400f7855b9010080040040200000000010102081000000500040010408040800010110000000008000005808020000902021818000210000000000081100401000400014400001041008000020448800180128800008000200000420e01200000000000000011000001000020000208000b42200a0008000510200080200008c002018108010014030200000080000000002000010008000011008004003081000400080100803040080040300000002044080480000000000008080101000000050000000000840000002200040000a0080000442008006005502800000040008000890201002022402208002900020900000000080000100100201080000000003400000004887086d57541477ba839898548398968083989147845eb02fc28c73706964657230380b03ac53a076c676a0ab090b373b6242851a4beab7b8cdc9d3ebe211747a255b78c0278c42880ea13d40042dd1e6',
- ),
- hash: '0x00fd2589a272b85ffaf63223641571bf95891c936b7514ee4e87a593e52de7c9',
- },
- {
- num: 10000469,
- rlpHeader: ethers.utils.arrayify(
- '0xf90211a000fd2589a272b85ffaf63223641571bf95891c936b7514ee4e87a593e52de7c9a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347945a0b54d5dc17e0aadc383d2db43b0a0d3e029c4ca01b28d3b4e4d3442a9e6caed9f80b6a639bce6f3a283d4e004e6bb44e483ceeeba067c00d9067bc023b8fab8e3afd1bc0f2470f08003bdf9f167fbfeede2422ac4ea09d8b344d9ab1b7288f8c5e644c53b1a5288da6d6ee0e388ec76f291def48da15b90100c462095870a26a0804132e208110329710d459054558159c103208d44820002016108136199200061063699d8400254a22828c11b5512e3303c98ec7747cc02d00161880c2f2c580e806bccc04805190265f096601342058020a8324c277735d8202220412f03303201252a3000038883a4bb0010e6b004408306232150a84d110100d0c4b9d228022812602c05c801d20500d4ed10010ce2400428a96950a98050c00e603292a806c4983b25814880000440a23821191121996410c5110c949616c2066a4a0488087d4c226c14208042c00d609b5cc44051400219d93626818728612a9b18690e03c902014a900e0018828011494b80d4708799b0d8a83cace87086e64fefefb48839898558398968083986664845eb02fc7906574682d70726f2d687a662d74303032a09f1918a362b55ebd072cc9548fb74f89301d41c2a1feb13c08a1c2c3cb0606d88810dfa530069367fb',
- ),
- hash: '0x325fde74e261fc483a16506bbc711b645b043ad24c7db0136845a1be262cf0c9',
- },
- {
- num: 10000470,
- rlpHeader: ethers.utils.arrayify(
- '0xf90215a0325fde74e261fc483a16506bbc711b645b043ad24c7db0136845a1be262cf0c9a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ea674fdde714fd979de3edf0f56aa9716b898ec8a020647cfa35563093442a12d80bf2bacb83da1de8340366677f3822591a334ccea066ad7285f6c5b6407f62c6b65a83aeaaa71ad9a97c2bb15139140f2dbb60f7e0a0c0e633851d0b5ce661ecc054517425e82425fcc6170db9693e5b5a6dd5ef6d6bb90100c0c000c1520708182080c8e461891c2402800a80d44a00034259414012012a5006a1416331181504902044960808f1129018800311621e920886804693749b10542400142e984580ccba634881c4156962200ecfb004000005468db44842781c59923110262660802315006106388b028412c42c000820c508e66b7851fa68002008144cd7860cd884280802915163399c168d5a11b0649486084110149469a1e61c31134204b903206566885180bc0426c0c6c0a4d408e182242f08180d204c624a040248425041ac028010d088820402ba4bd38c2d1215829300543465603822110500811290490148049300040e000c280086a09e8100089818ce480a887e87086c4965bf3c8a839898568398705c839847d2845eb02fe994505059452d65746865726d696e652d6575312d35a09d8ae288d0eede524f3ef5e6cfcc5ba07f380bc695bb71578a7b91cfa517071b8859d0976006378e52',
- ),
- hash: '0x5cf096dfd1fc2d2947a96fdec5377ab7beaa0eb00c80728a3b96f0864cec506a',
- },
-]
-
-const maticBlocks: TestBlocks[] = [
- {
- num: 10000467,
- rlpHeader: ethers.utils.arrayify(
- '0xf9025da0212093b89337e6741aca0c6c1cbfc64b56155bdcc3623fa9bcbfa0498fa135aba01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0ac0ec242516093308f7a2cc6965f38835eb3db69cba93401daef672666a3aefea0d06985e9ae671d22fb4b3d73ef4e074a448f66b4769ad8d59c3b8aef1ede15e2a00076d4897a88e08c25ca12c558f622d03d532d8b674e8c6b9847000b98dbe480b90100040000000200000000000000000000000000000000000000100000000000400000000000000000000000002800000000100080000000000000000001000000400000000000000000000000080002008000000000000000000021000000000000000000000200000000001000000008000000000008000001800800100000000000010000000010100000800000000000001000000200100000000000000000002000000004000000000000000080010000000000000000200000000000000040000000420000000000010000000000000004040004000000001000001000200100100080000000000400000000100000100000000000000000000000021000000e839898538401312d008302e54b84600df884b861d78301091883626f7288676f312e31352e35856c696e7578000000000000000003eb49c29f5facd767206f64b8a5c9b325bced5c9156f489c6281c68eddc9e5f2ef1177c02a99d8ab6216dcf2879eefddfc27c75ffa9ef6a2185ce9983d1434901a00000000000000000000000000000000000000000000000000000000000000000880000000000000000',
- ),
- hash: '0x6c3b869ca26fece236545f7914d8249651d729852dc1445f53a94d5a59cdc9da',
- },
- {
- num: 10000468,
- rlpHeader: ethers.utils.arrayify(
- '0xf9025da06c3b869ca26fece236545f7914d8249651d729852dc1445f53a94d5a59cdc9daa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0fa236c78bbe5939cc62985e32582c2158468a5b2b4dd02d514edb0bea95f0fd3a0e05ccfb09764e5cd6811ef2c2616d4a57f187be84235e2569c9b8d70489f1a44a0aea27aed2ad1d553e30501e6fe47fee0842c3b7ce5867e579b29975f02ec4282b90100008000100000000000000400080800000009000000010020000000000800000000000000080000000000000000000000000080000080000820400000000000000000200000000000000000080000008000200000200000000003009000020000000200000010000000001000000000000000000000000000800040100000000000000000000010000000100100000000000000000102004000000040000000002000000008000000000000000000000000000000200000000000000000000041000000020000080001010000000000000008000000110000001001800020000000100000000001400000040000000000000010010000000001000000001000000e839898548401312d00830494ed84600df886b861d78301091883626f7288676f312e31352e35856c696e75780000000000000000aa8ed86143b48b6aa7170d2083c3a7be31cbdfdc40f39badb8747f4c2198279a71c0d3eb5d25f3b7da5a48b887f61e22fe0baa692aa03807ad12f6fe25af087e00a00000000000000000000000000000000000000000000000000000000000000000880000000000000000',
- ),
- hash: '0x258aa48bde013579fbfef2e222bcc222b1f57bf898a71c623f9024229c9f6111',
- },
- {
- num: 10000469,
- rlpHeader: ethers.utils.arrayify(
- '0xf9025aa0258aa48bde013579fbfef2e222bcc222b1f57bf898a71c623f9024229c9f6111a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0fa236c78bbe5939cc62985e32582c2158468a5b2b4dd02d514edb0bea95f0fd3a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e839898558401312d008084600df888b861d78301091883626f7288676f312e31352e35856c696e75780000000000000000bd8668cc5d89583a7cc26fb96650e61f045ffe5248ae80c667ba7648df41e3d552060998ac151f2d15bd1b98f0a2a50c4281729a4c0aae4758a3bad280207c2901a00000000000000000000000000000000000000000000000000000000000000000880000000000000000',
- ),
- hash: '0x611779767f1deb5a17723ec71d1b397b18a0fc9a40d282810a33bd6a0a5f46f9',
- },
- {
- num: 10000470,
- rlpHeader: ethers.utils.arrayify(
- '0xf9025aa0611779767f1deb5a17723ec71d1b397b18a0fc9a40d282810a33bd6a0a5f46f9a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0fa236c78bbe5939cc62985e32582c2158468a5b2b4dd02d514edb0bea95f0fd3a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e839898568401312d008084600df88ab861d78301091883626f7288676f312e31352e35856c696e75780000000000000000b617675c3b01e98319508130e1a583d57ce6b3a8a97fa2fbdaa33673cc6c609d6f7c361c833838f54b724d3a83cdd73e2398bb147970cd0b057865386cb08e1300a00000000000000000000000000000000000000000000000000000000000000000880000000000000000',
- ),
- hash: '0x2edf2f5c5faa5046b2304f76c92096a25e7c4343a7b75c36b29e8e9755d93397',
- },
-]
-
-// The following headers from Binance Smart Chain were retrieved using `go run
-// binance.go`, where binance.go contains
-//
-// package main
-//
-// import (
-// "context"
-// "fmt"
-// "log"
-// "math/big"
-// "math/rand"
-// "strings"
-//
-// "github.com/ethereum/go-ethereum/ethclient"
-// "github.com/ethereum/go-ethereum/rlp"
-// )
-//
-// var tsBlockTemplate = `
-// {
-// num: %d,
-// rlpHeader: ethers.utils.arrayify(
-// '0x%x',
-// ),
-// hash: '0x%x',
-// },
-// `
-//
-// func main() {
-// client, err := ethclient.Dial("https://bsc-dataseed.binance.org/")
-// if err != nil {
-// log.Fatal(err)
-// }
-//
-// header, err := client.HeaderByNumber(context.Background(), nil)
-// if err != nil {
-// log.Fatal(err)
-// }
-// topBlockNum := header.Number.Int64()
-// numBlocks := int64(4)
-// if topBlockNum < numBlocks {
-// log.Fatalf("need at least %d consecutive blocks", numBlocks)
-// }
-// targetBlock := int64(rand.Intn(int(topBlockNum - numBlocks)))
-// simulatedHeadBlock := targetBlock + numBlocks - 1
-// for blockNum := targetBlock; blockNum <= simulatedHeadBlock; blockNum++ {
-// header, err := client.HeaderByNumber(context.Background(), big.NewInt(blockNum))
-// if err != nil {
-// log.Fatal(err)
-// }
-// s, err := rlp.EncodeToBytes(header)
-// if err != nil {
-// log.Fatalf("could not encode header: got error %s from %v", err, header)
-// }
-// // fmt.Printf("header for block number %d: 0x%x\n", blockNum, s)
-// fmt.Printf(strings.TrimLeft(tsBlockTemplate, "\n"), blockNum, s, header.Hash())
-// }
-// }
-const binanceBlocks: TestBlocks[] = [
- {
- num: 1875651,
- rlpHeader: ethers.utils.arrayify(
- '0xf9025da029c26248bebbe0d0acb209d13ac9337c4b5c313696c031dd63b3cd16cbdc0c21a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794b8f7166496996a7da21cf1f1b04d9b3e26a3d077a03f962867b5e86191c3280bd52c4249587e08ddfa9851cea981fb7a5721c9157aa05924ae05d17347687ba81d093aee159ccc65cefc8314b0515ef921e553df05a2a089af99a7afa586e7d67062d051df4255304bb730f6d62fdd3bdb207f1513b23bb901000100000000000000000800000000000000000000000200000000000000800000000000000200100000000000000800000000000000000000000000000000000000000000000000800000140800000008201000001000000202000000001200000000002002020000000000000000080000000000000002000000001000000000000002000000008010000000000000000002040080008400280000c00000081000400000004000000010000000020000000000000000000000000000000000000001000210200000000000000000000800000000000000000000000000002010000004000000000001000000000000000000000800020000000000000000000002831c9ec38401c9c380830789c2845f9faab1b861d883010002846765746888676f312e31332e34856c696e7578000000000000003311ee6830f31dc9116d8a59178b539d91eb6811c1d533c4a59bf77262689c552218bb1eae9cb9d6bf6e1066bea78052c8767313ace71c919d02e70760bd255401a00000000000000000000000000000000000000000000000000000000000000000880000000000000000',
- ),
- hash: '0xe0a935b1e37420ac1d855215bdad4730a5ffe315eda287c6c18aa86c426ede74',
- },
- {
- num: 1875652,
- rlpHeader: ethers.utils.arrayify(
- '0xf9025da0e0a935b1e37420ac1d855215bdad4730a5ffe315eda287c6c18aa86c426ede74a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794c2be4ec20253b8642161bc3f444f53679c1f3d47a0dbf2d40cf5533b65ac9b5be35cac75f7b106244ba14ee476a832c28d46a53452a04f83b8a51d3e17b6a02a0caa79acc7597996c5b8c68dba12c33095ae086089eea02fa2642645b2de17227a6c18c3fa491f54b3bdfe8ac8e04924a33a005a0e9e61b901000100000100000000000008000000000000000000040000000000000000800000000000000000000000000000000800000800000000000400000000000020000040100080000000000000000800000000209000001000000200000000801000400800002002030000000000000100080000002000000002004000011000000002000100040000000000100000000000000000040100009000300000000000000002004000004000000000000000020000002000000010000000200000800000000001000280000000000000008000000000000000800000000000020000002000041000000000000001200020001000080000002a40020040000000000000000002831c9ec48401c9c38083044b40845f9faab4b861d883010002846765746888676f312e31332e34856c696e757800000000000000cfc02687b2394922055792a8e67dad566f6690de06b229d752433b2067207b5f43b9f3c63f91cea5a79bbfc51d9132b933a706ab504038a92f37d57af2bb6c2e01a00000000000000000000000000000000000000000000000000000000000000000880000000000000000',
- ),
- hash: '0x629e5abcae42940e00d7b38aa7b2ecccfbab582cb7a0b2c3658c2dad8e66549d',
- },
- {
- num: 1875653,
- rlpHeader: ethers.utils.arrayify(
- '0xf9025da0629e5abcae42940e00d7b38aa7b2ecccfbab582cb7a0b2c3658c2dad8e66549da01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ce2fd7544e0b2cc94692d4a704debef7bcb61328a0718e7db53041585a814d658c32c88fd550c2c6d200826020008925e0a0f7967fa000fbf842e492a47cc9786885783259a08aed71055e78216006408932515fd960a0c7ffeb2189b8fcde43733cf1958cdb1c38c44052cfbb41125382240c232a98f8b901000000000000000000000000000000000000000002000000000004000000000000000000010000000000000000000000000000000000000200000000004020200000010000000800000000208800000000201000000000000000080000000000000000002002220000000000000000080000000000000000000000001000000000100000000000080010000000000000000000040000000000000000000000000002000000000008000000004000000000000000000000200000000000000000000000000202000000000000000000000000000000000008000000000000002080001000000000000001000000000000000000080100000000000000000000000002831c9ec58401c9c38083025019845f9faab7b861d883010002846765746888676f312e31332e34856c696e7578000000000000008c3c7a5c83e930fbd9d14f83c9b3931f032f0f678919c35b8b32ca6dae9948950bfa326fae134fa234fa7b84c06bdc3f7c6d6414c2a266df1339e563be8bd9cc00a00000000000000000000000000000000000000000000000000000000000000000880000000000000000',
- ),
- hash: '0xae8574651adabfd0ca55e2cee0e2e639ced73ec1cc0a35debeeceee6943442a9',
- },
- {
- num: 1875654,
- rlpHeader: ethers.utils.arrayify(
- '0xf9025da0ae8574651adabfd0ca55e2cee0e2e639ced73ec1cc0a35debeeceee6943442a9a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794d6caa02bbebaebb5d7e581e4b66559e635f805ffa02df6a1173c63ec0a8acc46c818670f030aece1154b9f3bbc70f46a8427dd8dd6a0fa8835c499682d8c90759ff9ea1c291048755b967e48880a0fc21d19ec034a59a0b4e22607cb105c04156044b3f98c2cecae1553b45aa9b6044c37573791a27576b901000200000008000000000001000000000000000000000020000000000000020000000000000000000000000000000000000000000000000040000000000220000000000000000400000000001802000000201000000000000000000000000000000000002002020000000000000000080000000000000000000000001000000000000000000000100000000000000000000000040000000000000000010200200002000400000000400000000200000000000000080000000000000000000008000000000200000000000000000000000000000000000000000000000000002000001000000000000001000000000000000000000000000000000008080000000002831c9ec68401c9c3808301e575845f9faabab861d883010002846765746888676f312e31332e34856c696e757800000000000000399e73b0e963ec029e815623a414aa852508a28dd9799a1bf4e2380c8db687a46cc5b6cc20352ae21e35cfd28124a32fcd49ac8fac5b03901b3e03963e4fff5801a00000000000000000000000000000000000000000000000000000000000000000880000000000000000',
- ),
- hash: '0x189990455c59a5dea78071df9a2008ede292ff0a062fc5c4c6ca35fbe476f834',
- },
-]
-
-before(async () => {
- personas = (await getUsers()).personas
- blockhashStoreTestHelperFactory = await ethers.getContractFactory(
- 'src/v0.6/tests/BlockhashStoreTestHelper.sol:BlockhashStoreTestHelper',
- personas.Default,
- )
-})
-
-runBlockhashStoreTests(mainnetBlocks, 'Ethereum')
-runBlockhashStoreTests(maticBlocks, 'Matic')
-runBlockhashStoreTests(binanceBlocks, 'Binance Smart Chain')
-
-async function runBlockhashStoreTests(
- blocks: TestBlocks[],
- description: string,
-) {
- describe(`BlockhashStore (${description})`, () => {
- let blockhashStoreTestHelper: Contract
-
- beforeEach(async () => {
- blockhashStoreTestHelper = await blockhashStoreTestHelperFactory
- .connect(personas.Default)
- .deploy()
-
- const [lastBlock] = blocks.slice(-1)
- await blockhashStoreTestHelper
- .connect(personas.Default)
- .godmodeSetHash(lastBlock.num, lastBlock.hash)
- assert.strictEqual(
- await blockhashStoreTestHelper.getBlockhash(lastBlock.num),
- lastBlock.hash,
- )
- })
-
- it('getBlockhash reverts for unknown blockhashes', async () => {
- await expect(
- blockhashStoreTestHelper.getBlockhash(99999999),
- ).to.be.revertedWith('blockhash not found in store')
- })
-
- it('storeVerifyHeader records valid blockhashes', async () => {
- for (let i = blocks.length - 2; i >= 0; i--) {
- assert.strictEqual(
- ethers.utils.keccak256(blocks[i + 1].rlpHeader),
- await blockhashStoreTestHelper.getBlockhash(blocks[i + 1].num),
- )
- await blockhashStoreTestHelper
- .connect(personas.Default)
- .storeVerifyHeader(blocks[i].num, blocks[i + 1].rlpHeader)
- assert.strictEqual(
- await blockhashStoreTestHelper.getBlockhash(blocks[i].num),
- blocks[i].hash,
- )
- }
- })
-
- it('storeVerifyHeader rejects unknown headers', async () => {
- const unknownBlock = blocks[0]
- await expect(
- blockhashStoreTestHelper
- .connect(personas.Default)
- .storeVerifyHeader(unknownBlock.num - 1, unknownBlock.rlpHeader),
- ).to.be.revertedWith('header has unknown blockhash')
- })
-
- it('storeVerifyHeader rejects corrupted headers', async () => {
- const [lastBlock] = blocks.slice(-1)
- const modifiedHeader = new Uint8Array(lastBlock.rlpHeader)
- modifiedHeader[137] += 1
- await expect(
- blockhashStoreTestHelper
- .connect(personas.Default)
- .storeVerifyHeader(lastBlock.num - 1, modifiedHeader),
- ).to.be.revertedWith('header has unknown blockhash')
- })
-
- it('store accepts recent block numbers', async () => {
- await ethers.provider.send('evm_mine', [])
-
- const n = (await ethers.provider.getBlockNumber()) - 1
- await blockhashStoreTestHelper.connect(personas.Default).store(n)
-
- assert.equal(
- await blockhashStoreTestHelper.getBlockhash(n),
- (await ethers.provider.getBlock(n)).hash,
- )
- })
-
- it('store rejects future block numbers', async () => {
- await expect(
- blockhashStoreTestHelper.connect(personas.Default).store(99999999999),
- ).to.be.revertedWith('blockhash(n) failed')
- })
-
- it('store rejects old block numbers', async () => {
- for (let i = 0; i < 300; i++) {
- await ethers.provider.send('evm_mine', [])
- }
-
- await expect(
- blockhashStoreTestHelper
- .connect(personas.Default)
- .store((await ethers.provider.getBlockNumber()) - 256),
- ).to.be.revertedWith('blockhash(n) failed')
- })
-
- it('storeEarliest works', async () => {
- for (let i = 0; i < 300; i++) {
- await ethers.provider.send('evm_mine', [])
- }
-
- await blockhashStoreTestHelper.connect(personas.Default).storeEarliest()
-
- const n = (await ethers.provider.getBlockNumber()) - 256
- assert.equal(
- await blockhashStoreTestHelper.getBlockhash(n),
- (await ethers.provider.getBlock(n)).hash,
- )
- })
- })
-}
diff --git a/contracts/test/v0.6/Chainlink.test.ts b/contracts/test/v0.6/Chainlink.test.ts
deleted file mode 100644
index f3587dc30a7..00000000000
--- a/contracts/test/v0.6/Chainlink.test.ts
+++ /dev/null
@@ -1,186 +0,0 @@
-import { ethers } from 'hardhat'
-import { publicAbi, decodeDietCBOR, hexToBuf } from '../test-helpers/helpers'
-import { assert } from 'chai'
-import { Contract, ContractFactory, providers, Signer } from 'ethers'
-import { Roles, getUsers } from '../test-helpers/setup'
-import { makeDebug } from '../test-helpers/debug'
-
-const debug = makeDebug('ChainlinkTestHelper')
-let concreteChainlinkFactory: ContractFactory
-
-let roles: Roles
-
-before(async () => {
- roles = (await getUsers()).roles
- concreteChainlinkFactory = await ethers.getContractFactory(
- 'src/v0.6/tests/ChainlinkTestHelper.sol:ChainlinkTestHelper',
- roles.defaultAccount,
- )
-})
-
-describe('ChainlinkTestHelper', () => {
- let ccl: Contract
- let defaultAccount: Signer
-
- beforeEach(async () => {
- defaultAccount = roles.defaultAccount
- ccl = await concreteChainlinkFactory.connect(defaultAccount).deploy()
- })
-
- it('has a limited public interface [ @skip-coverage ]', () => {
- publicAbi(ccl, [
- 'add',
- 'addBytes',
- 'addInt',
- 'addStringArray',
- 'addUint',
- 'closeEvent',
- 'setBuffer',
- ])
- })
-
- async function parseCCLEvent(tx: providers.TransactionResponse) {
- const receipt = await tx.wait()
- const data = receipt.logs?.[0].data
- const d = debug.extend('parseCCLEvent')
- d('data %s', data)
- return ethers.utils.defaultAbiCoder.decode(['bytes'], data ?? '')
- }
-
- describe('#close', () => {
- it('handles empty payloads', async () => {
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
- assert.deepEqual(decoded, {})
- })
- })
-
- describe('#setBuffer', () => {
- it('emits the buffer', async () => {
- await ccl.setBuffer('0xA161616162')
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
- assert.deepEqual(decoded, { a: 'b' })
- })
- })
-
- describe('#add', () => {
- it('stores and logs keys and values', async () => {
- await ccl.add('first', 'word!!')
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
- assert.deepEqual(decoded, { first: 'word!!' })
- })
-
- it('handles two entries', async () => {
- await ccl.add('first', 'uno')
- await ccl.add('second', 'dos')
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
-
- assert.deepEqual(decoded, {
- first: 'uno',
- second: 'dos',
- })
- })
- })
-
- describe('#addBytes', () => {
- it('stores and logs keys and values', async () => {
- await ccl.addBytes('first', '0xaabbccddeeff')
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
- const expected = hexToBuf('0xaabbccddeeff')
- assert.deepEqual(decoded, { first: expected })
- })
-
- it('handles two entries', async () => {
- await ccl.addBytes('first', '0x756E6F')
- await ccl.addBytes('second', '0x646F73')
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
-
- const expectedFirst = hexToBuf('0x756E6F')
- const expectedSecond = hexToBuf('0x646F73')
- assert.deepEqual(decoded, {
- first: expectedFirst,
- second: expectedSecond,
- })
- })
-
- it('handles strings', async () => {
- await ccl.addBytes('first', ethers.utils.toUtf8Bytes('apple'))
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
- const expected = ethers.utils.toUtf8Bytes('apple')
- assert.deepEqual(decoded, { first: expected })
- })
- })
-
- describe('#addInt', () => {
- it('stores and logs keys and values', async () => {
- await ccl.addInt('first', 1)
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
- assert.deepEqual(decoded, { first: 1 })
- })
-
- it('handles two entries', async () => {
- await ccl.addInt('first', 1)
- await ccl.addInt('second', 2)
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
-
- assert.deepEqual(decoded, {
- first: 1,
- second: 2,
- })
- })
- })
-
- describe('#addUint', () => {
- it('stores and logs keys and values', async () => {
- await ccl.addUint('first', 1)
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
- assert.deepEqual(decoded, { first: 1 })
- })
-
- it('handles two entries', async () => {
- await ccl.addUint('first', 1)
- await ccl.addUint('second', 2)
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
-
- assert.deepEqual(decoded, {
- first: 1,
- second: 2,
- })
- })
- })
-
- describe('#addStringArray', () => {
- it('stores and logs keys and values', async () => {
- await ccl.addStringArray('word', [
- ethers.utils.formatBytes32String('seinfeld'),
- ethers.utils.formatBytes32String('"4"'),
- ethers.utils.formatBytes32String('LIFE'),
- ])
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
- assert.deepEqual(decoded, { word: ['seinfeld', '"4"', 'LIFE'] })
- })
- })
-})
diff --git a/contracts/test/v0.6/ChainlinkClient.test.ts b/contracts/test/v0.6/ChainlinkClient.test.ts
deleted file mode 100644
index bfd43d7f3f9..00000000000
--- a/contracts/test/v0.6/ChainlinkClient.test.ts
+++ /dev/null
@@ -1,376 +0,0 @@
-import { ethers } from 'hardhat'
-import { assert } from 'chai'
-import { Contract, ContractFactory } from 'ethers'
-import { Roles, getUsers } from '../test-helpers/setup'
-import {
- convertFufillParams,
- decodeCCRequest,
- decodeRunRequest,
- RunRequest,
-} from '../test-helpers/oracle'
-import { decodeDietCBOR } from '../test-helpers/helpers'
-import { evmRevert } from '../test-helpers/matchers'
-
-let concreteChainlinkClientFactory: ContractFactory
-let emptyOracleFactory: ContractFactory
-let getterSetterFactory: ContractFactory
-let oracleFactory: ContractFactory
-let linkTokenFactory: ContractFactory
-
-let roles: Roles
-
-before(async () => {
- roles = (await getUsers()).roles
-
- concreteChainlinkClientFactory = await ethers.getContractFactory(
- 'src/v0.6/tests/ChainlinkClientTestHelper.sol:ChainlinkClientTestHelper',
- roles.defaultAccount,
- )
- emptyOracleFactory = await ethers.getContractFactory(
- 'src/v0.6/tests/EmptyOracle.sol:EmptyOracle',
- roles.defaultAccount,
- )
- getterSetterFactory = await ethers.getContractFactory(
- 'src/v0.5/tests/GetterSetter.sol:GetterSetter',
- roles.defaultAccount,
- )
- oracleFactory = await ethers.getContractFactory(
- 'src/v0.4/Oracle.sol:Oracle',
- roles.defaultAccount,
- )
- linkTokenFactory = await ethers.getContractFactory(
- 'src/v0.4/LinkToken.sol:LinkToken',
- roles.defaultAccount,
- )
-})
-
-describe('ChainlinkClientTestHelper', () => {
- const specId =
- '0x4c7b7ffb66b344fbaa64995af81e355a00000000000000000000000000000000'
- let cc: Contract
- let gs: Contract
- let oc: Contract
- let newoc: Contract
- let link: Contract
-
- beforeEach(async () => {
- link = await linkTokenFactory.connect(roles.defaultAccount).deploy()
- oc = await oracleFactory.connect(roles.defaultAccount).deploy(link.address)
- newoc = await oracleFactory
- .connect(roles.defaultAccount)
- .deploy(link.address)
- gs = await getterSetterFactory.connect(roles.defaultAccount).deploy()
- cc = await concreteChainlinkClientFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, oc.address)
- })
-
- describe('#newRequest', () => {
- it('forwards the information to the oracle contract through the link token', async () => {
- const tx = await cc.publicNewRequest(
- specId,
- gs.address,
- ethers.utils.toUtf8Bytes('requestedBytes32(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
-
- assert.equal(1, receipt.logs?.length)
- const [jId, cbAddr, cbFId, cborData] = receipt.logs
- ? decodeCCRequest(receipt.logs[0])
- : []
- const params = decodeDietCBOR(cborData ?? '')
-
- assert.equal(specId, jId)
- assert.equal(gs.address, cbAddr)
- assert.equal('0xed53e511', cbFId)
- assert.deepEqual({}, params)
- })
- })
-
- describe('#chainlinkRequest(Request)', () => {
- it('emits an event from the contract showing the run ID', async () => {
- const tx = await cc.publicRequest(
- specId,
- cc.address,
- ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'),
- 0,
- )
-
- const { events, logs } = await tx.wait()
-
- assert.equal(4, events?.length)
-
- assert.equal(logs?.[0].address, cc.address)
- assert.equal(events?.[0].event, 'ChainlinkRequested')
- })
- })
-
- describe('#chainlinkRequestTo(Request)', () => {
- it('emits an event from the contract showing the run ID', async () => {
- const tx = await cc.publicRequestRunTo(
- newoc.address,
- specId,
- cc.address,
- ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'),
- 0,
- )
- const { events } = await tx.wait()
-
- assert.equal(4, events?.length)
- assert.equal(events?.[0].event, 'ChainlinkRequested')
- })
-
- it('emits an event on the target oracle contract', async () => {
- const tx = await cc.publicRequestRunTo(
- newoc.address,
- specId,
- cc.address,
- ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'),
- 0,
- )
- const { logs } = await tx.wait()
- const event = logs && newoc.interface.parseLog(logs[3])
-
- assert.equal(4, logs?.length)
- assert.equal(event?.name, 'OracleRequest')
- })
-
- it('does not modify the stored oracle address', async () => {
- await cc.publicRequestRunTo(
- newoc.address,
- specId,
- cc.address,
- ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'),
- 0,
- )
-
- const actualOracleAddress = await cc.publicOracleAddress()
- assert.equal(oc.address, actualOracleAddress)
- })
- })
-
- describe('#cancelChainlinkRequest', () => {
- let requestId: string
- // a concrete chainlink attached to an empty oracle
- let ecc: Contract
-
- beforeEach(async () => {
- const emptyOracle = await emptyOracleFactory
- .connect(roles.defaultAccount)
- .deploy()
- ecc = await concreteChainlinkClientFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, emptyOracle.address)
-
- const tx = await ecc.publicRequest(
- specId,
- ecc.address,
- ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'),
- 0,
- )
- const { events } = await tx.wait()
- requestId = (events?.[0]?.args as any).id
- })
-
- it('emits an event from the contract showing the run was cancelled', async () => {
- const tx = await ecc.publicCancelRequest(
- requestId,
- 0,
- ethers.utils.hexZeroPad('0x', 4),
- 0,
- )
- const { events } = await tx.wait()
-
- assert.equal(1, events?.length)
- assert.equal(events?.[0].event, 'ChainlinkCancelled')
- assert.equal(requestId, (events?.[0].args as any).id)
- })
-
- it('throws if given a bogus event ID', async () => {
- await evmRevert(
- ecc.publicCancelRequest(
- ethers.utils.formatBytes32String('bogusId'),
- 0,
- ethers.utils.hexZeroPad('0x', 4),
- 0,
- ),
- )
- })
- })
-
- describe('#recordChainlinkFulfillment(modifier)', () => {
- let request: RunRequest
-
- beforeEach(async () => {
- const tx = await cc.publicRequest(
- specId,
- cc.address,
- ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'),
- 0,
- )
- const { logs } = await tx.wait()
-
- request = decodeRunRequest(logs?.[3])
- })
-
- it('emits an event marking the request fulfilled', async () => {
- const tx = await oc.fulfillOracleRequest(
- ...convertFufillParams(
- request,
- ethers.utils.formatBytes32String('hi mom!'),
- ),
- )
- const { logs } = await tx.wait()
-
- const event = logs && cc.interface.parseLog(logs[0])
-
- assert.equal(1, logs?.length)
- assert.equal(event?.name, 'ChainlinkFulfilled')
- assert.equal(request.requestId, event?.args.id)
- })
-
- it('should only allow one fulfillment per id', async () => {
- await oc.fulfillOracleRequest(
- ...convertFufillParams(
- request,
- ethers.utils.formatBytes32String('hi mom!'),
- ),
- )
-
- await evmRevert(
- oc.fulfillOracleRequest(
- ...convertFufillParams(
- request,
- ethers.utils.formatBytes32String('hi mom!'),
- ),
- ),
- 'Must have a valid requestId',
- )
- })
-
- it('should only allow the oracle to fulfill the request', async () => {
- await evmRevert(
- oc
- .connect(roles.stranger)
- .fulfillOracleRequest(
- ...convertFufillParams(
- request,
- ethers.utils.formatBytes32String('hi mom!'),
- ),
- ),
- 'Not an authorized node to fulfill requests',
- )
- })
- })
-
- describe('#fulfillChainlinkRequest(function)', () => {
- let request: RunRequest
-
- beforeEach(async () => {
- const tx = await cc.publicRequest(
- specId,
- cc.address,
- ethers.utils.toUtf8Bytes(
- 'publicFulfillChainlinkRequest(bytes32,bytes32)',
- ),
- 0,
- )
- const { logs } = await tx.wait()
-
- request = decodeRunRequest(logs?.[3])
- })
-
- it('emits an event marking the request fulfilled', async () => {
- const tx = await oc.fulfillOracleRequest(
- ...convertFufillParams(
- request,
- ethers.utils.formatBytes32String('hi mom!'),
- ),
- )
-
- const { logs } = await tx.wait()
- const event = logs && cc.interface.parseLog(logs[0])
-
- assert.equal(1, logs?.length)
- assert.equal(event?.name, 'ChainlinkFulfilled')
- assert.equal(request.requestId, event?.args?.id)
- })
-
- it('should only allow one fulfillment per id', async () => {
- await oc.fulfillOracleRequest(
- ...convertFufillParams(
- request,
- ethers.utils.formatBytes32String('hi mom!'),
- ),
- )
-
- await evmRevert(
- oc.fulfillOracleRequest(
- ...convertFufillParams(
- request,
- ethers.utils.formatBytes32String('hi mom!'),
- ),
- ),
- 'Must have a valid requestId',
- )
- })
-
- it('should only allow the oracle to fulfill the request', async () => {
- await evmRevert(
- oc
- .connect(roles.stranger)
- .fulfillOracleRequest(
- ...convertFufillParams(
- request,
- ethers.utils.formatBytes32String('hi mom!'),
- ),
- ),
- 'Not an authorized node to fulfill requests',
- )
- })
- })
-
- describe('#chainlinkToken', () => {
- it('returns the Link Token address', async () => {
- const addr = await cc.publicChainlinkToken()
- assert.equal(addr, link.address)
- })
- })
-
- describe('#addExternalRequest', () => {
- let mock: Contract
- let request: RunRequest
-
- beforeEach(async () => {
- mock = await concreteChainlinkClientFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, oc.address)
-
- const tx = await cc.publicRequest(
- specId,
- mock.address,
- ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'),
- 0,
- )
- const receipt = await tx.wait()
-
- request = decodeRunRequest(receipt.logs?.[3])
- await mock.publicAddExternalRequest(oc.address, request.requestId)
- })
-
- it('allows the external request to be fulfilled', async () => {
- await oc.fulfillOracleRequest(
- ...convertFufillParams(
- request,
- ethers.utils.formatBytes32String('hi mom!'),
- ),
- )
- })
-
- it('does not allow the same requestId to be used', async () => {
- await evmRevert(
- cc.publicAddExternalRequest(newoc.address, request.requestId),
- )
- })
- })
-})
diff --git a/contracts/test/v0.6/CheckedMath.test.ts b/contracts/test/v0.6/CheckedMath.test.ts
deleted file mode 100644
index 14520d9d9b9..00000000000
--- a/contracts/test/v0.6/CheckedMath.test.ts
+++ /dev/null
@@ -1,183 +0,0 @@
-// SPDX-License-Identifier: MIT
-// Adapted from https://github.com/OpenZeppelin/openzeppelin-contracts/blob/c9630526e24ba53d9647787588a19ffaa3dd65e1/test/math/SignedSafeMath.test.js
-
-import { ethers } from 'hardhat'
-import { assert } from 'chai'
-import { BigNumber, constants, Contract, ContractFactory } from 'ethers'
-import { Personas, getUsers } from '../test-helpers/setup'
-import { bigNumEquals } from '../test-helpers/matchers'
-
-let mathFactory: ContractFactory
-let personas: Personas
-
-before(async () => {
- personas = (await getUsers()).personas
- mathFactory = await ethers.getContractFactory(
- 'src/v0.6/tests/CheckedMathTestHelper.sol:CheckedMathTestHelper',
- personas.Default,
- )
-})
-
-const int256Max = constants.MaxInt256
-const int256Min = constants.MinInt256
-
-describe('CheckedMath', () => {
- let math: Contract
-
- beforeEach(async () => {
- math = await mathFactory.connect(personas.Default).deploy()
- })
-
- describe('#add', () => {
- const a = BigNumber.from('1234')
- const b = BigNumber.from('5678')
-
- it('is commutative', async () => {
- const c1 = await math.add(a, b)
- const c2 = await math.add(b, a)
-
- bigNumEquals(c1.result, c2.result)
- assert.isTrue(c1.ok)
- assert.isTrue(c2.ok)
- })
-
- it('is commutative with big numbers', async () => {
- const c1 = await math.add(int256Max, int256Min)
- const c2 = await math.add(int256Min, int256Max)
-
- bigNumEquals(c1.result, c2.result)
- assert.isTrue(c1.ok)
- assert.isTrue(c2.ok)
- })
-
- it('returns false when overflowing', async () => {
- const c1 = await math.add(int256Max, 1)
- const c2 = await math.add(1, int256Max)
-
- bigNumEquals(0, c1.result)
- bigNumEquals(0, c2.result)
- assert.isFalse(c1.ok)
- assert.isFalse(c2.ok)
- })
-
- it('returns false when underflowing', async () => {
- const c1 = await math.add(int256Min, -1)
- const c2 = await math.add(-1, int256Min)
-
- bigNumEquals(0, c1.result)
- bigNumEquals(0, c2.result)
- assert.isFalse(c1.ok)
- assert.isFalse(c2.ok)
- })
- })
-
- describe('#sub', () => {
- const a = BigNumber.from('1234')
- const b = BigNumber.from('5678')
-
- it('subtracts correctly if it does not overflow and the result is negative', async () => {
- const c = await math.sub(a, b)
- const expected = a.sub(b)
-
- bigNumEquals(expected, c.result)
- assert.isTrue(c.ok)
- })
-
- it('subtracts correctly if it does not overflow and the result is positive', async () => {
- const c = await math.sub(b, a)
- const expected = b.sub(a)
-
- bigNumEquals(expected, c.result)
- assert.isTrue(c.ok)
- })
-
- it('returns false on overflow', async () => {
- const c = await math.sub(int256Max, -1)
-
- bigNumEquals(0, c.result)
- assert.isFalse(c.ok)
- })
-
- it('returns false on underflow', async () => {
- const c = await math.sub(int256Min, 1)
-
- bigNumEquals(0, c.result)
- assert.isFalse(c.ok)
- })
- })
-
- describe('#mul', () => {
- const a = BigNumber.from('5678')
- const b = BigNumber.from('-1234')
-
- it('is commutative', async () => {
- const c1 = await math.mul(a, b)
- const c2 = await math.mul(b, a)
-
- bigNumEquals(c1.result, c2.result)
- assert.isTrue(c1.ok)
- assert.isTrue(c2.ok)
- })
-
- it('multiplies by 0 correctly', async () => {
- const c = await math.mul(a, 0)
-
- bigNumEquals(0, c.result)
- assert.isTrue(c.ok)
- })
-
- it('returns false on multiplication overflow', async () => {
- const c = await math.mul(int256Max, 2)
-
- bigNumEquals(0, c.result)
- assert.isFalse(c.ok)
- })
-
- it('returns false when the integer minimum is negated', async () => {
- const c = await math.mul(int256Min, -1)
-
- bigNumEquals(0, c.result)
- assert.isFalse(c.ok)
- })
- })
-
- describe('#div', () => {
- const a = BigNumber.from('5678')
- const b = BigNumber.from('-5678')
-
- it('divides correctly', async () => {
- const c = await math.div(a, b)
-
- bigNumEquals(a.div(b), c.result)
- assert.isTrue(c.ok)
- })
-
- it('divides a 0 numerator correctly', async () => {
- const c = await math.div(0, a)
-
- bigNumEquals(0, c.result)
- assert.isTrue(c.ok)
- })
-
- it('returns complete number result on non-even division', async () => {
- const c = await math.div(7000, 5678)
-
- bigNumEquals(1, c.result)
- assert.isTrue(c.ok)
- })
-
- it('reverts when 0 is the denominator', async () => {
- const c = await math.div(a, 0)
-
- bigNumEquals(0, c.result)
- assert.isFalse(c.ok)
- })
-
- it('reverts on underflow with a negative denominator', async () => {
- const c = await math.div(int256Min, -1)
-
- bigNumEquals(0, c.result)
- assert.isFalse(c.ok)
- })
- })
-})
diff --git a/contracts/test/v0.6/DeviationFlaggingValidator.test.ts b/contracts/test/v0.6/DeviationFlaggingValidator.test.ts
deleted file mode 100644
index f79a8c7aa47..00000000000
--- a/contracts/test/v0.6/DeviationFlaggingValidator.test.ts
+++ /dev/null
@@ -1,296 +0,0 @@
-import { ethers } from 'hardhat'
-import { publicAbi } from '../test-helpers/helpers'
-import { assert, expect } from 'chai'
-import { BigNumber, Contract, ContractFactory } from 'ethers'
-import { Personas, getUsers } from '../test-helpers/setup'
-import { bigNumEquals } from '../test-helpers/matchers'
-
-let personas: Personas
-let validatorFactory: ContractFactory
-let flagsFactory: ContractFactory
-let acFactory: ContractFactory
-
-before(async () => {
- personas = (await getUsers()).personas
- validatorFactory = await ethers.getContractFactory(
- 'src/v0.6/DeviationFlaggingValidator.sol:DeviationFlaggingValidator',
- personas.Carol,
- )
- flagsFactory = await ethers.getContractFactory(
- 'src/v0.6/Flags.sol:Flags',
- personas.Carol,
- )
- acFactory = await ethers.getContractFactory(
- 'src/v0.6/SimpleWriteAccessController.sol:SimpleWriteAccessController',
- personas.Carol,
- )
-})
-
-describe('DeviationFlaggingValidator', () => {
- let validator: Contract
- let flags: Contract
- let ac: Contract
- const flaggingThreshold = 10000 // 10%
- const previousRoundId = 2
- const previousValue = 1000000
- const currentRoundId = 3
- const currentValue = 1000000
-
- beforeEach(async () => {
- ac = await acFactory.connect(personas.Carol).deploy()
- flags = await flagsFactory.connect(personas.Carol).deploy(ac.address)
- validator = await validatorFactory
- .connect(personas.Carol)
- .deploy(flags.address, flaggingThreshold)
- await ac.connect(personas.Carol).addAccess(validator.address)
- })
-
- it('has a limited public interface [ @skip-coverage ]', () => {
- publicAbi(validator, [
- 'THRESHOLD_MULTIPLIER',
- 'flaggingThreshold',
- 'flags',
- 'isValid',
- 'setFlagsAddress',
- 'setFlaggingThreshold',
- 'validate',
- // Owned methods:
- 'acceptOwnership',
- 'owner',
- 'transferOwnership',
- ])
- })
-
- describe('#constructor', () => {
- it('sets the arguments passed in', async () => {
- assert.equal(flags.address, await validator.flags())
- bigNumEquals(flaggingThreshold, await validator.flaggingThreshold())
- })
- })
-
- describe('#validate', () => {
- describe('when the deviation is greater than the threshold', () => {
- const currentValue = 1100010
-
- it('does raises a flag for the calling address', async () => {
- await expect(
- validator
- .connect(personas.Nelly)
- .validate(
- previousRoundId,
- previousValue,
- currentRoundId,
- currentValue,
- ),
- )
- .to.emit(flags, 'FlagRaised')
- .withArgs(await personas.Nelly.getAddress())
- })
-
- it('uses less than the gas allotted by the aggregator', async () => {
- const tx = await validator
- .connect(personas.Nelly)
- .validate(
- previousRoundId,
- previousValue,
- currentRoundId,
- currentValue,
- )
- const receipt = await tx.wait()
- assert(receipt)
- if (receipt && receipt.gasUsed) {
- assert.isAbove(receipt.gasUsed.toNumber(), 60000)
- }
- })
- })
-
- describe('when the deviation is less than or equal to the threshold', () => {
- const currentValue = 1100009
-
- it('does raises a flag for the calling address', async () => {
- await expect(
- validator
- .connect(personas.Nelly)
- .validate(
- previousRoundId,
- previousValue,
- currentRoundId,
- currentValue,
- ),
- ).to.not.emit(flags, 'FlagRaised')
- })
-
- it('uses less than the gas allotted by the aggregator', async () => {
- const tx = await validator
- .connect(personas.Nelly)
- .validate(
- previousRoundId,
- previousValue,
- currentRoundId,
- currentValue,
- )
- const receipt = await tx.wait()
- assert(receipt)
- if (receipt && receipt.gasUsed) {
- assert.isAbove(receipt.gasUsed.toNumber(), 24000)
- }
- })
- })
-
- describe('when called with a previous value of zero', () => {
- const previousValue = 0
-
- it('does not raise any flags', async () => {
- const tx = await validator
- .connect(personas.Nelly)
- .validate(
- previousRoundId,
- previousValue,
- currentRoundId,
- currentValue,
- )
- const receipt = await tx.wait()
- assert.equal(0, receipt.events?.length)
- })
- })
- })
-
- describe('#isValid', () => {
- const previousValue = 1000000
-
- describe('with a validation larger than the deviation', () => {
- const currentValue = 1100010
- it('is not valid', async () => {
- assert.isFalse(
- await validator.isValid(0, previousValue, 1, currentValue),
- )
- })
- })
-
- describe('with a validation smaller than the deviation', () => {
- const currentValue = 1100009
- it('is valid', async () => {
- assert.isTrue(
- await validator.isValid(0, previousValue, 1, currentValue),
- )
- })
- })
-
- describe('with positive previous and negative current', () => {
- const previousValue = 1000000
- const currentValue = -900000
- it('correctly detects the difference', async () => {
- assert.isFalse(
- await validator.isValid(0, previousValue, 1, currentValue),
- )
- })
- })
-
- describe('with negative previous and positive current', () => {
- const previousValue = -900000
- const currentValue = 1000000
- it('correctly detects the difference', async () => {
- assert.isFalse(
- await validator.isValid(0, previousValue, 1, currentValue),
- )
- })
- })
-
- describe('when the difference overflows', () => {
- const previousValue = BigNumber.from(2).pow(255).sub(1)
- const currentValue = BigNumber.from(-1)
-
- it('does not revert and returns false', async () => {
- assert.isFalse(
- await validator.isValid(0, previousValue, 1, currentValue),
- )
- })
- })
-
- describe('when the rounding overflows', () => {
- const previousValue = BigNumber.from(2).pow(255).div(10000)
- const currentValue = BigNumber.from(1)
-
- it('does not revert and returns false', async () => {
- assert.isFalse(
- await validator.isValid(0, previousValue, 1, currentValue),
- )
- })
- })
-
- describe('when the division overflows', () => {
- const previousValue = BigNumber.from(2).pow(255).sub(1)
- const currentValue = BigNumber.from(-1)
-
- it('does not revert and returns false', async () => {
- assert.isFalse(
- await validator.isValid(0, previousValue, 1, currentValue),
- )
- })
- })
- })
-
- describe('#setFlaggingThreshold', () => {
- const newThreshold = 777
-
- it('changes the flagging thresold', async () => {
- assert.equal(flaggingThreshold, await validator.flaggingThreshold())
-
- await validator.connect(personas.Carol).setFlaggingThreshold(newThreshold)
-
- assert.equal(newThreshold, await validator.flaggingThreshold())
- })
-
- it('emits a log event only when actually changed', async () => {
- await expect(
- validator.connect(personas.Carol).setFlaggingThreshold(newThreshold),
- )
- .to.emit(validator, 'FlaggingThresholdUpdated')
- .withArgs(flaggingThreshold, newThreshold)
-
- await expect(
- validator.connect(personas.Carol).setFlaggingThreshold(newThreshold),
- ).to.not.emit(validator, 'FlaggingThresholdUpdated')
- })
-
- describe('when called by a non-owner', () => {
- it('reverts', async () => {
- await expect(
- validator.connect(personas.Neil).setFlaggingThreshold(newThreshold),
- ).to.be.revertedWith('Only callable by owner')
- })
- })
- })
-
- describe('#setFlagsAddress', () => {
- const newFlagsAddress = '0x0123456789012345678901234567890123456789'
-
- it('changes the flags address', async () => {
- assert.equal(flags.address, await validator.flags())
-
- await validator.connect(personas.Carol).setFlagsAddress(newFlagsAddress)
-
- assert.equal(newFlagsAddress, await validator.flags())
- })
-
- it('emits a log event only when actually changed', async () => {
- await expect(
- validator.connect(personas.Carol).setFlagsAddress(newFlagsAddress),
- )
- .to.emit(validator, 'FlagsAddressUpdated')
- .withArgs(flags.address, newFlagsAddress)
-
- await expect(
- validator.connect(personas.Carol).setFlagsAddress(newFlagsAddress),
- ).to.not.emit(validator, 'FlagsAddressUpdated')
- })
-
- describe('when called by a non-owner', () => {
- it('reverts', async () => {
- await expect(
- validator.connect(personas.Neil).setFlagsAddress(newFlagsAddress),
- ).to.be.revertedWith('Only callable by owner')
- })
- })
- })
-})
diff --git a/contracts/test/v0.6/Flags.test.ts b/contracts/test/v0.6/Flags.test.ts
deleted file mode 100644
index 8f589184299..00000000000
--- a/contracts/test/v0.6/Flags.test.ts
+++ /dev/null
@@ -1,405 +0,0 @@
-import { ethers } from 'hardhat'
-import { publicAbi } from '../test-helpers/helpers'
-import { assert, expect } from 'chai'
-import { Contract, ContractFactory } from 'ethers'
-import { Personas, getUsers } from '../test-helpers/setup'
-
-let personas: Personas
-
-let controllerFactory: ContractFactory
-let flagsFactory: ContractFactory
-let consumerFactory: ContractFactory
-
-let controller: Contract
-let flags: Contract
-let consumer: Contract
-
-before(async () => {
- personas = (await getUsers()).personas
- controllerFactory = await ethers.getContractFactory(
- 'src/v0.6/SimpleWriteAccessController.sol:SimpleWriteAccessController',
- personas.Nelly,
- )
- consumerFactory = await ethers.getContractFactory(
- 'src/v0.6/tests/FlagsTestHelper.sol:FlagsTestHelper',
- personas.Nelly,
- )
- flagsFactory = await ethers.getContractFactory(
- 'src/v0.6/Flags.sol:Flags',
- personas.Nelly,
- )
-})
-
-describe('Flags', () => {
- beforeEach(async () => {
- controller = await controllerFactory.deploy()
- flags = await flagsFactory.deploy(controller.address)
- await flags.disableAccessCheck()
- consumer = await consumerFactory.deploy(flags.address)
- })
-
- it('has a limited public interface [ @skip-coverage ]', async () => {
- publicAbi(flags, [
- 'getFlag',
- 'getFlags',
- 'lowerFlags',
- 'raiseFlag',
- 'raiseFlags',
- 'raisingAccessController',
- 'setRaisingAccessController',
- // Ownable methods:
- 'acceptOwnership',
- 'owner',
- 'transferOwnership',
- // AccessControl methods:
- 'addAccess',
- 'disableAccessCheck',
- 'enableAccessCheck',
- 'removeAccess',
- 'checkEnabled',
- 'hasAccess',
- ])
- })
-
- describe('#raiseFlag', () => {
- describe('when called by the owner', () => {
- it('updates the warning flag', async () => {
- assert.equal(false, await flags.getFlag(consumer.address))
-
- await flags.connect(personas.Nelly).raiseFlag(consumer.address)
-
- assert.equal(true, await flags.getFlag(consumer.address))
- })
-
- it('emits an event log', async () => {
- await expect(flags.connect(personas.Nelly).raiseFlag(consumer.address))
- .to.emit(flags, 'FlagRaised')
- .withArgs(consumer.address)
- })
-
- describe('if a flag has already been raised', () => {
- beforeEach(async () => {
- await flags.connect(personas.Nelly).raiseFlag(consumer.address)
- })
-
- it('emits an event log', async () => {
- const tx = await flags
- .connect(personas.Nelly)
- .raiseFlag(consumer.address)
- const receipt = await tx.wait()
- assert.equal(0, receipt.events?.length)
- })
- })
- })
-
- describe('when called by an enabled setter', () => {
- beforeEach(async () => {
- await controller
- .connect(personas.Nelly)
- .addAccess(await personas.Neil.getAddress())
- })
-
- it('sets the flags', async () => {
- await flags.connect(personas.Neil).raiseFlag(consumer.address),
- assert.equal(true, await flags.getFlag(consumer.address))
- })
- })
-
- describe('when called by a non-enabled setter', () => {
- it('reverts', async () => {
- await expect(
- flags.connect(personas.Neil).raiseFlag(consumer.address),
- ).to.be.revertedWith('Not allowed to raise flags')
- })
- })
-
- describe('when called when there is no raisingAccessController', () => {
- beforeEach(async () => {
- await expect(
- flags
- .connect(personas.Nelly)
- .setRaisingAccessController(
- '0x0000000000000000000000000000000000000000',
- ),
- ).to.emit(flags, 'RaisingAccessControllerUpdated')
- assert.equal(
- '0x0000000000000000000000000000000000000000',
- await flags.raisingAccessController(),
- )
- })
-
- it('succeeds for the owner', async () => {
- await flags.connect(personas.Nelly).raiseFlag(consumer.address)
- assert.equal(true, await flags.getFlag(consumer.address))
- })
-
- it('reverts for non-owner', async () => {
- await expect(flags.connect(personas.Neil).raiseFlag(consumer.address))
- .to.be.reverted
- })
- })
- })
-
- describe('#raiseFlags', () => {
- describe('when called by the owner', () => {
- it('updates the warning flag', async () => {
- assert.equal(false, await flags.getFlag(consumer.address))
-
- await flags.connect(personas.Nelly).raiseFlags([consumer.address])
-
- assert.equal(true, await flags.getFlag(consumer.address))
- })
-
- it('emits an event log', async () => {
- await expect(
- flags.connect(personas.Nelly).raiseFlags([consumer.address]),
- )
- .to.emit(flags, 'FlagRaised')
- .withArgs(consumer.address)
- })
-
- describe('if a flag has already been raised', () => {
- beforeEach(async () => {
- await flags.connect(personas.Nelly).raiseFlags([consumer.address])
- })
-
- it('emits an event log', async () => {
- const tx = await flags
- .connect(personas.Nelly)
- .raiseFlags([consumer.address])
- const receipt = await tx.wait()
- assert.equal(0, receipt.events?.length)
- })
- })
- })
-
- describe('when called by an enabled setter', () => {
- beforeEach(async () => {
- await controller
- .connect(personas.Nelly)
- .addAccess(await personas.Neil.getAddress())
- })
-
- it('sets the flags', async () => {
- await flags.connect(personas.Neil).raiseFlags([consumer.address]),
- assert.equal(true, await flags.getFlag(consumer.address))
- })
- })
-
- describe('when called by a non-enabled setter', () => {
- it('reverts', async () => {
- await expect(
- flags.connect(personas.Neil).raiseFlags([consumer.address]),
- ).to.be.revertedWith('Not allowed to raise flags')
- })
- })
-
- describe('when called when there is no raisingAccessController', () => {
- beforeEach(async () => {
- await expect(
- flags
- .connect(personas.Nelly)
- .setRaisingAccessController(
- '0x0000000000000000000000000000000000000000',
- ),
- ).to.emit(flags, 'RaisingAccessControllerUpdated')
-
- assert.equal(
- '0x0000000000000000000000000000000000000000',
- await flags.raisingAccessController(),
- )
- })
-
- it('succeeds for the owner', async () => {
- await flags.connect(personas.Nelly).raiseFlags([consumer.address])
- assert.equal(true, await flags.getFlag(consumer.address))
- })
-
- it('reverts for non-owners', async () => {
- await expect(
- flags.connect(personas.Neil).raiseFlags([consumer.address]),
- ).to.be.reverted
- })
- })
- })
-
- describe('#lowerFlags', () => {
- beforeEach(async () => {
- await flags.connect(personas.Nelly).raiseFlags([consumer.address])
- })
-
- describe('when called by the owner', () => {
- it('updates the warning flag', async () => {
- assert.equal(true, await flags.getFlag(consumer.address))
-
- await flags.connect(personas.Nelly).lowerFlags([consumer.address])
-
- assert.equal(false, await flags.getFlag(consumer.address))
- })
-
- it('emits an event log', async () => {
- await expect(
- flags.connect(personas.Nelly).lowerFlags([consumer.address]),
- )
- .to.emit(flags, 'FlagLowered')
- .withArgs(consumer.address)
- })
-
- describe('if a flag has already been raised', () => {
- beforeEach(async () => {
- await flags.connect(personas.Nelly).lowerFlags([consumer.address])
- })
-
- it('emits an event log', async () => {
- const tx = await flags
- .connect(personas.Nelly)
- .lowerFlags([consumer.address])
- const receipt = await tx.wait()
- assert.equal(0, receipt.events?.length)
- })
- })
- })
-
- describe('when called by a non-owner', () => {
- it('reverts', async () => {
- await expect(
- flags.connect(personas.Neil).lowerFlags([consumer.address]),
- ).to.be.revertedWith('Only callable by owner')
- })
- })
- })
-
- describe('#getFlag', () => {
- describe('if the access control is turned on', () => {
- beforeEach(async () => {
- await flags.connect(personas.Nelly).enableAccessCheck()
- })
-
- it('reverts', async () => {
- await expect(consumer.getFlag(consumer.address)).to.be.revertedWith(
- 'No access',
- )
- })
-
- describe('if access is granted to the address', () => {
- beforeEach(async () => {
- await flags.connect(personas.Nelly).addAccess(consumer.address)
- })
-
- it('does not revert', async () => {
- await consumer.getFlag(consumer.address)
- })
- })
- })
-
- describe('if the access control is turned off', () => {
- beforeEach(async () => {
- await flags.connect(personas.Nelly).disableAccessCheck()
- })
-
- it('does not revert', async () => {
- await consumer.getFlag(consumer.address)
- })
-
- describe('if access is granted to the address', () => {
- beforeEach(async () => {
- await flags.connect(personas.Nelly).addAccess(consumer.address)
- })
-
- it('does not revert', async () => {
- await consumer.getFlag(consumer.address)
- })
- })
- })
- })
-
- describe('#getFlags', () => {
- beforeEach(async () => {
- await flags.connect(personas.Nelly).disableAccessCheck()
- await flags
- .connect(personas.Nelly)
- .raiseFlags([
- await personas.Neil.getAddress(),
- await personas.Norbert.getAddress(),
- ])
- })
-
- it('respects the access controls of #getFlag', async () => {
- await flags.connect(personas.Nelly).enableAccessCheck()
-
- await expect(consumer.getFlag(consumer.address)).to.be.revertedWith(
- 'No access',
- )
-
- await flags.connect(personas.Nelly).addAccess(consumer.address)
-
- await consumer.getFlag(consumer.address)
- })
-
- it('returns the flags in the order they are requested', async () => {
- const response = await consumer.getFlags([
- await personas.Nelly.getAddress(),
- await personas.Neil.getAddress(),
- await personas.Ned.getAddress(),
- await personas.Norbert.getAddress(),
- ])
-
- assert.deepEqual([false, true, false, true], response)
- })
- })
-
- describe('#setRaisingAccessController', () => {
- let controller2: Contract
-
- beforeEach(async () => {
- controller2 = await controllerFactory.connect(personas.Nelly).deploy()
- await controller2.connect(personas.Nelly).enableAccessCheck()
- })
-
- it('updates access control rules', async () => {
- const neilAddress = await personas.Neil.getAddress()
- await controller.connect(personas.Nelly).addAccess(neilAddress)
- await flags.connect(personas.Neil).raiseFlags([consumer.address]) // doesn't raise
-
- await flags
- .connect(personas.Nelly)
- .setRaisingAccessController(controller2.address)
-
- await expect(
- flags.connect(personas.Neil).raiseFlags([consumer.address]),
- ).to.be.revertedWith('Not allowed to raise flags')
- })
-
- it('emits a log announcing the change', async () => {
- await expect(
- flags
- .connect(personas.Nelly)
- .setRaisingAccessController(controller2.address),
- )
- .to.emit(flags, 'RaisingAccessControllerUpdated')
- .withArgs(controller.address, controller2.address)
- })
-
- it('does not emit a log when there is no change', async () => {
- await flags
- .connect(personas.Nelly)
- .setRaisingAccessController(controller2.address)
-
- await expect(
- flags
- .connect(personas.Nelly)
- .setRaisingAccessController(controller2.address),
- ).to.not.emit(flags, 'RaisingAccessControllerUpdated')
- })
-
- describe('when called by a non-owner', () => {
- it('reverts', async () => {
- await expect(
- flags
- .connect(personas.Neil)
- .setRaisingAccessController(controller2.address),
- ).to.be.revertedWith('Only callable by owner')
- })
- })
- })
-})
diff --git a/contracts/test/v0.6/FluxAggregator.test.ts b/contracts/test/v0.6/FluxAggregator.test.ts
deleted file mode 100644
index 5a268ceebe9..00000000000
--- a/contracts/test/v0.6/FluxAggregator.test.ts
+++ /dev/null
@@ -1,3252 +0,0 @@
-import { ethers } from 'hardhat'
-import { assert, expect } from 'chai'
-import {
- Signer,
- Contract,
- ContractFactory,
- BigNumber,
- BigNumberish,
- ContractTransaction,
- constants,
-} from 'ethers'
-import { Personas, getUsers } from '../test-helpers/setup'
-import { bigNumEquals, evmRevert } from '../test-helpers/matchers'
-import {
- publicAbi,
- toWei,
- increaseTimeBy,
- mineBlock,
- evmWordToAddress,
-} from '../test-helpers/helpers'
-import { randomBytes } from '@ethersproject/random'
-import { fail } from 'assert'
-
-let personas: Personas
-let linkTokenFactory: ContractFactory
-let fluxAggregatorFactory: ContractFactory
-let validatorMockFactory: ContractFactory
-let testHelperFactory: ContractFactory
-let validatorFactory: ContractFactory
-let flagsFactory: ContractFactory
-let acFactory: ContractFactory
-let gasGuzzlerFactory: ContractFactory
-let emptyAddress: string
-
-before(async () => {
- personas = (await getUsers()).personas
- linkTokenFactory = await ethers.getContractFactory(
- 'src/v0.4/LinkToken.sol:LinkToken',
- )
- fluxAggregatorFactory = await ethers.getContractFactory(
- 'src/v0.6/FluxAggregator.sol:FluxAggregator',
- )
- validatorMockFactory = await ethers.getContractFactory(
- 'src/v0.6/tests/AggregatorValidatorMock.sol:AggregatorValidatorMock',
- )
- testHelperFactory = await ethers.getContractFactory(
- 'src/v0.6/tests/FluxAggregatorTestHelper.sol:FluxAggregatorTestHelper',
- )
- validatorFactory = await ethers.getContractFactory(
- 'src/v0.6/DeviationFlaggingValidator.sol:DeviationFlaggingValidator',
- )
- flagsFactory = await ethers.getContractFactory('src/v0.6/Flags.sol:Flags')
- acFactory = await ethers.getContractFactory(
- 'src/v0.6/SimpleWriteAccessController.sol:SimpleWriteAccessController',
- )
- gasGuzzlerFactory = await ethers.getContractFactory(
- 'src/v0.6/tests/GasGuzzler.sol:GasGuzzler',
- )
- emptyAddress = constants.AddressZero
-})
-
-describe('FluxAggregator', () => {
- const paymentAmount = toWei('3')
- const deposit = toWei('100')
- const answer = 100
- const minAns = 1
- const maxAns = 1
- const rrDelay = 0
- const timeout = 1800
- const decimals = 18
- const description = 'LINK/USD'
- const reserveRounds = 2
- const minSubmissionValue = BigNumber.from('1')
- const maxSubmissionValue = BigNumber.from('100000000000000000000')
-
- let aggregator: Contract
- let link: Contract
- let testHelper: Contract
- let validator: Contract
- let gasGuzzler: Contract
- let nextRound: number
- let oracles: Signer[]
-
- async function updateFutureRounds(
- aggregator: Contract,
- overrides: {
- minAnswers?: BigNumberish
- maxAnswers?: BigNumberish
- payment?: BigNumberish
- restartDelay?: BigNumberish
- timeout?: BigNumberish
- } = {},
- ) {
- overrides = overrides || {}
- const round = {
- payment: overrides.payment || paymentAmount,
- minAnswers: overrides.minAnswers || minAns,
- maxAnswers: overrides.maxAnswers || maxAns,
- restartDelay: overrides.restartDelay || rrDelay,
- timeout: overrides.timeout || timeout,
- }
-
- return aggregator.updateFutureRounds(
- round.payment,
- round.minAnswers,
- round.maxAnswers,
- round.restartDelay,
- round.timeout,
- )
- }
-
- async function addOracles(
- aggregator: Contract,
- oraclesAndAdmin: Signer[],
- minAnswers: number,
- maxAnswers: number,
- restartDelay: number,
- ): Promise {
- return aggregator.connect(personas.Carol).changeOracles(
- [],
- oraclesAndAdmin.map(async (oracle) => await oracle.getAddress()),
- oraclesAndAdmin.map(async (admin) => await admin.getAddress()),
- minAnswers,
- maxAnswers,
- restartDelay,
- )
- }
-
- async function advanceRound(
- aggregator: Contract,
- submitters: Signer[],
- currentSubmission: number = answer,
- ): Promise {
- for (const submitter of submitters) {
- await aggregator.connect(submitter).submit(nextRound, currentSubmission)
- }
- nextRound++
- return nextRound
- }
-
- const ShouldBeSet = 'expects it to be different'
- const ShouldNotBeSet = 'expects it to equal'
- let startingState: any
-
- async function checkOracleRoundState(
- state: any,
- want: {
- eligibleToSubmit: boolean
- roundId: BigNumberish
- latestSubmission: BigNumberish
- startedAt: string
- timeout: BigNumberish
- availableFunds: BigNumberish
- oracleCount: BigNumberish
- paymentAmount: BigNumberish
- },
- ) {
- assert.equal(
- want.eligibleToSubmit,
- state._eligibleToSubmit,
- 'round state: unexecpted eligibility',
- )
- bigNumEquals(
- want.roundId,
- state._roundId,
- 'round state: unexpected Round ID',
- )
- bigNumEquals(
- want.latestSubmission,
- state._latestSubmission,
- 'round state: unexpected latest submission',
- )
- if (want.startedAt === ShouldBeSet) {
- assert.isAbove(
- state._startedAt.toNumber(),
- startingState._startedAt.toNumber(),
- 'round state: expected the started at to be the same as previous',
- )
- } else {
- bigNumEquals(
- 0,
- state._startedAt,
- 'round state: expected the started at not to be updated',
- )
- }
- bigNumEquals(
- want.timeout,
- state._timeout.toNumber(),
- 'round state: unexepcted timeout',
- )
- bigNumEquals(
- want.availableFunds,
- state._availableFunds,
- 'round state: unexepected funds',
- )
- bigNumEquals(
- want.oracleCount,
- state._oracleCount,
- 'round state: unexpected oracle count',
- )
- bigNumEquals(
- want.paymentAmount,
- state._paymentAmount,
- 'round state: unexpected paymentamount',
- )
- }
-
- beforeEach(async () => {
- link = await linkTokenFactory.connect(personas.Default).deploy()
- aggregator = await fluxAggregatorFactory
- .connect(personas.Carol)
- .deploy(
- link.address,
- paymentAmount,
- timeout,
- emptyAddress,
- minSubmissionValue,
- maxSubmissionValue,
- decimals,
- ethers.utils.formatBytes32String(description),
- )
- await link.transfer(aggregator.address, deposit)
- await aggregator.updateAvailableFunds()
- bigNumEquals(deposit, await link.balanceOf(aggregator.address))
- nextRound = 1
- })
-
- it('has a limited public interface [ @skip-coverage ]', () => {
- publicAbi(aggregator, [
- 'acceptAdmin',
- 'allocatedFunds',
- 'availableFunds',
- 'changeOracles',
- 'decimals',
- 'description',
- 'getAdmin',
- 'getAnswer',
- 'getOracles',
- 'getRoundData',
- 'getTimestamp',
- 'latestAnswer',
- 'latestRound',
- 'latestRoundData',
- 'latestTimestamp',
- 'linkToken',
- 'maxSubmissionCount',
- 'maxSubmissionValue',
- 'minSubmissionCount',
- 'minSubmissionValue',
- 'onTokenTransfer',
- 'oracleCount',
- 'oracleRoundState',
- 'paymentAmount',
- 'requestNewRound',
- 'restartDelay',
- 'setRequesterPermissions',
- 'setValidator',
- 'submit',
- 'timeout',
- 'transferAdmin',
- 'updateAvailableFunds',
- 'updateFutureRounds',
- 'withdrawFunds',
- 'withdrawPayment',
- 'withdrawablePayment',
- 'validator',
- 'version',
- // Owned methods:
- 'acceptOwnership',
- 'owner',
- 'transferOwnership',
- ])
- })
-
- describe('#constructor', () => {
- it('sets the paymentAmount', async () => {
- bigNumEquals(
- BigNumber.from(paymentAmount),
- await aggregator.paymentAmount(),
- )
- })
-
- it('sets the timeout', async () => {
- bigNumEquals(BigNumber.from(timeout), await aggregator.timeout())
- })
-
- it('sets the decimals', async () => {
- bigNumEquals(BigNumber.from(decimals), await aggregator.decimals())
- })
-
- it('sets the description', async () => {
- assert.equal(
- ethers.utils.formatBytes32String(description),
- await aggregator.description(),
- )
- })
-
- it('sets the version to 3', async () => {
- bigNumEquals(3, await aggregator.version())
- })
-
- it('sets the validator', async () => {
- assert.equal(emptyAddress, await aggregator.validator())
- })
- })
-
- describe('#submit', () => {
- let minMax
-
- beforeEach(async () => {
- oracles = [personas.Neil, personas.Ned, personas.Nelly]
- minMax = oracles.length
- await addOracles(aggregator, oracles, minMax, minMax, rrDelay)
- })
-
- it('updates the allocated and available funds counters', async () => {
- bigNumEquals(0, await aggregator.allocatedFunds())
-
- const tx = await aggregator
- .connect(personas.Neil)
- .submit(nextRound, answer)
- const receipt = await tx.wait()
-
- bigNumEquals(paymentAmount, await aggregator.allocatedFunds())
- const expectedAvailable = deposit.sub(paymentAmount)
- bigNumEquals(expectedAvailable, await aggregator.availableFunds())
- const logged = BigNumber.from(
- receipt.logs?.[2].topics[1] ?? BigNumber.from(-1),
- )
- bigNumEquals(expectedAvailable, logged)
- })
-
- it('emits a log event announcing submission details', async () => {
- await expect(aggregator.connect(personas.Nelly).submit(nextRound, answer))
- .to.emit(aggregator, 'SubmissionReceived')
- .withArgs(answer, nextRound, await personas.Nelly.getAddress())
- })
-
- describe('when the minimum oracles have not reported', () => {
- it('pays the oracles that have reported', async () => {
- bigNumEquals(
- 0,
- await aggregator
- .connect(personas.Neil)
- .withdrawablePayment(await personas.Neil.getAddress()),
- )
-
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
-
- bigNumEquals(
- paymentAmount,
- await aggregator
- .connect(personas.Neil)
- .withdrawablePayment(await personas.Neil.getAddress()),
- )
- bigNumEquals(
- 0,
- await aggregator
- .connect(personas.Ned)
- .withdrawablePayment(await personas.Ned.getAddress()),
- )
- bigNumEquals(
- 0,
- await aggregator
- .connect(personas.Nelly)
- .withdrawablePayment(await personas.Nelly.getAddress()),
- )
- })
-
- it('does not update the answer', async () => {
- bigNumEquals(ethers.constants.Zero, await aggregator.latestAnswer())
-
- // Not updated because of changes by the owner setting minSubmissionCount to 3
- await aggregator.connect(personas.Ned).submit(nextRound, answer)
- await aggregator.connect(personas.Nelly).submit(nextRound, answer)
-
- bigNumEquals(ethers.constants.Zero, await aggregator.latestAnswer())
- })
- })
-
- describe('when an oracle prematurely bumps the round', () => {
- beforeEach(async () => {
- await updateFutureRounds(aggregator, { minAnswers: 2, maxAnswers: 3 })
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
- })
-
- it('reverts', async () => {
- await evmRevert(
- aggregator.connect(personas.Neil).submit(nextRound + 1, answer),
- 'previous round not supersedable',
- )
- })
- })
-
- describe('when the minimum number of oracles have reported', () => {
- beforeEach(async () => {
- await updateFutureRounds(aggregator, { minAnswers: 2, maxAnswers: 3 })
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
- })
-
- it('updates the answer with the median', async () => {
- bigNumEquals(0, await aggregator.latestAnswer())
-
- await aggregator.connect(personas.Ned).submit(nextRound, 99)
- bigNumEquals(99, await aggregator.latestAnswer()) // ((100+99) / 2).to_i
-
- await aggregator.connect(personas.Nelly).submit(nextRound, 101)
-
- bigNumEquals(100, await aggregator.latestAnswer())
- })
-
- it('updates the updated timestamp', async () => {
- const originalTimestamp = await aggregator.latestTimestamp()
- assert.isAbove(originalTimestamp.toNumber(), 0)
-
- await aggregator.connect(personas.Nelly).submit(nextRound, answer)
-
- const currentTimestamp = await aggregator.latestTimestamp()
- assert.isAbove(
- currentTimestamp.toNumber(),
- originalTimestamp.toNumber(),
- )
- })
-
- it('announces the new answer with a log event', async () => {
- const tx = await aggregator
- .connect(personas.Nelly)
- .submit(nextRound, answer)
- const receipt = await tx.wait()
-
- const newAnswer = BigNumber.from(
- receipt.logs?.[0].topics[1] ?? ethers.constants.Zero,
- )
-
- assert.equal(answer, newAnswer.toNumber())
- })
-
- it('does not set the timedout flag', async () => {
- evmRevert(aggregator.getRoundData(nextRound), 'No data present')
-
- await aggregator.connect(personas.Nelly).submit(nextRound, answer)
-
- const round = await aggregator.getRoundData(nextRound)
- assert.equal(nextRound, round.answeredInRound.toNumber())
- })
-
- it('updates the round details', async () => {
- evmRevert(aggregator.latestRoundData(), 'No data present')
-
- increaseTimeBy(15, ethers.provider)
- await aggregator.connect(personas.Nelly).submit(nextRound, answer)
-
- const roundAfter = await aggregator.getRoundData(nextRound)
- bigNumEquals(nextRound, roundAfter.roundId)
- bigNumEquals(answer, roundAfter.answer)
- assert.isFalse(roundAfter.startedAt.isZero())
- bigNumEquals(
- await aggregator.getTimestamp(nextRound),
- roundAfter.updatedAt,
- )
- bigNumEquals(nextRound, roundAfter.answeredInRound)
-
- assert.isBelow(
- roundAfter.startedAt.toNumber(),
- roundAfter.updatedAt.toNumber(),
- )
-
- const roundAfterLatest = await aggregator.latestRoundData()
- bigNumEquals(roundAfter.roundId, roundAfterLatest.roundId)
- bigNumEquals(roundAfter.answer, roundAfterLatest.answer)
- bigNumEquals(roundAfter.startedAt, roundAfterLatest.startedAt)
- bigNumEquals(roundAfter.updatedAt, roundAfterLatest.updatedAt)
- bigNumEquals(
- roundAfter.answeredInRound,
- roundAfterLatest.answeredInRound,
- )
- })
- })
-
- describe('when an oracle submits for a round twice', () => {
- it('reverts', async () => {
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
-
- await evmRevert(
- aggregator.connect(personas.Neil).submit(nextRound, answer),
- 'cannot report on previous rounds',
- )
- })
- })
-
- describe('when updated after the max answers submitted', () => {
- beforeEach(async () => {
- await updateFutureRounds(aggregator)
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
- })
-
- it('reverts', async () => {
- await evmRevert(
- aggregator.connect(personas.Ned).submit(nextRound, answer),
- 'round not accepting submissions',
- )
- })
- })
-
- describe('when a new highest round number is passed in', () => {
- it('increments the answer round', async () => {
- const startingState = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 0,
- )
- bigNumEquals(1, startingState._roundId)
-
- await advanceRound(aggregator, oracles)
-
- const updatedState = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 0,
- )
- bigNumEquals(2, updatedState._roundId)
- })
-
- it('sets the startedAt time for the reporting round', async () => {
- evmRevert(aggregator.getRoundData(nextRound), 'No data present')
-
- const tx = await aggregator
- .connect(oracles[0])
- .submit(nextRound, answer)
- await aggregator.connect(oracles[1]).submit(nextRound, answer)
- await aggregator.connect(oracles[2]).submit(nextRound, answer)
- const receipt = await tx.wait()
- const block = await ethers.provider.getBlock(receipt.blockHash ?? '')
-
- const round = await aggregator.getRoundData(nextRound)
- bigNumEquals(BigNumber.from(block.timestamp), round.startedAt)
- })
-
- it('announces a new round by emitting a log', async () => {
- const tx = await aggregator
- .connect(personas.Neil)
- .submit(nextRound, answer)
- const receipt = await tx.wait()
-
- const topics = receipt.logs?.[0].topics ?? []
- const roundNumber = BigNumber.from(topics[1])
- const startedBy = evmWordToAddress(topics[2])
-
- bigNumEquals(nextRound, roundNumber.toNumber())
- bigNumEquals(startedBy, await personas.Neil.getAddress())
- })
- })
-
- describe('when a round is passed in higher than expected', () => {
- it('reverts', async () => {
- await evmRevert(
- aggregator.connect(personas.Neil).submit(nextRound + 1, answer),
- 'invalid round to report',
- )
- })
- })
-
- describe('when called by a non-oracle', () => {
- it('reverts', async () => {
- await evmRevert(
- aggregator.connect(personas.Carol).submit(nextRound, answer),
- 'not enabled oracle',
- )
- })
- })
-
- describe('when there are not sufficient available funds', () => {
- beforeEach(async () => {
- await aggregator
- .connect(personas.Carol)
- .withdrawFunds(
- await personas.Carol.getAddress(),
- deposit.sub(paymentAmount.mul(oracles.length).mul(reserveRounds)),
- )
-
- // drain remaining funds
- await advanceRound(aggregator, oracles)
- await advanceRound(aggregator, oracles)
- })
-
- it('reverts', async () => {
- await evmRevert(
- aggregator.connect(personas.Neil).submit(nextRound, answer),
- 'SafeMath: subtraction overflow',
- )
- })
- })
-
- describe('when a new round opens before the previous rounds closes', () => {
- beforeEach(async () => {
- oracles = [personas.Nancy, personas.Norbert]
- await addOracles(aggregator, oracles, 3, 4, rrDelay)
- await advanceRound(aggregator, [
- personas.Nelly,
- personas.Neil,
- personas.Nancy,
- ])
-
- // start the next round
- await aggregator.connect(personas.Nelly).submit(nextRound, answer)
- })
-
- it('still allows the previous round to be answered', async () => {
- await aggregator.connect(personas.Ned).submit(nextRound - 1, answer)
- })
-
- describe('once the current round is answered', () => {
- beforeEach(async () => {
- oracles = [personas.Neil, personas.Nancy]
- for (let i = 0; i < oracles.length; i++) {
- await aggregator.connect(oracles[i]).submit(nextRound, answer)
- }
- })
-
- it('does not allow reports for the previous round', async () => {
- await evmRevert(
- aggregator.connect(personas.Ned).submit(nextRound - 1, answer),
- 'invalid round to report',
- )
- })
- })
-
- describe('when the previous round has finished', () => {
- beforeEach(async () => {
- await aggregator
- .connect(personas.Norbert)
- .submit(nextRound - 1, answer)
- })
-
- it('does not allow reports for the previous round', async () => {
- await evmRevert(
- aggregator.connect(personas.Ned).submit(nextRound - 1, answer),
- 'round not accepting submissions',
- )
- })
- })
- })
-
- describe('when price is updated mid-round', () => {
- const newAmount = toWei('50')
-
- it('pays the same amount to all oracles per round', async () => {
- await link.transfer(
- aggregator.address,
- newAmount.mul(oracles.length).mul(reserveRounds),
- )
- await aggregator.updateAvailableFunds()
-
- bigNumEquals(
- 0,
- await aggregator
- .connect(personas.Neil)
- .withdrawablePayment(await personas.Neil.getAddress()),
- )
- bigNumEquals(
- 0,
- await aggregator
- .connect(personas.Nelly)
- .withdrawablePayment(await personas.Nelly.getAddress()),
- )
-
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
-
- await updateFutureRounds(aggregator, { payment: newAmount })
-
- await aggregator.connect(personas.Nelly).submit(nextRound, answer)
-
- bigNumEquals(
- paymentAmount,
- await aggregator
- .connect(personas.Neil)
- .withdrawablePayment(await personas.Neil.getAddress()),
- )
- bigNumEquals(
- paymentAmount,
- await aggregator
- .connect(personas.Nelly)
- .withdrawablePayment(await personas.Nelly.getAddress()),
- )
- })
- })
-
- describe('when delay is on', () => {
- beforeEach(async () => {
- await updateFutureRounds(aggregator, {
- minAnswers: oracles.length,
- maxAnswers: oracles.length,
- restartDelay: 1,
- })
- })
-
- it("does not revert on the oracle's first round", async () => {
- // Since lastUpdatedRound defaults to zero and that's the only
- // indication that an oracle hasn't responded, this test guards against
- // the situation where we don't check that and no one can start a round.
-
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
- })
-
- it('does revert before the delay', async () => {
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
-
- nextRound++
-
- await evmRevert(
- aggregator.connect(personas.Neil).submit(nextRound, answer),
- 'previous round not supersedable',
- )
- })
- })
-
- describe('when an oracle starts a round before the restart delay is over', () => {
- beforeEach(async () => {
- await updateFutureRounds(aggregator.connect(personas.Carol), {
- minAnswers: 1,
- maxAnswers: 1,
- })
-
- oracles = [personas.Neil, personas.Ned, personas.Nelly]
- for (let i = 0; i < oracles.length; i++) {
- await aggregator.connect(oracles[i]).submit(nextRound, answer)
- nextRound++
- }
-
- const newDelay = 2
- // Since Ned and Nelly have answered recently, and we set the delay
- // to 2, only Nelly can answer as she is the only oracle that hasn't
- // started the last two rounds.
- await updateFutureRounds(aggregator, {
- maxAnswers: oracles.length,
- restartDelay: newDelay,
- })
- })
-
- describe('when called by an oracle who has not answered recently', () => {
- it('does not revert', async () => {
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
- })
- })
-
- describe('when called by an oracle who answered recently', () => {
- it('reverts', async () => {
- await evmRevert(
- aggregator.connect(personas.Ned).submit(nextRound, answer),
- 'round not accepting submissions',
- )
-
- await evmRevert(
- aggregator.connect(personas.Nelly).submit(nextRound, answer),
- 'round not accepting submissions',
- )
- })
- })
- })
-
- describe('when the price is not updated for a round', () => {
- beforeEach(async () => {
- await updateFutureRounds(aggregator, {
- minAnswers: oracles.length,
- maxAnswers: oracles.length,
- restartDelay: 1,
- })
-
- for (const oracle of oracles) {
- await aggregator.connect(oracle).submit(nextRound, answer)
- }
- nextRound++
-
- await aggregator.connect(personas.Ned).submit(nextRound, answer)
- await aggregator.connect(personas.Nelly).submit(nextRound, answer)
-
- await increaseTimeBy(timeout + 1, ethers.provider)
- nextRound++
- })
-
- it('allows a new round to be started', async () => {
- await aggregator.connect(personas.Nelly).submit(nextRound, answer)
- })
-
- it('sets the info for the previous round', async () => {
- const previousRound = nextRound - 1
- let updated = await aggregator.getTimestamp(previousRound)
- let ans = await aggregator.getAnswer(previousRound)
- assert.equal(0, updated.toNumber())
- assert.equal(0, ans.toNumber())
-
- const tx = await aggregator
- .connect(personas.Nelly)
- .submit(nextRound, answer)
- const receipt = await tx.wait()
-
- const block = await ethers.provider.getBlock(receipt.blockHash ?? '')
-
- updated = await aggregator.getTimestamp(previousRound)
- ans = await aggregator.getAnswer(previousRound)
- bigNumEquals(BigNumber.from(block.timestamp), updated)
- assert.equal(answer, ans.toNumber())
-
- const round = await aggregator.getRoundData(previousRound)
- bigNumEquals(previousRound, round.roundId)
- bigNumEquals(ans, round.answer)
- bigNumEquals(updated, round.updatedAt)
- bigNumEquals(previousRound - 1, round.answeredInRound)
- })
-
- it('sets the previous round as timed out', async () => {
- const previousRound = nextRound - 1
- evmRevert(aggregator.getRoundData(previousRound), 'No data present')
-
- await aggregator.connect(personas.Nelly).submit(nextRound, answer)
-
- const round = await aggregator.getRoundData(previousRound)
- assert.notEqual(round.roundId, round.answeredInRound)
- bigNumEquals(previousRound - 1, round.answeredInRound)
- })
-
- it('still respects the delay restriction', async () => {
- // expected to revert because the sender started the last round
- await evmRevert(
- aggregator.connect(personas.Ned).submit(nextRound, answer),
- )
- })
-
- it('uses the timeout set at the beginning of the round', async () => {
- await updateFutureRounds(aggregator, {
- timeout: timeout + 100000,
- })
-
- await aggregator.connect(personas.Nelly).submit(nextRound, answer)
- })
- })
-
- describe('submitting values near the edges of allowed values', () => {
- it('rejects values below the submission value range', async () => {
- await evmRevert(
- aggregator
- .connect(personas.Neil)
- .submit(nextRound, minSubmissionValue.sub(1)),
- 'value below minSubmissionValue',
- )
- })
-
- it('accepts submissions equal to the min submission value', async () => {
- await aggregator
- .connect(personas.Neil)
- .submit(nextRound, minSubmissionValue)
- })
-
- it('accepts submissions equal to the max submission value', async () => {
- await aggregator
- .connect(personas.Neil)
- .submit(nextRound, maxSubmissionValue)
- })
-
- it('rejects submissions equal to the max submission value', async () => {
- await evmRevert(
- aggregator
- .connect(personas.Neil)
- .submit(nextRound, maxSubmissionValue.add(1)),
- 'value above maxSubmissionValue',
- )
- })
- })
-
- describe('when a validator is set', () => {
- beforeEach(async () => {
- await updateFutureRounds(aggregator, { minAnswers: 1, maxAnswers: 1 })
- oracles = [personas.Nelly]
-
- validator = await validatorMockFactory.connect(personas.Carol).deploy()
- await aggregator.connect(personas.Carol).setValidator(validator.address)
- })
-
- it('calls out to the validator', async () => {
- await expect(
- aggregator.connect(personas.Nelly).submit(nextRound, answer),
- )
- .to.emit(validator, 'Validated')
- .withArgs(0, 0, nextRound, answer)
- })
- })
-
- describe('when the answer validator eats all gas', () => {
- beforeEach(async () => {
- await updateFutureRounds(aggregator, { minAnswers: 1, maxAnswers: 1 })
- oracles = [personas.Nelly]
-
- gasGuzzler = await gasGuzzlerFactory.connect(personas.Carol).deploy()
- await aggregator
- .connect(personas.Carol)
- .setValidator(gasGuzzler.address)
- assert.equal(gasGuzzler.address, await aggregator.validator())
- })
-
- it('still updates', async () => {
- bigNumEquals(0, await aggregator.latestAnswer())
-
- await aggregator
- .connect(personas.Nelly)
- .submit(nextRound, answer, { gasLimit: 500000 })
-
- bigNumEquals(answer, await aggregator.latestAnswer())
- })
- })
- })
-
- describe('#getAnswer', () => {
- const answers = [1, 10, 101, 1010, 10101, 101010, 1010101]
-
- beforeEach(async () => {
- await addOracles(aggregator, [personas.Neil], minAns, maxAns, rrDelay)
-
- for (const answer of answers) {
- await aggregator.connect(personas.Neil).submit(nextRound++, answer)
- }
- })
-
- it('retrieves the answer recorded for past rounds', async () => {
- for (let i = nextRound; i < nextRound; i++) {
- const answer = await aggregator.getAnswer(i)
- bigNumEquals(BigNumber.from(answers[i - 1]), answer)
- }
- })
-
- it("returns 0 for answers greater than uint32's max", async () => {
- const overflowedId = BigNumber.from(2).pow(32).add(1)
- const answer = await aggregator.getAnswer(overflowedId)
- bigNumEquals(0, answer)
- })
- })
-
- describe('#getTimestamp', () => {
- beforeEach(async () => {
- await addOracles(aggregator, [personas.Neil], minAns, maxAns, rrDelay)
-
- for (let i = 0; i < 10; i++) {
- await aggregator.connect(personas.Neil).submit(nextRound++, i + 1)
- }
- })
-
- it('retrieves the answer recorded for past rounds', async () => {
- let lastTimestamp = ethers.constants.Zero
-
- for (let i = 1; i < nextRound; i++) {
- const currentTimestamp = await aggregator.getTimestamp(i)
- assert.isAtLeast(currentTimestamp.toNumber(), lastTimestamp.toNumber())
- lastTimestamp = currentTimestamp
- }
- })
-
- it("returns 0 for answers greater than uint32's max", async () => {
- const overflowedId = BigNumber.from(2).pow(32).add(1)
- const answer = await aggregator.getTimestamp(overflowedId)
- bigNumEquals(0, answer)
- })
- })
-
- describe('#changeOracles', () => {
- describe('adding oracles', () => {
- it('increases the oracle count', async () => {
- const pastCount = await aggregator.oracleCount()
- await addOracles(aggregator, [personas.Neil], minAns, maxAns, rrDelay)
- const currentCount = await aggregator.oracleCount()
-
- bigNumEquals(currentCount, pastCount + 1)
- })
-
- it('adds the address in getOracles', async () => {
- await addOracles(aggregator, [personas.Neil], minAns, maxAns, rrDelay)
- assert.deepEqual(
- [await personas.Neil.getAddress()],
- await aggregator.getOracles(),
- )
- })
-
- it('updates the round details', async () => {
- await addOracles(
- aggregator,
- [personas.Neil, personas.Ned, personas.Nelly],
- 1,
- 3,
- 2,
- )
- bigNumEquals(1, await aggregator.minSubmissionCount())
- bigNumEquals(3, await aggregator.maxSubmissionCount())
- bigNumEquals(2, await aggregator.restartDelay())
- })
-
- it('emits a log', async () => {
- const tx = await aggregator
- .connect(personas.Carol)
- .changeOracles(
- [],
- [await personas.Ned.getAddress()],
- [await personas.Neil.getAddress()],
- 1,
- 1,
- 0,
- )
- expect(tx)
- .to.emit(aggregator, 'OraclePermissionsUpdated')
- .withArgs(await personas.Ned.getAddress(), true)
-
- expect(tx)
- .to.emit(aggregator, 'OracleAdminUpdated')
- .withArgs(
- await personas.Ned.getAddress(),
- await personas.Neil.getAddress(),
- )
- })
-
- describe('when the oracle has already been added', () => {
- beforeEach(async () => {
- await addOracles(aggregator, [personas.Neil], minAns, maxAns, rrDelay)
- })
-
- it('reverts', async () => {
- await evmRevert(
- addOracles(aggregator, [personas.Neil], minAns, maxAns, rrDelay),
- 'oracle already enabled',
- )
- })
- })
-
- describe('when called by anyone but the owner', () => {
- it('reverts', async () => {
- await evmRevert(
- aggregator
- .connect(personas.Neil)
- .changeOracles(
- [],
- [await personas.Neil.getAddress()],
- [await personas.Neil.getAddress()],
- minAns,
- maxAns,
- rrDelay,
- ),
- 'Only callable by owner',
- )
- })
- })
-
- describe('when an oracle gets added mid-round', () => {
- beforeEach(async () => {
- oracles = [personas.Neil, personas.Ned]
- await addOracles(
- aggregator,
- oracles,
- oracles.length,
- oracles.length,
- rrDelay,
- )
-
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
-
- await addOracles(
- aggregator,
- [personas.Nelly],
- oracles.length + 1,
- oracles.length + 1,
- rrDelay,
- )
- })
-
- it('does not allow the oracle to update the round', async () => {
- await evmRevert(
- aggregator.connect(personas.Nelly).submit(nextRound, answer),
- 'not yet enabled oracle',
- )
- })
-
- it('does allow the oracle to update future rounds', async () => {
- // complete round
- await aggregator.connect(personas.Ned).submit(nextRound, answer)
-
- // now can participate in new rounds
- await aggregator.connect(personas.Nelly).submit(nextRound + 1, answer)
- })
- })
-
- describe('when an oracle is added after removed for a round', () => {
- it('allows the oracle to update', async () => {
- oracles = [personas.Neil, personas.Nelly]
- await addOracles(
- aggregator,
- oracles,
- oracles.length,
- oracles.length,
- rrDelay,
- )
-
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
- await aggregator.connect(personas.Nelly).submit(nextRound, answer)
- nextRound++
-
- await aggregator
- .connect(personas.Carol)
- .changeOracles(
- [await personas.Nelly.getAddress()],
- [],
- [],
- 1,
- 1,
- rrDelay,
- )
-
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
- nextRound++
-
- await addOracles(aggregator, [personas.Nelly], 1, 1, rrDelay)
-
- await aggregator.connect(personas.Nelly).submit(nextRound, answer)
- })
- })
-
- describe('when an oracle is added and immediately removed mid-round', () => {
- it('allows the oracle to update', async () => {
- await addOracles(
- aggregator,
- oracles,
- oracles.length,
- oracles.length,
- rrDelay,
- )
-
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
- await aggregator.connect(personas.Nelly).submit(nextRound, answer)
- nextRound++
-
- await aggregator
- .connect(personas.Carol)
- .changeOracles(
- [await personas.Nelly.getAddress()],
- [],
- [],
- 1,
- 1,
- rrDelay,
- )
-
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
- nextRound++
-
- await addOracles(aggregator, [personas.Nelly], 1, 1, rrDelay)
-
- await aggregator.connect(personas.Nelly).submit(nextRound, answer)
- })
- })
-
- describe('when an oracle is re-added with a different admin address', () => {
- it('reverts', async () => {
- await addOracles(
- aggregator,
- oracles,
- oracles.length,
- oracles.length,
- rrDelay,
- )
-
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
-
- await aggregator
- .connect(personas.Carol)
- .changeOracles(
- [await personas.Nelly.getAddress()],
- [],
- [],
- 1,
- 1,
- rrDelay,
- )
-
- await evmRevert(
- aggregator
- .connect(personas.Carol)
- .changeOracles(
- [],
- [await personas.Nelly.getAddress()],
- [await personas.Carol.getAddress()],
- 1,
- 1,
- rrDelay,
- ),
- 'owner cannot overwrite admin',
- )
- })
- })
-
- const limit = 77
- describe(`when adding more than ${limit} oracles`, () => {
- let oracles: Signer[]
-
- beforeEach(async () => {
- oracles = []
- for (let i = 0; i < limit; i++) {
- const account = await new ethers.Wallet(
- randomBytes(32),
- ethers.provider,
- )
- await personas.Default.sendTransaction({
- to: account.address,
- value: toWei('0.1'),
- })
-
- oracles.push(account)
- }
-
- await link.transfer(
- aggregator.address,
- paymentAmount.mul(limit).mul(reserveRounds),
- )
- await aggregator.updateAvailableFunds()
-
- let addresses = oracles.slice(0, 50).map(async (o) => o.getAddress())
- await aggregator
- .connect(personas.Carol)
- .changeOracles([], addresses, addresses, 1, 50, rrDelay)
- // add in two transactions to avoid gas limit issues
- addresses = oracles.slice(50, 100).map(async (o) => o.getAddress())
- await aggregator
- .connect(personas.Carol)
- .changeOracles([], addresses, addresses, 1, oracles.length, rrDelay)
- })
-
- it('not use too much gas [ @skip-coverage ]', async () => {
- let tx: any
- assert.deepEqual(
- // test adveserial quickselect algo
- [2, 4, 6, 8, 10, 12, 14, 16, 1, 9, 5, 11, 3, 13, 7, 15],
- adverserialQuickselectList(16),
- )
- const inputs = adverserialQuickselectList(limit)
- for (let i = 0; i < limit; i++) {
- tx = await aggregator
- .connect(oracles[i])
- .submit(nextRound, inputs[i])
- }
- assert.isTrue(!!tx)
- if (tx) {
- const receipt = await tx.wait()
- assert.isBelow(receipt.gasUsed.toNumber(), 600_000)
- }
- })
-
- function adverserialQuickselectList(len: number): number[] {
- const xs: number[] = []
- const pi: number[] = []
- for (let i = 0; i < len; i++) {
- pi[i] = i
- xs[i] = 0
- }
-
- for (let l = len; l > 0; l--) {
- const pivot = Math.floor((l - 1) / 2)
- xs[pi[pivot]] = l
- const temp = pi[l - 1]
- pi[l - 1] = pi[pivot]
- pi[pivot] = temp
- }
- return xs
- }
-
- it('reverts when another oracle is added', async () => {
- await evmRevert(
- aggregator
- .connect(personas.Carol)
- .changeOracles(
- [],
- [await personas.Neil.getAddress()],
- [await personas.Neil.getAddress()],
- limit + 1,
- limit + 1,
- rrDelay,
- ),
- 'max oracles allowed',
- )
- })
- })
-
- it('reverts when minSubmissions is set to 0', async () => {
- await evmRevert(
- addOracles(aggregator, [personas.Neil], 0, 0, 0),
- 'min must be greater than 0',
- )
- })
- })
-
- describe('removing oracles', () => {
- beforeEach(async () => {
- oracles = [personas.Neil, personas.Nelly]
- await addOracles(
- aggregator,
- oracles,
- oracles.length,
- oracles.length,
- rrDelay,
- )
- })
-
- it('decreases the oracle count', async () => {
- const pastCount = await aggregator.oracleCount()
- await aggregator
- .connect(personas.Carol)
- .changeOracles(
- [await personas.Neil.getAddress()],
- [],
- [],
- minAns,
- maxAns,
- rrDelay,
- )
- const currentCount = await aggregator.oracleCount()
-
- expect(currentCount).to.equal(pastCount - 1)
- })
-
- it('updates the round details', async () => {
- await aggregator
- .connect(personas.Carol)
- .changeOracles([await personas.Neil.getAddress()], [], [], 1, 1, 0)
-
- bigNumEquals(1, await aggregator.minSubmissionCount())
- bigNumEquals(1, await aggregator.maxSubmissionCount())
- bigNumEquals(ethers.constants.Zero, await aggregator.restartDelay())
- })
-
- it('emits a log', async () => {
- await expect(
- aggregator
- .connect(personas.Carol)
- .changeOracles(
- [await personas.Neil.getAddress()],
- [],
- [],
- minAns,
- maxAns,
- rrDelay,
- ),
- )
- .to.emit(aggregator, 'OraclePermissionsUpdated')
- .withArgs(await personas.Neil.getAddress(), false)
- })
-
- it('removes the address in getOracles', async () => {
- await aggregator
- .connect(personas.Carol)
- .changeOracles(
- [await personas.Neil.getAddress()],
- [],
- [],
- minAns,
- maxAns,
- rrDelay,
- )
- assert.deepEqual(
- [await personas.Nelly.getAddress()],
- await aggregator.getOracles(),
- )
- })
-
- describe('when the oracle is not currently added', () => {
- beforeEach(async () => {
- await aggregator
- .connect(personas.Carol)
- .changeOracles(
- [await personas.Neil.getAddress()],
- [],
- [],
- minAns,
- maxAns,
- rrDelay,
- )
- })
-
- it('reverts', async () => {
- await evmRevert(
- aggregator
- .connect(personas.Carol)
- .changeOracles(
- [await personas.Neil.getAddress()],
- [],
- [],
- minAns,
- maxAns,
- rrDelay,
- ),
- 'oracle not enabled',
- )
- })
- })
-
- describe('when removing the last oracle', () => {
- it('does not revert', async () => {
- await aggregator
- .connect(personas.Carol)
- .changeOracles(
- [await personas.Neil.getAddress()],
- [],
- [],
- minAns,
- maxAns,
- rrDelay,
- )
-
- await aggregator
- .connect(personas.Carol)
- .changeOracles([await personas.Nelly.getAddress()], [], [], 0, 0, 0)
- })
- })
-
- describe('when called by anyone but the owner', () => {
- it('reverts', async () => {
- await evmRevert(
- aggregator
- .connect(personas.Ned)
- .changeOracles(
- [await personas.Neil.getAddress()],
- [],
- [],
- 0,
- 0,
- rrDelay,
- ),
- 'Only callable by owner',
- )
- })
- })
-
- describe('when an oracle gets removed', () => {
- beforeEach(async () => {
- await aggregator
- .connect(personas.Carol)
- .changeOracles(
- [await personas.Nelly.getAddress()],
- [],
- [],
- 1,
- 1,
- rrDelay,
- )
- })
-
- it('is allowed to report on one more round', async () => {
- // next round
- await advanceRound(aggregator, [personas.Nelly])
- // finish round
- await advanceRound(aggregator, [personas.Neil])
-
- // cannot participate in future rounds
- await evmRevert(
- aggregator.connect(personas.Nelly).submit(nextRound, answer),
- 'no longer allowed oracle',
- )
- })
- })
-
- describe('when an oracle gets removed mid-round', () => {
- beforeEach(async () => {
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
-
- await aggregator
- .connect(personas.Carol)
- .changeOracles(
- [await personas.Nelly.getAddress()],
- [],
- [],
- 1,
- 1,
- rrDelay,
- )
- })
-
- it('is allowed to finish that round and one more round', async () => {
- await advanceRound(aggregator, [personas.Nelly]) // finish round
-
- await advanceRound(aggregator, [personas.Nelly]) // next round
-
- // cannot participate in future rounds
- await evmRevert(
- aggregator.connect(personas.Nelly).submit(nextRound, answer),
- 'no longer allowed oracle',
- )
- })
- })
-
- it('reverts when minSubmissions is set to 0', async () => {
- await evmRevert(
- aggregator
- .connect(personas.Carol)
- .changeOracles(
- [await personas.Nelly.getAddress()],
- [],
- [],
- 0,
- 0,
- 0,
- ),
- 'min must be greater than 0',
- )
- })
- })
-
- describe('adding and removing oracles at once', () => {
- beforeEach(async () => {
- oracles = [personas.Neil, personas.Ned]
- await addOracles(aggregator, oracles, 1, 1, rrDelay)
- })
-
- it('can swap out oracles', async () => {
- assert.include(
- await aggregator.getOracles(),
- await personas.Ned.getAddress(),
- )
- assert.notInclude(
- await aggregator.getOracles(),
- await personas.Nelly.getAddress(),
- )
-
- await aggregator
- .connect(personas.Carol)
- .changeOracles(
- [await personas.Ned.getAddress()],
- [await personas.Nelly.getAddress()],
- [await personas.Nelly.getAddress()],
- 1,
- 1,
- rrDelay,
- )
-
- assert.notInclude(
- await aggregator.getOracles(),
- await personas.Ned.getAddress(),
- )
- assert.include(
- await aggregator.getOracles(),
- await personas.Nelly.getAddress(),
- )
- })
-
- it('is possible to remove and add the same address', async () => {
- assert.include(
- await aggregator.getOracles(),
- await personas.Ned.getAddress(),
- )
-
- await aggregator
- .connect(personas.Carol)
- .changeOracles(
- [await personas.Ned.getAddress()],
- [await personas.Ned.getAddress()],
- [await personas.Ned.getAddress()],
- 1,
- 1,
- rrDelay,
- )
-
- assert.include(
- await aggregator.getOracles(),
- await personas.Ned.getAddress(),
- )
- })
- })
- })
-
- describe('#getOracles', () => {
- describe('after adding oracles', () => {
- beforeEach(async () => {
- await addOracles(aggregator, [personas.Neil], minAns, maxAns, rrDelay)
-
- assert.deepEqual(
- [await personas.Neil.getAddress()],
- await aggregator.getOracles(),
- )
- })
-
- it('returns the addresses of added oracles', async () => {
- await addOracles(aggregator, [personas.Ned], minAns, maxAns, rrDelay)
-
- assert.deepEqual(
- [await personas.Neil.getAddress(), await personas.Ned.getAddress()],
- await aggregator.getOracles(),
- )
-
- await addOracles(aggregator, [personas.Nelly], minAns, maxAns, rrDelay)
- assert.deepEqual(
- [
- await personas.Neil.getAddress(),
- await personas.Ned.getAddress(),
- await personas.Nelly.getAddress(),
- ],
- await aggregator.getOracles(),
- )
- })
- })
-
- describe('after removing oracles', () => {
- beforeEach(async () => {
- await addOracles(
- aggregator,
- [personas.Neil, personas.Ned, personas.Nelly],
- minAns,
- maxAns,
- rrDelay,
- )
-
- assert.deepEqual(
- [
- await personas.Neil.getAddress(),
- await personas.Ned.getAddress(),
- await personas.Nelly.getAddress(),
- ],
- await aggregator.getOracles(),
- )
- })
-
- it('reorders when removing from the beginning', async () => {
- await aggregator
- .connect(personas.Carol)
- .changeOracles(
- [await personas.Neil.getAddress()],
- [],
- [],
- minAns,
- maxAns,
- rrDelay,
- )
- assert.deepEqual(
- [await personas.Nelly.getAddress(), await personas.Ned.getAddress()],
- await aggregator.getOracles(),
- )
- })
-
- it('reorders when removing from the middle', async () => {
- await aggregator
- .connect(personas.Carol)
- .changeOracles(
- [await personas.Ned.getAddress()],
- [],
- [],
- minAns,
- maxAns,
- rrDelay,
- )
- assert.deepEqual(
- [await personas.Neil.getAddress(), await personas.Nelly.getAddress()],
- await aggregator.getOracles(),
- )
- })
-
- it('pops the last node off at the end', async () => {
- await aggregator
- .connect(personas.Carol)
- .changeOracles(
- [await personas.Nelly.getAddress()],
- [],
- [],
- minAns,
- maxAns,
- rrDelay,
- )
- assert.deepEqual(
- [await personas.Neil.getAddress(), await personas.Ned.getAddress()],
- await aggregator.getOracles(),
- )
- })
- })
- })
-
- describe('#withdrawFunds', () => {
- it('succeeds', async () => {
- await aggregator
- .connect(personas.Carol)
- .withdrawFunds(await personas.Carol.getAddress(), deposit)
-
- bigNumEquals(0, await aggregator.availableFunds())
- bigNumEquals(
- deposit,
- await link.balanceOf(await personas.Carol.getAddress()),
- )
- })
-
- it('does not let withdrawals happen multiple times', async () => {
- await aggregator
- .connect(personas.Carol)
- .withdrawFunds(await personas.Carol.getAddress(), deposit)
-
- await evmRevert(
- aggregator
- .connect(personas.Carol)
- .withdrawFunds(await personas.Carol.getAddress(), deposit),
- 'insufficient reserve funds',
- )
- })
-
- describe('with a number higher than the available LINK balance', () => {
- beforeEach(async () => {
- await addOracles(aggregator, [personas.Neil], minAns, maxAns, rrDelay)
-
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
- })
-
- it('fails', async () => {
- await evmRevert(
- aggregator
- .connect(personas.Carol)
- .withdrawFunds(await personas.Carol.getAddress(), deposit),
- 'insufficient reserve funds',
- )
-
- bigNumEquals(
- deposit.sub(paymentAmount),
- await aggregator.availableFunds(),
- )
- })
- })
-
- describe('with oracles still present', () => {
- beforeEach(async () => {
- oracles = [personas.Neil, personas.Ned, personas.Nelly]
- await addOracles(aggregator, oracles, 1, 1, rrDelay)
-
- bigNumEquals(deposit, await aggregator.availableFunds())
- })
-
- it('does not allow withdrawal with less than 2x rounds of payments', async () => {
- const oracleReserve = paymentAmount
- .mul(oracles.length)
- .mul(reserveRounds)
- const allowed = deposit.sub(oracleReserve)
-
- //one more than the allowed amount cannot be withdrawn
- await evmRevert(
- aggregator
- .connect(personas.Carol)
- .withdrawFunds(await personas.Carol.getAddress(), allowed.add(1)),
- 'insufficient reserve funds',
- )
-
- // the allowed amount can be withdrawn
- await aggregator
- .connect(personas.Carol)
- .withdrawFunds(await personas.Carol.getAddress(), allowed)
- })
- })
-
- describe('when called by a non-owner', () => {
- it('fails', async () => {
- await evmRevert(
- aggregator
- .connect(personas.Eddy)
- .withdrawFunds(await personas.Carol.getAddress(), deposit),
- 'Only callable by owner',
- )
-
- bigNumEquals(deposit, await aggregator.availableFunds())
- })
- })
- })
-
- describe('#updateFutureRounds', () => {
- let minSubmissionCount, maxSubmissionCount
- const newPaymentAmount = toWei('2')
- const newMin = 1
- const newMax = 3
- const newDelay = 2
-
- beforeEach(async () => {
- oracles = [personas.Neil, personas.Ned, personas.Nelly]
- minSubmissionCount = oracles.length
- maxSubmissionCount = oracles.length
- await addOracles(
- aggregator,
- oracles,
- minSubmissionCount,
- maxSubmissionCount,
- rrDelay,
- )
-
- bigNumEquals(paymentAmount, await aggregator.paymentAmount())
- assert.equal(minSubmissionCount, await aggregator.minSubmissionCount())
- assert.equal(maxSubmissionCount, await aggregator.maxSubmissionCount())
- })
-
- it('updates the min and max answer counts', async () => {
- await updateFutureRounds(aggregator, {
- payment: newPaymentAmount,
- minAnswers: newMin,
- maxAnswers: newMax,
- restartDelay: newDelay,
- })
-
- bigNumEquals(newPaymentAmount, await aggregator.paymentAmount())
- bigNumEquals(
- BigNumber.from(newMin),
- await aggregator.minSubmissionCount(),
- )
- bigNumEquals(
- BigNumber.from(newMax),
- await aggregator.maxSubmissionCount(),
- )
- bigNumEquals(BigNumber.from(newDelay), await aggregator.restartDelay())
- })
-
- it('emits a log announcing the new round details', async () => {
- await expect(
- updateFutureRounds(aggregator, {
- payment: newPaymentAmount,
- minAnswers: newMin,
- maxAnswers: newMax,
- restartDelay: newDelay,
- timeout: timeout + 1,
- }),
- )
- .to.emit(aggregator, 'RoundDetailsUpdated')
- .withArgs(newPaymentAmount, newMin, newMax, newDelay, timeout + 1)
- })
-
- describe('when it is set to higher than the number or oracles', () => {
- it('reverts', async () => {
- await evmRevert(
- updateFutureRounds(aggregator, {
- maxAnswers: 4,
- }),
- 'max cannot exceed total',
- )
- })
- })
-
- describe('when it sets the min higher than the max', () => {
- it('reverts', async () => {
- await evmRevert(
- updateFutureRounds(aggregator, {
- minAnswers: 3,
- maxAnswers: 2,
- }),
- 'max must equal/exceed min',
- )
- })
- })
-
- describe('when delay equal or greater the oracle count', () => {
- it('reverts', async () => {
- await evmRevert(
- updateFutureRounds(aggregator, {
- restartDelay: 3,
- }),
- 'delay cannot exceed total',
- )
- })
- })
-
- describe('when the payment amount does not cover reserve rounds', () => {
- beforeEach(async () => {})
-
- it('reverts', async () => {
- const most = deposit.div(oracles.length * reserveRounds)
-
- // Relaxed check for the revert message due to a bug in ethers where any error message
- // that starts with insufficient funds will be incorrectly returned as 'insufficient funds for intrinsic transaction cost'
- await updateFutureRounds(aggregator, {
- payment: most.add(1),
- }).then(
- () => {
- // onFulfillment callback
- fail('expected to revert but did not')
- },
- (error: any) => {
- // onRejected callback
- const message =
- error instanceof Object && 'message' in error
- ? error.message
- : JSON.stringify(error)
- assert.isTrue(message.includes('insufficient funds'))
- },
- )
-
- await updateFutureRounds(aggregator, {
- payment: most,
- })
- })
- })
-
- describe('min oracles is set to 0', () => {
- it('reverts', async () => {
- await evmRevert(
- aggregator.updateFutureRounds(paymentAmount, 0, 0, rrDelay, timeout),
- 'min must be greater than 0',
- )
- })
- })
-
- describe('when called by anyone but the owner', () => {
- it('reverts', async () => {
- await evmRevert(
- updateFutureRounds(aggregator.connect(personas.Ned)),
- 'Only callable by owner',
- )
- })
- })
- })
-
- describe('#updateAvailableFunds', () => {
- it('checks the LINK token to see if any additional funds are available', async () => {
- const originalBalance = await aggregator.availableFunds()
-
- await aggregator.updateAvailableFunds()
-
- bigNumEquals(originalBalance, await aggregator.availableFunds())
-
- await link.transfer(aggregator.address, deposit)
- await aggregator.updateAvailableFunds()
-
- const newBalance = await aggregator.availableFunds()
- bigNumEquals(originalBalance.add(deposit), newBalance)
- })
-
- it('removes allocated funds from the available balance', async () => {
- const originalBalance = await aggregator.availableFunds()
-
- await addOracles(aggregator, [personas.Neil], minAns, maxAns, rrDelay)
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
- await link.transfer(aggregator.address, deposit)
- await aggregator.updateAvailableFunds()
-
- const expected = originalBalance.add(deposit).sub(paymentAmount)
- const newBalance = await aggregator.availableFunds()
- bigNumEquals(expected, newBalance)
- })
-
- it('emits a log', async () => {
- await link.transfer(aggregator.address, deposit)
-
- const tx = await aggregator.updateAvailableFunds()
- const receipt = await tx.wait()
-
- const reportedBalance = BigNumber.from(receipt.logs?.[0].topics[1] ?? -1)
- bigNumEquals(await aggregator.availableFunds(), reportedBalance)
- })
-
- describe('when the available funds have not changed', () => {
- it('does not emit a log', async () => {
- const tx = await aggregator.updateAvailableFunds()
- const receipt = await tx.wait()
-
- assert.equal(0, receipt.logs?.length)
- })
- })
- })
-
- describe('#withdrawPayment', () => {
- beforeEach(async () => {
- await addOracles(aggregator, [personas.Neil], minAns, maxAns, rrDelay)
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
- })
-
- it('transfers LINK to the recipient', async () => {
- const originalBalance = await link.balanceOf(aggregator.address)
- bigNumEquals(0, await link.balanceOf(await personas.Neil.getAddress()))
-
- await aggregator
- .connect(personas.Neil)
- .withdrawPayment(
- await personas.Neil.getAddress(),
- await personas.Neil.getAddress(),
- paymentAmount,
- )
-
- bigNumEquals(
- originalBalance.sub(paymentAmount),
- await link.balanceOf(aggregator.address),
- )
- bigNumEquals(
- paymentAmount,
- await link.balanceOf(await personas.Neil.getAddress()),
- )
- })
-
- it('decrements the allocated funds counter', async () => {
- const originalAllocation = await aggregator.allocatedFunds()
-
- await aggregator
- .connect(personas.Neil)
- .withdrawPayment(
- await personas.Neil.getAddress(),
- await personas.Neil.getAddress(),
- paymentAmount,
- )
-
- bigNumEquals(
- originalAllocation.sub(paymentAmount),
- await aggregator.allocatedFunds(),
- )
- })
-
- describe('when the caller withdraws more than they have', () => {
- it('reverts', async () => {
- await evmRevert(
- aggregator
- .connect(personas.Neil)
- .withdrawPayment(
- await personas.Neil.getAddress(),
- await personas.Neil.getAddress(),
- paymentAmount.add(BigNumber.from(1)),
- ),
- 'insufficient withdrawable funds',
- )
- })
- })
-
- describe('when the caller is not the admin', () => {
- it('reverts', async () => {
- await evmRevert(
- aggregator
- .connect(personas.Nelly)
- .withdrawPayment(
- await personas.Neil.getAddress(),
- await personas.Nelly.getAddress(),
- BigNumber.from(1),
- ),
- 'only callable by admin',
- )
- })
- })
- })
-
- describe('#transferAdmin', () => {
- beforeEach(async () => {
- await aggregator
- .connect(personas.Carol)
- .changeOracles(
- [],
- [await personas.Ned.getAddress()],
- [await personas.Neil.getAddress()],
- minAns,
- maxAns,
- rrDelay,
- )
- })
-
- describe('when the admin tries to transfer the admin', () => {
- it('works', async () => {
- await expect(
- aggregator
- .connect(personas.Neil)
- .transferAdmin(
- await personas.Ned.getAddress(),
- await personas.Nelly.getAddress(),
- ),
- )
- .to.emit(aggregator, 'OracleAdminUpdateRequested')
- .withArgs(
- await personas.Ned.getAddress(),
- await personas.Neil.getAddress(),
- await personas.Nelly.getAddress(),
- )
- assert.equal(
- await personas.Neil.getAddress(),
- await aggregator.getAdmin(await personas.Ned.getAddress()),
- )
- })
- })
-
- describe('when the non-admin owner tries to update the admin', () => {
- it('reverts', async () => {
- await evmRevert(
- aggregator
- .connect(personas.Carol)
- .transferAdmin(
- await personas.Ned.getAddress(),
- await personas.Nelly.getAddress(),
- ),
- 'only callable by admin',
- )
- })
- })
-
- describe('when the non-admin oracle tries to update the admin', () => {
- it('reverts', async () => {
- await evmRevert(
- aggregator
- .connect(personas.Ned)
- .transferAdmin(
- await personas.Ned.getAddress(),
- await personas.Nelly.getAddress(),
- ),
- 'only callable by admin',
- )
- })
- })
- })
-
- describe('#acceptAdmin', () => {
- beforeEach(async () => {
- await aggregator
- .connect(personas.Carol)
- .changeOracles(
- [],
- [await personas.Ned.getAddress()],
- [await personas.Neil.getAddress()],
- minAns,
- maxAns,
- rrDelay,
- )
- const tx = await aggregator
- .connect(personas.Neil)
- .transferAdmin(
- await personas.Ned.getAddress(),
- await personas.Nelly.getAddress(),
- )
- await tx.wait()
- })
-
- describe('when the new admin tries to accept', () => {
- it('works', async () => {
- await expect(
- aggregator
- .connect(personas.Nelly)
- .acceptAdmin(await personas.Ned.getAddress()),
- )
- .to.emit(aggregator, 'OracleAdminUpdated')
- .withArgs(
- await personas.Ned.getAddress(),
- await personas.Nelly.getAddress(),
- )
- assert.equal(
- await personas.Nelly.getAddress(),
- await aggregator.getAdmin(await personas.Ned.getAddress()),
- )
- })
- })
-
- describe('when someone other than the new admin tries to accept', () => {
- it('reverts', async () => {
- await evmRevert(
- aggregator
- .connect(personas.Ned)
- .acceptAdmin(await personas.Ned.getAddress()),
- 'only callable by pending admin',
- )
- await evmRevert(
- aggregator
- .connect(personas.Neil)
- .acceptAdmin(await personas.Ned.getAddress()),
- 'only callable by pending admin',
- )
- })
- })
- })
-
- describe('#onTokenTransfer', () => {
- it('updates the available balance', async () => {
- const originalBalance = await aggregator.availableFunds()
-
- await aggregator.updateAvailableFunds()
-
- bigNumEquals(originalBalance, await aggregator.availableFunds())
-
- await link.transferAndCall(aggregator.address, deposit, '0x')
-
- const newBalance = await aggregator.availableFunds()
- bigNumEquals(originalBalance.add(deposit), newBalance)
- })
-
- it('reverts given calldata', async () => {
- await evmRevert(
- // error message is not bubbled up by link token
- link.transferAndCall(aggregator.address, deposit, '0x12345678'),
- )
- })
- })
-
- describe('#requestNewRound', () => {
- beforeEach(async () => {
- await addOracles(aggregator, [personas.Neil], 1, 1, 0)
-
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
- nextRound = nextRound + 1
-
- await aggregator.setRequesterPermissions(
- await personas.Carol.getAddress(),
- true,
- 0,
- )
- })
-
- it('announces a new round via log event', async () => {
- await expect(aggregator.requestNewRound()).to.emit(aggregator, 'NewRound')
- })
-
- it('returns the new round ID', async () => {
- testHelper = await testHelperFactory.connect(personas.Carol).deploy()
- await aggregator.setRequesterPermissions(testHelper.address, true, 0)
- let roundId = await testHelper.requestedRoundId()
- assert.equal(roundId.toNumber(), 0)
-
- await testHelper.requestNewRound(aggregator.address)
-
- // return value captured by test helper
- roundId = await testHelper.requestedRoundId()
- assert.isAbove(roundId.toNumber(), 0)
- })
-
- describe('when there is a round in progress', () => {
- beforeEach(async () => {
- await aggregator.requestNewRound()
- })
-
- it('reverts', async () => {
- await evmRevert(
- aggregator.requestNewRound(),
- 'prev round must be supersedable',
- )
- })
-
- describe('when that round has timed out', () => {
- beforeEach(async () => {
- await increaseTimeBy(timeout + 1, ethers.provider)
- await mineBlock(ethers.provider)
- })
-
- it('starts a new round', async () => {
- await expect(aggregator.requestNewRound()).to.emit(
- aggregator,
- 'NewRound',
- )
- })
- })
- })
-
- describe('when there is a restart delay set', () => {
- beforeEach(async () => {
- await aggregator.setRequesterPermissions(
- await personas.Eddy.getAddress(),
- true,
- 1,
- )
- })
-
- it('reverts if a round is started before the delay', async () => {
- await aggregator.connect(personas.Eddy).requestNewRound()
-
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
- nextRound = nextRound + 1
-
- // Eddy can't start because of the delay
- await evmRevert(
- aggregator.connect(personas.Eddy).requestNewRound(),
- 'must delay requests',
- )
- // Carol starts a new round instead
- await aggregator.connect(personas.Carol).requestNewRound()
-
- // round completes
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
- nextRound = nextRound + 1
-
- // now Eddy can start again
- await aggregator.connect(personas.Eddy).requestNewRound()
- })
- })
-
- describe('when all oracles have been removed and then re-added', () => {
- it('does not get stuck', async () => {
- await aggregator
- .connect(personas.Carol)
- .changeOracles([await personas.Neil.getAddress()], [], [], 0, 0, 0)
-
- // advance a few rounds
- for (let i = 0; i < 7; i++) {
- await aggregator.requestNewRound()
- nextRound = nextRound + 1
- await increaseTimeBy(timeout + 1, ethers.provider)
- await mineBlock(ethers.provider)
- }
-
- await addOracles(aggregator, [personas.Neil], 1, 1, 0)
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
- })
- })
- })
-
- describe('#setRequesterPermissions', () => {
- beforeEach(async () => {
- await addOracles(aggregator, [personas.Neil], 1, 1, 0)
-
- await aggregator.connect(personas.Neil).submit(nextRound, answer)
- nextRound = nextRound + 1
- })
-
- describe('when called by the owner', () => {
- it('allows the specified address to start new rounds', async () => {
- await aggregator.setRequesterPermissions(
- await personas.Neil.getAddress(),
- true,
- 0,
- )
-
- await aggregator.connect(personas.Neil).requestNewRound()
- })
-
- it('emits a log announcing the update', async () => {
- await expect(
- aggregator.setRequesterPermissions(
- await personas.Neil.getAddress(),
- true,
- 0,
- ),
- )
- .to.emit(aggregator, 'RequesterPermissionsSet')
- .withArgs(await personas.Neil.getAddress(), true, 0)
- })
-
- describe('when the address is already authorized', () => {
- beforeEach(async () => {
- await aggregator.setRequesterPermissions(
- await personas.Neil.getAddress(),
- true,
- 0,
- )
- })
-
- it('does not emit a log for already authorized accounts', async () => {
- const tx = await aggregator.setRequesterPermissions(
- await personas.Neil.getAddress(),
- true,
- 0,
- )
- const receipt = await tx.wait()
- assert.equal(0, receipt?.logs?.length)
- })
- })
-
- describe('when permission is removed by the owner', () => {
- beforeEach(async () => {
- await aggregator.setRequesterPermissions(
- await personas.Neil.getAddress(),
- true,
- 0,
- )
- })
-
- it('does not allow the specified address to start new rounds', async () => {
- await aggregator.setRequesterPermissions(
- await personas.Neil.getAddress(),
- false,
- 0,
- )
-
- await evmRevert(
- aggregator.connect(personas.Neil).requestNewRound(),
- 'not authorized requester',
- )
- })
-
- it('emits a log announcing the update', async () => {
- await expect(
- aggregator.setRequesterPermissions(
- await personas.Neil.getAddress(),
- false,
- 0,
- ),
- )
- .to.emit(aggregator, 'RequesterPermissionsSet')
- .withArgs(await personas.Neil.getAddress(), false, 0)
- })
-
- it('does not emit a log for accounts without authorization', async () => {
- const tx = await aggregator.setRequesterPermissions(
- await personas.Ned.getAddress(),
- false,
- 0,
- )
- const receipt = await tx.wait()
- assert.equal(0, receipt?.logs?.length)
- })
- })
- })
-
- describe('when called by a stranger', () => {
- it('reverts', async () => {
- await evmRevert(
- aggregator
- .connect(personas.Neil)
- .setRequesterPermissions(await personas.Neil.getAddress(), true, 0),
- 'Only callable by owner',
- )
-
- await evmRevert(
- aggregator.connect(personas.Neil).requestNewRound(),
- 'not authorized requester',
- )
- })
- })
- })
-
- describe('#oracleRoundState', () => {
- describe('when round ID 0 is passed in', () => {
- const previousSubmission = 42
- let baseFunds: any
- let minAnswers: number
- let maxAnswers: number
- let submitters: Signer[]
-
- beforeEach(async () => {
- oracles = [
- personas.Neil,
- personas.Ned,
- personas.Nelly,
- personas.Nancy,
- personas.Norbert,
- ]
- minAnswers = 3
- maxAnswers = 4
-
- await addOracles(aggregator, oracles, minAnswers, maxAnswers, rrDelay)
- submitters = [
- personas.Nelly,
- personas.Ned,
- personas.Neil,
- personas.Nancy,
- ]
- await advanceRound(aggregator, submitters, previousSubmission)
- baseFunds = BigNumber.from(deposit).sub(
- paymentAmount.mul(submitters.length),
- )
- startingState = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 0,
- )
- })
-
- it('returns all of the important round information', async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 0,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: true,
- roundId: 2,
- latestSubmission: previousSubmission,
- startedAt: ShouldNotBeSet,
- timeout: 0,
- availableFunds: baseFunds,
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
-
- it('reverts if called by a contract', async () => {
- testHelper = await testHelperFactory.connect(personas.Carol).deploy()
- await evmRevert(
- testHelper.readOracleRoundState(
- aggregator.address,
- await personas.Neil.getAddress(),
- ),
- 'off-chain reading only',
- )
- })
-
- describe('when the restart delay is not enforced', () => {
- beforeEach(async () => {
- await updateFutureRounds(aggregator, {
- minAnswers,
- maxAnswers,
- restartDelay: 0,
- })
- })
-
- describe('< min submissions and oracle not included', () => {
- beforeEach(async () => {
- await advanceRound(aggregator, [personas.Neil])
- })
-
- it('is eligible to submit', async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 0,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: true,
- roundId: 2,
- latestSubmission: previousSubmission,
- startedAt: ShouldBeSet,
- timeout,
- availableFunds: baseFunds.sub(paymentAmount),
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
- })
-
- describe('< min submissions and oracle included', () => {
- beforeEach(async () => {
- await advanceRound(aggregator, [personas.Nelly])
- })
-
- it('is not eligible to submit', async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 0,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: false,
- roundId: 2,
- latestSubmission: answer,
- startedAt: ShouldBeSet,
- timeout,
- availableFunds: baseFunds.sub(paymentAmount),
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
-
- describe('and timed out', () => {
- beforeEach(async () => {
- await increaseTimeBy(timeout + 1, ethers.provider)
- await mineBlock(ethers.provider)
- })
-
- it('is eligible to submit', async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 0,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: true,
- roundId: 3,
- latestSubmission: answer,
- startedAt: ShouldNotBeSet,
- timeout: 0,
- availableFunds: baseFunds.sub(paymentAmount),
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
- })
- })
-
- describe('>= min submissions and oracle not included', () => {
- beforeEach(async () => {
- await advanceRound(aggregator, [
- personas.Neil,
- personas.Nancy,
- personas.Ned,
- ])
- })
-
- it('is eligible to submit', async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 0,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: true,
- roundId: 2,
- latestSubmission: previousSubmission,
- startedAt: ShouldBeSet,
- timeout,
- availableFunds: baseFunds.sub(paymentAmount.mul(3)),
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
- })
-
- describe('>= min submissions and oracle included', () => {
- beforeEach(async () => {
- await advanceRound(aggregator, [
- personas.Neil,
- personas.Nelly,
- personas.Ned,
- ])
- })
-
- it('is eligible to submit', async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 0,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: true,
- roundId: 3,
- latestSubmission: answer,
- startedAt: ShouldNotBeSet,
- timeout: 0,
- availableFunds: baseFunds.sub(paymentAmount.mul(3)),
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
-
- describe('and timed out', () => {
- beforeEach(async () => {
- await increaseTimeBy(timeout + 1, ethers.provider)
- await mineBlock(ethers.provider)
- })
-
- it('is eligible to submit', async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 0,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: true,
- roundId: 3,
- latestSubmission: answer,
- startedAt: ShouldNotBeSet,
- timeout: 0,
- availableFunds: baseFunds.sub(paymentAmount.mul(3)),
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
- })
- })
-
- describe('max submissions and oracle not included', () => {
- beforeEach(async () => {
- submitters = [
- personas.Neil,
- personas.Ned,
- personas.Nancy,
- personas.Norbert,
- ]
- assert.equal(
- submitters.length,
- maxAnswers,
- 'precondition, please update submitters if maxAnswers changes',
- )
- await advanceRound(aggregator, submitters)
- })
-
- it('is eligible to submit', async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 0,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: true,
- roundId: 3,
- latestSubmission: previousSubmission,
- startedAt: ShouldNotBeSet,
- timeout: 0,
- availableFunds: baseFunds.sub(paymentAmount.mul(4)),
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
- })
-
- describe('max submissions and oracle included', () => {
- beforeEach(async () => {
- submitters = [
- personas.Neil,
- personas.Ned,
- personas.Nelly,
- personas.Nancy,
- ]
- assert.equal(
- submitters.length,
- maxAnswers,
- 'precondition, please update submitters if maxAnswers changes',
- )
- await advanceRound(aggregator, submitters)
- })
-
- it('is eligible to submit', async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 0,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: true,
- roundId: 3,
- latestSubmission: answer,
- startedAt: ShouldNotBeSet,
- timeout: 0,
- availableFunds: baseFunds.sub(paymentAmount.mul(4)),
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
- })
- })
-
- describe('when the restart delay is enforced', () => {
- beforeEach(async () => {
- await updateFutureRounds(aggregator, {
- minAnswers,
- maxAnswers,
- restartDelay: maxAnswers - 1,
- })
- })
-
- describe('< min submissions and oracle not included', () => {
- beforeEach(async () => {
- await advanceRound(aggregator, [personas.Neil, personas.Ned])
- })
-
- it('is eligible to submit', async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 0,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: true,
- roundId: 2,
- latestSubmission: previousSubmission,
- startedAt: ShouldBeSet,
- timeout,
- availableFunds: baseFunds.sub(paymentAmount.mul(2)),
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
- })
-
- describe('< min submissions and oracle included', () => {
- beforeEach(async () => {
- await advanceRound(aggregator, [personas.Neil, personas.Nelly])
- })
-
- it('is not eligible to submit', async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 0,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: false,
- roundId: 2,
- latestSubmission: answer,
- startedAt: ShouldBeSet,
- timeout,
- availableFunds: baseFunds.sub(paymentAmount.mul(2)),
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
-
- describe('and timed out', () => {
- beforeEach(async () => {
- await increaseTimeBy(timeout + 1, ethers.provider)
- await mineBlock(ethers.provider)
- })
-
- it('is eligible to submit', async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 0,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: false,
- roundId: 3,
- latestSubmission: answer,
- startedAt: ShouldNotBeSet,
- timeout: 0,
- availableFunds: baseFunds.sub(paymentAmount.mul(2)),
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
- })
- })
-
- describe('>= min submissions and oracle not included', () => {
- beforeEach(async () => {
- await advanceRound(aggregator, [
- personas.Neil,
- personas.Ned,
- personas.Nancy,
- ])
- })
-
- it('is eligible to submit', async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 0,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: true,
- roundId: 2,
- latestSubmission: previousSubmission,
- startedAt: ShouldBeSet,
- timeout,
- availableFunds: baseFunds.sub(paymentAmount.mul(3)),
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
- })
-
- describe('>= min submissions and oracle included', () => {
- beforeEach(async () => {
- await advanceRound(aggregator, [
- personas.Neil,
- personas.Ned,
- personas.Nelly,
- ])
- })
-
- it('is eligible to submit', async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 0,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: false,
- roundId: 3,
- latestSubmission: answer,
- startedAt: ShouldNotBeSet,
- timeout: 0,
- availableFunds: baseFunds.sub(paymentAmount.mul(3)),
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
-
- describe('and timed out', () => {
- beforeEach(async () => {
- await increaseTimeBy(timeout + 1, ethers.provider)
- await mineBlock(ethers.provider)
- })
-
- it('is eligible to submit', async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 0,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: false, // restart delay enforced
- roundId: 3,
- latestSubmission: answer,
- startedAt: ShouldNotBeSet,
- timeout: 0,
- availableFunds: baseFunds.sub(paymentAmount.mul(3)),
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
- })
- })
-
- describe('max submissions and oracle not included', () => {
- beforeEach(async () => {
- submitters = [
- personas.Neil,
- personas.Ned,
- personas.Nancy,
- personas.Norbert,
- ]
- assert.equal(
- submitters.length,
- maxAnswers,
- 'precondition, please update submitters if maxAnswers changes',
- )
- await advanceRound(aggregator, submitters, answer)
- })
-
- it('is not eligible to submit', async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 0,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: false,
- roundId: 3,
- latestSubmission: previousSubmission,
- startedAt: ShouldNotBeSet,
- timeout: 0, // details have been deleted
- availableFunds: baseFunds.sub(paymentAmount.mul(4)),
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
- })
-
- describe('max submissions and oracle included', () => {
- beforeEach(async () => {
- submitters = [
- personas.Neil,
- personas.Ned,
- personas.Nelly,
- personas.Nancy,
- ]
- assert.equal(
- submitters.length,
- maxAnswers,
- 'precondition, please update submitters if maxAnswers changes',
- )
- await advanceRound(aggregator, submitters, answer)
- })
-
- it('is not eligible to submit', async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 0,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: false,
- roundId: 3,
- latestSubmission: answer,
- startedAt: ShouldNotBeSet,
- timeout: 0,
- availableFunds: baseFunds.sub(paymentAmount.mul(4)),
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
- })
- })
- })
-
- describe('when non-zero round ID 0 is passed in', () => {
- const answers = [0, 42, 47, 52, 57]
- let currentFunds: any
-
- beforeEach(async () => {
- oracles = [personas.Neil, personas.Ned, personas.Nelly]
-
- await addOracles(aggregator, oracles, 2, 3, rrDelay)
- startingState = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 0,
- )
- await advanceRound(aggregator, oracles, answers[1])
- await advanceRound(
- aggregator,
- [personas.Neil, personas.Ned],
- answers[2],
- )
- await advanceRound(aggregator, oracles, answers[3])
- await advanceRound(aggregator, [personas.Neil], answers[4])
- const submissionsSoFar = 9
- currentFunds = BigNumber.from(deposit).sub(
- paymentAmount.mul(submissionsSoFar),
- )
- })
-
- it('returns info about previous rounds', async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 1,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: false,
- roundId: 1,
- latestSubmission: answers[3],
- startedAt: ShouldBeSet,
- timeout: 0,
- availableFunds: currentFunds,
- oracleCount: oracles.length,
- paymentAmount: 0,
- })
- })
-
- it('returns info about previous rounds that were not submitted to', async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 2,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: false,
- roundId: 2,
- latestSubmission: answers[3],
- startedAt: ShouldBeSet,
- timeout,
- availableFunds: currentFunds,
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
-
- describe('for the current round', () => {
- describe('which has not been submitted to', () => {
- it("returns info about the current round that hasn't been submitted to", async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 4,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: true,
- roundId: 4,
- latestSubmission: answers[3],
- startedAt: ShouldBeSet,
- timeout,
- availableFunds: currentFunds,
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
-
- it('returns info about the subsequent round', async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 5,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: false,
- roundId: 5,
- latestSubmission: answers[3],
- startedAt: ShouldNotBeSet,
- timeout: 0,
- availableFunds: currentFunds,
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
- })
-
- describe('which has been submitted to', () => {
- beforeEach(async () => {
- await aggregator.connect(personas.Nelly).submit(4, answers[4])
- })
-
- it("returns info about the current round that hasn't been submitted to", async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 4,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: false,
- roundId: 4,
- latestSubmission: answers[4],
- startedAt: ShouldBeSet,
- timeout,
- availableFunds: currentFunds.sub(paymentAmount),
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
-
- it('returns info about the subsequent round', async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 5,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: true,
- roundId: 5,
- latestSubmission: answers[4],
- startedAt: ShouldNotBeSet,
- timeout: 0,
- availableFunds: currentFunds.sub(paymentAmount),
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
- })
- })
-
- it('returns speculative info about future rounds', async () => {
- const state = await aggregator.oracleRoundState(
- await personas.Nelly.getAddress(),
- 6,
- )
-
- await checkOracleRoundState(state, {
- eligibleToSubmit: false,
- roundId: 6,
- latestSubmission: answers[3],
- startedAt: ShouldNotBeSet,
- timeout: 0,
- availableFunds: currentFunds,
- oracleCount: oracles.length,
- paymentAmount,
- })
- })
- })
- })
-
- describe('#getRoundData', () => {
- let latestRoundId: any
- beforeEach(async () => {
- oracles = [personas.Nelly]
- const minMax = oracles.length
- await addOracles(aggregator, oracles, minMax, minMax, rrDelay)
- await advanceRound(aggregator, oracles, answer)
- latestRoundId = await aggregator.latestRound()
- })
-
- it('returns the relevant round information', async () => {
- const round = await aggregator.getRoundData(latestRoundId)
- bigNumEquals(latestRoundId, round.roundId)
- bigNumEquals(answer, round.answer)
- const nowSeconds = new Date().valueOf() / 1000
- assert.isAbove(round.updatedAt.toNumber(), nowSeconds - 120)
- bigNumEquals(round.updatedAt, round.startedAt)
- bigNumEquals(latestRoundId, round.answeredInRound)
- })
-
- it('reverts if a round is not present', async () => {
- await evmRevert(
- aggregator.getRoundData(latestRoundId.add(1)),
- 'No data present',
- )
- })
-
- it('reverts if a round ID is too big', async () => {
- const overflowedId = BigNumber.from(2).pow(32).add(1)
-
- await evmRevert(aggregator.getRoundData(overflowedId), 'No data present')
- })
- })
-
- describe('#latestRoundData', () => {
- beforeEach(async () => {
- oracles = [personas.Nelly]
- const minMax = oracles.length
- await addOracles(aggregator, oracles, minMax, minMax, rrDelay)
- })
-
- describe('when an answer has already been received', () => {
- beforeEach(async () => {
- await advanceRound(aggregator, oracles, answer)
- })
-
- it('returns the relevant round info without reverting', async () => {
- const round = await aggregator.latestRoundData()
- const latestRoundId = await aggregator.latestRound()
-
- bigNumEquals(latestRoundId, round.roundId)
- bigNumEquals(answer, round.answer)
- const nowSeconds = new Date().valueOf() / 1000
- assert.isAbove(round.updatedAt.toNumber(), nowSeconds - 120)
- bigNumEquals(round.updatedAt, round.startedAt)
- bigNumEquals(latestRoundId, round.answeredInRound)
- })
- })
-
- it('reverts if a round is not present', async () => {
- await evmRevert(aggregator.latestRoundData(), 'No data present')
- })
- })
-
- describe('#latestAnswer', () => {
- beforeEach(async () => {
- oracles = [personas.Nelly]
- const minMax = oracles.length
- await addOracles(aggregator, oracles, minMax, minMax, rrDelay)
- })
-
- describe('when an answer has already been received', () => {
- beforeEach(async () => {
- await advanceRound(aggregator, oracles, answer)
- })
-
- it('returns the latest answer without reverting', async () => {
- bigNumEquals(answer, await aggregator.latestAnswer())
- })
- })
-
- it('returns zero', async () => {
- bigNumEquals(0, await aggregator.latestAnswer())
- })
- })
-
- describe('#setValidator', () => {
- beforeEach(async () => {
- validator = await validatorMockFactory.connect(personas.Carol).deploy()
- assert.equal(emptyAddress, await aggregator.validator())
- })
-
- it('emits a log event showing the validator was changed', async () => {
- await expect(
- aggregator.connect(personas.Carol).setValidator(validator.address),
- )
- .to.emit(aggregator, 'ValidatorUpdated')
- .withArgs(emptyAddress, validator.address)
- assert.equal(validator.address, await aggregator.validator())
-
- await expect(
- aggregator.connect(personas.Carol).setValidator(validator.address),
- ).to.not.emit(aggregator, 'ValidatorUpdated')
- assert.equal(validator.address, await aggregator.validator())
- })
-
- describe('when called by a non-owner', () => {
- it('reverts', async () => {
- await evmRevert(
- aggregator.connect(personas.Neil).setValidator(validator.address),
- 'Only callable by owner',
- )
- })
- })
- })
-
- describe('integrating with historic deviation checker', () => {
- let validator: Contract
- let flags: Contract
- let ac: Contract
- const flaggingThreshold = 1000 // 1%
-
- beforeEach(async () => {
- ac = await acFactory.connect(personas.Carol).deploy()
- flags = await flagsFactory.connect(personas.Carol).deploy(ac.address)
- validator = await validatorFactory
- .connect(personas.Carol)
- .deploy(flags.address, flaggingThreshold)
- await ac.connect(personas.Carol).addAccess(validator.address)
-
- await aggregator.connect(personas.Carol).setValidator(validator.address)
-
- oracles = [personas.Nelly]
- const minMax = oracles.length
- await addOracles(aggregator, oracles, minMax, minMax, rrDelay)
- })
-
- it('raises a flag on with high enough deviation', async () => {
- await aggregator.connect(personas.Nelly).submit(nextRound, 100)
- nextRound++
-
- await expect(aggregator.connect(personas.Nelly).submit(nextRound, 102))
- .to.emit(flags, 'FlagRaised')
- .withArgs(aggregator.address)
- })
-
- it('does not raise a flag with low enough deviation', async () => {
- await aggregator.connect(personas.Nelly).submit(nextRound, 100)
- nextRound++
-
- await expect(
- aggregator.connect(personas.Nelly).submit(nextRound, 101),
- ).to.not.emit(flags, 'FlagRaised')
- })
- })
-})
diff --git a/contracts/test/v0.6/Median.test.ts b/contracts/test/v0.6/Median.test.ts
deleted file mode 100644
index 8aea4722b6d..00000000000
--- a/contracts/test/v0.6/Median.test.ts
+++ /dev/null
@@ -1,237 +0,0 @@
-import { ethers } from 'hardhat'
-import { assert } from 'chai'
-import { Signer, Contract, ContractFactory, BigNumber } from 'ethers'
-import { Personas, getUsers } from '../test-helpers/setup'
-import { bigNumEquals } from '../test-helpers/matchers'
-
-let defaultAccount: Signer
-let medianTestHelperFactory: ContractFactory
-before(async () => {
- const personas: Personas = (await getUsers()).personas
- defaultAccount = personas.Default
- medianTestHelperFactory = await ethers.getContractFactory(
- 'src/v0.6/tests/MedianTestHelper.sol:MedianTestHelper',
- defaultAccount,
- )
-})
-
-describe('Median', () => {
- let median: Contract
-
- beforeEach(async () => {
- median = await medianTestHelperFactory.connect(defaultAccount).deploy()
- })
-
- describe('testing various lists', () => {
- const tests = [
- {
- name: 'ordered ascending',
- responses: [0, 1, 2, 3, 4, 5, 6, 7],
- want: 3,
- },
- {
- name: 'ordered descending',
- responses: [7, 6, 5, 4, 3, 2, 1, 0],
- want: 3,
- },
- {
- name: 'unordered length 1',
- responses: [20],
- want: 20,
- },
- {
- name: 'unordered length 2',
- responses: [20, 0],
- want: 10,
- },
- {
- name: 'unordered length 3',
- responses: [20, 0, 16],
- want: 16,
- },
- {
- name: 'unordered length 4',
- responses: [20, 0, 15, 16],
- want: 15,
- },
- {
- name: 'unordered length 7',
- responses: [1001, 1, 101, 10, 11, 0, 111],
- want: 11,
- },
- {
- name: 'unordered length 9',
- responses: [8, 8, 4, 5, 5, 7, 9, 5, 9],
- want: 7,
- },
- {
- name: 'unordered long',
- responses: [33, 44, 89, 101, 67, 7, 23, 55, 88, 324, 0, 88],
- want: 61, // 67 + 55 / 2
- },
- {
- name: 'unordered longer',
- responses: [
- 333121, 323453, 337654, 345363, 345363, 333456, 335477, 333323,
- 332352, 354648, 983260, 333856, 335468, 376987, 333253, 388867,
- 337879, 333324, 338678,
- ],
- want: 335477,
- },
- {
- name: 'overflowing numbers',
- responses: [
- BigNumber.from(
- '57896044618658097711785492504343953926634992332820282019728792003956564819967',
- ),
- BigNumber.from(
- '57896044618658097711785492504343953926634992332820282019728792003956564819967',
- ),
- ],
- want: BigNumber.from(
- '57896044618658097711785492504343953926634992332820282019728792003956564819967',
- ),
- },
- {
- name: 'overflowing numbers',
- responses: [
- BigNumber.from(
- '57896044618658097711785492504343953926634992332820282019728792003956564819967',
- ),
- BigNumber.from(
- '57896044618658097711785492504343953926634992332820282019728792003956564819966',
- ),
- ],
- want: BigNumber.from(
- '57896044618658097711785492504343953926634992332820282019728792003956564819966',
- ),
- },
- {
- name: 'really long',
- responses: [
- 56, 2, 31, 33, 55, 38, 35, 12, 41, 47, 21, 22, 40, 39, 10, 32, 49, 3,
- 54, 45, 53, 14, 20, 59, 1, 30, 24, 6, 5, 37, 58, 51, 46, 17, 29, 7,
- 27, 9, 43, 8, 34, 42, 28, 23, 57, 0, 11, 48, 52, 50, 15, 16, 26, 25,
- 4, 36, 19, 44, 18, 13,
- ],
- want: 29,
- },
- ]
-
- for (const test of tests) {
- it(test.name, async () => {
- bigNumEquals(test.want, await median.publicGet(test.responses))
- })
- }
- })
-
- // long running (minutes) exhaustive test.
- // skipped because very slow, but useful for thorough validation
- xit('permutations', async () => {
- const permutations = (list: number[]) => {
- const result: number[][] = []
- const used: number[] = []
-
- const permute = (unused: number[]) => {
- if (unused.length == 0) {
- result.push([...used])
- return
- }
-
- for (let i = 0; i < unused.length; i++) {
- const elem = unused.splice(i, 1)[0]
- used.push(elem)
- permute(unused)
- unused.splice(i, 0, elem)
- used.pop()
- }
- }
-
- permute(list)
- return result
- }
-
- {
- const list = [0, 2, 5, 7, 8, 10]
- for (const permuted of permutations(list)) {
- for (let i = 0; i < list.length; i++) {
- for (let j = 0; j < list.length; j++) {
- if (i < j) {
- const foo = await median.publicQuickselectTwo(permuted, i, j)
- bigNumEquals(list[i], foo[0])
- bigNumEquals(list[j], foo[1])
- }
- }
- }
- }
- }
-
- {
- const list = [0, 1, 1, 1, 2]
- for (const permuted of permutations(list)) {
- for (let i = 0; i < list.length; i++) {
- for (let j = 0; j < list.length; j++) {
- if (i < j) {
- const foo = await median.publicQuickselectTwo(permuted, i, j)
- bigNumEquals(list[i], foo[0])
- bigNumEquals(list[j], foo[1])
- }
- }
- }
- }
- }
- })
-
- // Checks the validity of the sorting network in `shortList`
- describe('validate sorting network', () => {
- const net = [
- [0, 1],
- [2, 3],
- [4, 5],
- [0, 2],
- [1, 3],
- [4, 6],
- [1, 2],
- [5, 6],
- [0, 4],
- [1, 5],
- [2, 6],
- [1, 4],
- [3, 6],
- [2, 4],
- [3, 5],
- [3, 4],
- ]
-
- // See: https://en.wikipedia.org/wiki/Sorting_network#Zero-one_principle
- xit('zero-one principle', async () => {
- const sortWithNet = (list: number[]) => {
- for (const [i, j] of net) {
- if (list[i] > list[j]) {
- ;[list[i], list[j]] = [list[j], list[i]]
- }
- }
- }
-
- for (let n = 0; n < (1 << 7) - 1; n++) {
- const list = [
- (n >> 6) & 1,
- (n >> 5) & 1,
- (n >> 4) & 1,
- (n >> 3) & 1,
- (n >> 2) & 1,
- (n >> 1) & 1,
- (n >> 0) & 1,
- ]
- const sum = list.reduce((a, b) => a + b, 0)
- sortWithNet(list)
- const sortedSum = list.reduce((a, b) => a + b, 0)
- assert.equal(sortedSum, sum, 'Number of zeros and ones changed')
- list.reduce((switched, i) => {
- assert.isTrue(!switched || i != 0, 'error at n=' + n.toString())
- return i != 0
- }, false)
- }
- })
- })
-})
diff --git a/contracts/test/v0.6/Owned.test.ts b/contracts/test/v0.6/Owned.test.ts
deleted file mode 100644
index f522b9c44c9..00000000000
--- a/contracts/test/v0.6/Owned.test.ts
+++ /dev/null
@@ -1,84 +0,0 @@
-import { ethers } from 'hardhat'
-import { publicAbi } from '../test-helpers/helpers'
-import { assert, expect } from 'chai'
-import { Signer, Contract, ContractFactory } from 'ethers'
-import { Personas, getUsers } from '../test-helpers/setup'
-
-let personas: Personas
-
-let owner: Signer
-let nonOwner: Signer
-let newOwner: Signer
-
-let ownedFactory: ContractFactory
-let owned: Contract
-
-before(async () => {
- personas = (await getUsers()).personas
- owner = personas.Carol
- nonOwner = personas.Neil
- newOwner = personas.Ned
- ownedFactory = await ethers.getContractFactory(
- 'src/v0.6/Owned.sol:Owned',
- owner,
- )
-})
-
-describe('Owned', () => {
- beforeEach(async () => {
- owned = await ownedFactory.connect(owner).deploy()
- })
-
- it('has a limited public interface [ @skip-coverage ]', async () => {
- publicAbi(owned, ['acceptOwnership', 'owner', 'transferOwnership'])
- })
-
- describe('#constructor', () => {
- it('assigns ownership to the deployer', async () => {
- const [actual, expected] = await Promise.all([
- owner.getAddress(),
- owned.owner(),
- ])
-
- assert.equal(actual, expected)
- })
- })
-
- describe('#transferOwnership', () => {
- describe('when called by an owner', () => {
- it('emits a log', async () => {
- await expect(
- owned.connect(owner).transferOwnership(await newOwner.getAddress()),
- )
- .to.emit(owned, 'OwnershipTransferRequested')
- .withArgs(await owner.getAddress(), await newOwner.getAddress())
- })
- })
- })
-
- describe('when called by anyone but the owner', () => {
- it('reverts', async () =>
- await expect(
- owned.connect(nonOwner).transferOwnership(await newOwner.getAddress()),
- ).to.be.reverted)
- })
-
- describe('#acceptOwnership', () => {
- describe('after #transferOwnership has been called', () => {
- beforeEach(async () => {
- await owned
- .connect(owner)
- .transferOwnership(await newOwner.getAddress())
- })
-
- it('allows the recipient to call it', async () => {
- await expect(owned.connect(newOwner).acceptOwnership())
- .to.emit(owned, 'OwnershipTransferred')
- .withArgs(await owner.getAddress(), await newOwner.getAddress())
- })
-
- it('does not allow a non-recipient to call it', async () =>
- await expect(owned.connect(nonOwner).acceptOwnership()).to.be.reverted)
- })
- })
-})
diff --git a/contracts/test/v0.6/SignedSafeMath.test.ts b/contracts/test/v0.6/SignedSafeMath.test.ts
deleted file mode 100644
index e942f64d6b5..00000000000
--- a/contracts/test/v0.6/SignedSafeMath.test.ts
+++ /dev/null
@@ -1,187 +0,0 @@
-import { ethers } from 'hardhat'
-import { expect } from 'chai'
-import { Signer, Contract, ContractFactory, BigNumber } from 'ethers'
-import { Personas, getUsers } from '../test-helpers/setup'
-import { bigNumEquals } from '../test-helpers/matchers'
-
-let defaultAccount: Signer
-let concreteSignedSafeMathFactory: ContractFactory
-
-before(async () => {
- const personas: Personas = (await getUsers()).personas
- defaultAccount = personas.Default
- concreteSignedSafeMathFactory = await ethers.getContractFactory(
- 'src/v0.6/tests/ConcreteSignedSafeMath.sol:ConcreteSignedSafeMath',
- defaultAccount,
- )
-})
-
-describe('SignedSafeMath', () => {
- // a version of the adder contract where we make all ABI exposed functions constant
- // TODO: submit upstream PR to support constant contract type generation
- let adder: Contract
- let response: BigNumber
-
- const INT256_MAX = BigNumber.from(
- '57896044618658097711785492504343953926634992332820282019728792003956564819967',
- )
- const INT256_MIN = BigNumber.from(
- '-57896044618658097711785492504343953926634992332820282019728792003956564819968',
- )
-
- beforeEach(async () => {
- adder = await concreteSignedSafeMathFactory.connect(defaultAccount).deploy()
- })
-
- describe('#add', () => {
- describe('given a positive and a positive', () => {
- it('works', async () => {
- response = await adder.testAdd(1, 2)
- bigNumEquals(3, response)
- })
-
- it('works with zero', async () => {
- response = await adder.testAdd(INT256_MAX, 0)
- bigNumEquals(INT256_MAX, response)
- })
-
- describe('when both are large enough to overflow', () => {
- it('throws', async () => {
- await expect(adder.testAdd(INT256_MAX, 1)).to.be.revertedWith(
- 'SignedSafeMath: addition overflow',
- )
- })
- })
- })
-
- describe('given a negative and a negative', () => {
- it('works', async () => {
- response = await adder.testAdd(-1, -2)
- bigNumEquals(-3, response)
- })
-
- it('works with zero', async () => {
- response = await adder.testAdd(INT256_MIN, 0)
- bigNumEquals(INT256_MIN, response)
- })
-
- describe('when both are large enough to overflow', () => {
- it('throws', async () => {
- await expect(adder.testAdd(INT256_MIN, -1)).to.be.revertedWith(
- 'SignedSafeMath: addition overflow',
- )
- })
- })
- })
-
- describe('given a positive and a negative', () => {
- it('works', async () => {
- response = await adder.testAdd(1, -2)
- bigNumEquals(-1, response)
- })
- })
-
- describe('given a negative and a positive', () => {
- it('works', async () => {
- response = await adder.testAdd(-1, 2)
- bigNumEquals(1, response)
- })
- })
- })
-
- describe('#avg', () => {
- describe('given a positive and a positive', () => {
- it('works', async () => {
- response = await adder.testAvg(2, 4)
- bigNumEquals(3, response)
- })
-
- it('works with zero', async () => {
- response = await adder.testAvg(0, 4)
- bigNumEquals(2, response)
- response = await adder.testAvg(4, 0)
- bigNumEquals(2, response)
- })
-
- it('works with large numbers', async () => {
- response = await adder.testAvg(INT256_MAX, INT256_MAX)
- bigNumEquals(INT256_MAX, response)
- })
-
- it('rounds towards zero', async () => {
- response = await adder.testAvg(1, 2)
- bigNumEquals(1, response)
- })
- })
-
- describe('given a negative and a negative', () => {
- it('works', async () => {
- response = await adder.testAvg(-2, -4)
- bigNumEquals(-3, response)
- })
-
- it('works with zero', async () => {
- response = await adder.testAvg(0, -4)
- bigNumEquals(-2, response)
- response = await adder.testAvg(-4, 0)
- bigNumEquals(-2, response)
- })
-
- it('works with large numbers', async () => {
- response = await adder.testAvg(INT256_MIN, INT256_MIN)
- bigNumEquals(INT256_MIN, response)
- })
-
- it('rounds towards zero', async () => {
- response = await adder.testAvg(-1, -2)
- bigNumEquals(-1, response)
- })
- })
-
- describe('given a positive and a negative', () => {
- it('works', async () => {
- response = await adder.testAvg(2, -4)
- bigNumEquals(-1, response)
- response = await adder.testAvg(4, -2)
- bigNumEquals(1, response)
- })
-
- it('works with large numbers', async () => {
- response = await adder.testAvg(INT256_MAX, -2)
- bigNumEquals(INT256_MAX.sub(2).div(2), response)
- response = await adder.testAvg(INT256_MAX, INT256_MIN)
- bigNumEquals(0, response)
- })
-
- it('rounds towards zero', async () => {
- response = await adder.testAvg(1, -4)
- bigNumEquals(-1, response)
- response = await adder.testAvg(4, -1)
- bigNumEquals(1, response)
- })
- })
-
- describe('given a negative and a positive', () => {
- it('works', async () => {
- response = await adder.testAvg(-2, 4)
- bigNumEquals(1, response)
- response = await adder.testAvg(-4, 2)
- bigNumEquals(-1, response)
- })
-
- it('works with large numbers', async () => {
- response = await adder.testAvg(INT256_MIN, 2)
- bigNumEquals(INT256_MIN.add(2).div(2), response)
- response = await adder.testAvg(INT256_MIN, INT256_MAX)
- bigNumEquals(0, response)
- })
-
- it('rounds towards zero', async () => {
- response = await adder.testAvg(-1, 4)
- bigNumEquals(1, response)
- response = await adder.testAvg(-4, 1)
- bigNumEquals(-1, response)
- })
- })
- })
-})
diff --git a/contracts/test/v0.6/SimpleReadAccessController.test.ts b/contracts/test/v0.6/SimpleReadAccessController.test.ts
deleted file mode 100644
index 7b76bc38cad..00000000000
--- a/contracts/test/v0.6/SimpleReadAccessController.test.ts
+++ /dev/null
@@ -1,250 +0,0 @@
-import { ethers } from 'hardhat'
-import { publicAbi } from '../test-helpers/helpers'
-import { assert, expect } from 'chai'
-import { Contract, ContractFactory, Transaction } from 'ethers'
-import { Personas, getUsers } from '../test-helpers/setup'
-
-let personas: Personas
-
-let controllerFactory: ContractFactory
-let controller: Contract
-
-before(async () => {
- personas = (await getUsers()).personas
- controllerFactory = await ethers.getContractFactory(
- 'src/v0.6/SimpleReadAccessController.sol:SimpleReadAccessController',
- personas.Carol,
- )
-})
-
-describe('SimpleReadAccessController', () => {
- beforeEach(async () => {
- controller = await controllerFactory.connect(personas.Carol).deploy()
- })
-
- it('has a limited public interface [ @skip-coverage ]', async () => {
- publicAbi(controller, [
- 'hasAccess',
- 'addAccess',
- 'disableAccessCheck',
- 'enableAccessCheck',
- 'removeAccess',
- 'checkEnabled',
- // Owned
- 'acceptOwnership',
- 'owner',
- 'transferOwnership',
- ])
- })
-
- describe('#constructor', () => {
- it('defaults checkEnabled to true', async () => {
- assert(await controller.checkEnabled())
- })
- })
-
- describe('#hasAccess', () => {
- it('allows unauthorized calls originating from the same account', async () => {
- assert.isTrue(
- await controller
- .connect(personas.Eddy)
- .hasAccess(await personas.Eddy.getAddress(), '0x00'),
- )
- })
-
- it('blocks unauthorized calls originating from different accounts', async () => {
- assert.isFalse(
- await controller
- .connect(personas.Carol)
- .hasAccess(await personas.Eddy.getAddress(), '0x00'),
- )
- assert.isFalse(
- await controller
- .connect(personas.Eddy)
- .hasAccess(await personas.Carol.getAddress(), '0x00'),
- )
- })
- })
-
- describe('#addAccess', () => {
- describe('when called by a non-owner', () => {
- it('reverts', async () => {
- await expect(
- controller
- .connect(personas.Eddy)
- .addAccess(await personas.Eddy.getAddress()),
- ).to.be.revertedWith('Only callable by owner')
- })
- })
-
- describe('when called by the owner', () => {
- let tx: Transaction
- beforeEach(async () => {
- assert.isFalse(
- await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'),
- )
- tx = await controller.addAccess(await personas.Eddy.getAddress())
- })
-
- it('adds the address to the controller', async () => {
- assert.isTrue(
- await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'),
- )
- })
-
- it('announces the change via a log', async () => {
- await expect(tx)
- .to.emit(controller, 'AddedAccess')
- .withArgs(await personas.Eddy.getAddress())
- })
-
- describe('when called twice', () => {
- it('does not emit a log', async () => {
- const tx2 = await controller.addAccess(
- await personas.Eddy.getAddress(),
- )
- const receipt = await tx2.wait()
- assert.equal(receipt.events?.length, 0)
- })
- })
- })
- })
-
- describe('#removeAccess', () => {
- beforeEach(async () => {
- await controller.addAccess(await personas.Eddy.getAddress())
- assert.isTrue(
- await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'),
- )
- })
-
- describe('when called by a non-owner', () => {
- it('reverts', async () => {
- await expect(
- controller
- .connect(personas.Eddy)
- .removeAccess(await personas.Eddy.getAddress()),
- ).to.be.revertedWith('Only callable by owner')
- })
- })
-
- describe('when called by the owner', () => {
- let tx: Transaction
- beforeEach(async () => {
- tx = await controller.removeAccess(await personas.Eddy.getAddress())
- })
-
- it('removes the address from the controller', async () => {
- assert.isFalse(
- await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'),
- )
- })
-
- it('announces the change via a log', async () => {
- await expect(tx)
- .to.emit(controller, 'RemovedAccess')
- .withArgs(await personas.Eddy.getAddress())
- })
-
- describe('when called twice', () => {
- it('does not emit a log', async () => {
- const tx2 = await controller.removeAccess(
- await personas.Eddy.getAddress(),
- )
- const receipt = await tx2.wait()
- assert.equal(receipt.events?.length, 0)
- })
- })
- })
- })
-
- describe('#disableAccessCheck', () => {
- describe('when called by a non-owner', () => {
- it('reverts', async () => {
- await expect(
- controller.connect(personas.Eddy).disableAccessCheck(),
- ).to.be.revertedWith('Only callable by owner')
- assert.isTrue(await controller.checkEnabled())
- })
- })
-
- describe('when called by the owner', () => {
- let tx: Transaction
- beforeEach(async () => {
- await controller.addAccess(await personas.Eddy.getAddress())
- tx = await controller.disableAccessCheck()
- })
-
- it('sets checkEnabled to false', async () => {
- assert.isFalse(await controller.checkEnabled())
- })
-
- it('allows users with access', async () => {
- assert.isTrue(
- await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'),
- )
- })
-
- it('allows users without access', async () => {
- assert.isTrue(
- await controller.hasAccess(await personas.Ned.getAddress(), '0x00'),
- )
- })
-
- it('announces the change via a log', async () => {
- await expect(tx).to.emit(controller, 'CheckAccessDisabled')
- })
-
- describe('when called twice', () => {
- it('does not emit a log', async () => {
- const tx2 = await controller.disableAccessCheck()
- const receipt = await tx2.wait()
- assert.equal(receipt.events?.length, 0)
- })
- })
- })
- })
-
- describe('#enableAccessCheck', () => {
- describe('when called by a non-owner', () => {
- it('reverts', async () => {
- await expect(
- controller.connect(personas.Eddy).enableAccessCheck(),
- ).to.be.revertedWith('Only callable by owner')
- })
- })
-
- describe('when called by the owner', () => {
- let tx: Transaction
- beforeEach(async () => {
- await controller.disableAccessCheck()
- await controller.addAccess(await personas.Eddy.getAddress())
- tx = await controller.enableAccessCheck()
- })
-
- it('allows users with access', async () => {
- assert.isTrue(
- await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'),
- )
- })
-
- it('does not allow users without access', async () => {
- assert.isFalse(
- await controller.hasAccess(await personas.Ned.getAddress(), '0x00'),
- )
- })
-
- it('announces the change via a log', async () => {
- expect(tx).to.emit(controller, 'CheckAccessEnabled')
- })
-
- describe('when called twice', () => {
- it('does not emit a log', async () => {
- const tx2 = await controller.enableAccessCheck()
- const receipt = await tx2.wait()
- assert.equal(receipt.events?.length, 0)
- })
- })
- })
- })
-})
diff --git a/contracts/test/v0.6/SimpleWriteAccessController.test.ts b/contracts/test/v0.6/SimpleWriteAccessController.test.ts
deleted file mode 100644
index ae6c1691f91..00000000000
--- a/contracts/test/v0.6/SimpleWriteAccessController.test.ts
+++ /dev/null
@@ -1,214 +0,0 @@
-import { ethers } from 'hardhat'
-import { publicAbi } from '../test-helpers/helpers'
-import { assert, expect } from 'chai'
-import { Contract, ContractFactory, Transaction } from 'ethers'
-import { Personas, getUsers } from '../test-helpers/setup'
-
-let personas: Personas
-
-let controllerFactory: ContractFactory
-let controller: Contract
-
-before(async () => {
- personas = (await getUsers()).personas
- controllerFactory = await ethers.getContractFactory(
- 'src/v0.6/SimpleWriteAccessController.sol:SimpleWriteAccessController',
- personas.Carol,
- )
-})
-
-describe('SimpleWriteAccessController', () => {
- beforeEach(async () => {
- controller = await controllerFactory.connect(personas.Carol).deploy()
- })
-
- it('has a limited public interface [ @skip-coverage ]', async () => {
- publicAbi(controller, [
- 'hasAccess',
- 'addAccess',
- 'disableAccessCheck',
- 'enableAccessCheck',
- 'removeAccess',
- 'checkEnabled',
- // Owned
- 'acceptOwnership',
- 'owner',
- 'transferOwnership',
- ])
- })
-
- describe('#constructor', () => {
- it('defaults checkEnabled to true', async () => {
- assert(await controller.checkEnabled())
- })
- })
-
- describe('#hasAccess', () => {
- it('allows unauthorized calls originating from the same account', async () => {
- assert.isFalse(
- await controller
- .connect(personas.Eddy)
- .hasAccess(await personas.Eddy.getAddress(), '0x00'),
- )
- })
-
- it('blocks unauthorized calls originating from different accounts', async () => {
- assert.isFalse(
- await controller
- .connect(personas.Carol)
- .hasAccess(await personas.Eddy.getAddress(), '0x00'),
- )
- assert.isFalse(
- await controller
- .connect(personas.Eddy)
- .hasAccess(await personas.Carol.getAddress(), '0x00'),
- )
- })
- })
-
- describe('#addAccess', () => {
- describe('when called by a non-owner', () => {
- it('reverts', async () => {
- await expect(
- controller
- .connect(personas.Eddy)
- .addAccess(await personas.Eddy.getAddress()),
- ).to.be.revertedWith('Only callable by owner')
- })
- })
-
- describe('when called by the owner', () => {
- let tx: Transaction
- beforeEach(async () => {
- assert.isFalse(
- await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'),
- )
- tx = await controller.addAccess(await personas.Eddy.getAddress())
- })
-
- it('adds the address to the controller', async () => {
- assert.isTrue(
- await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'),
- )
- })
-
- it('announces the change via a log', async () => {
- expect(tx)
- .to.emit(controller, 'AddedAccess')
- .withArgs(await personas.Eddy.getAddress())
- })
- })
- })
-
- describe('#removeAccess', () => {
- beforeEach(async () => {
- await controller.addAccess(await personas.Eddy.getAddress())
- assert.isTrue(
- await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'),
- )
- })
-
- describe('when called by a non-owner', () => {
- it('reverts', async () => {
- await expect(
- controller
- .connect(personas.Eddy)
- .removeAccess(await personas.Eddy.getAddress()),
- ).to.be.revertedWith('Only callable by owner')
- })
- })
-
- describe('when called by the owner', () => {
- let tx: Transaction
- beforeEach(async () => {
- tx = await controller.removeAccess(await personas.Eddy.getAddress())
- })
-
- it('removes the address from the controller', async () => {
- assert.isFalse(
- await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'),
- )
- })
-
- it('announces the change via a log', async () => {
- expect(tx)
- .to.emit(controller, 'RemovedAccess')
- .withArgs(await personas.Eddy.getAddress())
- })
- })
- })
-
- describe('#disableAccessCheck', () => {
- describe('when called by a non-owner', () => {
- it('reverts', async () => {
- await expect(
- controller.connect(personas.Eddy).disableAccessCheck(),
- ).to.be.revertedWith('Only callable by owner')
- assert.isTrue(await controller.checkEnabled())
- })
- })
-
- describe('when called by the owner', () => {
- let tx: Transaction
- beforeEach(async () => {
- await controller.addAccess(await personas.Eddy.getAddress())
- tx = await controller.disableAccessCheck()
- })
-
- it('sets checkEnabled to false', async () => {
- assert.isFalse(await controller.checkEnabled())
- })
-
- it('allows users with access', async () => {
- assert.isTrue(
- await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'),
- )
- })
-
- it('allows users without access', async () => {
- assert.isTrue(
- await controller.hasAccess(await personas.Ned.getAddress(), '0x00'),
- )
- })
-
- it('announces the change via a log', async () => {
- await expect(tx).to.emit(controller, 'CheckAccessDisabled')
- })
- })
- })
-
- describe('#enableAccessCheck', () => {
- describe('when called by a non-owner', () => {
- it('reverts', async () => {
- await expect(
- controller.connect(personas.Eddy).enableAccessCheck(),
- ).to.be.revertedWith('Only callable by owner')
- })
- })
-
- describe('when called by the owner', () => {
- let tx: Transaction
- beforeEach(async () => {
- await controller.disableAccessCheck()
- await controller.addAccess(await personas.Eddy.getAddress())
- tx = await controller.enableAccessCheck()
- })
-
- it('allows users with access', async () => {
- assert.isTrue(
- await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'),
- )
- })
-
- it('does not allow users without access', async () => {
- assert.isFalse(
- await controller.hasAccess(await personas.Ned.getAddress(), '0x00'),
- )
- })
-
- it('announces the change via a log', async () => {
- await expect(tx).to.emit(controller, 'CheckAccessEnabled')
- })
- })
- })
-})
diff --git a/contracts/test/v0.6/VRFD20.test.ts b/contracts/test/v0.6/VRFD20.test.ts
deleted file mode 100644
index 77141be6230..00000000000
--- a/contracts/test/v0.6/VRFD20.test.ts
+++ /dev/null
@@ -1,303 +0,0 @@
-import { ethers } from 'hardhat'
-import { assert, expect } from 'chai'
-import {
- BigNumber,
- constants,
- Contract,
- ContractFactory,
- ContractTransaction,
-} from 'ethers'
-import { getUsers, Personas, Roles } from '../test-helpers/setup'
-import {
- evmWordToAddress,
- getLog,
- publicAbi,
- toBytes32String,
- toWei,
- numToBytes32,
- getLogs,
-} from '../test-helpers/helpers'
-
-let roles: Roles
-let personas: Personas
-let linkTokenFactory: ContractFactory
-let vrfCoordinatorMockFactory: ContractFactory
-let vrfD20Factory: ContractFactory
-
-before(async () => {
- const users = await getUsers()
-
- roles = users.roles
- personas = users.personas
- linkTokenFactory = await ethers.getContractFactory(
- 'src/v0.4/LinkToken.sol:LinkToken',
- roles.defaultAccount,
- )
- vrfCoordinatorMockFactory = await ethers.getContractFactory(
- 'src/v0.6/tests/VRFCoordinatorMock.sol:VRFCoordinatorMock',
- roles.defaultAccount,
- )
- vrfD20Factory = await ethers.getContractFactory(
- 'src/v0.6/examples/VRFD20.sol:VRFD20',
- roles.defaultAccount,
- )
-})
-
-describe('VRFD20', () => {
- const deposit = toWei('1')
- const fee = toWei('0.1')
- const keyHash = toBytes32String('keyHash')
-
- let link: Contract
- let vrfCoordinator: Contract
- let vrfD20: Contract
-
- beforeEach(async () => {
- link = await linkTokenFactory.connect(roles.defaultAccount).deploy()
- vrfCoordinator = await vrfCoordinatorMockFactory
- .connect(roles.defaultAccount)
- .deploy(link.address)
- vrfD20 = await vrfD20Factory
- .connect(roles.defaultAccount)
- .deploy(vrfCoordinator.address, link.address, keyHash, fee)
- await link.transfer(vrfD20.address, deposit)
- })
-
- it('has a limited public interface [ @skip-coverage ]', () => {
- publicAbi(vrfD20, [
- // Owned
- 'acceptOwnership',
- 'owner',
- 'transferOwnership',
- //VRFConsumerBase
- 'rawFulfillRandomness',
- // VRFD20
- 'rollDice',
- 'house',
- 'withdrawLINK',
- 'keyHash',
- 'fee',
- 'setKeyHash',
- 'setFee',
- ])
- })
-
- describe('#withdrawLINK', () => {
- describe('failure', () => {
- it('reverts when called by a non-owner', async () => {
- await expect(
- vrfD20
- .connect(roles.stranger)
- .withdrawLINK(await roles.stranger.getAddress(), deposit),
- ).to.be.revertedWith('Only callable by owner')
- })
-
- it('reverts when not enough LINK in the contract', async () => {
- const withdrawAmount = deposit.mul(2)
- await expect(
- vrfD20
- .connect(roles.defaultAccount)
- .withdrawLINK(
- await roles.defaultAccount.getAddress(),
- withdrawAmount,
- ),
- ).to.be.reverted
- })
- })
-
- describe('success', () => {
- it('withdraws LINK', async () => {
- const startingAmount = await link.balanceOf(
- await roles.defaultAccount.getAddress(),
- )
- const expectedAmount = BigNumber.from(startingAmount).add(deposit)
- await vrfD20
- .connect(roles.defaultAccount)
- .withdrawLINK(await roles.defaultAccount.getAddress(), deposit)
- const actualAmount = await link.balanceOf(
- await roles.defaultAccount.getAddress(),
- )
- assert.equal(actualAmount.toString(), expectedAmount.toString())
- })
- })
- })
-
- describe('#setKeyHash', () => {
- const newHash = toBytes32String('newhash')
-
- describe('failure', () => {
- it('reverts when called by a non-owner', async () => {
- await expect(
- vrfD20.connect(roles.stranger).setKeyHash(newHash),
- ).to.be.revertedWith('Only callable by owner')
- })
- })
-
- describe('success', () => {
- it('sets the key hash', async () => {
- await vrfD20.setKeyHash(newHash)
- const actualHash = await vrfD20.keyHash()
- assert.equal(actualHash, newHash)
- })
- })
- })
-
- describe('#setFee', () => {
- const newFee = 1234
-
- describe('failure', () => {
- it('reverts when called by a non-owner', async () => {
- await expect(
- vrfD20.connect(roles.stranger).setFee(newFee),
- ).to.be.revertedWith('Only callable by owner')
- })
- })
-
- describe('success', () => {
- it('sets the fee', async () => {
- await vrfD20.setFee(newFee)
- const actualFee = await vrfD20.fee()
- assert.equal(actualFee.toString(), newFee.toString())
- })
- })
- })
-
- describe('#house', () => {
- describe('failure', () => {
- it('reverts when dice not rolled', async () => {
- await expect(
- vrfD20.house(await personas.Nancy.getAddress()),
- ).to.be.revertedWith('Dice not rolled')
- })
-
- it('reverts when dice roll is in progress', async () => {
- await vrfD20.rollDice(await personas.Nancy.getAddress())
- await expect(
- vrfD20.house(await personas.Nancy.getAddress()),
- ).to.be.revertedWith('Roll in progress')
- })
- })
-
- describe('success', () => {
- it('returns the correct house', async () => {
- const randomness = 98765
- const expectedHouse = 'Martell'
- const tx = await vrfD20.rollDice(await personas.Nancy.getAddress())
- const log = await getLog(tx, 3)
- const eventRequestId = log?.topics?.[1]
- await vrfCoordinator.callBackWithRandomness(
- eventRequestId,
- randomness,
- vrfD20.address,
- )
- const response = await vrfD20.house(await personas.Nancy.getAddress())
- assert.equal(response.toString(), expectedHouse)
- })
- })
- })
-
- describe('#rollDice', () => {
- describe('success', () => {
- let tx: ContractTransaction
- beforeEach(async () => {
- tx = await vrfD20.rollDice(await personas.Nancy.getAddress())
- })
-
- it('emits a RandomnessRequest event from the VRFCoordinator', async () => {
- const log = await getLog(tx, 2)
- const topics = log?.topics
- assert.equal(evmWordToAddress(topics?.[1]), vrfD20.address)
- assert.equal(topics?.[2], keyHash)
- assert.equal(topics?.[3], constants.HashZero)
- })
- })
-
- describe('failure', () => {
- it('reverts when LINK balance is zero', async () => {
- const vrfD202 = await vrfD20Factory
- .connect(roles.defaultAccount)
- .deploy(vrfCoordinator.address, link.address, keyHash, fee)
- await expect(
- vrfD202.rollDice(await personas.Nancy.getAddress()),
- ).to.be.revertedWith('Not enough LINK to pay fee')
- })
-
- it('reverts when called by a non-owner', async () => {
- await expect(
- vrfD20
- .connect(roles.stranger)
- .rollDice(await personas.Nancy.getAddress()),
- ).to.be.revertedWith('Only callable by owner')
- })
-
- it('reverts when the roller rolls more than once', async () => {
- await vrfD20.rollDice(await personas.Nancy.getAddress())
- await expect(
- vrfD20.rollDice(await personas.Nancy.getAddress()),
- ).to.be.revertedWith('Already rolled')
- })
- })
- })
-
- describe('#fulfillRandomness', () => {
- const randomness = 98765
- const expectedModResult = (randomness % 20) + 1
- const expectedHouse = 'Martell'
- let eventRequestId: string
- beforeEach(async () => {
- const tx = await vrfD20.rollDice(await personas.Nancy.getAddress())
- const log = await getLog(tx, 3)
- eventRequestId = log?.topics?.[1]
- })
-
- describe('success', () => {
- let tx: ContractTransaction
- beforeEach(async () => {
- tx = await vrfCoordinator.callBackWithRandomness(
- eventRequestId,
- randomness,
- vrfD20.address,
- )
- })
-
- it('emits a DiceLanded event', async () => {
- const log = await getLog(tx, 0)
- assert.equal(log?.topics[1], eventRequestId)
- assert.equal(log?.topics[2], numToBytes32(expectedModResult))
- })
-
- it('sets the correct dice roll result', async () => {
- const response = await vrfD20.house(await personas.Nancy.getAddress())
- assert.equal(response.toString(), expectedHouse)
- })
-
- it('allows someone else to roll', async () => {
- const secondRandomness = 55555
- tx = await vrfD20.rollDice(await personas.Ned.getAddress())
- const log = await getLog(tx, 3)
- eventRequestId = log?.topics?.[1]
- tx = await vrfCoordinator.callBackWithRandomness(
- eventRequestId,
- secondRandomness,
- vrfD20.address,
- )
- })
- })
-
- describe('failure', () => {
- it('does not fulfill when fulfilled by the wrong VRFcoordinator', async () => {
- const vrfCoordinator2 = await vrfCoordinatorMockFactory
- .connect(roles.defaultAccount)
- .deploy(link.address)
-
- const tx = await vrfCoordinator2.callBackWithRandomness(
- eventRequestId,
- randomness,
- vrfD20.address,
- )
- const logs = await getLogs(tx)
- assert.equal(logs.length, 0)
- })
- })
- })
-})
diff --git a/contracts/test/v0.7/AggregatorProxy.test.ts b/contracts/test/v0.7/AggregatorProxy.test.ts
deleted file mode 100644
index 6e8ee41983d..00000000000
--- a/contracts/test/v0.7/AggregatorProxy.test.ts
+++ /dev/null
@@ -1,743 +0,0 @@
-import { ethers } from 'hardhat'
-import {
- increaseTimeBy,
- numToBytes32,
- publicAbi,
- toWei,
-} from '../test-helpers/helpers'
-import { assert } from 'chai'
-import { BigNumber, constants, Contract, ContractFactory, Signer } from 'ethers'
-import { Personas, getUsers } from '../test-helpers/setup'
-import { bigNumEquals, evmRevert } from '../test-helpers/matchers'
-
-let personas: Personas
-let defaultAccount: Signer
-
-let linkTokenFactory: ContractFactory
-let aggregatorFactory: ContractFactory
-let historicAggregatorFactory: ContractFactory
-let aggregatorFacadeFactory: ContractFactory
-let aggregatorProxyFactory: ContractFactory
-let fluxAggregatorFactory: ContractFactory
-let reverterFactory: ContractFactory
-
-before(async () => {
- const users = await getUsers()
-
- personas = users.personas
- defaultAccount = users.roles.defaultAccount
-
- linkTokenFactory = await ethers.getContractFactory(
- 'src/v0.4/LinkToken.sol:LinkToken',
- defaultAccount,
- )
- aggregatorFactory = await ethers.getContractFactory(
- 'src/v0.7/tests/MockV3Aggregator.sol:MockV3Aggregator',
- defaultAccount,
- )
- historicAggregatorFactory = await ethers.getContractFactory(
- 'src/v0.7/tests/MockV2Aggregator.sol:MockV2Aggregator',
- defaultAccount,
- )
- aggregatorFacadeFactory = await ethers.getContractFactory(
- 'src/v0.6/AggregatorFacade.sol:AggregatorFacade',
- defaultAccount,
- )
- historicAggregatorFactory = await ethers.getContractFactory(
- 'src/v0.7/tests/MockV2Aggregator.sol:MockV2Aggregator',
- defaultAccount,
- )
- aggregatorFacadeFactory = await ethers.getContractFactory(
- 'src/v0.6/AggregatorFacade.sol:AggregatorFacade',
- defaultAccount,
- )
- aggregatorProxyFactory = await ethers.getContractFactory(
- 'src/v0.7/dev/AggregatorProxy.sol:AggregatorProxy',
- defaultAccount,
- )
- fluxAggregatorFactory = await ethers.getContractFactory(
- 'src/v0.6/FluxAggregator.sol:FluxAggregator',
- defaultAccount,
- )
- reverterFactory = await ethers.getContractFactory(
- 'src/v0.6/tests/Reverter.sol:Reverter',
- defaultAccount,
- )
-})
-
-describe('AggregatorProxy', () => {
- const deposit = toWei('100')
- const response = numToBytes32(54321)
- const response2 = numToBytes32(67890)
- const decimals = 18
- const phaseBase = BigNumber.from(2).pow(64)
-
- let link: Contract
- let aggregator: Contract
- let aggregator2: Contract
- let historicAggregator: Contract
- let proxy: Contract
- let flux: Contract
- let reverter: Contract
-
- beforeEach(async () => {
- link = await linkTokenFactory.connect(defaultAccount).deploy()
- aggregator = await aggregatorFactory
- .connect(defaultAccount)
- .deploy(decimals, response)
- await link.transfer(aggregator.address, deposit)
- proxy = await aggregatorProxyFactory
- .connect(defaultAccount)
- .deploy(aggregator.address)
- const emptyAddress = constants.AddressZero
- flux = await fluxAggregatorFactory
- .connect(personas.Carol)
- .deploy(link.address, 0, 0, emptyAddress, 0, 0, 18, 'TEST / LINK')
- })
-
- it('has a limited public interface [ @skip-coverage ]', () => {
- publicAbi(proxy, [
- 'aggregator',
- 'confirmAggregator',
- 'decimals',
- 'description',
- 'getAnswer',
- 'getRoundData',
- 'getTimestamp',
- 'latestAnswer',
- 'latestRound',
- 'latestRoundData',
- 'latestTimestamp',
- 'phaseAggregators',
- 'phaseId',
- 'proposeAggregator',
- 'proposedAggregator',
- 'proposedGetRoundData',
- 'proposedLatestRoundData',
- 'version',
- // Ownable methods:
- 'acceptOwnership',
- 'owner',
- 'transferOwnership',
- ])
- })
-
- describe('constructor', () => {
- it('sets the proxy phase and aggregator', async () => {
- bigNumEquals(1, await proxy.phaseId())
- assert.equal(aggregator.address, await proxy.phaseAggregators(1))
- })
- })
-
- describe('#latestRound', () => {
- it('pulls the rate from the aggregator', async () => {
- bigNumEquals(phaseBase.add(1), await proxy.latestRound())
- })
- })
-
- describe('#latestAnswer', () => {
- it('pulls the rate from the aggregator', async () => {
- bigNumEquals(response, await proxy.latestAnswer())
- const latestRound = await proxy.latestRound()
- bigNumEquals(response, await proxy.getAnswer(latestRound))
- })
-
- describe('after being updated to another contract', () => {
- beforeEach(async () => {
- aggregator2 = await aggregatorFactory
- .connect(defaultAccount)
- .deploy(decimals, response2)
- await link.transfer(aggregator2.address, deposit)
- bigNumEquals(response2, await aggregator2.latestAnswer())
-
- await proxy.proposeAggregator(aggregator2.address)
- await proxy.confirmAggregator(aggregator2.address)
- })
-
- it('pulls the rate from the new aggregator', async () => {
- bigNumEquals(response2, await proxy.latestAnswer())
- const latestRound = await proxy.latestRound()
- bigNumEquals(response2, await proxy.getAnswer(latestRound))
- })
- })
-
- describe('when the relevant info is not available', () => {
- beforeEach(async () => {
- await proxy.proposeAggregator(flux.address)
- await proxy.confirmAggregator(flux.address)
- })
-
- it('does not revert when called with a non existent ID', async () => {
- const actual = await proxy.latestAnswer()
- bigNumEquals(0, actual)
- })
- })
- })
-
- describe('#getAnswer', () => {
- describe('when the relevant round is not available', () => {
- beforeEach(async () => {
- await proxy.proposeAggregator(flux.address)
- await proxy.confirmAggregator(flux.address)
- })
-
- it('does not revert when called with a non existent ID', async () => {
- const proxyId = phaseBase.mul(await proxy.phaseId()).add(1)
- const actual = await proxy.getAnswer(proxyId)
- bigNumEquals(0, actual)
- })
- })
-
- describe('when the answer reverts in a non-predicted way', () => {
- it('reverts', async () => {
- reverter = await reverterFactory.connect(defaultAccount).deploy()
- await proxy.proposeAggregator(reverter.address)
- await proxy.confirmAggregator(reverter.address)
- assert.equal(reverter.address, await proxy.aggregator())
-
- const proxyId = phaseBase.mul(await proxy.phaseId())
-
- await evmRevert(proxy.getAnswer(proxyId), 'Raised by Reverter.sol')
- })
- })
-
- describe('after being updated to another contract', () => {
- let preUpdateRoundId: BigNumber
- let preUpdateAnswer: BigNumber
-
- beforeEach(async () => {
- preUpdateRoundId = await proxy.latestRound()
- preUpdateAnswer = await proxy.latestAnswer()
-
- aggregator2 = await aggregatorFactory
- .connect(defaultAccount)
- .deploy(decimals, response2)
- await link.transfer(aggregator2.address, deposit)
- bigNumEquals(response2, await aggregator2.latestAnswer())
-
- await proxy.proposeAggregator(aggregator2.address)
- await proxy.confirmAggregator(aggregator2.address)
- })
-
- it('reports answers for previous phases', async () => {
- const actualAnswer = await proxy.getAnswer(preUpdateRoundId)
- bigNumEquals(preUpdateAnswer, actualAnswer)
- })
- })
-
- describe('when the relevant info is not available', () => {
- it('returns 0', async () => {
- const actual = await proxy.getAnswer(phaseBase.mul(777))
- bigNumEquals(0, actual)
- })
- })
-
- describe('when the round ID is too large', () => {
- const overflowRoundId = BigNumber.from(2)
- .pow(255)
- .add(phaseBase) // get the original phase
- .add(1) // get the original round
- it('returns 0', async () => {
- const actual = await proxy.getTimestamp(overflowRoundId)
- bigNumEquals(0, actual)
- })
- })
- })
-
- describe('#getTimestamp', () => {
- describe('when the relevant round is not available', () => {
- beforeEach(async () => {
- await proxy.proposeAggregator(flux.address)
- await proxy.confirmAggregator(flux.address)
- })
-
- it('does not revert when called with a non existent ID', async () => {
- const proxyId = phaseBase.mul(await proxy.phaseId()).add(1)
- const actual = await proxy.getTimestamp(proxyId)
- bigNumEquals(0, actual)
- })
- })
-
- describe('when the relevant info is not available', () => {
- it('returns 0', async () => {
- const actual = await proxy.getTimestamp(phaseBase.mul(777))
- bigNumEquals(0, actual)
- })
- })
-
- describe('when the round ID is too large', () => {
- const overflowRoundId = BigNumber.from(2)
- .pow(255)
- .add(phaseBase) // get the original phase
- .add(1) // get the original round
-
- it('returns 0', async () => {
- const actual = await proxy.getTimestamp(overflowRoundId)
- bigNumEquals(0, actual)
- })
- })
- })
-
- describe('#latestTimestamp', () => {
- beforeEach(async () => {
- const height = await aggregator.latestTimestamp()
- assert.notEqual('0', height.toString())
- })
-
- it('pulls the timestamp from the aggregator', async () => {
- bigNumEquals(
- await aggregator.latestTimestamp(),
- await proxy.latestTimestamp(),
- )
- const latestRound = await proxy.latestRound()
- bigNumEquals(
- await aggregator.latestTimestamp(),
- await proxy.getTimestamp(latestRound),
- )
- })
-
- describe('after being updated to another contract', () => {
- beforeEach(async () => {
- await increaseTimeBy(30, ethers.provider)
- aggregator2 = await aggregatorFactory
- .connect(defaultAccount)
- .deploy(decimals, response2)
-
- const height2 = await aggregator2.latestTimestamp()
- assert.notEqual('0', height2.toString())
-
- const height1 = await aggregator.latestTimestamp()
- assert.notEqual(
- height1.toString(),
- height2.toString(),
- 'Height1 and Height2 should not be equal',
- )
-
- await proxy.proposeAggregator(aggregator2.address)
- await proxy.confirmAggregator(aggregator2.address)
- })
-
- it('pulls the timestamp from the new aggregator', async () => {
- bigNumEquals(
- await aggregator2.latestTimestamp(),
- await proxy.latestTimestamp(),
- )
- const latestRound = await proxy.latestRound()
- bigNumEquals(
- await aggregator2.latestTimestamp(),
- await proxy.getTimestamp(latestRound),
- )
- })
- })
- })
-
- describe('#getRoundData', () => {
- describe('when pointed at a Historic Aggregator', () => {
- beforeEach(async () => {
- historicAggregator = await historicAggregatorFactory
- .connect(defaultAccount)
- .deploy(response2)
- await proxy.proposeAggregator(historicAggregator.address)
- await proxy.confirmAggregator(historicAggregator.address)
- })
-
- it('reverts', async () => {
- const latestRoundId = await historicAggregator.latestRound()
- await evmRevert(proxy.getRoundData(latestRoundId))
- })
-
- describe('when pointed at an Aggregator Facade', () => {
- beforeEach(async () => {
- const facade = await aggregatorFacadeFactory
- .connect(defaultAccount)
- .deploy(aggregator.address, 18, 'LINK/USD: Aggregator Facade')
- await proxy.proposeAggregator(facade.address)
- await proxy.confirmAggregator(facade.address)
- })
-
- it('works for a valid roundId', async () => {
- const aggId = await aggregator.latestRound()
- const phaseId = phaseBase.mul(await proxy.phaseId())
- const proxyId = phaseId.add(aggId)
-
- const round = await proxy.getRoundData(proxyId)
- bigNumEquals(proxyId, round.id)
- bigNumEquals(response, round.answer)
- const nowSeconds = new Date().valueOf() / 1000
- assert.isAbove(round.updatedAt.toNumber(), nowSeconds - 120)
- bigNumEquals(round.updatedAt, round.startedAt)
- bigNumEquals(proxyId, round.answeredInRound)
- })
- })
- })
-
- describe('when pointed at a FluxAggregator', () => {
- beforeEach(async () => {
- aggregator2 = await aggregatorFactory
- .connect(defaultAccount)
- .deploy(decimals, response2)
-
- await proxy.proposeAggregator(aggregator2.address)
- await proxy.confirmAggregator(aggregator2.address)
- })
-
- it('works for a valid round ID', async () => {
- const aggId = phaseBase.sub(2)
- await aggregator2
- .connect(personas.Carol)
- .updateRoundData(aggId, response2, 77, 42)
-
- const phaseId = phaseBase.mul(await proxy.phaseId())
- const proxyId = phaseId.add(aggId)
-
- const round = await proxy.getRoundData(proxyId)
- bigNumEquals(proxyId, round.id)
- bigNumEquals(response2, round.answer)
- bigNumEquals(42, round.startedAt)
- bigNumEquals(77, round.updatedAt)
- bigNumEquals(proxyId, round.answeredInRound)
- })
- })
-
- it('reads round ID of a previous phase', async () => {
- const oldphaseId = phaseBase.mul(await proxy.phaseId())
- aggregator2 = await aggregatorFactory
- .connect(defaultAccount)
- .deploy(decimals, response2)
-
- await proxy.proposeAggregator(aggregator2.address)
- await proxy.confirmAggregator(aggregator2.address)
-
- const aggId = await aggregator.latestRound()
- const proxyId = oldphaseId.add(aggId)
-
- const round = await proxy.getRoundData(proxyId)
- bigNumEquals(proxyId, round.id)
- bigNumEquals(response, round.answer)
-
- const nowSeconds = new Date().valueOf() / 1000
- assert.isAbove(round.startedAt.toNumber(), nowSeconds - 120)
- bigNumEquals(round.startedAt, round.updatedAt)
- bigNumEquals(proxyId, round.answeredInRound)
- })
- })
-
- describe('#latestRoundData', () => {
- describe('when pointed at a Historic Aggregator', () => {
- beforeEach(async () => {
- historicAggregator = await historicAggregatorFactory
- .connect(defaultAccount)
- .deploy(response2)
- await proxy.proposeAggregator(historicAggregator.address)
- await proxy.confirmAggregator(historicAggregator.address)
- })
-
- it('reverts', async () => {
- await evmRevert(proxy.latestRoundData())
- })
-
- describe('when pointed at an Aggregator Facade', () => {
- beforeEach(async () => {
- const facade = await aggregatorFacadeFactory
- .connect(defaultAccount)
- .deploy(
- historicAggregator.address,
- 17,
- 'DOGE/ZWL: Aggregator Facade',
- )
- await proxy.proposeAggregator(facade.address)
- await proxy.confirmAggregator(facade.address)
- })
-
- it('does not revert', async () => {
- const aggId = await historicAggregator.latestRound()
- const phaseId = phaseBase.mul(await proxy.phaseId())
- const proxyId = phaseId.add(aggId)
-
- const round = await proxy.latestRoundData()
- bigNumEquals(proxyId, round.id)
- bigNumEquals(response2, round.answer)
- const nowSeconds = new Date().valueOf() / 1000
- assert.isAbove(round.updatedAt.toNumber(), nowSeconds - 120)
- bigNumEquals(round.updatedAt, round.startedAt)
- bigNumEquals(proxyId, round.answeredInRound)
- })
-
- it('uses the decimals set in the constructor', async () => {
- bigNumEquals(17, await proxy.decimals())
- })
-
- it('uses the description set in the constructor', async () => {
- assert.equal('DOGE/ZWL: Aggregator Facade', await proxy.description())
- })
-
- it('sets the version to 2', async () => {
- bigNumEquals(2, await proxy.version())
- })
- })
- })
-
- describe('when pointed at a FluxAggregator', () => {
- beforeEach(async () => {
- aggregator2 = await aggregatorFactory
- .connect(defaultAccount)
- .deploy(decimals, response2)
-
- await proxy.proposeAggregator(aggregator2.address)
- await proxy.confirmAggregator(aggregator2.address)
- })
-
- it('does not revert', async () => {
- const aggId = phaseBase.sub(2)
- await aggregator2
- .connect(personas.Carol)
- .updateRoundData(aggId, response2, 77, 42)
-
- const phaseId = phaseBase.mul(await proxy.phaseId())
- const proxyId = phaseId.add(aggId)
-
- const round = await proxy.latestRoundData()
- bigNumEquals(proxyId, round.id)
- bigNumEquals(response2, round.answer)
- bigNumEquals(42, round.startedAt)
- bigNumEquals(77, round.updatedAt)
- bigNumEquals(proxyId, round.answeredInRound)
- })
-
- it('uses the decimals of the aggregator', async () => {
- bigNumEquals(18, await proxy.decimals())
- })
-
- it('uses the description of the aggregator', async () => {
- assert.equal(
- 'v0.6/tests/MockV3Aggregator.sol',
- await proxy.description(),
- )
- })
-
- it('uses the version of the aggregator', async () => {
- bigNumEquals(0, await proxy.version())
- })
- })
- })
-
- describe('#proposeAggregator', () => {
- beforeEach(async () => {
- await proxy.transferOwnership(await personas.Carol.getAddress())
- await proxy.connect(personas.Carol).acceptOwnership()
-
- aggregator2 = await aggregatorFactory
- .connect(defaultAccount)
- .deploy(decimals, 1)
-
- assert.equal(aggregator.address, await proxy.aggregator())
- })
-
- describe('when called by the owner', () => {
- it('sets the address of the proposed aggregator', async () => {
- await proxy
- .connect(personas.Carol)
- .proposeAggregator(aggregator2.address)
-
- assert.equal(aggregator2.address, await proxy.proposedAggregator())
- })
-
- it('emits an AggregatorProposed event', async () => {
- const tx = await proxy
- .connect(personas.Carol)
- .proposeAggregator(aggregator2.address)
- const receipt = await tx.wait()
- const eventLog = receipt?.events
-
- assert.equal(eventLog?.length, 1)
- assert.equal(eventLog?.[0].event, 'AggregatorProposed')
- assert.equal(eventLog?.[0].args?.[0], aggregator.address)
- assert.equal(eventLog?.[0].args?.[1], aggregator2.address)
- })
- })
-
- describe('when called by a non-owner', () => {
- it('does not update', async () => {
- await evmRevert(
- proxy.connect(personas.Neil).proposeAggregator(aggregator2.address),
- 'Only callable by owner',
- )
-
- assert.equal(aggregator.address, await proxy.aggregator())
- })
- })
- })
-
- describe('#confirmAggregator', () => {
- beforeEach(async () => {
- await proxy.transferOwnership(await personas.Carol.getAddress())
- await proxy.connect(personas.Carol).acceptOwnership()
-
- aggregator2 = await aggregatorFactory
- .connect(defaultAccount)
- .deploy(decimals, 1)
-
- assert.equal(aggregator.address, await proxy.aggregator())
- })
-
- describe('when called by the owner', () => {
- beforeEach(async () => {
- await proxy
- .connect(personas.Carol)
- .proposeAggregator(aggregator2.address)
- })
-
- it('sets the address of the new aggregator', async () => {
- await proxy
- .connect(personas.Carol)
- .confirmAggregator(aggregator2.address)
-
- assert.equal(aggregator2.address, await proxy.aggregator())
- })
-
- it('increases the phase', async () => {
- bigNumEquals(1, await proxy.phaseId())
-
- await proxy
- .connect(personas.Carol)
- .confirmAggregator(aggregator2.address)
-
- bigNumEquals(2, await proxy.phaseId())
- })
-
- it('increases the round ID', async () => {
- bigNumEquals(phaseBase.add(1), await proxy.latestRound())
-
- await proxy
- .connect(personas.Carol)
- .confirmAggregator(aggregator2.address)
-
- bigNumEquals(phaseBase.mul(2).add(1), await proxy.latestRound())
- })
-
- it('sets the proxy phase and aggregator', async () => {
- assert.equal(
- '0x0000000000000000000000000000000000000000',
- await proxy.phaseAggregators(2),
- )
-
- await proxy
- .connect(personas.Carol)
- .confirmAggregator(aggregator2.address)
-
- assert.equal(aggregator2.address, await proxy.phaseAggregators(2))
- })
-
- it('emits an AggregatorConfirmed event', async () => {
- const tx = await proxy
- .connect(personas.Carol)
- .confirmAggregator(aggregator2.address)
- const receipt = await tx.wait()
- const eventLog = receipt?.events
-
- assert.equal(eventLog?.length, 1)
- assert.equal(eventLog?.[0].event, 'AggregatorConfirmed')
- assert.equal(eventLog?.[0].args?.[0], aggregator.address)
- assert.equal(eventLog?.[0].args?.[1], aggregator2.address)
- })
- })
-
- describe('when called by a non-owner', () => {
- beforeEach(async () => {
- await proxy
- .connect(personas.Carol)
- .proposeAggregator(aggregator2.address)
- })
-
- it('does not update', async () => {
- await evmRevert(
- proxy.connect(personas.Neil).confirmAggregator(aggregator2.address),
- 'Only callable by owner',
- )
-
- assert.equal(aggregator.address, await proxy.aggregator())
- })
- })
- })
-
- describe('#proposedGetRoundData', () => {
- beforeEach(async () => {
- aggregator2 = await aggregatorFactory
- .connect(defaultAccount)
- .deploy(decimals, response2)
- })
-
- describe('when an aggregator has been proposed', () => {
- beforeEach(async () => {
- await proxy
- .connect(defaultAccount)
- .proposeAggregator(aggregator2.address)
- assert.equal(await proxy.proposedAggregator(), aggregator2.address)
- })
-
- it('returns the data for the proposed aggregator', async () => {
- const roundId = await aggregator2.latestRound()
- const round = await proxy.proposedGetRoundData(roundId)
- bigNumEquals(roundId, round.id)
- bigNumEquals(response2, round.answer)
- })
-
- describe('after the aggregator has been confirmed', () => {
- beforeEach(async () => {
- await proxy
- .connect(defaultAccount)
- .confirmAggregator(aggregator2.address)
- assert.equal(await proxy.aggregator(), aggregator2.address)
- })
-
- it('reverts', async () => {
- const roundId = await aggregator2.latestRound()
- await evmRevert(
- proxy.proposedGetRoundData(roundId),
- 'No proposed aggregator present',
- )
- })
- })
- })
- })
-
- describe('#proposedLatestRoundData', () => {
- beforeEach(async () => {
- aggregator2 = await aggregatorFactory
- .connect(defaultAccount)
- .deploy(decimals, response2)
- })
-
- describe('when an aggregator has been proposed', () => {
- beforeEach(async () => {
- await proxy
- .connect(defaultAccount)
- .proposeAggregator(aggregator2.address)
- assert.equal(await proxy.proposedAggregator(), aggregator2.address)
- })
-
- it('returns the data for the proposed aggregator', async () => {
- const roundId = await aggregator2.latestRound()
- const round = await proxy.proposedLatestRoundData()
- bigNumEquals(roundId, round.id)
- bigNumEquals(response2, round.answer)
- })
-
- describe('after the aggregator has been confirmed', () => {
- beforeEach(async () => {
- await proxy
- .connect(defaultAccount)
- .confirmAggregator(aggregator2.address)
- assert.equal(await proxy.aggregator(), aggregator2.address)
- })
-
- it('reverts', async () => {
- await evmRevert(
- proxy.proposedLatestRoundData(),
- 'No proposed aggregator present',
- )
- })
- })
- })
- })
-})
diff --git a/contracts/test/v0.7/AuthorizedForwarder.test.ts b/contracts/test/v0.7/AuthorizedForwarder.test.ts
deleted file mode 100644
index e1fa2f1f708..00000000000
--- a/contracts/test/v0.7/AuthorizedForwarder.test.ts
+++ /dev/null
@@ -1,444 +0,0 @@
-import { ethers } from 'hardhat'
-import { publicAbi } from '../test-helpers/helpers'
-import { assert, expect } from 'chai'
-import { Contract, ContractFactory, ContractReceipt } from 'ethers'
-import { getUsers, Roles } from '../test-helpers/setup'
-import { evmRevert } from '../test-helpers/matchers'
-
-let getterSetterFactory: ContractFactory
-let forwarderFactory: ContractFactory
-let brokenFactory: ContractFactory
-let linkTokenFactory: ContractFactory
-
-let roles: Roles
-const zeroAddress = ethers.constants.AddressZero
-
-before(async () => {
- const users = await getUsers()
-
- roles = users.roles
- getterSetterFactory = await ethers.getContractFactory(
- 'src/v0.4/tests/GetterSetter.sol:GetterSetter',
- roles.defaultAccount,
- )
- brokenFactory = await ethers.getContractFactory(
- 'src/v0.8/tests/Broken.sol:Broken',
- roles.defaultAccount,
- )
- forwarderFactory = await ethers.getContractFactory(
- 'src/v0.7/AuthorizedForwarder.sol:AuthorizedForwarder',
- roles.defaultAccount,
- )
- linkTokenFactory = await ethers.getContractFactory(
- 'src/v0.4/LinkToken.sol:LinkToken',
- roles.defaultAccount,
- )
-})
-
-describe('AuthorizedForwarder', () => {
- let link: Contract
- let forwarder: Contract
-
- beforeEach(async () => {
- link = await linkTokenFactory.connect(roles.defaultAccount).deploy()
- forwarder = await forwarderFactory
- .connect(roles.defaultAccount)
- .deploy(
- link.address,
- await roles.defaultAccount.getAddress(),
- zeroAddress,
- '0x',
- )
- })
-
- it('has a limited public interface [ @skip-coverage ]', () => {
- publicAbi(forwarder, [
- 'forward',
- 'getAuthorizedSenders',
- 'getChainlinkToken',
- 'isAuthorizedSender',
- 'ownerForward',
- 'setAuthorizedSenders',
- 'transferOwnershipWithMessage',
- 'typeAndVersion',
- // ConfirmedOwner
- 'transferOwnership',
- 'acceptOwnership',
- 'owner',
- ])
- })
-
- describe('#typeAndVersion', () => {
- it('describes the authorized forwarder', async () => {
- assert.equal(
- await forwarder.typeAndVersion(),
- 'AuthorizedForwarder 1.0.0',
- )
- })
- })
-
- describe('deployment', () => {
- it('sets the correct link token', async () => {
- assert.equal(await forwarder.getChainlinkToken(), link.address)
- })
-
- it('reverts on zeroAddress value for link token', async () => {
- await evmRevert(
- forwarderFactory.connect(roles.defaultAccount).deploy(
- zeroAddress, // Link Address
- await roles.defaultAccount.getAddress(),
- zeroAddress,
- '0x',
- ),
- )
- })
-
- it('sets no authorized senders', async () => {
- const senders = await forwarder.getAuthorizedSenders()
- assert.equal(senders.length, 0)
- })
- })
-
- describe('#setAuthorizedSenders', () => {
- let newSenders: string[]
- let receipt: ContractReceipt
- describe('when called by the owner', () => {
- describe('set authorized senders containing duplicate/s', () => {
- beforeEach(async () => {
- newSenders = [
- await roles.oracleNode1.getAddress(),
- await roles.oracleNode1.getAddress(),
- await roles.oracleNode2.getAddress(),
- await roles.oracleNode3.getAddress(),
- ]
- })
- it('reverts with a must not have duplicate senders message', async () => {
- await evmRevert(
- forwarder
- .connect(roles.defaultAccount)
- .setAuthorizedSenders(newSenders),
- 'Must not have duplicate senders',
- )
- })
- })
-
- describe('setting 3 authorized senders', () => {
- beforeEach(async () => {
- newSenders = [
- await roles.oracleNode1.getAddress(),
- await roles.oracleNode2.getAddress(),
- await roles.oracleNode3.getAddress(),
- ]
- const tx = await forwarder
- .connect(roles.defaultAccount)
- .setAuthorizedSenders(newSenders)
- receipt = await tx.wait()
- })
-
- it('adds the authorized nodes', async () => {
- const authorizedSenders = await forwarder.getAuthorizedSenders()
- assert.equal(newSenders.length, authorizedSenders.length)
- for (let i = 0; i < authorizedSenders.length; i++) {
- assert.equal(authorizedSenders[i], newSenders[i])
- }
- })
-
- it('emits an event', async () => {
- assert.equal(receipt.events?.length, 1)
- const responseEvent = receipt.events?.[0]
- assert.equal(responseEvent?.event, 'AuthorizedSendersChanged')
- const encodedSenders = ethers.utils.defaultAbiCoder.encode(
- ['address[]', 'address'],
- [newSenders, await roles.defaultAccount.getAddress()],
- )
- assert.equal(responseEvent?.data, encodedSenders)
- })
-
- it('replaces the authorized nodes', async () => {
- const newSenders = await forwarder
- .connect(roles.defaultAccount)
- .getAuthorizedSenders()
- assert.notIncludeOrderedMembers(newSenders, [
- await roles.oracleNode.getAddress(),
- ])
- })
-
- after(async () => {
- await forwarder
- .connect(roles.defaultAccount)
- .setAuthorizedSenders([await roles.oracleNode.getAddress()])
- })
- })
-
- describe('setting 0 authorized senders', () => {
- beforeEach(async () => {
- newSenders = []
- })
-
- it('reverts with a minimum senders message', async () => {
- await evmRevert(
- forwarder
- .connect(roles.defaultAccount)
- .setAuthorizedSenders(newSenders),
- 'Must have at least 1 sender',
- )
- })
- })
- })
-
- describe('when called by a non-owner', () => {
- it('cannot add an authorized node', async () => {
- await evmRevert(
- forwarder
- .connect(roles.stranger)
- .setAuthorizedSenders([await roles.stranger.getAddress()]),
- 'Cannot set authorized senders',
- )
- })
- })
- })
-
- describe('#forward', () => {
- let bytes: string
- let payload: string
- let mock: Contract
-
- beforeEach(async () => {
- mock = await getterSetterFactory.connect(roles.defaultAccount).deploy()
- bytes = ethers.utils.hexlify(ethers.utils.randomBytes(100))
- payload = getterSetterFactory.interface.encodeFunctionData(
- getterSetterFactory.interface.getFunction('setBytes'),
- [bytes],
- )
- })
-
- describe('when called by an unauthorized node', () => {
- it('reverts', async () => {
- await evmRevert(
- forwarder.connect(roles.stranger).forward(mock.address, payload),
- )
- })
- })
-
- describe('when called by an authorized node', () => {
- beforeEach(async () => {
- await forwarder
- .connect(roles.defaultAccount)
- .setAuthorizedSenders([await roles.defaultAccount.getAddress()])
- })
-
- describe('when destination call reverts', () => {
- let brokenMock: Contract
- let brokenPayload: string
- let brokenMsgPayload: string
-
- beforeEach(async () => {
- brokenMock = await brokenFactory
- .connect(roles.defaultAccount)
- .deploy()
- brokenMsgPayload = brokenFactory.interface.encodeFunctionData(
- brokenFactory.interface.getFunction('revertWithMessage'),
- ['Failure message'],
- )
-
- brokenPayload = brokenFactory.interface.encodeFunctionData(
- brokenFactory.interface.getFunction('revertSilently'),
- [],
- )
- })
-
- describe('when reverts with message', () => {
- it('return revert message', async () => {
- await evmRevert(
- forwarder
- .connect(roles.defaultAccount)
- .forward(brokenMock.address, brokenMsgPayload),
- "reverted with reason string 'Failure message'",
- )
- })
- })
-
- describe('when reverts without message', () => {
- it('return silent failure message', async () => {
- await evmRevert(
- forwarder
- .connect(roles.defaultAccount)
- .forward(brokenMock.address, brokenPayload),
- 'Forwarded call reverted without reason',
- )
- })
- })
- })
-
- describe('when sending to a non-contract address', () => {
- it('reverts', async () => {
- await evmRevert(
- forwarder
- .connect(roles.defaultAccount)
- .forward(zeroAddress, payload),
- 'Must forward to a contract',
- )
- })
- })
-
- describe('when attempting to forward to the link token', () => {
- it('reverts', async () => {
- const sighash = linkTokenFactory.interface.getSighash('name') // any Link Token function
- await evmRevert(
- forwarder
- .connect(roles.defaultAccount)
- .forward(link.address, sighash),
- )
- })
- })
-
- describe('when forwarding to any other address', () => {
- it('forwards the data', async () => {
- const tx = await forwarder
- .connect(roles.defaultAccount)
- .forward(mock.address, payload)
- await tx.wait()
- assert.equal(await mock.getBytes(), bytes)
- })
-
- it('perceives the message is sent by the AuthorizedForwarder', async () => {
- const tx = await forwarder
- .connect(roles.defaultAccount)
- .forward(mock.address, payload)
- await expect(tx)
- .to.emit(mock, 'SetBytes')
- .withArgs(forwarder.address, bytes)
- })
- })
- })
- })
-
- describe('#transferOwnershipWithMessage', () => {
- const message = '0x42'
-
- describe('when called by a non-owner', () => {
- it('reverts', async () => {
- await evmRevert(
- forwarder
- .connect(roles.stranger)
- .transferOwnershipWithMessage(
- await roles.stranger.getAddress(),
- message,
- ),
- 'Only callable by owner',
- )
- })
- })
-
- describe('when called by the owner', () => {
- it('calls the normal ownership transfer proposal', async () => {
- const tx = await forwarder
- .connect(roles.defaultAccount)
- .transferOwnershipWithMessage(
- await roles.stranger.getAddress(),
- message,
- )
- const receipt = await tx.wait()
-
- assert.equal(receipt?.events?.[0]?.event, 'OwnershipTransferRequested')
- assert.equal(receipt?.events?.[0]?.address, forwarder.address)
- assert.equal(
- receipt?.events?.[0]?.args?.[0],
- await roles.defaultAccount.getAddress(),
- )
- assert.equal(
- receipt?.events?.[0]?.args?.[1],
- await roles.stranger.getAddress(),
- )
- })
-
- it('calls the normal ownership transfer proposal', async () => {
- const tx = await forwarder
- .connect(roles.defaultAccount)
- .transferOwnershipWithMessage(
- await roles.stranger.getAddress(),
- message,
- )
- const receipt = await tx.wait()
-
- assert.equal(
- receipt?.events?.[1]?.event,
- 'OwnershipTransferRequestedWithMessage',
- )
- assert.equal(receipt?.events?.[1]?.address, forwarder.address)
- assert.equal(
- receipt?.events?.[1]?.args?.[0],
- await roles.defaultAccount.getAddress(),
- )
- assert.equal(
- receipt?.events?.[1]?.args?.[1],
- await roles.stranger.getAddress(),
- )
- assert.equal(receipt?.events?.[1]?.args?.[2], message)
- })
- })
- })
-
- describe('#ownerForward', () => {
- let bytes: string
- let payload: string
- let mock: Contract
-
- beforeEach(async () => {
- mock = await getterSetterFactory.connect(roles.defaultAccount).deploy()
- bytes = ethers.utils.hexlify(ethers.utils.randomBytes(100))
- payload = getterSetterFactory.interface.encodeFunctionData(
- getterSetterFactory.interface.getFunction('setBytes'),
- [bytes],
- )
- })
-
- describe('when called by a non-owner', () => {
- it('reverts', async () => {
- await evmRevert(
- forwarder.connect(roles.stranger).ownerForward(mock.address, payload),
- )
- })
- })
-
- describe('when called by owner', () => {
- describe('when attempting to forward to the link token', () => {
- it('does not revert', async () => {
- const sighash = linkTokenFactory.interface.getSighash('name') // any Link Token function
-
- await forwarder
- .connect(roles.defaultAccount)
- .ownerForward(link.address, sighash)
- })
- })
-
- describe('when forwarding to any other address', () => {
- it('forwards the data', async () => {
- const tx = await forwarder
- .connect(roles.defaultAccount)
- .ownerForward(mock.address, payload)
- await tx.wait()
- assert.equal(await mock.getBytes(), bytes)
- })
-
- it('reverts when sending to a non-contract address', async () => {
- await evmRevert(
- forwarder
- .connect(roles.defaultAccount)
- .ownerForward(zeroAddress, payload),
- 'Must forward to a contract',
- )
- })
-
- it('perceives the message is sent by the Operator', async () => {
- const tx = await forwarder
- .connect(roles.defaultAccount)
- .ownerForward(mock.address, payload)
- await expect(tx)
- .to.emit(mock, 'SetBytes')
- .withArgs(forwarder.address, bytes)
- })
- })
- })
- })
-})
diff --git a/contracts/test/v0.7/Chainlink.test.ts b/contracts/test/v0.7/Chainlink.test.ts
deleted file mode 100644
index 7792895934c..00000000000
--- a/contracts/test/v0.7/Chainlink.test.ts
+++ /dev/null
@@ -1,186 +0,0 @@
-import { ethers } from 'hardhat'
-import { publicAbi, decodeDietCBOR, hexToBuf } from '../test-helpers/helpers'
-import { assert } from 'chai'
-import { Contract, ContractFactory, providers, Signer } from 'ethers'
-import { Roles, getUsers } from '../test-helpers/setup'
-import { makeDebug } from '../test-helpers/debug'
-
-const debug = makeDebug('ChainlinkTestHelper')
-let concreteChainlinkFactory: ContractFactory
-
-let roles: Roles
-
-before(async () => {
- roles = (await getUsers()).roles
- concreteChainlinkFactory = await ethers.getContractFactory(
- 'src/v0.7/tests/ChainlinkTestHelper.sol:ChainlinkTestHelper',
- roles.defaultAccount,
- )
-})
-
-describe('ChainlinkTestHelper', () => {
- let ccl: Contract
- let defaultAccount: Signer
-
- beforeEach(async () => {
- defaultAccount = roles.defaultAccount
- ccl = await concreteChainlinkFactory.connect(defaultAccount).deploy()
- })
-
- it('has a limited public interface [ @skip-coverage ]', () => {
- publicAbi(ccl, [
- 'add',
- 'addBytes',
- 'addInt',
- 'addStringArray',
- 'addUint',
- 'closeEvent',
- 'setBuffer',
- ])
- })
-
- async function parseCCLEvent(tx: providers.TransactionResponse) {
- const receipt = await tx.wait()
- const data = receipt.logs?.[0].data
- const d = debug.extend('parseCCLEvent')
- d('data %s', data)
- return ethers.utils.defaultAbiCoder.decode(['bytes'], data ?? '')
- }
-
- describe('#close', () => {
- it('handles empty payloads', async () => {
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
- assert.deepEqual(decoded, {})
- })
- })
-
- describe('#setBuffer', () => {
- it('emits the buffer', async () => {
- await ccl.setBuffer('0xA161616162')
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
- assert.deepEqual(decoded, { a: 'b' })
- })
- })
-
- describe('#add', () => {
- it('stores and logs keys and values', async () => {
- await ccl.add('first', 'word!!')
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
- assert.deepEqual(decoded, { first: 'word!!' })
- })
-
- it('handles two entries', async () => {
- await ccl.add('first', 'uno')
- await ccl.add('second', 'dos')
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
-
- assert.deepEqual(decoded, {
- first: 'uno',
- second: 'dos',
- })
- })
- })
-
- describe('#addBytes', () => {
- it('stores and logs keys and values', async () => {
- await ccl.addBytes('first', '0xaabbccddeeff')
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
- const expected = hexToBuf('0xaabbccddeeff')
- assert.deepEqual(decoded, { first: expected })
- })
-
- it('handles two entries', async () => {
- await ccl.addBytes('first', '0x756E6F')
- await ccl.addBytes('second', '0x646F73')
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
-
- const expectedFirst = hexToBuf('0x756E6F')
- const expectedSecond = hexToBuf('0x646F73')
- assert.deepEqual(decoded, {
- first: expectedFirst,
- second: expectedSecond,
- })
- })
-
- it('handles strings', async () => {
- await ccl.addBytes('first', ethers.utils.toUtf8Bytes('apple'))
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
- const expected = ethers.utils.toUtf8Bytes('apple')
- assert.deepEqual(decoded, { first: expected })
- })
- })
-
- describe('#addInt', () => {
- it('stores and logs keys and values', async () => {
- await ccl.addInt('first', 1)
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
- assert.deepEqual(decoded, { first: 1 })
- })
-
- it('handles two entries', async () => {
- await ccl.addInt('first', 1)
- await ccl.addInt('second', 2)
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
-
- assert.deepEqual(decoded, {
- first: 1,
- second: 2,
- })
- })
- })
-
- describe('#addUint', () => {
- it('stores and logs keys and values', async () => {
- await ccl.addUint('first', 1)
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
- assert.deepEqual(decoded, { first: 1 })
- })
-
- it('handles two entries', async () => {
- await ccl.addUint('first', 1)
- await ccl.addUint('second', 2)
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
-
- assert.deepEqual(decoded, {
- first: 1,
- second: 2,
- })
- })
- })
-
- describe('#addStringArray', () => {
- it('stores and logs keys and values', async () => {
- await ccl.addStringArray('word', [
- ethers.utils.formatBytes32String('seinfeld'),
- ethers.utils.formatBytes32String('"4"'),
- ethers.utils.formatBytes32String('LIFE'),
- ])
- const tx = await ccl.closeEvent()
- const [payload] = await parseCCLEvent(tx)
- const decoded = await decodeDietCBOR(payload)
- assert.deepEqual(decoded, { word: ['seinfeld', '"4"', 'LIFE'] })
- })
- })
-})
diff --git a/contracts/test/v0.7/ChainlinkClient.test.ts b/contracts/test/v0.7/ChainlinkClient.test.ts
deleted file mode 100644
index 198d382af79..00000000000
--- a/contracts/test/v0.7/ChainlinkClient.test.ts
+++ /dev/null
@@ -1,454 +0,0 @@
-import { ethers } from 'hardhat'
-import { assert } from 'chai'
-import { Contract, ContractFactory } from 'ethers'
-import { Roles, getUsers } from '../test-helpers/setup'
-import {
- convertFufillParams,
- decodeCCRequest,
- decodeRunRequest,
- RunRequest,
-} from '../test-helpers/oracle'
-import { decodeDietCBOR } from '../test-helpers/helpers'
-import { evmRevert } from '../test-helpers/matchers'
-
-let concreteChainlinkClientFactory: ContractFactory
-let emptyOracleFactory: ContractFactory
-let getterSetterFactory: ContractFactory
-let operatorFactory: ContractFactory
-let linkTokenFactory: ContractFactory
-
-let roles: Roles
-
-before(async () => {
- roles = (await getUsers()).roles
-
- concreteChainlinkClientFactory = await ethers.getContractFactory(
- 'src/v0.7/tests/ChainlinkClientTestHelper.sol:ChainlinkClientTestHelper',
- roles.defaultAccount,
- )
- emptyOracleFactory = await ethers.getContractFactory(
- 'src/v0.6/tests/EmptyOracle.sol:EmptyOracle',
- roles.defaultAccount,
- )
- getterSetterFactory = await ethers.getContractFactory(
- 'src/v0.5/tests/GetterSetter.sol:GetterSetter',
- roles.defaultAccount,
- )
- operatorFactory = await ethers.getContractFactory(
- 'src/v0.7/Operator.sol:Operator',
- roles.defaultAccount,
- )
- linkTokenFactory = await ethers.getContractFactory(
- 'src/v0.4/LinkToken.sol:LinkToken',
- roles.defaultAccount,
- )
-})
-
-describe('ChainlinkClientTestHelper', () => {
- const specId =
- '0x4c7b7ffb66b344fbaa64995af81e355a00000000000000000000000000000000'
- let cc: Contract
- let gs: Contract
- let oc: Contract
- let newoc: Contract
- let link: Contract
-
- beforeEach(async () => {
- link = await linkTokenFactory.connect(roles.defaultAccount).deploy()
- oc = await operatorFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, await roles.defaultAccount.getAddress())
- newoc = await operatorFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, await roles.defaultAccount.getAddress())
- gs = await getterSetterFactory.connect(roles.defaultAccount).deploy()
- cc = await concreteChainlinkClientFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, oc.address)
- })
-
- describe('#newRequest', () => {
- it('forwards the information to the oracle contract through the link token', async () => {
- const tx = await cc.publicNewRequest(
- specId,
- gs.address,
- ethers.utils.toUtf8Bytes('requestedBytes32(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
-
- assert.equal(1, receipt.logs?.length)
- const [jId, cbAddr, cbFId, cborData] = receipt.logs
- ? decodeCCRequest(receipt.logs[0])
- : []
- const params = decodeDietCBOR(cborData ?? '')
-
- assert.equal(specId, jId)
- assert.equal(gs.address, cbAddr)
- assert.equal('0xed53e511', cbFId)
- assert.deepEqual({}, params)
- })
- })
-
- describe('#chainlinkRequest(Request)', () => {
- it('emits an event from the contract showing the run ID', async () => {
- const tx = await cc.publicRequest(
- specId,
- cc.address,
- ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'),
- 0,
- )
-
- const { events, logs } = await tx.wait()
-
- assert.equal(4, events?.length)
-
- assert.equal(logs?.[0].address, cc.address)
- assert.equal(events?.[0].event, 'ChainlinkRequested')
- })
- })
-
- describe('#chainlinkRequestTo(Request)', () => {
- it('emits an event from the contract showing the run ID', async () => {
- const tx = await cc.publicRequestRunTo(
- newoc.address,
- specId,
- cc.address,
- ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'),
- 0,
- )
- const { events } = await tx.wait()
-
- assert.equal(4, events?.length)
- assert.equal(events?.[0].event, 'ChainlinkRequested')
- })
-
- it('emits an event on the target oracle contract', async () => {
- const tx = await cc.publicRequestRunTo(
- newoc.address,
- specId,
- cc.address,
- ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'),
- 0,
- )
- const { logs } = await tx.wait()
- const event = logs && newoc.interface.parseLog(logs[3])
-
- assert.equal(4, logs?.length)
- assert.equal(event?.name, 'OracleRequest')
- })
-
- it('does not modify the stored oracle address', async () => {
- await cc.publicRequestRunTo(
- newoc.address,
- specId,
- cc.address,
- ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'),
- 0,
- )
-
- const actualOracleAddress = await cc.publicOracleAddress()
- assert.equal(oc.address, actualOracleAddress)
- })
- })
-
- describe('#requestOracleData', () => {
- it('emits an event from the contract showing the run ID', async () => {
- const tx = await cc.publicRequestOracleData(
- specId,
- ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'),
- 0,
- )
-
- const { events, logs } = await tx.wait()
-
- assert.equal(4, events?.length)
-
- assert.equal(logs?.[0].address, cc.address)
- assert.equal(events?.[0].event, 'ChainlinkRequested')
- })
- })
-
- describe('#requestOracleDataFrom', () => {
- it('emits an event from the contract showing the run ID', async () => {
- const tx = await cc.publicRequestOracleDataFrom(
- newoc.address,
- specId,
- cc.address,
- ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'),
- 0,
- )
- const { events } = await tx.wait()
-
- assert.equal(4, events?.length)
- assert.equal(events?.[0].event, 'ChainlinkRequested')
- })
-
- it('emits an event on the target oracle contract', async () => {
- const tx = await cc.publicRequestOracleDataFrom(
- newoc.address,
- specId,
- cc.address,
- ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'),
- 0,
- )
- const { logs } = await tx.wait()
- const event = logs && newoc.interface.parseLog(logs[3])
-
- assert.equal(4, logs?.length)
- assert.equal(event?.name, 'OracleRequest')
- })
-
- it('does not modify the stored oracle address', async () => {
- await cc.publicRequestOracleDataFrom(
- newoc.address,
- specId,
- cc.address,
- ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'),
- 0,
- )
-
- const actualOracleAddress = await cc.publicOracleAddress()
- assert.equal(oc.address, actualOracleAddress)
- })
- })
-
- describe('#cancelChainlinkRequest', () => {
- let requestId: string
- // a concrete chainlink attached to an empty oracle
- let ecc: Contract
-
- beforeEach(async () => {
- const emptyOracle = await emptyOracleFactory
- .connect(roles.defaultAccount)
- .deploy()
- ecc = await concreteChainlinkClientFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, emptyOracle.address)
-
- const tx = await ecc.publicRequest(
- specId,
- ecc.address,
- ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'),
- 0,
- )
- const { events } = await tx.wait()
- requestId = (events?.[0]?.args as any).id
- })
-
- it('emits an event from the contract showing the run was cancelled', async () => {
- const tx = await ecc.publicCancelRequest(
- requestId,
- 0,
- ethers.utils.hexZeroPad('0x', 4),
- 0,
- )
- const { events } = await tx.wait()
-
- assert.equal(1, events?.length)
- assert.equal(events?.[0].event, 'ChainlinkCancelled')
- assert.equal(requestId, (events?.[0].args as any).id)
- })
-
- it('throws if given a bogus event ID', async () => {
- await evmRevert(
- ecc.publicCancelRequest(
- ethers.utils.formatBytes32String('bogusId'),
- 0,
- ethers.utils.hexZeroPad('0x', 4),
- 0,
- ),
- )
- })
- })
-
- describe('#recordChainlinkFulfillment(modifier)', () => {
- let request: RunRequest
-
- beforeEach(async () => {
- await oc.setAuthorizedSenders([await roles.defaultAccount.getAddress()])
- const tx = await cc.publicRequest(
- specId,
- cc.address,
- ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'),
- 0,
- )
- const { logs } = await tx.wait()
-
- request = decodeRunRequest(logs?.[3])
- })
-
- it('emits an event marking the request fulfilled', async () => {
- const tx = await oc
- .connect(roles.defaultAccount)
- .fulfillOracleRequest(
- ...convertFufillParams(
- request,
- ethers.utils.formatBytes32String('hi mom!'),
- ),
- )
- const { logs } = await tx.wait()
-
- const event = logs && cc.interface.parseLog(logs[1])
-
- assert.equal(2, logs?.length)
- assert.equal(event?.name, 'ChainlinkFulfilled')
- assert.equal(request.requestId, event?.args.id)
- })
-
- it('should only allow one fulfillment per id', async () => {
- await oc
- .connect(roles.defaultAccount)
- .fulfillOracleRequest(
- ...convertFufillParams(
- request,
- ethers.utils.formatBytes32String('hi mom!'),
- ),
- )
-
- await evmRevert(
- oc
- .connect(roles.defaultAccount)
- .fulfillOracleRequest(
- ...convertFufillParams(
- request,
- ethers.utils.formatBytes32String('hi mom!'),
- ),
- ),
- 'Must have a valid requestId',
- )
- })
-
- it('should only allow the oracle to fulfill the request', async () => {
- await evmRevert(
- oc
- .connect(roles.stranger)
- .fulfillOracleRequest(
- ...convertFufillParams(
- request,
- ethers.utils.formatBytes32String('hi mom!'),
- ),
- ),
- 'Not authorized sender',
- )
- })
- })
-
- describe('#fulfillChainlinkRequest(function)', () => {
- let request: RunRequest
-
- beforeEach(async () => {
- await oc.setAuthorizedSenders([await roles.defaultAccount.getAddress()])
- const tx = await cc.publicRequest(
- specId,
- cc.address,
- ethers.utils.toUtf8Bytes(
- 'publicFulfillChainlinkRequest(bytes32,bytes32)',
- ),
- 0,
- )
- const { logs } = await tx.wait()
-
- request = decodeRunRequest(logs?.[3])
- })
-
- it('emits an event marking the request fulfilled', async () => {
- const tx = await oc
- .connect(roles.defaultAccount)
- .fulfillOracleRequest(
- ...convertFufillParams(
- request,
- ethers.utils.formatBytes32String('hi mom!'),
- ),
- )
-
- const { logs } = await tx.wait()
- const event = logs && cc.interface.parseLog(logs[1])
-
- assert.equal(2, logs?.length)
- assert.equal(event?.name, 'ChainlinkFulfilled')
- assert.equal(request.requestId, event?.args?.id)
- })
-
- it('should only allow one fulfillment per id', async () => {
- await oc
- .connect(roles.defaultAccount)
- .fulfillOracleRequest(
- ...convertFufillParams(
- request,
- ethers.utils.formatBytes32String('hi mom!'),
- ),
- )
-
- await evmRevert(
- oc
- .connect(roles.defaultAccount)
- .fulfillOracleRequest(
- ...convertFufillParams(
- request,
- ethers.utils.formatBytes32String('hi mom!'),
- ),
- ),
- 'Must have a valid requestId',
- )
- })
-
- it('should only allow the oracle to fulfill the request', async () => {
- await evmRevert(
- oc
- .connect(roles.stranger)
- .fulfillOracleRequest(
- ...convertFufillParams(
- request,
- ethers.utils.formatBytes32String('hi mom!'),
- ),
- ),
- 'Not authorized sender',
- )
- })
- })
-
- describe('#chainlinkToken', () => {
- it('returns the Link Token address', async () => {
- const addr = await cc.publicChainlinkToken()
- assert.equal(addr, link.address)
- })
- })
-
- describe('#addExternalRequest', () => {
- let mock: Contract
- let request: RunRequest
-
- beforeEach(async () => {
- mock = await concreteChainlinkClientFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, oc.address)
-
- const tx = await cc.publicRequest(
- specId,
- mock.address,
- ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'),
- 0,
- )
- const receipt = await tx.wait()
-
- request = decodeRunRequest(receipt.logs?.[3])
- await mock.publicAddExternalRequest(oc.address, request.requestId)
- })
-
- it('allows the external request to be fulfilled', async () => {
- await oc.setAuthorizedSenders([await roles.defaultAccount.getAddress()])
- await oc.fulfillOracleRequest(
- ...convertFufillParams(
- request,
- ethers.utils.formatBytes32String('hi mom!'),
- ),
- )
- })
-
- it('does not allow the same requestId to be used', async () => {
- await evmRevert(
- cc.publicAddExternalRequest(newoc.address, request.requestId),
- )
- })
- })
-})
diff --git a/contracts/test/v0.7/CompoundPriceFlaggingValidator.test.ts b/contracts/test/v0.7/CompoundPriceFlaggingValidator.test.ts
deleted file mode 100644
index 315f7bd9e6b..00000000000
--- a/contracts/test/v0.7/CompoundPriceFlaggingValidator.test.ts
+++ /dev/null
@@ -1,471 +0,0 @@
-import { ethers } from 'hardhat'
-import { evmWordToAddress, getLogs, publicAbi } from '../test-helpers/helpers'
-import { assert, expect } from 'chai'
-import {
- BigNumber,
- Contract,
- ContractFactory,
- ContractTransaction,
-} from 'ethers'
-import { Personas, getUsers } from '../test-helpers/setup'
-import { evmRevert } from '../test-helpers/matchers'
-
-let personas: Personas
-let validatorFactory: ContractFactory
-let acFactory: ContractFactory
-let flagsFactory: ContractFactory
-let aggregatorFactory: ContractFactory
-let compoundOracleFactory: ContractFactory
-
-before(async () => {
- personas = (await getUsers()).personas
-
- validatorFactory = await ethers.getContractFactory(
- 'src/v0.7/dev/CompoundPriceFlaggingValidator.sol:CompoundPriceFlaggingValidator',
- personas.Carol,
- )
- acFactory = await ethers.getContractFactory(
- 'src/v0.6/SimpleWriteAccessController.sol:SimpleWriteAccessController',
- personas.Carol,
- )
- flagsFactory = await ethers.getContractFactory(
- 'src/v0.6/Flags.sol:Flags',
- personas.Carol,
- )
- aggregatorFactory = await ethers.getContractFactory(
- 'src/v0.7/tests/MockV3Aggregator.sol:MockV3Aggregator',
- personas.Carol,
- )
- compoundOracleFactory = await ethers.getContractFactory(
- 'src/v0.7/tests/MockCompoundOracle.sol:MockCompoundOracle',
- personas.Carol,
- )
-})
-
-describe('CompoundPriceFlaggingVlidator', () => {
- let validator: Contract
- let aggregator: Contract
- let compoundOracle: Contract
- let flags: Contract
- let ac: Contract
-
- const aggregatorDecimals = 18
- // 1000
- const initialAggregatorPrice = BigNumber.from('1000000000000000000000')
-
- const compoundSymbol = 'ETH'
- const compoundDecimals = 6
- // 1100 (10% deviation from aggregator price)
- const initialCompoundPrice = BigNumber.from('1100000000')
-
- // (50,000,000 / 1,000,000,000) = 0.05 = 5% deviation threshold
- const initialDeviationNumerator = 50_000_000
-
- beforeEach(async () => {
- ac = await acFactory.connect(personas.Carol).deploy()
- flags = await flagsFactory.connect(personas.Carol).deploy(ac.address)
- aggregator = await aggregatorFactory
- .connect(personas.Carol)
- .deploy(aggregatorDecimals, initialAggregatorPrice)
- compoundOracle = await compoundOracleFactory
- .connect(personas.Carol)
- .deploy()
- await compoundOracle.setPrice(
- compoundSymbol,
- initialCompoundPrice,
- compoundDecimals,
- )
- validator = await validatorFactory
- .connect(personas.Carol)
- .deploy(flags.address, compoundOracle.address)
- await validator
- .connect(personas.Carol)
- .setFeedDetails(
- aggregator.address,
- compoundSymbol,
- compoundDecimals,
- initialDeviationNumerator,
- )
- await ac.connect(personas.Carol).addAccess(validator.address)
- })
-
- it('has a limited public interface [ @skip-coverage ]', () => {
- publicAbi(validator, [
- 'update',
- 'check',
- 'setFeedDetails',
- 'setFlagsAddress',
- 'setCompoundOpenOracleAddress',
- 'getFeedDetails',
- 'flags',
- 'compoundOpenOracle',
- // Upkeep methods:
- 'checkUpkeep',
- 'performUpkeep',
- // Owned methods:
- 'acceptOwnership',
- 'owner',
- 'transferOwnership',
- ])
- })
-
- describe('#constructor', () => {
- it('sets the owner', async () => {
- assert.equal(await validator.owner(), await personas.Carol.getAddress())
- })
-
- it('sets the arguments passed in', async () => {
- assert.equal(await validator.flags(), flags.address)
- assert.equal(await validator.compoundOpenOracle(), compoundOracle.address)
- })
- })
-
- describe('#setOpenOracleAddress', () => {
- let newCompoundOracle: Contract
- let tx: ContractTransaction
-
- beforeEach(async () => {
- newCompoundOracle = await compoundOracleFactory
- .connect(personas.Carol)
- .deploy()
- tx = await validator
- .connect(personas.Carol)
- .setCompoundOpenOracleAddress(newCompoundOracle.address)
- })
-
- it('changes the compound oracke address', async () => {
- assert.equal(
- await validator.compoundOpenOracle(),
- newCompoundOracle.address,
- )
- })
-
- it('emits a log event', async () => {
- await expect(tx)
- .to.emit(validator, 'CompoundOpenOracleAddressUpdated')
- .withArgs(compoundOracle.address, newCompoundOracle.address)
- })
-
- describe('when called by a non-owner', () => {
- it('reverts', async () => {
- await evmRevert(
- validator
- .connect(personas.Neil)
- .setCompoundOpenOracleAddress(newCompoundOracle.address),
- 'Only callable by owner',
- )
- })
- })
- })
-
- describe('#setFlagsAddress', () => {
- let newFlagsContract: Contract
- let tx: ContractTransaction
-
- beforeEach(async () => {
- newFlagsContract = await flagsFactory
- .connect(personas.Carol)
- .deploy(ac.address)
- tx = await validator
- .connect(personas.Carol)
- .setFlagsAddress(newFlagsContract.address)
- })
-
- it('changes the flags address', async () => {
- assert.equal(await validator.flags(), newFlagsContract.address)
- })
-
- it('emits a log event', async () => {
- await expect(tx)
- .to.emit(validator, 'FlagsAddressUpdated')
- .withArgs(flags.address, newFlagsContract.address)
- })
-
- describe('when called by a non-owner', () => {
- it('reverts', async () => {
- await evmRevert(
- validator
- .connect(personas.Neil)
- .setFlagsAddress(newFlagsContract.address),
- 'Only callable by owner',
- )
- })
- })
- })
-
- describe('#setFeedDetails', () => {
- let mockAggregator: Contract
- let tx: ContractTransaction
- const symbol = 'BTC'
- const decimals = 8
- const deviationNumerator = 50_000_000 // 5%
-
- beforeEach(async () => {
- await compoundOracle.connect(personas.Carol).setPrice('BTC', 1500000, 2)
- mockAggregator = await aggregatorFactory
- .connect(personas.Carol)
- .deploy(decimals, 4000000000000)
- tx = await validator
- .connect(personas.Carol)
- .setFeedDetails(
- mockAggregator.address,
- symbol,
- decimals,
- deviationNumerator,
- )
- })
-
- it('sets the correct state', async () => {
- const response = await validator
- .connect(personas.Carol)
- .getFeedDetails(mockAggregator.address)
-
- assert.equal(response[0], symbol)
- assert.equal(response[1], decimals)
- assert.equal(response[2].toString(), deviationNumerator.toString())
- })
-
- it('uses the existing symbol if one already exists', async () => {
- const newSymbol = 'LINK'
-
- await compoundOracle
- .connect(personas.Carol)
- .setPrice(newSymbol, 1500000, 2)
-
- tx = await validator
- .connect(personas.Carol)
- .setFeedDetails(
- mockAggregator.address,
- newSymbol,
- decimals,
- deviationNumerator,
- )
-
- // Check the event
- await expect(tx)
- .to.emit(validator, 'FeedDetailsSet')
- .withArgs(mockAggregator.address, symbol, decimals, deviationNumerator)
-
- // Check the state
- const response = await validator
- .connect(personas.Carol)
- .getFeedDetails(mockAggregator.address)
- assert.equal(response[0], symbol)
- })
-
- it('emits an event', async () => {
- await expect(tx)
- .to.emit(validator, 'FeedDetailsSet')
- .withArgs(mockAggregator.address, symbol, decimals, deviationNumerator)
- })
-
- it('fails when given a 0 numerator', async () => {
- await evmRevert(
- validator
- .connect(personas.Carol)
- .setFeedDetails(mockAggregator.address, symbol, decimals, 0),
- 'Invalid threshold numerator',
- )
- })
-
- it('fails when given a numerator above 1 billion', async () => {
- await evmRevert(
- validator
- .connect(personas.Carol)
- .setFeedDetails(
- mockAggregator.address,
- symbol,
- decimals,
- 1_200_000_000,
- ),
- 'Invalid threshold numerator',
- )
- })
-
- it('fails when the compound price is invalid', async () => {
- await evmRevert(
- validator
- .connect(personas.Carol)
- .setFeedDetails(
- mockAggregator.address,
- 'TEST',
- decimals,
- deviationNumerator,
- ),
- 'Invalid Compound price',
- )
- })
-
- describe('when called by a non-owner', () => {
- it('reverts', async () => {
- await evmRevert(
- validator
- .connect(personas.Neil)
- .setFeedDetails(
- mockAggregator.address,
- symbol,
- decimals,
- deviationNumerator,
- ),
- 'Only callable by owner',
- )
- })
- })
- })
-
- describe('#check', () => {
- describe('with a single aggregator', () => {
- describe('with a deviated price exceding threshold', () => {
- it('returns the deviated aggregator', async () => {
- const aggregators = [aggregator.address]
- const response = await validator.check(aggregators)
- assert.equal(response.length, 1)
- assert.equal(response[0], aggregator.address)
- })
- })
-
- describe('with a price within the threshold', () => {
- const newCompoundPrice = BigNumber.from('1000000000')
- beforeEach(async () => {
- await compoundOracle.setPrice(
- 'ETH',
- newCompoundPrice,
- compoundDecimals,
- )
- })
-
- it('returns an empty array', async () => {
- const aggregators = [aggregator.address]
- const response = await validator.check(aggregators)
- assert.equal(response.length, 0)
- })
- })
- })
- })
-
- describe('#update', () => {
- describe('with a single aggregator', () => {
- describe('with a deviated price exceding threshold', () => {
- it('raises a flag on the flags contract', async () => {
- const aggregators = [aggregator.address]
- const tx = await validator.connect(personas.Carol).update(aggregators)
- const logs = await getLogs(tx)
- assert.equal(logs.length, 1)
- assert.equal(evmWordToAddress(logs[0].topics[1]), aggregator.address)
- })
- })
-
- describe('with a price within the threshold', () => {
- const newCompoundPrice = BigNumber.from('1000000000')
- beforeEach(async () => {
- await compoundOracle.setPrice(
- 'ETH',
- newCompoundPrice,
- compoundDecimals,
- )
- })
-
- it('does nothing', async () => {
- const aggregators = [aggregator.address]
- const tx = await validator.connect(personas.Carol).update(aggregators)
- const logs = await getLogs(tx)
- assert.equal(logs.length, 0)
- })
- })
- })
- })
-
- describe('#checkUpkeep', () => {
- describe('with a single aggregator', () => {
- describe('with a deviated price exceding threshold', () => {
- it('returns the deviated aggregator', async () => {
- const aggregators = [aggregator.address]
- const encodedAggregators = ethers.utils.defaultAbiCoder.encode(
- ['address[]'],
- [aggregators],
- )
- const response = await validator
- .connect(personas.Carol)
- .checkUpkeep(encodedAggregators)
-
- const decodedResponse = ethers.utils.defaultAbiCoder.decode(
- ['address[]'],
- response?.[1],
- )
- assert.equal(decodedResponse?.[0]?.[0], aggregators[0])
- })
- })
-
- describe('with a price within the threshold', () => {
- const newCompoundPrice = BigNumber.from('1000000000')
- beforeEach(async () => {
- await compoundOracle.setPrice(
- 'ETH',
- newCompoundPrice,
- compoundDecimals,
- )
- })
-
- it('returns an empty array', async () => {
- const aggregators = [aggregator.address]
- const encodedAggregators = ethers.utils.defaultAbiCoder.encode(
- ['address[]'],
- [aggregators],
- )
- const response = await validator
- .connect(personas.Carol)
- .checkUpkeep(encodedAggregators)
- const decodedResponse = ethers.utils.defaultAbiCoder.decode(
- ['address[]'],
- response?.[1],
- )
- assert.equal(decodedResponse?.[0]?.length, 0)
- })
- })
- })
- })
-
- describe('#performUpkeep', () => {
- describe('with a single aggregator', () => {
- describe('with a deviated price exceding threshold', () => {
- it('raises a flag on the flags contract', async () => {
- const aggregators = [aggregator.address]
- const encodedAggregators = ethers.utils.defaultAbiCoder.encode(
- ['address[]'],
- [aggregators],
- )
- const tx = await validator
- .connect(personas.Carol)
- .performUpkeep(encodedAggregators)
- const logs = await getLogs(tx)
- assert.equal(logs.length, 1)
- assert.equal(evmWordToAddress(logs[0].topics[1]), aggregator.address)
- })
- })
-
- describe('with a price within the threshold', () => {
- const newCompoundPrice = BigNumber.from('1000000000')
- beforeEach(async () => {
- await compoundOracle.setPrice(
- 'ETH',
- newCompoundPrice,
- compoundDecimals,
- )
- })
-
- it('does nothing', async () => {
- const aggregators = [aggregator.address]
- const encodedAggregators = ethers.utils.defaultAbiCoder.encode(
- ['address[]'],
- [aggregators],
- )
- const tx = await validator
- .connect(personas.Carol)
- .performUpkeep(encodedAggregators)
- const logs = await getLogs(tx)
- assert.equal(logs.length, 0)
- })
- })
- })
- })
-})
diff --git a/contracts/test/v0.7/ConfirmedOwner.test.ts b/contracts/test/v0.7/ConfirmedOwner.test.ts
deleted file mode 100644
index 3502cd15bc2..00000000000
--- a/contracts/test/v0.7/ConfirmedOwner.test.ts
+++ /dev/null
@@ -1,136 +0,0 @@
-import { ethers } from 'hardhat'
-import { publicAbi } from '../test-helpers/helpers'
-import { assert, expect } from 'chai'
-import { Contract, ContractFactory, Signer } from 'ethers'
-import { Personas, getUsers } from '../test-helpers/setup'
-import { evmRevert } from '../test-helpers/matchers'
-
-let confirmedOwnerTestHelperFactory: ContractFactory
-let confirmedOwnerFactory: ContractFactory
-
-let personas: Personas
-let owner: Signer
-let nonOwner: Signer
-let newOwner: Signer
-
-before(async () => {
- const users = await getUsers()
- personas = users.personas
- owner = personas.Carol
- nonOwner = personas.Neil
- newOwner = personas.Ned
-
- confirmedOwnerTestHelperFactory = await ethers.getContractFactory(
- 'src/v0.7/tests/ConfirmedOwnerTestHelper.sol:ConfirmedOwnerTestHelper',
- owner,
- )
- confirmedOwnerFactory = await ethers.getContractFactory(
- 'src/v0.7/ConfirmedOwner.sol:ConfirmedOwner',
- owner,
- )
-})
-
-describe('ConfirmedOwner', () => {
- let confirmedOwner: Contract
-
- beforeEach(async () => {
- confirmedOwner = await confirmedOwnerTestHelperFactory
- .connect(owner)
- .deploy()
- })
-
- it('has a limited public interface [ @skip-coverage ]', () => {
- publicAbi(confirmedOwner, [
- 'acceptOwnership',
- 'owner',
- 'transferOwnership',
- // test helper public methods
- 'modifierOnlyOwner',
- ])
- })
-
- describe('#constructor', () => {
- it('assigns ownership to the deployer', async () => {
- const [actual, expected] = await Promise.all([
- owner.getAddress(),
- confirmedOwner.owner(),
- ])
-
- assert.equal(actual, expected)
- })
-
- it('reverts if assigned to the zero address', async () => {
- await evmRevert(
- confirmedOwnerFactory
- .connect(owner)
- .deploy(ethers.constants.AddressZero),
- 'Cannot set owner to zero',
- )
- })
- })
-
- describe('#onlyOwner modifier', () => {
- describe('when called by an owner', () => {
- it('successfully calls the method', async () => {
- const tx = await confirmedOwner.connect(owner).modifierOnlyOwner()
- await expect(tx).to.emit(confirmedOwner, 'Here')
- })
- })
-
- describe('when called by anyone but the owner', () => {
- it('reverts', async () =>
- await evmRevert(confirmedOwner.connect(nonOwner).modifierOnlyOwner()))
- })
- })
-
- describe('#transferOwnership', () => {
- describe('when called by an owner', () => {
- it('emits a log', async () => {
- const tx = await confirmedOwner
- .connect(owner)
- .transferOwnership(await newOwner.getAddress())
- await expect(tx)
- .to.emit(confirmedOwner, 'OwnershipTransferRequested')
- .withArgs(await owner.getAddress(), await newOwner.getAddress())
- })
-
- it('does not allow ownership transfer to self', async () => {
- await evmRevert(
- confirmedOwner
- .connect(owner)
- .transferOwnership(await owner.getAddress()),
- 'Cannot transfer to self',
- )
- })
- })
- })
-
- describe('when called by anyone but the owner', () => {
- it('reverts', async () =>
- await evmRevert(
- confirmedOwner
- .connect(nonOwner)
- .transferOwnership(await newOwner.getAddress()),
- ))
- })
-
- describe('#acceptOwnership', () => {
- describe('after #transferOwnership has been called', () => {
- beforeEach(async () => {
- await confirmedOwner
- .connect(owner)
- .transferOwnership(await newOwner.getAddress())
- })
-
- it('allows the recipient to call it', async () => {
- const tx = await confirmedOwner.connect(newOwner).acceptOwnership()
- await expect(tx)
- .to.emit(confirmedOwner, 'OwnershipTransferred')
- .withArgs(await owner.getAddress(), await newOwner.getAddress())
- })
-
- it('does not allow a non-recipient to call it', async () =>
- await evmRevert(confirmedOwner.connect(nonOwner).acceptOwnership()))
- })
- })
-})
diff --git a/contracts/test/v0.7/KeeperRegistry1_1.test.ts b/contracts/test/v0.7/KeeperRegistry1_1.test.ts
deleted file mode 100644
index 4e3a8c91b35..00000000000
--- a/contracts/test/v0.7/KeeperRegistry1_1.test.ts
+++ /dev/null
@@ -1,1725 +0,0 @@
-import { ethers } from 'hardhat'
-import { assert, expect } from 'chai'
-import { evmRevert } from '../test-helpers/matchers'
-import { getUsers, Personas } from '../test-helpers/setup'
-import { BigNumber, BigNumberish, Signer } from 'ethers'
-import { LinkToken__factory as LinkTokenFactory } from '../../typechain/factories/LinkToken__factory'
-import { KeeperRegistry1_1__factory as KeeperRegistryFactory } from '../../typechain/factories/KeeperRegistry1_1__factory'
-import { MockV3Aggregator__factory as MockV3AggregatorFactory } from '../../typechain/factories/MockV3Aggregator__factory'
-import { UpkeepMock__factory as UpkeepMockFactory } from '../../typechain/factories/UpkeepMock__factory'
-import { UpkeepReverter__factory as UpkeepReverterFactory } from '../../typechain/factories/UpkeepReverter__factory'
-import { KeeperRegistry1_1 as KeeperRegistry } from '../../typechain/KeeperRegistry1_1'
-import { MockV3Aggregator } from '../../typechain/MockV3Aggregator'
-import { LinkToken } from '../../typechain/LinkToken'
-import { UpkeepMock } from '../../typechain/UpkeepMock'
-import { toWei } from '../test-helpers/helpers'
-
-async function getUpkeepID(tx: any) {
- const receipt = await tx.wait()
- return receipt.events[0].args.id
-}
-
-// -----------------------------------------------------------------------------------------------
-// DEV: these *should* match the perform/check gas overhead values in the contract and on the node
-const PERFORM_GAS_OVERHEAD = BigNumber.from(90000)
-const CHECK_GAS_OVERHEAD = BigNumber.from(170000)
-// -----------------------------------------------------------------------------------------------
-
-// Smart contract factories
-let linkTokenFactory: LinkTokenFactory
-let mockV3AggregatorFactory: MockV3AggregatorFactory
-let keeperRegistryFactory: KeeperRegistryFactory
-let upkeepMockFactory: UpkeepMockFactory
-let upkeepReverterFactory: UpkeepReverterFactory
-
-let personas: Personas
-
-before(async () => {
- personas = (await getUsers()).personas
-
- linkTokenFactory = await ethers.getContractFactory(
- 'src/v0.4/LinkToken.sol:LinkToken',
- )
- // need full path because there are two contracts with name MockV3Aggregator
- mockV3AggregatorFactory = (await ethers.getContractFactory(
- 'src/v0.7/tests/MockV3Aggregator.sol:MockV3Aggregator',
- )) as unknown as MockV3AggregatorFactory
- // @ts-ignore bug in autogen file
- keeperRegistryFactory = await ethers.getContractFactory('KeeperRegistry1_1')
- upkeepMockFactory = await ethers.getContractFactory('UpkeepMock')
- upkeepReverterFactory = await ethers.getContractFactory('UpkeepReverter')
-})
-
-describe('KeeperRegistry1_1', () => {
- const linkEth = BigNumber.from(300000000)
- const gasWei = BigNumber.from(100)
- const linkDivisibility = BigNumber.from('1000000000000000000')
- const executeGas = BigNumber.from('100000')
- const paymentPremiumBase = BigNumber.from('1000000000')
- const paymentPremiumPPB = BigNumber.from('250000000')
- const flatFeeMicroLink = BigNumber.from(0)
- const blockCountPerTurn = BigNumber.from(3)
- const emptyBytes = '0x00'
- const zeroAddress = ethers.constants.AddressZero
- const extraGas = BigNumber.from('250000')
- const registryGasOverhead = BigNumber.from('80000')
- const stalenessSeconds = BigNumber.from(43820)
- const gasCeilingMultiplier = BigNumber.from(1)
- const maxCheckGas = BigNumber.from(20000000)
- const fallbackGasPrice = BigNumber.from(200)
- const fallbackLinkPrice = BigNumber.from(200000000)
-
- let owner: Signer
- let keeper1: Signer
- let keeper2: Signer
- let keeper3: Signer
- let nonkeeper: Signer
- let admin: Signer
- let payee1: Signer
- let payee2: Signer
- let payee3: Signer
-
- let linkToken: LinkToken
- let linkEthFeed: MockV3Aggregator
- let gasPriceFeed: MockV3Aggregator
- let registry: KeeperRegistry
- let mock: UpkeepMock
-
- let id: BigNumber
- let keepers: string[]
- let payees: string[]
-
- beforeEach(async () => {
- owner = personas.Default
- keeper1 = personas.Carol
- keeper2 = personas.Eddy
- keeper3 = personas.Nancy
- nonkeeper = personas.Ned
- admin = personas.Neil
- payee1 = personas.Nelly
- payee2 = personas.Norbert
- payee3 = personas.Nick
-
- keepers = [
- await keeper1.getAddress(),
- await keeper2.getAddress(),
- await keeper3.getAddress(),
- ]
- payees = [
- await payee1.getAddress(),
- await payee2.getAddress(),
- await payee3.getAddress(),
- ]
-
- linkToken = await linkTokenFactory.connect(owner).deploy()
- gasPriceFeed = await mockV3AggregatorFactory
- .connect(owner)
- .deploy(0, gasWei)
- linkEthFeed = await mockV3AggregatorFactory
- .connect(owner)
- .deploy(9, linkEth)
- registry = await keeperRegistryFactory
- .connect(owner)
- .deploy(
- linkToken.address,
- linkEthFeed.address,
- gasPriceFeed.address,
- paymentPremiumPPB,
- flatFeeMicroLink,
- blockCountPerTurn,
- maxCheckGas,
- stalenessSeconds,
- gasCeilingMultiplier,
- fallbackGasPrice,
- fallbackLinkPrice,
- )
-
- mock = await upkeepMockFactory.deploy()
- await linkToken
- .connect(owner)
- .transfer(await keeper1.getAddress(), toWei('1000'))
- await linkToken
- .connect(owner)
- .transfer(await keeper2.getAddress(), toWei('1000'))
- await linkToken
- .connect(owner)
- .transfer(await keeper3.getAddress(), toWei('1000'))
-
- await registry.connect(owner).setKeepers(keepers, payees)
- const tx = await registry
- .connect(owner)
- .registerUpkeep(
- mock.address,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- )
- id = await getUpkeepID(tx)
- })
-
- const linkForGas = (
- upkeepGasSpent: BigNumberish,
- premiumPPB?: BigNumberish,
- flatFee?: BigNumberish,
- ) => {
- premiumPPB = premiumPPB === undefined ? paymentPremiumPPB : premiumPPB
- flatFee = flatFee === undefined ? flatFeeMicroLink : flatFee
- const gasSpent = registryGasOverhead.add(BigNumber.from(upkeepGasSpent))
- const base = gasWei.mul(gasSpent).mul(linkDivisibility).div(linkEth)
- const premium = base.mul(premiumPPB).div(paymentPremiumBase)
- const flatFeeJules = BigNumber.from(flatFee).mul('1000000000000')
- return base.add(premium).add(flatFeeJules)
- }
-
- describe('#setKeepers', () => {
- const IGNORE_ADDRESS = '0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF'
- it('reverts when not called by the owner', async () => {
- await evmRevert(
- registry.connect(keeper1).setKeepers([], []),
- 'Only callable by owner',
- )
- })
-
- it('reverts when adding the same keeper twice', async () => {
- await evmRevert(
- registry
- .connect(owner)
- .setKeepers(
- [await keeper1.getAddress(), await keeper1.getAddress()],
- [await payee1.getAddress(), await payee1.getAddress()],
- ),
- 'cannot add keeper twice',
- )
- })
-
- it('reverts with different numbers of keepers/payees', async () => {
- await evmRevert(
- registry
- .connect(owner)
- .setKeepers(
- [await keeper1.getAddress(), await keeper2.getAddress()],
- [await payee1.getAddress()],
- ),
- 'address lists not the same length',
- )
- await evmRevert(
- registry
- .connect(owner)
- .setKeepers(
- [await keeper1.getAddress()],
- [await payee1.getAddress(), await payee2.getAddress()],
- ),
- 'address lists not the same length',
- )
- })
-
- it('reverts if the payee is the zero address', async () => {
- await evmRevert(
- registry
- .connect(owner)
- .setKeepers(
- [await keeper1.getAddress(), await keeper2.getAddress()],
- [
- await payee1.getAddress(),
- '0x0000000000000000000000000000000000000000',
- ],
- ),
- 'cannot set payee to the zero address',
- )
- })
-
- it('emits events for every keeper added and removed', async () => {
- const oldKeepers = [
- await keeper1.getAddress(),
- await keeper2.getAddress(),
- ]
- const oldPayees = [await payee1.getAddress(), await payee2.getAddress()]
- await registry.connect(owner).setKeepers(oldKeepers, oldPayees)
- assert.deepEqual(oldKeepers, await registry.getKeeperList())
-
- // remove keepers
- const newKeepers = [
- await keeper2.getAddress(),
- await keeper3.getAddress(),
- ]
- const newPayees = [await payee2.getAddress(), await payee3.getAddress()]
- const tx = await registry.connect(owner).setKeepers(newKeepers, newPayees)
- assert.deepEqual(newKeepers, await registry.getKeeperList())
-
- await expect(tx)
- .to.emit(registry, 'KeepersUpdated')
- .withArgs(newKeepers, newPayees)
- })
-
- it('updates the keeper to inactive when removed', async () => {
- await registry.connect(owner).setKeepers(keepers, payees)
- await registry
- .connect(owner)
- .setKeepers(
- [await keeper1.getAddress(), await keeper3.getAddress()],
- [await payee1.getAddress(), await payee3.getAddress()],
- )
- const added = await registry.getKeeperInfo(await keeper1.getAddress())
- assert.isTrue(added.active)
- const removed = await registry.getKeeperInfo(await keeper2.getAddress())
- assert.isFalse(removed.active)
- })
-
- it('does not change the payee if IGNORE_ADDRESS is used as payee', async () => {
- const oldKeepers = [
- await keeper1.getAddress(),
- await keeper2.getAddress(),
- ]
- const oldPayees = [await payee1.getAddress(), await payee2.getAddress()]
- await registry.connect(owner).setKeepers(oldKeepers, oldPayees)
- assert.deepEqual(oldKeepers, await registry.getKeeperList())
-
- const newKeepers = [
- await keeper2.getAddress(),
- await keeper3.getAddress(),
- ]
- const newPayees = [IGNORE_ADDRESS, await payee3.getAddress()]
- const tx = await registry.connect(owner).setKeepers(newKeepers, newPayees)
- assert.deepEqual(newKeepers, await registry.getKeeperList())
-
- const ignored = await registry.getKeeperInfo(await keeper2.getAddress())
- assert.equal(await payee2.getAddress(), ignored.payee)
- assert.equal(true, ignored.active)
-
- await expect(tx)
- .to.emit(registry, 'KeepersUpdated')
- .withArgs(newKeepers, newPayees)
- })
-
- it('reverts if the owner changes the payee', async () => {
- await registry.connect(owner).setKeepers(keepers, payees)
- await evmRevert(
- registry
- .connect(owner)
- .setKeepers(keepers, [
- await payee1.getAddress(),
- await payee2.getAddress(),
- await owner.getAddress(),
- ]),
- 'cannot change payee',
- )
- })
- })
-
- describe('#registerUpkeep', () => {
- it('reverts if the target is not a contract', async () => {
- await evmRevert(
- registry
- .connect(owner)
- .registerUpkeep(
- zeroAddress,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- ),
- 'target is not a contract',
- )
- })
-
- it('reverts if called by a non-owner', async () => {
- await evmRevert(
- registry
- .connect(keeper1)
- .registerUpkeep(
- mock.address,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- ),
- 'Only callable by owner or registrar',
- )
- })
-
- it('reverts if execute gas is too low', async () => {
- await evmRevert(
- registry
- .connect(owner)
- .registerUpkeep(
- mock.address,
- 2299,
- await admin.getAddress(),
- emptyBytes,
- ),
- 'min gas is 2300',
- )
- })
-
- it('reverts if execute gas is too high', async () => {
- await evmRevert(
- registry
- .connect(owner)
- .registerUpkeep(
- mock.address,
- 5000001,
- await admin.getAddress(),
- emptyBytes,
- ),
- 'max gas is 5000000',
- )
- })
-
- it('creates a record of the registration', async () => {
- const tx = await registry
- .connect(owner)
- .registerUpkeep(
- mock.address,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- )
- id = await getUpkeepID(tx)
- await expect(tx)
- .to.emit(registry, 'UpkeepRegistered')
- .withArgs(id, executeGas, await admin.getAddress())
- const registration = await registry.getUpkeep(id)
- assert.equal(mock.address, registration.target)
- assert.equal(0, registration.balance.toNumber())
- assert.equal(emptyBytes, registration.checkData)
- assert(registration.maxValidBlocknumber.eq('0xffffffffffffffff'))
- })
- })
-
- describe('#addFunds', () => {
- const amount = toWei('1')
-
- beforeEach(async () => {
- await linkToken.connect(keeper1).approve(registry.address, toWei('100'))
- })
-
- it('reverts if the registration does not exist', async () => {
- await evmRevert(
- registry.connect(keeper1).addFunds(id.add(1), amount),
- 'upkeep must be active',
- )
- })
-
- it('adds to the balance of the registration', async () => {
- await registry.connect(keeper1).addFunds(id, amount)
- const registration = await registry.getUpkeep(id)
- assert.isTrue(amount.eq(registration.balance))
- })
-
- it('emits a log', async () => {
- const tx = await registry.connect(keeper1).addFunds(id, amount)
- await expect(tx)
- .to.emit(registry, 'FundsAdded')
- .withArgs(id, await keeper1.getAddress(), amount)
- })
-
- it('reverts if the upkeep is canceled', async () => {
- await registry.connect(admin).cancelUpkeep(id)
- await evmRevert(
- registry.connect(keeper1).addFunds(id, amount),
- 'upkeep must be active',
- )
- })
- })
-
- describe('#checkUpkeep', () => {
- it('reverts if the upkeep is not funded', async () => {
- await mock.setCanPerform(true)
- await mock.setCanCheck(true)
- await evmRevert(
- registry
- .connect(zeroAddress)
- .callStatic.checkUpkeep(id, await keeper1.getAddress()),
- 'insufficient funds',
- )
- })
-
- context('when the registration is funded', () => {
- beforeEach(async () => {
- await linkToken.connect(keeper1).approve(registry.address, toWei('100'))
- await registry.connect(keeper1).addFunds(id, toWei('100'))
- })
-
- it('reverts if executed', async () => {
- await mock.setCanPerform(true)
- await mock.setCanCheck(true)
- await evmRevert(
- registry.checkUpkeep(id, await keeper1.getAddress()),
- 'only for simulated backend',
- )
- })
-
- it('reverts if the specified keeper is not valid', async () => {
- await mock.setCanPerform(true)
- await mock.setCanCheck(true)
- await evmRevert(
- registry.checkUpkeep(id, await owner.getAddress()),
- 'only for simulated backend',
- )
- })
-
- context('and upkeep is not needed', () => {
- beforeEach(async () => {
- await mock.setCanCheck(false)
- })
-
- it('reverts', async () => {
- await evmRevert(
- registry
- .connect(zeroAddress)
- .callStatic.checkUpkeep(id, await keeper1.getAddress()),
- 'upkeep not needed',
- )
- })
- })
-
- context('and the upkeep check fails', () => {
- beforeEach(async () => {
- const reverter = await upkeepReverterFactory.deploy()
- const tx = await registry
- .connect(owner)
- .registerUpkeep(
- reverter.address,
- 2500000,
- await admin.getAddress(),
- emptyBytes,
- )
- id = await getUpkeepID(tx)
- await linkToken
- .connect(keeper1)
- .approve(registry.address, toWei('100'))
- await registry.connect(keeper1).addFunds(id, toWei('100'))
- })
-
- it('reverts', async () => {
- await evmRevert(
- registry
- .connect(zeroAddress)
- .callStatic.checkUpkeep(id, await keeper1.getAddress()),
- 'call to check target failed',
- )
- })
- })
-
- context('and upkeep check simulations succeeds', () => {
- beforeEach(async () => {
- await mock.setCanCheck(true)
- await mock.setCanPerform(true)
- })
-
- context('and the registry is paused', () => {
- beforeEach(async () => {
- await registry.connect(owner).pause()
- })
-
- it('reverts', async () => {
- await evmRevert(
- registry
- .connect(zeroAddress)
- .callStatic.checkUpkeep(id, await keeper1.getAddress()),
- 'Pausable: paused',
- )
-
- await registry.connect(owner).unpause()
-
- await registry
- .connect(zeroAddress)
- .callStatic.checkUpkeep(id, await keeper1.getAddress())
- })
- })
-
- it('returns true with pricing info if the target can execute', async () => {
- const newGasMultiplier = BigNumber.from(10)
- await registry
- .connect(owner)
- .setConfig(
- paymentPremiumPPB,
- flatFeeMicroLink,
- blockCountPerTurn,
- maxCheckGas,
- stalenessSeconds,
- newGasMultiplier,
- fallbackGasPrice,
- fallbackLinkPrice,
- )
- const response = await registry
- .connect(zeroAddress)
- .callStatic.checkUpkeep(id, await keeper1.getAddress())
- assert.isTrue(response.gasLimit.eq(executeGas))
- assert.isTrue(response.linkEth.eq(linkEth))
- assert.isTrue(
- response.adjustedGasWei.eq(gasWei.mul(newGasMultiplier)),
- )
- assert.isTrue(
- response.maxLinkPayment.eq(
- linkForGas(executeGas.toNumber()).mul(newGasMultiplier),
- ),
- )
- })
-
- it('has a large enough gas overhead to cover upkeeps that use all their gas [ @skip-coverage ]', async () => {
- await mock.setCheckGasToBurn(maxCheckGas)
- await mock.setPerformGasToBurn(executeGas)
- const gas = maxCheckGas
- .add(executeGas)
- .add(PERFORM_GAS_OVERHEAD)
- .add(CHECK_GAS_OVERHEAD)
- await registry
- .connect(zeroAddress)
- .callStatic.checkUpkeep(id, await keeper1.getAddress(), {
- gasLimit: gas,
- })
- })
- })
- })
- })
-
- describe('#performUpkeep', () => {
- let _lastKeeper = keeper1
- async function getPerformPaymentAmount() {
- _lastKeeper = _lastKeeper === keeper1 ? keeper2 : keeper1
- const before = (
- await registry.getKeeperInfo(await _lastKeeper.getAddress())
- ).balance
- await registry.connect(_lastKeeper).performUpkeep(id, '0x')
- const after = (
- await registry.getKeeperInfo(await _lastKeeper.getAddress())
- ).balance
- const difference = after.sub(before)
- return difference
- }
-
- it('reverts if the registration is not funded', async () => {
- await evmRevert(
- registry.connect(keeper2).performUpkeep(id, '0x'),
- 'insufficient funds',
- )
- })
-
- context('when the registration is funded', () => {
- beforeEach(async () => {
- await linkToken.connect(owner).approve(registry.address, toWei('100'))
- await registry.connect(owner).addFunds(id, toWei('100'))
- })
-
- it('does not revert if the target cannot execute', async () => {
- const mockResponse = await mock
- .connect(zeroAddress)
- .callStatic.checkUpkeep('0x')
- assert.isFalse(mockResponse.callable)
-
- await registry.connect(keeper3).performUpkeep(id, '0x')
- })
-
- it('returns false if the target cannot execute', async () => {
- const mockResponse = await mock
- .connect(zeroAddress)
- .callStatic.checkUpkeep('0x')
- assert.isFalse(mockResponse.callable)
-
- assert.isFalse(
- await registry.connect(keeper1).callStatic.performUpkeep(id, '0x'),
- )
- })
-
- it('returns true if called', async () => {
- await mock.setCanPerform(true)
-
- const response = await registry
- .connect(keeper1)
- .callStatic.performUpkeep(id, '0x')
- assert.isTrue(response)
- })
-
- it('reverts if not enough gas supplied', async () => {
- await mock.setCanPerform(true)
-
- await evmRevert(
- registry
- .connect(keeper1)
- .performUpkeep(id, '0x', { gasLimit: BigNumber.from('120000') }),
- )
- })
-
- it('executes the data passed to the registry', async () => {
- await mock.setCanPerform(true)
-
- const performData = '0xc0ffeec0ffee'
- const tx = await registry
- .connect(keeper1)
- .performUpkeep(id, performData, { gasLimit: extraGas })
- const receipt = await tx.wait()
- const eventLog = receipt?.events
-
- assert.equal(eventLog?.length, 2)
- assert.equal(eventLog?.[1].event, 'UpkeepPerformed')
- assert.equal(eventLog?.[1].args?.[0].toNumber(), id.toNumber())
- assert.equal(eventLog?.[1].args?.[1], true)
- assert.equal(eventLog?.[1].args?.[2], await keeper1.getAddress())
- assert.isNotEmpty(eventLog?.[1].args?.[3])
- assert.equal(eventLog?.[1].args?.[4], performData)
- })
-
- it('updates payment balances', async () => {
- const keeperBefore = await registry.getKeeperInfo(
- await keeper1.getAddress(),
- )
- const registrationBefore = await registry.getUpkeep(id)
- const keeperLinkBefore = await linkToken.balanceOf(
- await keeper1.getAddress(),
- )
- const registryLinkBefore = await linkToken.balanceOf(registry.address)
-
- // Do the thing
- await registry.connect(keeper1).performUpkeep(id, '0x')
-
- const keeperAfter = await registry.getKeeperInfo(
- await keeper1.getAddress(),
- )
- const registrationAfter = await registry.getUpkeep(id)
- const keeperLinkAfter = await linkToken.balanceOf(
- await keeper1.getAddress(),
- )
- const registryLinkAfter = await linkToken.balanceOf(registry.address)
-
- assert.isTrue(keeperAfter.balance.gt(keeperBefore.balance))
- assert.isTrue(registrationBefore.balance.gt(registrationAfter.balance))
- assert.isTrue(keeperLinkAfter.eq(keeperLinkBefore))
- assert.isTrue(registryLinkBefore.eq(registryLinkAfter))
- })
-
- it('only pays for gas used [ @skip-coverage ]', async () => {
- const before = (
- await registry.getKeeperInfo(await keeper1.getAddress())
- ).balance
- const tx = await registry.connect(keeper1).performUpkeep(id, '0x')
- const receipt = await tx.wait()
- const after = (await registry.getKeeperInfo(await keeper1.getAddress()))
- .balance
-
- const max = linkForGas(executeGas.toNumber())
- const totalTx = linkForGas(receipt.gasUsed.toNumber())
- const difference = after.sub(before)
- assert.isTrue(max.gt(totalTx))
- assert.isTrue(totalTx.gt(difference))
- assert.isTrue(linkForGas(5700).lt(difference)) // exact number is flaky
- assert.isTrue(linkForGas(6000).gt(difference)) // instead test a range
- })
-
- it('only pays at a rate up to the gas ceiling [ @skip-coverage ]', async () => {
- const multiplier = BigNumber.from(10)
- const gasPrice = BigNumber.from('1000000000') // 10M x the gas feed's rate
- await registry
- .connect(owner)
- .setConfig(
- paymentPremiumPPB,
- flatFeeMicroLink,
- blockCountPerTurn,
- maxCheckGas,
- stalenessSeconds,
- multiplier,
- fallbackGasPrice,
- fallbackLinkPrice,
- )
-
- const before = (
- await registry.getKeeperInfo(await keeper1.getAddress())
- ).balance
- const tx = await registry
- .connect(keeper1)
- .performUpkeep(id, '0x', { gasPrice })
- const receipt = await tx.wait()
- const after = (await registry.getKeeperInfo(await keeper1.getAddress()))
- .balance
-
- const max = linkForGas(executeGas).mul(multiplier)
- const totalTx = linkForGas(receipt.gasUsed).mul(multiplier)
- const difference = after.sub(before)
- assert.isTrue(max.gt(totalTx))
- assert.isTrue(totalTx.gt(difference))
- assert.isTrue(linkForGas(5700).mul(multiplier).lt(difference))
- assert.isTrue(linkForGas(6000).mul(multiplier).gt(difference))
- })
-
- it('only pays as much as the node spent [ @skip-coverage ]', async () => {
- const multiplier = BigNumber.from(10)
- const gasPrice = BigNumber.from(200) // 2X the gas feed's rate
- const effectiveMultiplier = BigNumber.from(2)
- await registry
- .connect(owner)
- .setConfig(
- paymentPremiumPPB,
- flatFeeMicroLink,
- blockCountPerTurn,
- maxCheckGas,
- stalenessSeconds,
- multiplier,
- fallbackGasPrice,
- fallbackLinkPrice,
- )
-
- const before = (
- await registry.getKeeperInfo(await keeper1.getAddress())
- ).balance
- const tx = await registry
- .connect(keeper1)
- .performUpkeep(id, '0x', { gasPrice })
- const receipt = await tx.wait()
- const after = (await registry.getKeeperInfo(await keeper1.getAddress()))
- .balance
-
- const max = linkForGas(executeGas.toNumber()).mul(effectiveMultiplier)
- const totalTx = linkForGas(receipt.gasUsed).mul(effectiveMultiplier)
- const difference = after.sub(before)
- assert.isTrue(max.gt(totalTx))
- assert.isTrue(totalTx.gt(difference))
- assert.isTrue(linkForGas(5700).mul(effectiveMultiplier).lt(difference))
- assert.isTrue(linkForGas(6000).mul(effectiveMultiplier).gt(difference))
- })
-
- it('pays the caller even if the target function fails', async () => {
- const tx = await registry
- .connect(owner)
- .registerUpkeep(
- mock.address,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- )
- const id = await getUpkeepID(tx)
- await linkToken.connect(owner).approve(registry.address, toWei('100'))
- await registry.connect(owner).addFunds(id, toWei('100'))
- const keeperBalanceBefore = (
- await registry.getKeeperInfo(await keeper1.getAddress())
- ).balance
-
- // Do the thing
- await registry.connect(keeper1).performUpkeep(id, '0x')
-
- const keeperBalanceAfter = (
- await registry.getKeeperInfo(await keeper1.getAddress())
- ).balance
- assert.isTrue(keeperBalanceAfter.gt(keeperBalanceBefore))
- })
-
- it('reverts if called by a non-keeper', async () => {
- await evmRevert(
- registry.connect(nonkeeper).performUpkeep(id, '0x'),
- 'only active keepers',
- )
- })
-
- it('reverts if the upkeep has been canceled', async () => {
- await mock.setCanPerform(true)
-
- await registry.connect(owner).cancelUpkeep(id)
-
- await evmRevert(
- registry.connect(keeper1).performUpkeep(id, '0x'),
- 'invalid upkeep id',
- )
- })
-
- it('uses the fallback gas price if the feed price is stale [ @skip-coverage ]', async () => {
- const normalAmount = await getPerformPaymentAmount()
- const roundId = 99
- const answer = 100
- const updatedAt = 946684800 // New Years 2000 🥳
- const startedAt = 946684799
- await gasPriceFeed
- .connect(owner)
- .updateRoundData(roundId, answer, updatedAt, startedAt)
- const amountWithStaleFeed = await getPerformPaymentAmount()
- assert.isTrue(normalAmount.lt(amountWithStaleFeed))
- })
-
- it('uses the fallback gas price if the feed price is non-sensical [ @skip-coverage ]', async () => {
- const normalAmount = await getPerformPaymentAmount()
- const roundId = 99
- const updatedAt = Math.floor(Date.now() / 1000)
- const startedAt = 946684799
- await gasPriceFeed
- .connect(owner)
- .updateRoundData(roundId, -100, updatedAt, startedAt)
- const amountWithNegativeFeed = await getPerformPaymentAmount()
- await gasPriceFeed
- .connect(owner)
- .updateRoundData(roundId, 0, updatedAt, startedAt)
- const amountWithZeroFeed = await getPerformPaymentAmount()
- assert.isTrue(normalAmount.lt(amountWithNegativeFeed))
- assert.isTrue(normalAmount.lt(amountWithZeroFeed))
- })
-
- it('uses the fallback if the link price feed is stale', async () => {
- const normalAmount = await getPerformPaymentAmount()
- const roundId = 99
- const answer = 100
- const updatedAt = 946684800 // New Years 2000 🥳
- const startedAt = 946684799
- await linkEthFeed
- .connect(owner)
- .updateRoundData(roundId, answer, updatedAt, startedAt)
- const amountWithStaleFeed = await getPerformPaymentAmount()
- assert.isTrue(normalAmount.lt(amountWithStaleFeed))
- })
-
- it('uses the fallback link price if the feed price is non-sensical', async () => {
- const normalAmount = await getPerformPaymentAmount()
- const roundId = 99
- const updatedAt = Math.floor(Date.now() / 1000)
- const startedAt = 946684799
- await linkEthFeed
- .connect(owner)
- .updateRoundData(roundId, -100, updatedAt, startedAt)
- const amountWithNegativeFeed = await getPerformPaymentAmount()
- await linkEthFeed
- .connect(owner)
- .updateRoundData(roundId, 0, updatedAt, startedAt)
- const amountWithZeroFeed = await getPerformPaymentAmount()
- assert.isTrue(normalAmount.lt(amountWithNegativeFeed))
- assert.isTrue(normalAmount.lt(amountWithZeroFeed))
- })
-
- it('reverts if the same caller calls twice in a row', async () => {
- await registry.connect(keeper1).performUpkeep(id, '0x')
- await evmRevert(
- registry.connect(keeper1).performUpkeep(id, '0x'),
- 'keepers must take turns',
- )
- await registry.connect(keeper2).performUpkeep(id, '0x')
- await evmRevert(
- registry.connect(keeper2).performUpkeep(id, '0x'),
- 'keepers must take turns',
- )
- await registry.connect(keeper1).performUpkeep(id, '0x')
- })
-
- it('has a large enough gas overhead to cover upkeeps that use all their gas [ @skip-coverage ]', async () => {
- await mock.setPerformGasToBurn(executeGas)
- await mock.setCanPerform(true)
- const gas = executeGas.add(PERFORM_GAS_OVERHEAD)
- const performData = '0xc0ffeec0ffee'
- const tx = await registry
- .connect(keeper1)
- .performUpkeep(id, performData, { gasLimit: gas })
- const receipt = await tx.wait()
- const eventLog = receipt?.events
-
- assert.equal(eventLog?.length, 2)
- assert.equal(eventLog?.[1].event, 'UpkeepPerformed')
- assert.equal(eventLog?.[1].args?.[0].toNumber(), id.toNumber())
- assert.equal(eventLog?.[1].args?.[1], true)
- assert.equal(eventLog?.[1].args?.[2], await keeper1.getAddress())
- assert.isNotEmpty(eventLog?.[1].args?.[3])
- assert.equal(eventLog?.[1].args?.[4], performData)
- })
- })
- })
-
- describe('#withdrawFunds', () => {
- beforeEach(async () => {
- await linkToken.connect(keeper1).approve(registry.address, toWei('100'))
- await registry.connect(keeper1).addFunds(id, toWei('1'))
- })
-
- it('reverts if called by anyone but the admin', async () => {
- await evmRevert(
- registry
- .connect(owner)
- .withdrawFunds(id.add(1).toNumber(), await payee1.getAddress()),
- 'only callable by admin',
- )
- })
-
- it('reverts if called on an uncanceled upkeep', async () => {
- await evmRevert(
- registry.connect(admin).withdrawFunds(id, await payee1.getAddress()),
- 'upkeep must be canceled',
- )
- })
-
- it('reverts if called with the 0 address', async () => {
- await evmRevert(
- registry.connect(admin).withdrawFunds(id, zeroAddress),
- 'cannot send to zero address',
- )
- })
-
- describe('after the registration is cancelled', () => {
- beforeEach(async () => {
- await registry.connect(owner).cancelUpkeep(id)
- })
-
- it('moves the funds out and updates the balance', async () => {
- const payee1Before = await linkToken.balanceOf(
- await payee1.getAddress(),
- )
- const registryBefore = await linkToken.balanceOf(registry.address)
-
- let registration = await registry.getUpkeep(id)
- assert.isTrue(toWei('1').eq(registration.balance))
-
- await registry
- .connect(admin)
- .withdrawFunds(id, await payee1.getAddress())
-
- const payee1After = await linkToken.balanceOf(await payee1.getAddress())
- const registryAfter = await linkToken.balanceOf(registry.address)
-
- assert.isTrue(payee1Before.add(toWei('1')).eq(payee1After))
- assert.isTrue(registryBefore.sub(toWei('1')).eq(registryAfter))
-
- registration = await registry.getUpkeep(id)
- assert.equal(0, registration.balance.toNumber())
- })
- })
- })
-
- describe('#cancelUpkeep', () => {
- it('reverts if the ID is not valid', async () => {
- await evmRevert(
- registry.connect(owner).cancelUpkeep(id.add(1).toNumber()),
- 'too late to cancel upkeep',
- )
- })
-
- it('reverts if called by a non-owner/non-admin', async () => {
- await evmRevert(
- registry.connect(keeper1).cancelUpkeep(id),
- 'only owner or admin',
- )
- })
-
- describe('when called by the owner', async () => {
- it('sets the registration to invalid immediately', async () => {
- const tx = await registry.connect(owner).cancelUpkeep(id)
- const receipt = await tx.wait()
- const registration = await registry.getUpkeep(id)
- assert.equal(
- registration.maxValidBlocknumber.toNumber(),
- receipt.blockNumber,
- )
- })
-
- it('emits an event', async () => {
- const tx = await registry.connect(owner).cancelUpkeep(id)
- const receipt = await tx.wait()
- await expect(tx)
- .to.emit(registry, 'UpkeepCanceled')
- .withArgs(id, BigNumber.from(receipt.blockNumber))
- })
-
- it('updates the canceled registrations list', async () => {
- let canceled = await registry.callStatic.getCanceledUpkeepList()
- assert.deepEqual([], canceled)
-
- await registry.connect(owner).cancelUpkeep(id)
-
- canceled = await registry.callStatic.getCanceledUpkeepList()
- assert.deepEqual([id], canceled)
- })
-
- it('immediately prevents upkeep', async () => {
- await registry.connect(owner).cancelUpkeep(id)
-
- await evmRevert(
- registry.connect(keeper2).performUpkeep(id, '0x'),
- 'invalid upkeep id',
- )
- })
-
- it('does not revert if reverts if called multiple times', async () => {
- await registry.connect(owner).cancelUpkeep(id)
- await evmRevert(
- registry.connect(owner).cancelUpkeep(id),
- 'too late to cancel upkeep',
- )
- })
-
- describe('when called by the owner when the admin has just canceled', () => {
- let oldExpiration: BigNumber
-
- beforeEach(async () => {
- await registry.connect(admin).cancelUpkeep(id)
- const registration = await registry.getUpkeep(id)
- oldExpiration = registration.maxValidBlocknumber
- })
-
- it('allows the owner to cancel it more quickly', async () => {
- await registry.connect(owner).cancelUpkeep(id)
-
- const registration = await registry.getUpkeep(id)
- const newExpiration = registration.maxValidBlocknumber
- assert.isTrue(newExpiration.lt(oldExpiration))
- })
- })
- })
-
- describe('when called by the admin', async () => {
- const delay = 50
-
- it('sets the registration to invalid in 50 blocks', async () => {
- const tx = await registry.connect(admin).cancelUpkeep(id)
- const receipt = await tx.wait()
- const registration = await registry.getUpkeep(id)
- assert.equal(
- registration.maxValidBlocknumber.toNumber(),
- receipt.blockNumber + 50,
- )
- })
-
- it('emits an event', async () => {
- const tx = await registry.connect(admin).cancelUpkeep(id)
- const receipt = await tx.wait()
- await expect(tx)
- .to.emit(registry, 'UpkeepCanceled')
- .withArgs(id, BigNumber.from(receipt.blockNumber + delay))
- })
-
- it('updates the canceled registrations list', async () => {
- let canceled = await registry.callStatic.getCanceledUpkeepList()
- assert.deepEqual([], canceled)
-
- await registry.connect(admin).cancelUpkeep(id)
-
- canceled = await registry.callStatic.getCanceledUpkeepList()
- assert.deepEqual([id], canceled)
- })
-
- it('immediately prevents upkeep', async () => {
- await linkToken.connect(owner).approve(registry.address, toWei('100'))
- await registry.connect(owner).addFunds(id, toWei('100'))
- await registry.connect(admin).cancelUpkeep(id)
- await registry.connect(keeper2).performUpkeep(id, '0x') // still works
-
- for (let i = 0; i < delay; i++) {
- await ethers.provider.send('evm_mine', [])
- }
-
- await evmRevert(
- registry.connect(keeper2).performUpkeep(id, '0x'),
- 'invalid upkeep id',
- )
- })
-
- it('reverts if called again by the admin', async () => {
- await registry.connect(admin).cancelUpkeep(id)
-
- await evmRevert(
- registry.connect(admin).cancelUpkeep(id),
- 'too late to cancel upkeep',
- )
- })
-
- it('does not revert or double add the cancellation record if called by the owner immediately after', async () => {
- await registry.connect(admin).cancelUpkeep(id)
-
- await registry.connect(owner).cancelUpkeep(id)
-
- const canceled = await registry.callStatic.getCanceledUpkeepList()
- assert.deepEqual([id], canceled)
- })
-
- it('reverts if called by the owner after the timeout', async () => {
- await registry.connect(admin).cancelUpkeep(id)
-
- for (let i = 0; i < delay; i++) {
- await ethers.provider.send('evm_mine', [])
- }
-
- await evmRevert(
- registry.connect(owner).cancelUpkeep(id),
- 'too late to cancel upkeep',
- )
- })
- })
- })
-
- describe('#withdrawPayment', () => {
- beforeEach(async () => {
- await linkToken.connect(owner).approve(registry.address, toWei('100'))
- await registry.connect(owner).addFunds(id, toWei('100'))
- await registry.connect(keeper1).performUpkeep(id, '0x')
- })
-
- it('reverts if called by anyone but the payee', async () => {
- await evmRevert(
- registry
- .connect(payee2)
- .withdrawPayment(
- await keeper1.getAddress(),
- await nonkeeper.getAddress(),
- ),
- 'only callable by payee',
- )
- })
-
- it('reverts if called with the 0 address', async () => {
- await evmRevert(
- registry
- .connect(payee2)
- .withdrawPayment(await keeper1.getAddress(), zeroAddress),
- 'cannot send to zero address',
- )
- })
-
- it('updates the balances', async () => {
- const to = await nonkeeper.getAddress()
- const keeperBefore = (
- await registry.getKeeperInfo(await keeper1.getAddress())
- ).balance
- const registrationBefore = (await registry.getUpkeep(id)).balance
- const toLinkBefore = await linkToken.balanceOf(to)
- const registryLinkBefore = await linkToken.balanceOf(registry.address)
-
- //// Do the thing
- await registry
- .connect(payee1)
- .withdrawPayment(await keeper1.getAddress(), to)
-
- const keeperAfter = (
- await registry.getKeeperInfo(await keeper1.getAddress())
- ).balance
- const registrationAfter = (await registry.getUpkeep(id)).balance
- const toLinkAfter = await linkToken.balanceOf(to)
- const registryLinkAfter = await linkToken.balanceOf(registry.address)
-
- assert.isTrue(keeperAfter.eq(BigNumber.from(0)))
- assert.isTrue(registrationBefore.eq(registrationAfter))
- assert.isTrue(toLinkBefore.add(keeperBefore).eq(toLinkAfter))
- assert.isTrue(registryLinkBefore.sub(keeperBefore).eq(registryLinkAfter))
- })
-
- it('emits a log announcing the withdrawal', async () => {
- const balance = (await registry.getKeeperInfo(await keeper1.getAddress()))
- .balance
- const tx = await registry
- .connect(payee1)
- .withdrawPayment(
- await keeper1.getAddress(),
- await nonkeeper.getAddress(),
- )
- await expect(tx)
- .to.emit(registry, 'PaymentWithdrawn')
- .withArgs(
- await keeper1.getAddress(),
- balance,
- await nonkeeper.getAddress(),
- await payee1.getAddress(),
- )
- })
- })
-
- describe('#transferPayeeship', () => {
- it('reverts when called by anyone but the current payee', async () => {
- await evmRevert(
- registry
- .connect(payee2)
- .transferPayeeship(
- await keeper1.getAddress(),
- await payee2.getAddress(),
- ),
- 'only callable by payee',
- )
- })
-
- it('reverts when transferring to self', async () => {
- await evmRevert(
- registry
- .connect(payee1)
- .transferPayeeship(
- await keeper1.getAddress(),
- await payee1.getAddress(),
- ),
- 'cannot transfer to self',
- )
- })
-
- it('does not change the payee', async () => {
- await registry
- .connect(payee1)
- .transferPayeeship(
- await keeper1.getAddress(),
- await payee2.getAddress(),
- )
-
- const info = await registry.getKeeperInfo(await keeper1.getAddress())
- assert.equal(await payee1.getAddress(), info.payee)
- })
-
- it('emits an event announcing the new payee', async () => {
- const tx = await registry
- .connect(payee1)
- .transferPayeeship(
- await keeper1.getAddress(),
- await payee2.getAddress(),
- )
- await expect(tx)
- .to.emit(registry, 'PayeeshipTransferRequested')
- .withArgs(
- await keeper1.getAddress(),
- await payee1.getAddress(),
- await payee2.getAddress(),
- )
- })
-
- it('does not emit an event when called with the same proposal', async () => {
- await registry
- .connect(payee1)
- .transferPayeeship(
- await keeper1.getAddress(),
- await payee2.getAddress(),
- )
-
- const tx = await registry
- .connect(payee1)
- .transferPayeeship(
- await keeper1.getAddress(),
- await payee2.getAddress(),
- )
- const receipt = await tx.wait()
- assert.equal(0, receipt.logs.length)
- })
- })
-
- describe('#acceptPayeeship', () => {
- beforeEach(async () => {
- await registry
- .connect(payee1)
- .transferPayeeship(
- await keeper1.getAddress(),
- await payee2.getAddress(),
- )
- })
-
- it('reverts when called by anyone but the proposed payee', async () => {
- await evmRevert(
- registry.connect(payee1).acceptPayeeship(await keeper1.getAddress()),
- 'only callable by proposed payee',
- )
- })
-
- it('emits an event announcing the new payee', async () => {
- const tx = await registry
- .connect(payee2)
- .acceptPayeeship(await keeper1.getAddress())
- await expect(tx)
- .to.emit(registry, 'PayeeshipTransferred')
- .withArgs(
- await keeper1.getAddress(),
- await payee1.getAddress(),
- await payee2.getAddress(),
- )
- })
-
- it('does change the payee', async () => {
- await registry.connect(payee2).acceptPayeeship(await keeper1.getAddress())
-
- const info = await registry.getKeeperInfo(await keeper1.getAddress())
- assert.equal(await payee2.getAddress(), info.payee)
- })
- })
-
- describe('#setConfig', () => {
- const payment = BigNumber.from(1)
- const flatFee = BigNumber.from(2)
- const checks = BigNumber.from(3)
- const staleness = BigNumber.from(4)
- const ceiling = BigNumber.from(5)
- const maxGas = BigNumber.from(6)
- const fbGasEth = BigNumber.from(7)
- const fbLinkEth = BigNumber.from(8)
-
- it('reverts when called by anyone but the proposed owner', async () => {
- await evmRevert(
- registry
- .connect(payee1)
- .setConfig(
- payment,
- flatFee,
- checks,
- maxGas,
- staleness,
- gasCeilingMultiplier,
- fbGasEth,
- fbLinkEth,
- ),
- 'Only callable by owner',
- )
- })
-
- it('updates the config', async () => {
- const old = await registry.getConfig()
- const oldFlatFee = await registry.getFlatFee()
- assert.isTrue(paymentPremiumPPB.eq(old.paymentPremiumPPB))
- assert.isTrue(flatFeeMicroLink.eq(oldFlatFee))
- assert.isTrue(blockCountPerTurn.eq(old.blockCountPerTurn))
- assert.isTrue(stalenessSeconds.eq(old.stalenessSeconds))
- assert.isTrue(gasCeilingMultiplier.eq(old.gasCeilingMultiplier))
-
- await registry
- .connect(owner)
- .setConfig(
- payment,
- flatFee,
- checks,
- maxGas,
- staleness,
- ceiling,
- fbGasEth,
- fbLinkEth,
- )
-
- const updated = await registry.getConfig()
- const newFlatFee = await registry.getFlatFee()
- assert.equal(updated.paymentPremiumPPB, payment.toNumber())
- assert.equal(newFlatFee, flatFee.toNumber())
- assert.equal(updated.blockCountPerTurn, checks.toNumber())
- assert.equal(updated.stalenessSeconds, staleness.toNumber())
- assert.equal(updated.gasCeilingMultiplier, ceiling.toNumber())
- assert.equal(updated.checkGasLimit, maxGas.toNumber())
- assert.equal(updated.fallbackGasPrice.toNumber(), fbGasEth.toNumber())
- assert.equal(updated.fallbackLinkPrice.toNumber(), fbLinkEth.toNumber())
- })
-
- it('emits an event', async () => {
- const tx = await registry
- .connect(owner)
- .setConfig(
- payment,
- flatFee,
- checks,
- maxGas,
- staleness,
- ceiling,
- fbGasEth,
- fbLinkEth,
- )
- await expect(tx)
- .to.emit(registry, 'ConfigSet')
- .withArgs(
- payment,
- checks,
- maxGas,
- staleness,
- ceiling,
- fbGasEth,
- fbLinkEth,
- )
- })
- })
-
- describe('#onTokenTransfer', () => {
- const amount = toWei('1')
-
- it('reverts if not called by the LINK token', async () => {
- const data = ethers.utils.defaultAbiCoder.encode(
- ['uint256'],
- [id.toNumber().toString()],
- )
-
- await evmRevert(
- registry
- .connect(keeper1)
- .onTokenTransfer(await keeper1.getAddress(), amount, data),
- 'only callable through LINK',
- )
- })
-
- it('reverts if not called with more or less than 32 bytes', async () => {
- const longData = ethers.utils.defaultAbiCoder.encode(
- ['uint256', 'uint256'],
- ['33', '34'],
- )
- const shortData = '0x12345678'
-
- await evmRevert(
- linkToken
- .connect(owner)
- .transferAndCall(registry.address, amount, longData),
- )
- await evmRevert(
- linkToken
- .connect(owner)
- .transferAndCall(registry.address, amount, shortData),
- )
- })
-
- it('reverts if the upkeep is canceled', async () => {
- await registry.connect(admin).cancelUpkeep(id)
- await evmRevert(
- registry.connect(keeper1).addFunds(id, amount),
- 'upkeep must be active',
- )
- })
-
- it('updates the funds of the job id passed', async () => {
- const data = ethers.utils.defaultAbiCoder.encode(
- ['uint256'],
- [id.toNumber().toString()],
- )
-
- const before = (await registry.getUpkeep(id)).balance
- await linkToken
- .connect(owner)
- .transferAndCall(registry.address, amount, data)
- const after = (await registry.getUpkeep(id)).balance
-
- assert.isTrue(before.add(amount).eq(after))
- })
- })
-
- describe('#recoverFunds', () => {
- const sent = toWei('7')
-
- beforeEach(async () => {
- await linkToken.connect(keeper1).approve(registry.address, toWei('100'))
-
- // add funds to upkeep 1 and perform and withdraw some payment
- const tx = await registry
- .connect(owner)
- .registerUpkeep(
- mock.address,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- )
- const id1 = await getUpkeepID(tx)
- await registry.connect(keeper1).addFunds(id1, toWei('5'))
- await registry.connect(keeper1).performUpkeep(id1, '0x')
- await registry.connect(keeper2).performUpkeep(id1, '0x')
- await registry.connect(keeper3).performUpkeep(id1, '0x')
- await registry
- .connect(payee1)
- .withdrawPayment(
- await keeper1.getAddress(),
- await nonkeeper.getAddress(),
- )
-
- // transfer funds directly to the registry
- await linkToken.connect(keeper1).transfer(registry.address, sent)
-
- // add funds to upkeep 2 and perform and withdraw some payment
- const tx2 = await registry
- .connect(owner)
- .registerUpkeep(
- mock.address,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- )
- const id2 = await getUpkeepID(tx2)
- await registry.connect(keeper1).addFunds(id2, toWei('5'))
- await registry.connect(keeper1).performUpkeep(id2, '0x')
- await registry.connect(keeper2).performUpkeep(id2, '0x')
- await registry.connect(keeper3).performUpkeep(id2, '0x')
- await registry
- .connect(payee2)
- .withdrawPayment(
- await keeper2.getAddress(),
- await nonkeeper.getAddress(),
- )
-
- // transfer funds using onTokenTransfer
- const data = ethers.utils.defaultAbiCoder.encode(
- ['uint256'],
- [id2.toNumber().toString()],
- )
- await linkToken
- .connect(owner)
- .transferAndCall(registry.address, toWei('1'), data)
-
- // remove a keeper
- await registry
- .connect(owner)
- .setKeepers(
- [await keeper1.getAddress(), await keeper2.getAddress()],
- [await payee1.getAddress(), await payee2.getAddress()],
- )
-
- // withdraw some funds
- await registry.connect(owner).cancelUpkeep(id1)
- await registry.connect(admin).withdrawFunds(id1, await admin.getAddress())
- })
-
- it('reverts if not called by owner', async () => {
- await evmRevert(
- registry.connect(keeper1).recoverFunds(),
- 'Only callable by owner',
- )
- })
-
- it('allows any funds that have been accidentally transfered to be moved', async () => {
- const balanceBefore = await linkToken.balanceOf(registry.address)
-
- await linkToken.balanceOf(registry.address)
-
- await registry.connect(owner).recoverFunds()
- const balanceAfter = await linkToken.balanceOf(registry.address)
- assert.isTrue(balanceBefore.eq(balanceAfter.add(sent)))
- })
- })
-
- describe('#pause', () => {
- it('reverts if called by a non-owner', async () => {
- await evmRevert(
- registry.connect(keeper1).pause(),
- 'Only callable by owner',
- )
- })
-
- it('marks the contract as paused', async () => {
- assert.isFalse(await registry.paused())
-
- await registry.connect(owner).pause()
-
- assert.isTrue(await registry.paused())
- })
- })
-
- describe('#unpause', () => {
- beforeEach(async () => {
- await registry.connect(owner).pause()
- })
-
- it('reverts if called by a non-owner', async () => {
- await evmRevert(
- registry.connect(keeper1).unpause(),
- 'Only callable by owner',
- )
- })
-
- it('marks the contract as not paused', async () => {
- assert.isTrue(await registry.paused())
-
- await registry.connect(owner).unpause()
-
- assert.isFalse(await registry.paused())
- })
- })
-
- describe('#getMaxPaymentForGas', () => {
- const gasAmounts = [100000, 10000000]
- const premiums = [0, 250000000]
- const flatFees = [0, 1000000]
- it('calculates the max fee approptiately', async () => {
- for (let idx = 0; idx < gasAmounts.length; idx++) {
- const gas = gasAmounts[idx]
- for (let jdx = 0; jdx < premiums.length; jdx++) {
- const premium = premiums[jdx]
- for (let kdx = 0; kdx < flatFees.length; kdx++) {
- const flatFee = flatFees[kdx]
- await registry
- .connect(owner)
- .setConfig(
- premium,
- flatFee,
- blockCountPerTurn,
- maxCheckGas,
- stalenessSeconds,
- gasCeilingMultiplier,
- fallbackGasPrice,
- fallbackLinkPrice,
- )
- const price = await registry.getMaxPaymentForGas(gas)
- expect(price).to.equal(linkForGas(gas, premium, flatFee))
- }
- }
- }
- })
- })
-
- describe('#checkUpkeep / #performUpkeep', () => {
- const performData = '0xc0ffeec0ffee'
- const multiplier = BigNumber.from(10)
- const flatFee = BigNumber.from('100000') //0.1 LINK
- const callGasPrice = 1
-
- it('uses the same minimum balance calculation [ @skip-coverage ]', async () => {
- await registry
- .connect(owner)
- .setConfig(
- paymentPremiumPPB,
- flatFee,
- blockCountPerTurn,
- maxCheckGas,
- stalenessSeconds,
- multiplier,
- fallbackGasPrice,
- fallbackLinkPrice,
- )
- await linkToken.connect(owner).approve(registry.address, toWei('100'))
-
- const tx1 = await registry
- .connect(owner)
- .registerUpkeep(
- mock.address,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- )
- const upkeepID1 = await getUpkeepID(tx1)
- const tx2 = await registry
- .connect(owner)
- .registerUpkeep(
- mock.address,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- )
- const upkeepID2 = await getUpkeepID(tx2)
- await mock.setCanCheck(true)
- await mock.setCanPerform(true)
- // upkeep 1 is underfunded, 2 is funded
- const minBalance1 = (await registry.getMaxPaymentForGas(executeGas)).sub(
- 1,
- )
- const minBalance2 = await registry.getMaxPaymentForGas(executeGas)
- await registry.connect(owner).addFunds(upkeepID1, minBalance1)
- await registry.connect(owner).addFunds(upkeepID2, minBalance2)
- // upkeep 1 check should revert, 2 should succeed
- await evmRevert(
- registry
- .connect(zeroAddress)
- .callStatic.checkUpkeep(upkeepID1, await keeper1.getAddress(), {
- gasPrice: callGasPrice,
- }),
- )
- await registry
- .connect(zeroAddress)
- .callStatic.checkUpkeep(upkeepID2, await keeper1.getAddress(), {
- gasPrice: callGasPrice,
- })
- // upkeep 1 perform should revert, 2 should succeed
- await evmRevert(
- registry
- .connect(keeper1)
- .performUpkeep(upkeepID1, performData, { gasLimit: extraGas }),
- 'insufficient funds',
- )
- await registry
- .connect(keeper1)
- .performUpkeep(upkeepID2, performData, { gasLimit: extraGas })
- })
- })
-
- describe('#getMinBalanceForUpkeep / #checkUpkeep', () => {
- it('calculates the minimum balance appropriately', async () => {
- const oneWei = BigNumber.from('1')
- await linkToken.connect(keeper1).approve(registry.address, toWei('100'))
- await mock.setCanCheck(true)
- await mock.setCanPerform(true)
- const minBalance = await registry.getMinBalanceForUpkeep(id)
- const tooLow = minBalance.sub(oneWei)
- await registry.connect(keeper1).addFunds(id, tooLow)
- await evmRevert(
- registry
- .connect(zeroAddress)
- .callStatic.checkUpkeep(id, await keeper1.getAddress()),
- 'insufficient funds',
- )
- await registry.connect(keeper1).addFunds(id, oneWei)
- await registry
- .connect(zeroAddress)
- .callStatic.checkUpkeep(id, await keeper1.getAddress())
- })
- })
-})
diff --git a/contracts/test/v0.7/Operator.test.ts b/contracts/test/v0.7/Operator.test.ts
deleted file mode 100644
index 4af846576b3..00000000000
--- a/contracts/test/v0.7/Operator.test.ts
+++ /dev/null
@@ -1,3819 +0,0 @@
-import { ethers } from 'hardhat'
-import {
- publicAbi,
- toBytes32String,
- toWei,
- stringToBytes,
- increaseTime5Minutes,
- getLog,
-} from '../test-helpers/helpers'
-import { assert, expect } from 'chai'
-import {
- BigNumber,
- constants,
- Contract,
- ContractFactory,
- ContractReceipt,
- ContractTransaction,
- Signer,
-} from 'ethers'
-import { getUsers, Roles } from '../test-helpers/setup'
-import { bigNumEquals, evmRevert } from '../test-helpers/matchers'
-import type { providers } from 'ethers'
-import {
- convertCancelParams,
- convertCancelByRequesterParams,
- convertFufillParams,
- convertFulfill2Params,
- decodeRunRequest,
- encodeOracleRequest,
- encodeRequestOracleData,
- RunRequest,
-} from '../test-helpers/oracle'
-
-let v7ConsumerFactory: ContractFactory
-let basicConsumerFactory: ContractFactory
-let multiWordConsumerFactory: ContractFactory
-let gasGuzzlingConsumerFactory: ContractFactory
-let getterSetterFactory: ContractFactory
-let maliciousRequesterFactory: ContractFactory
-let maliciousConsumerFactory: ContractFactory
-let maliciousMultiWordConsumerFactory: ContractFactory
-let operatorFactory: ContractFactory
-let forwarderFactory: ContractFactory
-let linkTokenFactory: ContractFactory
-const zeroAddress = ethers.constants.AddressZero
-
-let roles: Roles
-
-before(async () => {
- const users = await getUsers()
-
- roles = users.roles
- v7ConsumerFactory = await ethers.getContractFactory(
- 'src/v0.7/tests/Consumer.sol:Consumer',
- )
- basicConsumerFactory = await ethers.getContractFactory(
- 'src/v0.6/tests/BasicConsumer.sol:BasicConsumer',
- )
- multiWordConsumerFactory = await ethers.getContractFactory(
- 'src/v0.7/tests/MultiWordConsumer.sol:MultiWordConsumer',
- )
- gasGuzzlingConsumerFactory = await ethers.getContractFactory(
- 'src/v0.6/tests/GasGuzzlingConsumer.sol:GasGuzzlingConsumer',
- )
- getterSetterFactory = await ethers.getContractFactory(
- 'src/v0.4/tests/GetterSetter.sol:GetterSetter',
- )
- maliciousRequesterFactory = await ethers.getContractFactory(
- 'src/v0.4/tests/MaliciousRequester.sol:MaliciousRequester',
- )
- maliciousConsumerFactory = await ethers.getContractFactory(
- 'src/v0.4/tests/MaliciousConsumer.sol:MaliciousConsumer',
- )
- maliciousMultiWordConsumerFactory = await ethers.getContractFactory(
- 'src/v0.6/tests/MaliciousMultiWordConsumer.sol:MaliciousMultiWordConsumer',
- )
- operatorFactory = await ethers.getContractFactory(
- 'src/v0.7/Operator.sol:Operator',
- )
- forwarderFactory = await ethers.getContractFactory(
- 'src/v0.7/AuthorizedForwarder.sol:AuthorizedForwarder',
- )
- linkTokenFactory = await ethers.getContractFactory(
- 'src/v0.4/LinkToken.sol:LinkToken',
- )
-})
-
-describe('Operator', () => {
- let fHash: string
- let specId: string
- let to: string
- let link: Contract
- let operator: Contract
- let forwarder1: Contract
- let forwarder2: Contract
- let owner: Signer
-
- beforeEach(async () => {
- fHash = getterSetterFactory.interface.getSighash('requestedBytes32')
- specId =
- '0x4c7b7ffb66b344fbaa64995af81e355a00000000000000000000000000000000'
- to = '0x80e29acb842498fe6591f020bd82766dce619d43'
- link = await linkTokenFactory.connect(roles.defaultAccount).deploy()
- owner = roles.defaultAccount
- operator = await operatorFactory
- .connect(owner)
- .deploy(link.address, await owner.getAddress())
- await operator
- .connect(roles.defaultAccount)
- .setAuthorizedSenders([await roles.oracleNode.getAddress()])
- })
-
- it('has a limited public interface [ @skip-coverage ]', () => {
- publicAbi(operator, [
- 'acceptAuthorizedReceivers',
- 'acceptOwnableContracts',
- 'cancelOracleRequest',
- 'cancelOracleRequestByRequester',
- 'distributeFunds',
- 'fulfillOracleRequest',
- 'fulfillOracleRequest2',
- 'getAuthorizedSenders',
- 'getChainlinkToken',
- 'getExpiryTime',
- 'isAuthorizedSender',
- 'onTokenTransfer',
- 'operatorRequest',
- 'oracleRequest',
- 'ownerForward',
- 'ownerTransferAndCall',
- 'setAuthorizedSenders',
- 'setAuthorizedSendersOn',
- 'transferOwnableContracts',
- 'typeAndVersion',
- 'withdraw',
- 'withdrawable',
- // Ownable methods:
- 'acceptOwnership',
- 'owner',
- 'transferOwnership',
- ])
- })
-
- describe('#typeAndVersion', () => {
- it('describes the operator', async () => {
- assert.equal(await operator.typeAndVersion(), 'Operator 1.0.0')
- })
- })
-
- describe('#transferOwnableContracts', () => {
- beforeEach(async () => {
- forwarder1 = await forwarderFactory
- .connect(owner)
- .deploy(link.address, operator.address, zeroAddress, '0x')
- forwarder2 = await forwarderFactory
- .connect(owner)
- .deploy(link.address, operator.address, zeroAddress, '0x')
- })
-
- describe('being called by the owner', () => {
- it('cannot transfer to self', async () => {
- await evmRevert(
- operator
- .connect(owner)
- .transferOwnableContracts([forwarder1.address], operator.address),
- 'Cannot transfer to self',
- )
- })
-
- it('emits an ownership transfer request event', async () => {
- const tx = await operator
- .connect(owner)
- .transferOwnableContracts(
- [forwarder1.address, forwarder2.address],
- await roles.oracleNode1.getAddress(),
- )
- const receipt = await tx.wait()
- assert.equal(receipt?.events?.length, 2)
- const log1 = receipt?.events?.[0]
- assert.equal(log1?.event, 'OwnershipTransferRequested')
- assert.equal(log1?.address, forwarder1.address)
- assert.equal(log1?.args?.[0], operator.address)
- assert.equal(log1?.args?.[1], await roles.oracleNode1.getAddress())
- const log2 = receipt?.events?.[1]
- assert.equal(log2?.event, 'OwnershipTransferRequested')
- assert.equal(log2?.address, forwarder2.address)
- assert.equal(log2?.args?.[0], operator.address)
- assert.equal(log2?.args?.[1], await roles.oracleNode1.getAddress())
- })
- })
-
- describe('being called by a non-owner', () => {
- it('reverts with message', async () => {
- await evmRevert(
- operator
- .connect(roles.stranger)
- .transferOwnableContracts(
- [forwarder1.address],
- await roles.oracleNode2.getAddress(),
- ),
- 'Only callable by owner',
- )
- })
- })
- })
-
- describe('#acceptOwnableContracts', () => {
- describe('being called by the owner', () => {
- let operator2: Contract
- let receipt: ContractReceipt
-
- beforeEach(async () => {
- operator2 = await operatorFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, await roles.defaultAccount.getAddress())
- forwarder1 = await forwarderFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, operator.address, zeroAddress, '0x')
- forwarder2 = await forwarderFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, operator.address, zeroAddress, '0x')
- await operator
- .connect(roles.defaultAccount)
- .transferOwnableContracts(
- [forwarder1.address, forwarder2.address],
- operator2.address,
- )
- const tx = await operator2
- .connect(roles.defaultAccount)
- .acceptOwnableContracts([forwarder1.address, forwarder2.address])
- receipt = await tx.wait()
- })
-
- it('sets the new owner on the forwarder', async () => {
- assert.equal(await forwarder1.owner(), operator2.address)
- })
-
- it('emits ownership transferred events', async () => {
- assert.equal(receipt?.events?.[0]?.event, 'OwnableContractAccepted')
- assert.equal(receipt?.events?.[0]?.args?.[0], forwarder1.address)
-
- assert.equal(receipt?.events?.[1]?.event, 'OwnershipTransferred')
- assert.equal(receipt?.events?.[1]?.address, forwarder1.address)
- assert.equal(receipt?.events?.[1]?.args?.[0], operator.address)
- assert.equal(receipt?.events?.[1]?.args?.[1], operator2.address)
-
- assert.equal(receipt?.events?.[2]?.event, 'OwnableContractAccepted')
- assert.equal(receipt?.events?.[2]?.args?.[0], forwarder2.address)
-
- assert.equal(receipt?.events?.[3]?.event, 'OwnershipTransferred')
- assert.equal(receipt?.events?.[3]?.address, forwarder2.address)
- assert.equal(receipt?.events?.[3]?.args?.[0], operator.address)
- assert.equal(receipt?.events?.[3]?.args?.[1], operator2.address)
- })
- })
-
- describe('being called by a non-owner authorized sender', () => {
- it('does not revert', async () => {
- await operator
- .connect(roles.defaultAccount)
- .setAuthorizedSenders([await roles.oracleNode1.getAddress()])
-
- await operator.connect(roles.oracleNode1).acceptOwnableContracts([])
- })
- })
-
- describe('being called by a non owner', () => {
- it('reverts with message', async () => {
- await evmRevert(
- operator
- .connect(roles.stranger)
- .acceptOwnableContracts([await roles.oracleNode2.getAddress()]),
- 'Cannot set authorized senders',
- )
- })
- })
- })
-
- describe('#distributeFunds', () => {
- describe('when called with empty arrays', () => {
- it('reverts with invalid array message', async () => {
- await evmRevert(
- operator.connect(roles.defaultAccount).distributeFunds([], []),
- 'Invalid array length(s)',
- )
- })
- })
-
- describe('when called with unequal array lengths', () => {
- it('reverts with invalid array message', async () => {
- const receivers = [
- await roles.oracleNode2.getAddress(),
- await roles.oracleNode3.getAddress(),
- ]
- const amounts = [1, 2, 3]
- await evmRevert(
- operator
- .connect(roles.defaultAccount)
- .distributeFunds(receivers, amounts),
- 'Invalid array length(s)',
- )
- })
- })
-
- describe('when called with not enough ETH', () => {
- it('reverts with subtraction overflow message', async () => {
- const amountToSend = toWei('2')
- const ethSent = toWei('1')
- await evmRevert(
- operator
- .connect(roles.defaultAccount)
- .distributeFunds(
- [await roles.oracleNode2.getAddress()],
- [amountToSend],
- {
- value: ethSent,
- },
- ),
- 'SafeMath: subtraction overflow',
- )
- })
- })
-
- describe('when called with too much ETH', () => {
- it('reverts with too much ETH message', async () => {
- const amountToSend = toWei('2')
- const ethSent = toWei('3')
- await evmRevert(
- operator
- .connect(roles.defaultAccount)
- .distributeFunds(
- [await roles.oracleNode2.getAddress()],
- [amountToSend],
- {
- value: ethSent,
- },
- ),
- 'Too much ETH sent',
- )
- })
- })
-
- describe('when called with correct values', () => {
- it('updates the balances', async () => {
- const node2BalanceBefore = await roles.oracleNode2.getBalance()
- const node3BalanceBefore = await roles.oracleNode3.getBalance()
- const receivers = [
- await roles.oracleNode2.getAddress(),
- await roles.oracleNode3.getAddress(),
- ]
- const sendNode2 = toWei('2')
- const sendNode3 = toWei('3')
- const totalAmount = toWei('5')
- const amounts = [sendNode2, sendNode3]
-
- await operator
- .connect(roles.defaultAccount)
- .distributeFunds(receivers, amounts, { value: totalAmount })
-
- const node2BalanceAfter = await roles.oracleNode2.getBalance()
- const node3BalanceAfter = await roles.oracleNode3.getBalance()
-
- assert.equal(
- node2BalanceAfter.sub(node2BalanceBefore).toString(),
- sendNode2.toString(),
- )
-
- assert.equal(
- node3BalanceAfter.sub(node3BalanceBefore).toString(),
- sendNode3.toString(),
- )
- })
- })
- })
-
- describe('#setAuthorizedSenders', () => {
- let newSenders: string[]
- let receipt: ContractReceipt
- describe('when called by the owner', () => {
- describe('setting 3 authorized senders', () => {
- beforeEach(async () => {
- newSenders = [
- await roles.oracleNode1.getAddress(),
- await roles.oracleNode2.getAddress(),
- await roles.oracleNode3.getAddress(),
- ]
- const tx = await operator
- .connect(roles.defaultAccount)
- .setAuthorizedSenders(newSenders)
- receipt = await tx.wait()
- })
-
- it('adds the authorized nodes', async () => {
- const authorizedSenders = await operator.getAuthorizedSenders()
- assert.equal(newSenders.length, authorizedSenders.length)
- for (let i = 0; i < authorizedSenders.length; i++) {
- assert.equal(authorizedSenders[i], newSenders[i])
- }
- })
-
- it('emits an event on the Operator', async () => {
- assert.equal(receipt.events?.length, 1)
-
- const encodedSenders1 = ethers.utils.defaultAbiCoder.encode(
- ['address[]', 'address'],
- [newSenders, await roles.defaultAccount.getAddress()],
- )
-
- const responseEvent1 = receipt.events?.[0]
- assert.equal(responseEvent1?.event, 'AuthorizedSendersChanged')
- assert.equal(responseEvent1?.data, encodedSenders1)
- })
-
- it('replaces the authorized nodes', async () => {
- const originalAuthorization = await operator
- .connect(roles.defaultAccount)
- .isAuthorizedSender(await roles.oracleNode.getAddress())
- assert.isFalse(originalAuthorization)
- })
-
- after(async () => {
- await operator
- .connect(roles.defaultAccount)
- .setAuthorizedSenders([await roles.oracleNode.getAddress()])
- })
- })
-
- describe('setting 0 authorized senders', () => {
- beforeEach(async () => {
- newSenders = []
- })
-
- it('reverts with a minimum senders message', async () => {
- await evmRevert(
- operator
- .connect(roles.defaultAccount)
- .setAuthorizedSenders(newSenders),
- 'Must have at least 1 sender',
- )
- })
- })
- })
-
- describe('when called by an authorized sender', () => {
- beforeEach(async () => {
- newSenders = [await roles.oracleNode1.getAddress()]
- await operator
- .connect(roles.defaultAccount)
- .setAuthorizedSenders(newSenders)
- })
-
- it('succeeds', async () => {
- await operator
- .connect(roles.defaultAccount)
- .setAuthorizedSenders([await roles.stranger.getAddress()])
- })
- })
-
- describe('when called by a non-owner', () => {
- it('cannot add an authorized node', async () => {
- await evmRevert(
- operator
- .connect(roles.stranger)
- .setAuthorizedSenders([await roles.stranger.getAddress()]),
- 'Cannot set authorized senders',
- )
- })
- })
- })
-
- describe('#setAuthorizedSendersOn', () => {
- let newSenders: string[]
-
- beforeEach(async () => {
- await operator
- .connect(roles.defaultAccount)
- .setAuthorizedSenders([await roles.oracleNode1.getAddress()])
- newSenders = [
- await roles.oracleNode2.getAddress(),
- await roles.oracleNode3.getAddress(),
- ]
-
- forwarder1 = await forwarderFactory
- .connect(owner)
- .deploy(link.address, operator.address, zeroAddress, '0x')
- forwarder2 = await forwarderFactory
- .connect(owner)
- .deploy(link.address, operator.address, zeroAddress, '0x')
- })
-
- describe('when called by a non-authorized sender', () => {
- it('reverts', async () => {
- await evmRevert(
- operator
- .connect(roles.stranger)
- .setAuthorizedSendersOn(newSenders, [forwarder1.address]),
- 'Cannot set authorized senders',
- )
- })
- })
-
- describe('when called by an owner', () => {
- it('does not revert', async () => {
- await operator
- .connect(roles.defaultAccount)
- .setAuthorizedSendersOn(
- [forwarder1.address, forwarder2.address],
- newSenders,
- )
- })
- })
-
- describe('when called by an authorized sender', () => {
- it('does not revert', async () => {
- await operator
- .connect(roles.oracleNode1)
- .setAuthorizedSendersOn(
- [forwarder1.address, forwarder2.address],
- newSenders,
- )
- })
-
- it('does revert with 0 senders', async () => {
- await operator
- .connect(roles.oracleNode1)
- .setAuthorizedSendersOn(
- [forwarder1.address, forwarder2.address],
- newSenders,
- )
- })
-
- it('emits a log announcing the change and who made it', async () => {
- const targets = [forwarder1.address, forwarder2.address]
- const tx = await operator
- .connect(roles.oracleNode1)
- .setAuthorizedSendersOn(targets, newSenders)
-
- const receipt = await tx.wait()
- const encodedArgs = ethers.utils.defaultAbiCoder.encode(
- ['address[]', 'address[]', 'address'],
- [targets, newSenders, await roles.oracleNode1.getAddress()],
- )
-
- const event1 = receipt.events?.[0]
- assert.equal(event1?.event, 'TargetsUpdatedAuthorizedSenders')
- assert.equal(event1?.address, operator.address)
- assert.equal(event1?.data, encodedArgs)
- })
-
- it('updates the sender list on each of the targets', async () => {
- const tx = await operator
- .connect(roles.oracleNode1)
- .setAuthorizedSendersOn(
- [forwarder1.address, forwarder2.address],
- newSenders,
- )
-
- const receipt = await tx.wait()
- assert.equal(receipt.events?.length, 3, receipt.toString())
- const encodedSenders = ethers.utils.defaultAbiCoder.encode(
- ['address[]', 'address'],
- [newSenders, operator.address],
- )
-
- const event1 = receipt.events?.[1]
- assert.equal(event1?.event, 'AuthorizedSendersChanged')
- assert.equal(event1?.address, forwarder1.address)
- assert.equal(event1?.data, encodedSenders)
-
- const event2 = receipt.events?.[2]
- assert.equal(event2?.event, 'AuthorizedSendersChanged')
- assert.equal(event2?.address, forwarder2.address)
- assert.equal(event2?.data, encodedSenders)
- })
- })
- })
-
- describe('#acceptAuthorizedReceivers', () => {
- let newSenders: string[]
-
- describe('being called by the owner', () => {
- let operator2: Contract
- let receipt: ContractReceipt
-
- beforeEach(async () => {
- operator2 = await operatorFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, await roles.defaultAccount.getAddress())
- forwarder1 = await forwarderFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, operator.address, zeroAddress, '0x')
- forwarder2 = await forwarderFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, operator.address, zeroAddress, '0x')
- await operator
- .connect(roles.defaultAccount)
- .transferOwnableContracts(
- [forwarder1.address, forwarder2.address],
- operator2.address,
- )
- newSenders = [
- await roles.oracleNode2.getAddress(),
- await roles.oracleNode3.getAddress(),
- ]
-
- const tx = await operator2
- .connect(roles.defaultAccount)
- .acceptAuthorizedReceivers(
- [forwarder1.address, forwarder2.address],
- newSenders,
- )
- receipt = await tx.wait()
- })
-
- it('sets the new owner on the forwarder', async () => {
- assert.equal(await forwarder1.owner(), operator2.address)
- })
-
- it('emits ownership transferred events', async () => {
- assert.equal(receipt?.events?.[0]?.event, 'OwnableContractAccepted')
- assert.equal(receipt?.events?.[0]?.args?.[0], forwarder1.address)
-
- assert.equal(receipt?.events?.[1]?.event, 'OwnershipTransferred')
- assert.equal(receipt?.events?.[1]?.address, forwarder1.address)
- assert.equal(receipt?.events?.[1]?.args?.[0], operator.address)
- assert.equal(receipt?.events?.[1]?.args?.[1], operator2.address)
-
- assert.equal(receipt?.events?.[2]?.event, 'OwnableContractAccepted')
- assert.equal(receipt?.events?.[2]?.args?.[0], forwarder2.address)
-
- assert.equal(receipt?.events?.[3]?.event, 'OwnershipTransferred')
- assert.equal(receipt?.events?.[3]?.address, forwarder2.address)
- assert.equal(receipt?.events?.[3]?.args?.[0], operator.address)
- assert.equal(receipt?.events?.[3]?.args?.[1], operator2.address)
-
- assert.equal(
- receipt?.events?.[4]?.event,
- 'TargetsUpdatedAuthorizedSenders',
- )
-
- const encodedSenders = ethers.utils.defaultAbiCoder.encode(
- ['address[]', 'address'],
- [newSenders, operator2.address],
- )
- assert.equal(receipt?.events?.[5]?.event, 'AuthorizedSendersChanged')
- assert.equal(receipt?.events?.[5]?.address, forwarder1.address)
- assert.equal(receipt?.events?.[5]?.data, encodedSenders)
-
- assert.equal(receipt?.events?.[6]?.event, 'AuthorizedSendersChanged')
- assert.equal(receipt?.events?.[6]?.address, forwarder2.address)
- assert.equal(receipt?.events?.[6]?.data, encodedSenders)
- })
- })
-
- describe('being called by a non owner', () => {
- it('reverts with message', async () => {
- await evmRevert(
- operator
- .connect(roles.stranger)
- .acceptAuthorizedReceivers(
- [forwarder1.address, forwarder2.address],
- newSenders,
- ),
- 'Cannot set authorized senders',
- )
- })
- })
- })
-
- describe('#onTokenTransfer', () => {
- describe('when called from any address but the LINK token', () => {
- it('triggers the intended method', async () => {
- const callData = encodeOracleRequest(
- specId,
- to,
- fHash,
- 0,
- constants.HashZero,
- )
-
- await evmRevert(
- operator.onTokenTransfer(
- await roles.defaultAccount.getAddress(),
- 0,
- callData,
- ),
- )
- })
- })
-
- describe('when called from the LINK token', () => {
- it('triggers the intended method', async () => {
- const callData = encodeOracleRequest(
- specId,
- to,
- fHash,
- 0,
- constants.HashZero,
- )
-
- const tx = await link.transferAndCall(operator.address, 0, callData, {
- value: 0,
- })
- const receipt = await tx.wait()
-
- assert.equal(3, receipt.logs?.length)
- })
-
- describe('with no data', () => {
- it('reverts', async () => {
- await evmRevert(
- link.transferAndCall(operator.address, 0, '0x', {
- value: 0,
- }),
- )
- })
- })
- })
-
- describe('malicious requester', () => {
- let mock: Contract
- let requester: Contract
- const paymentAmount = toWei('1')
-
- beforeEach(async () => {
- mock = await maliciousRequesterFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, operator.address)
- await link.transfer(mock.address, paymentAmount)
- })
-
- it('cannot withdraw from oracle', async () => {
- const operatorOriginalBalance = await link.balanceOf(operator.address)
- const mockOriginalBalance = await link.balanceOf(mock.address)
-
- await evmRevert(mock.maliciousWithdraw())
-
- const operatorNewBalance = await link.balanceOf(operator.address)
- const mockNewBalance = await link.balanceOf(mock.address)
-
- bigNumEquals(operatorOriginalBalance, operatorNewBalance)
- bigNumEquals(mockNewBalance, mockOriginalBalance)
- })
-
- describe('if the requester tries to create a requestId for another contract', () => {
- it('the requesters ID will not match with the oracle contract', async () => {
- const tx = await mock.maliciousTargetConsumer(to)
- const receipt = await tx.wait()
-
- const mockRequestId = receipt.logs?.[0].data
- const requestId = (receipt.events?.[0].args as any).requestId
- assert.notEqual(mockRequestId, requestId)
- })
-
- it('the target requester can still create valid requests', async () => {
- requester = await basicConsumerFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, operator.address, specId)
- await link.transfer(requester.address, paymentAmount)
- await mock.maliciousTargetConsumer(requester.address)
- await requester.requestEthereumPrice('USD', paymentAmount)
- })
- })
- })
-
- it('does not allow recursive calls of onTokenTransfer', async () => {
- const requestPayload = encodeOracleRequest(
- specId,
- to,
- fHash,
- 0,
- constants.HashZero,
- )
-
- const ottSelector =
- operatorFactory.interface.getSighash('onTokenTransfer')
- const header =
- '000000000000000000000000c5fdf4076b8f3a5357c5e395ab970b5b54098fef' + // to
- '0000000000000000000000000000000000000000000000000000000000000539' + // amount
- '0000000000000000000000000000000000000000000000000000000000000060' + // offset
- '0000000000000000000000000000000000000000000000000000000000000136' // length
-
- const maliciousPayload = ottSelector + header + requestPayload.slice(2)
-
- await evmRevert(
- link.transferAndCall(operator.address, 0, maliciousPayload, {
- value: 0,
- }),
- )
- })
- })
-
- describe('#oracleRequest', () => {
- describe('when called through the LINK token', () => {
- const paid = 100
- let log: providers.Log | undefined
- let receipt: providers.TransactionReceipt
-
- beforeEach(async () => {
- const args = encodeOracleRequest(
- specId,
- to,
- fHash,
- 1,
- constants.HashZero,
- )
- const tx = await link.transferAndCall(operator.address, paid, args)
- receipt = await tx.wait()
- assert.equal(3, receipt?.logs?.length)
-
- log = receipt.logs && receipt.logs[2]
- })
-
- it('logs an event', async () => {
- assert.equal(operator.address, log?.address)
-
- assert.equal(log?.topics?.[1], specId)
-
- const req = decodeRunRequest(receipt?.logs?.[2])
- assert.equal(await roles.defaultAccount.getAddress(), req.requester)
- bigNumEquals(paid, req.payment)
- })
-
- it('uses the expected event signature', async () => {
- // If updating this test, be sure to update models.RunLogTopic.
- const eventSignature =
- '0xd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c65'
- assert.equal(eventSignature, log?.topics?.[0])
- })
-
- it('does not allow the same requestId to be used twice', async () => {
- const args2 = encodeOracleRequest(
- specId,
- to,
- fHash,
- 1,
- constants.HashZero,
- )
- await evmRevert(link.transferAndCall(operator.address, paid, args2))
- })
-
- describe('when called with a payload less than 2 EVM words + function selector', () => {
- it('throws an error', async () => {
- const funcSelector =
- operatorFactory.interface.getSighash('oracleRequest')
- const maliciousData =
- funcSelector +
- '0000000000000000000000000000000000000000000000000000000000000000000'
- await evmRevert(
- link.transferAndCall(operator.address, paid, maliciousData),
- )
- })
- })
-
- describe('when called with a payload between 3 and 9 EVM words', () => {
- it('throws an error', async () => {
- const funcSelector =
- operatorFactory.interface.getSighash('oracleRequest')
- const maliciousData =
- funcSelector +
- '000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001'
- await evmRevert(
- link.transferAndCall(operator.address, paid, maliciousData),
- )
- })
- })
- })
-
- describe('when dataVersion is higher than 255', () => {
- it('throws an error', async () => {
- const paid = 100
- const args = encodeOracleRequest(
- specId,
- to,
- fHash,
- 1,
- constants.HashZero,
- 256,
- )
- await evmRevert(link.transferAndCall(operator.address, paid, args))
- })
- })
-
- describe('when not called through the LINK token', () => {
- it('reverts', async () => {
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .oracleRequest(
- '0x0000000000000000000000000000000000000000',
- 0,
- specId,
- to,
- fHash,
- 1,
- 1,
- '0x',
- ),
- )
- })
- })
- })
-
- describe('#operatorRequest', () => {
- describe('when called through the LINK token', () => {
- const paid = 100
- let log: providers.Log | undefined
- let receipt: providers.TransactionReceipt
-
- beforeEach(async () => {
- const args = encodeRequestOracleData(
- specId,
- fHash,
- 1,
- constants.HashZero,
- )
- const tx = await link.transferAndCall(operator.address, paid, args)
- receipt = await tx.wait()
- assert.equal(3, receipt?.logs?.length)
-
- log = receipt.logs && receipt.logs[2]
- })
-
- it('logs an event', async () => {
- assert.equal(operator.address, log?.address)
-
- assert.equal(log?.topics?.[1], specId)
-
- const req = decodeRunRequest(receipt?.logs?.[2])
- assert.equal(await roles.defaultAccount.getAddress(), req.requester)
- bigNumEquals(paid, req.payment)
- })
-
- it('uses the expected event signature', async () => {
- // If updating this test, be sure to update models.RunLogTopic.
- const eventSignature =
- '0xd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c65'
- assert.equal(eventSignature, log?.topics?.[0])
- })
-
- it('does not allow the same requestId to be used twice', async () => {
- const args2 = encodeRequestOracleData(
- specId,
- fHash,
- 1,
- constants.HashZero,
- )
- await evmRevert(link.transferAndCall(operator.address, paid, args2))
- })
-
- describe('when called with a payload less than 2 EVM words + function selector', () => {
- it('throws an error', async () => {
- const funcSelector =
- operatorFactory.interface.getSighash('oracleRequest')
- const maliciousData =
- funcSelector +
- '0000000000000000000000000000000000000000000000000000000000000000000'
- await evmRevert(
- link.transferAndCall(operator.address, paid, maliciousData),
- )
- })
- })
-
- describe('when called with a payload between 3 and 9 EVM words', () => {
- it('throws an error', async () => {
- const funcSelector =
- operatorFactory.interface.getSighash('oracleRequest')
- const maliciousData =
- funcSelector +
- '000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001'
- await evmRevert(
- link.transferAndCall(operator.address, paid, maliciousData),
- )
- })
- })
- })
-
- describe('when dataVersion is higher than 255', () => {
- it('throws an error', async () => {
- const paid = 100
- const args = encodeRequestOracleData(
- specId,
- fHash,
- 1,
- constants.HashZero,
- 256,
- )
- await evmRevert(link.transferAndCall(operator.address, paid, args))
- })
- })
-
- describe('when not called through the LINK token', () => {
- it('reverts', async () => {
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .oracleRequest(
- '0x0000000000000000000000000000000000000000',
- 0,
- specId,
- to,
- fHash,
- 1,
- 1,
- '0x',
- ),
- )
- })
- })
- })
-
- describe('#fulfillOracleRequest', () => {
- const response = 'Hi Mom!'
- let maliciousRequester: Contract
- let basicConsumer: Contract
- let maliciousConsumer: Contract
- let gasGuzzlingConsumer: Contract
- let request: ReturnType
-
- describe('gas guzzling consumer [ @skip-coverage ]', () => {
- beforeEach(async () => {
- gasGuzzlingConsumer = await gasGuzzlingConsumerFactory
- .connect(roles.consumer)
- .deploy(link.address, operator.address, specId)
- const paymentAmount = toWei('1')
- await link.transfer(gasGuzzlingConsumer.address, paymentAmount)
- const tx =
- await gasGuzzlingConsumer.gassyRequestEthereumPrice(paymentAmount)
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
- })
-
- it('emits an OracleResponse event', async () => {
- const fulfillParams = convertFufillParams(request, response)
- const tx = await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...fulfillParams)
- const receipt = await tx.wait()
- assert.equal(receipt.events?.length, 1)
- const responseEvent = receipt.events?.[0]
- assert.equal(responseEvent?.event, 'OracleResponse')
- assert.equal(responseEvent?.args?.[0], request.requestId)
- })
- })
-
- describe('cooperative consumer', () => {
- beforeEach(async () => {
- basicConsumer = await basicConsumerFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, operator.address, specId)
- const paymentAmount = toWei('1')
- await link.transfer(basicConsumer.address, paymentAmount)
- const currency = 'USD'
- const tx = await basicConsumer.requestEthereumPrice(
- currency,
- paymentAmount,
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
- })
-
- describe('when called by an unauthorized node', () => {
- beforeEach(async () => {
- assert.equal(
- false,
- await operator.isAuthorizedSender(
- await roles.stranger.getAddress(),
- ),
- )
- })
-
- it('raises an error', async () => {
- await evmRevert(
- operator
- .connect(roles.stranger)
- .fulfillOracleRequest(...convertFufillParams(request, response)),
- )
- })
- })
-
- describe('when fulfilled with the wrong function', () => {
- let v7Consumer
- beforeEach(async () => {
- v7Consumer = await v7ConsumerFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, operator.address, specId)
- const paymentAmount = toWei('1')
- await link.transfer(v7Consumer.address, paymentAmount)
- const currency = 'USD'
- const tx = await v7Consumer.requestEthereumPrice(
- currency,
- paymentAmount,
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
- })
-
- it('raises an error', async () => {
- await evmRevert(
- operator
- .connect(roles.stranger)
- .fulfillOracleRequest(...convertFufillParams(request, response)),
- )
- })
- })
-
- describe('when called by an authorized node', () => {
- it('raises an error if the request ID does not exist', async () => {
- request.requestId = ethers.utils.formatBytes32String('DOESNOTEXIST')
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...convertFufillParams(request, response)),
- )
- })
-
- it('sets the value on the requested contract', async () => {
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...convertFufillParams(request, response))
-
- const currentValue = await basicConsumer.currentPrice()
- assert.equal(response, ethers.utils.parseBytes32String(currentValue))
- })
-
- it('emits an OracleResponse event', async () => {
- const fulfillParams = convertFufillParams(request, response)
- const tx = await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...fulfillParams)
- const receipt = await tx.wait()
- assert.equal(receipt.events?.length, 3)
- const responseEvent = receipt.events?.[0]
- assert.equal(responseEvent?.event, 'OracleResponse')
- assert.equal(responseEvent?.args?.[0], request.requestId)
- })
-
- it('does not allow a request to be fulfilled twice', async () => {
- const response2 = response + ' && Hello World!!'
-
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...convertFufillParams(request, response))
-
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...convertFufillParams(request, response2)),
- )
-
- const currentValue = await basicConsumer.currentPrice()
- assert.equal(response, ethers.utils.parseBytes32String(currentValue))
- })
- })
-
- describe('when the oracle does not provide enough gas', () => {
- // if updating this defaultGasLimit, be sure it matches with the
- // defaultGasLimit specified in store/tx_manager.go
- const defaultGasLimit = 500000
-
- beforeEach(async () => {
- bigNumEquals(0, await operator.withdrawable())
- })
-
- it('does not allow the oracle to withdraw the payment', async () => {
- await evmRevert(
- operator.connect(roles.oracleNode).fulfillOracleRequest(
- ...convertFufillParams(request, response, {
- gasLimit: 70000,
- }),
- ),
- )
-
- bigNumEquals(0, await operator.withdrawable())
- })
-
- it(`${defaultGasLimit} is enough to pass the gas requirement`, async () => {
- await operator.connect(roles.oracleNode).fulfillOracleRequest(
- ...convertFufillParams(request, response, {
- gasLimit: defaultGasLimit,
- }),
- )
-
- bigNumEquals(request.payment, await operator.withdrawable())
- })
- })
- })
-
- describe('with a malicious requester', () => {
- beforeEach(async () => {
- const paymentAmount = toWei('1')
- maliciousRequester = await maliciousRequesterFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, operator.address)
- await link.transfer(maliciousRequester.address, paymentAmount)
- })
-
- it('cannot cancel before the expiration', async () => {
- await evmRevert(
- maliciousRequester.maliciousRequestCancel(
- specId,
- ethers.utils.toUtf8Bytes('doesNothing(bytes32,bytes32)'),
- ),
- )
- })
-
- it('cannot call functions on the LINK token through callbacks', async () => {
- await evmRevert(
- maliciousRequester.request(
- specId,
- link.address,
- ethers.utils.toUtf8Bytes('transfer(address,uint256)'),
- ),
- )
- })
-
- describe('requester lies about amount of LINK sent', () => {
- it('the oracle uses the amount of LINK actually paid', async () => {
- const tx = await maliciousRequester.maliciousPrice(specId)
- const receipt = await tx.wait()
- const req = decodeRunRequest(receipt.logs?.[3])
-
- assert(toWei('1').eq(req.payment))
- })
- })
- })
-
- describe('with a malicious consumer', () => {
- const paymentAmount = toWei('1')
-
- beforeEach(async () => {
- maliciousConsumer = await maliciousConsumerFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, operator.address)
- await link.transfer(maliciousConsumer.address, paymentAmount)
- })
-
- describe('fails during fulfillment', () => {
- beforeEach(async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('assertFail(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
- })
-
- it('allows the oracle node to receive their payment', async () => {
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...convertFufillParams(request, response))
-
- const balance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(balance, 0)
-
- await operator
- .connect(roles.defaultAccount)
- .withdraw(await roles.oracleNode.getAddress(), paymentAmount)
-
- const newBalance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(paymentAmount, newBalance)
- })
-
- it("can't fulfill the data again", async () => {
- const response2 = 'hack the planet 102'
-
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...convertFufillParams(request, response))
-
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...convertFufillParams(request, response2)),
- )
- })
- })
-
- describe('calls selfdestruct', () => {
- beforeEach(async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('doesNothing(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
- await maliciousConsumer.remove()
- })
-
- it('allows the oracle node to receive their payment', async () => {
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...convertFufillParams(request, response))
-
- const balance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(balance, 0)
-
- await operator
- .connect(roles.defaultAccount)
- .withdraw(await roles.oracleNode.getAddress(), paymentAmount)
- const newBalance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(paymentAmount, newBalance)
- })
- })
-
- describe('request is canceled during fulfillment', () => {
- beforeEach(async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('cancelRequestOnFulfill(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
-
- bigNumEquals(0, await link.balanceOf(maliciousConsumer.address))
- })
-
- it('allows the oracle node to receive their payment', async () => {
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...convertFufillParams(request, response))
-
- const mockBalance = await link.balanceOf(maliciousConsumer.address)
- bigNumEquals(mockBalance, 0)
-
- const balance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(balance, 0)
-
- await operator
- .connect(roles.defaultAccount)
- .withdraw(await roles.oracleNode.getAddress(), paymentAmount)
- const newBalance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(paymentAmount, newBalance)
- })
-
- it("can't fulfill the data again", async () => {
- const response2 = 'hack the planet 102'
-
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...convertFufillParams(request, response))
-
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...convertFufillParams(request, response2)),
- )
- })
- })
-
- describe('tries to steal funds from node', () => {
- it('is not successful with call', async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('stealEthCall(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
-
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...convertFufillParams(request, response))
-
- bigNumEquals(
- 0,
- await ethers.provider.getBalance(maliciousConsumer.address),
- )
- })
-
- it('is not successful with send', async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('stealEthSend(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
-
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...convertFufillParams(request, response))
- bigNumEquals(
- 0,
- await ethers.provider.getBalance(maliciousConsumer.address),
- )
- })
-
- it('is not successful with transfer', async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('stealEthTransfer(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
-
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...convertFufillParams(request, response))
- bigNumEquals(
- 0,
- await ethers.provider.getBalance(maliciousConsumer.address),
- )
- })
- })
-
- describe('when calling an owned contract', () => {
- beforeEach(async () => {
- forwarder1 = await forwarderFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, link.address, operator.address, '0x')
- })
-
- it('does not allow the contract to callback to owned contracts', async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('whatever(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- let request = decodeRunRequest(receipt.logs?.[3])
- let responseParams = convertFufillParams(request, response)
- // set the params to be the owned address
- responseParams[2] = forwarder1.address
-
- //accept ownership
- await operator
- .connect(roles.defaultAccount)
- .acceptOwnableContracts([forwarder1.address])
-
- // do the thing
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...responseParams),
- 'Cannot call owned contract',
- )
-
- await operator
- .connect(roles.defaultAccount)
- .transferOwnableContracts([forwarder1.address], link.address)
- //reverts for a different reason after transferring ownership
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...responseParams),
- 'Params do not match request ID',
- )
- })
- })
- })
- })
-
- describe('#fulfillOracleRequest2', () => {
- describe('single word fulfils', () => {
- const response = 'Hi mom!'
- const responseTypes = ['bytes32']
- const responseValues = [toBytes32String(response)]
- let maliciousRequester: Contract
- let basicConsumer: Contract
- let maliciousConsumer: Contract
- let gasGuzzlingConsumer: Contract
- let request: ReturnType
- let request2: ReturnType
-
- describe('gas guzzling consumer [ @skip-coverage ]', () => {
- beforeEach(async () => {
- gasGuzzlingConsumer = await gasGuzzlingConsumerFactory
- .connect(roles.consumer)
- .deploy(link.address, operator.address, specId)
- const paymentAmount = toWei('1')
- await link.transfer(gasGuzzlingConsumer.address, paymentAmount)
- const tx =
- await gasGuzzlingConsumer.gassyRequestEthereumPrice(paymentAmount)
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
- })
-
- it('emits an OracleResponse2 event', async () => {
- const fulfillParams = convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- )
- const tx = await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(...fulfillParams)
- const receipt = await tx.wait()
- assert.equal(receipt.events?.length, 1)
- const responseEvent = receipt.events?.[0]
- assert.equal(responseEvent?.event, 'OracleResponse')
- assert.equal(responseEvent?.args?.[0], request.requestId)
- })
- })
-
- describe('cooperative consumer', () => {
- beforeEach(async () => {
- basicConsumer = await basicConsumerFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, operator.address, specId)
- const paymentAmount = toWei('1')
- await link.transfer(basicConsumer.address, paymentAmount)
- const currency = 'USD'
- const tx = await basicConsumer.requestEthereumPrice(
- currency,
- paymentAmount,
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
- })
-
- describe('when called by an unauthorized node', () => {
- beforeEach(async () => {
- assert.equal(
- false,
- await operator.isAuthorizedSender(
- await roles.stranger.getAddress(),
- ),
- )
- })
-
- it('raises an error', async () => {
- await evmRevert(
- operator
- .connect(roles.stranger)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- ),
- )
- })
- })
-
- describe('when called by an authorized node', () => {
- it('raises an error if the request ID does not exist', async () => {
- request.requestId = ethers.utils.formatBytes32String('DOESNOTEXIST')
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- ),
- )
- })
-
- it('sets the value on the requested contract', async () => {
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- const currentValue = await basicConsumer.currentPrice()
- assert.equal(
- response,
- ethers.utils.parseBytes32String(currentValue),
- )
- })
-
- it('emits an OracleResponse2 event', async () => {
- const fulfillParams = convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- )
- const tx = await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(...fulfillParams)
- const receipt = await tx.wait()
- assert.equal(receipt.events?.length, 3)
- const responseEvent = receipt.events?.[0]
- assert.equal(responseEvent?.event, 'OracleResponse')
- assert.equal(responseEvent?.args?.[0], request.requestId)
- })
-
- it('does not allow a request to be fulfilled twice', async () => {
- const response2 = response + ' && Hello World!!'
- const response2Values = [toBytes32String(response2)]
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- response2Values,
- ),
- ),
- )
-
- const currentValue = await basicConsumer.currentPrice()
- assert.equal(
- response,
- ethers.utils.parseBytes32String(currentValue),
- )
- })
- })
-
- describe('when the oracle does not provide enough gas', () => {
- // if updating this defaultGasLimit, be sure it matches with the
- // defaultGasLimit specified in store/tx_manager.go
- const defaultGasLimit = 500000
-
- beforeEach(async () => {
- bigNumEquals(0, await operator.withdrawable())
- })
-
- it('does not allow the oracle to withdraw the payment', async () => {
- await evmRevert(
- operator.connect(roles.oracleNode).fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- {
- gasLimit: 70000,
- },
- ),
- ),
- )
-
- bigNumEquals(0, await operator.withdrawable())
- })
-
- it(`${defaultGasLimit} is enough to pass the gas requirement`, async () => {
- await operator.connect(roles.oracleNode).fulfillOracleRequest2(
- ...convertFulfill2Params(request, responseTypes, responseValues, {
- gasLimit: defaultGasLimit,
- }),
- )
-
- bigNumEquals(request.payment, await operator.withdrawable())
- })
- })
- })
-
- describe('with a malicious oracle', () => {
- beforeEach(async () => {
- // Setup Request 1
- basicConsumer = await basicConsumerFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, operator.address, specId)
- const paymentAmount = toWei('1')
- await link.transfer(basicConsumer.address, paymentAmount)
- const currency = 'USD'
- const tx = await basicConsumer.requestEthereumPrice(
- currency,
- paymentAmount,
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
- // Setup Request 2
- await link.transfer(basicConsumer.address, paymentAmount)
- const tx2 = await basicConsumer.requestEthereumPrice(
- currency,
- paymentAmount,
- )
- const receipt2 = await tx2.wait()
- request2 = decodeRunRequest(receipt2.logs?.[3])
- })
-
- it('cannot spoof requestId in response data by moving calldata offset', async () => {
- // Malicious Oracle Fulfill 2
- const functionSelector = '0x6ae0bc76' // fulfillOracleRequest2
- const dataOffset =
- '0000000000000000000000000000000000000000000000000000000000000100' // Moved to 0x0124
- const fillerBytes =
- '0000000000000000000000000000000000000000000000000000000000000000'
- const expectedCalldataStart = request.requestId.slice(2) // 0xe4, this is checked against requestId in validateMultiWordResponseId
- const dataSize =
- '0000000000000000000000000000000000000000000000000000000000000040' // Two 32 byte blocks
- const maliciousCalldataId = request2.requestId.slice(2) // 0x0124, set to a different requestId
- const calldataData =
- '1122334455667788991122334455667788991122334455667788991122334455' // some garbage value as response value
-
- const data =
- functionSelector +
- /** Input Params - slice off 0x prefix and pad with 0's */
- request.requestId.slice(2) +
- request.payment.slice(2).padStart(64, '0') +
- request.callbackAddr.slice(2).padStart(64, '0') +
- request.callbackFunc.slice(2).padEnd(64, '0') +
- request.expiration.slice(2).padStart(64, '0') +
- // calldata "data"
- dataOffset +
- fillerBytes +
- expectedCalldataStart +
- dataSize +
- maliciousCalldataId +
- calldataData
-
- await evmRevert(
- operator.connect(roles.oracleNode).signer.sendTransaction({
- to: operator.address,
- data,
- }),
- )
- })
- })
-
- describe('with a malicious requester', () => {
- beforeEach(async () => {
- const paymentAmount = toWei('1')
- maliciousRequester = await maliciousRequesterFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, operator.address)
- await link.transfer(maliciousRequester.address, paymentAmount)
- })
-
- it('cannot cancel before the expiration', async () => {
- await evmRevert(
- maliciousRequester.maliciousRequestCancel(
- specId,
- ethers.utils.toUtf8Bytes('doesNothing(bytes32,bytes32)'),
- ),
- )
- })
-
- it('cannot call functions on the LINK token through callbacks', async () => {
- await evmRevert(
- maliciousRequester.request(
- specId,
- link.address,
- ethers.utils.toUtf8Bytes('transfer(address,uint256)'),
- ),
- )
- })
-
- describe('requester lies about amount of LINK sent', () => {
- it('the oracle uses the amount of LINK actually paid', async () => {
- const tx = await maliciousRequester.maliciousPrice(specId)
- const receipt = await tx.wait()
- const req = decodeRunRequest(receipt.logs?.[3])
-
- assert(toWei('1').eq(req.payment))
- })
- })
- })
-
- describe('with a malicious consumer', () => {
- const paymentAmount = toWei('1')
-
- beforeEach(async () => {
- maliciousConsumer = await maliciousConsumerFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, operator.address)
- await link.transfer(maliciousConsumer.address, paymentAmount)
- })
-
- describe('fails during fulfillment', () => {
- beforeEach(async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('assertFail(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
- })
-
- it('allows the oracle node to receive their payment', async () => {
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- const balance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(balance, 0)
-
- await operator
- .connect(roles.defaultAccount)
- .withdraw(await roles.oracleNode.getAddress(), paymentAmount)
-
- const newBalance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(paymentAmount, newBalance)
- })
-
- it("can't fulfill the data again", async () => {
- const response2 = 'hack the planet 102'
- const response2Values = [toBytes32String(response2)]
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- response2Values,
- ),
- ),
- )
- })
- })
-
- describe('calls selfdestruct', () => {
- beforeEach(async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('doesNothing(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
- await maliciousConsumer.remove()
- })
-
- it('allows the oracle node to receive their payment', async () => {
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- const balance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(balance, 0)
-
- await operator
- .connect(roles.defaultAccount)
- .withdraw(await roles.oracleNode.getAddress(), paymentAmount)
- const newBalance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(paymentAmount, newBalance)
- })
- })
-
- describe('request is canceled during fulfillment', () => {
- beforeEach(async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes(
- 'cancelRequestOnFulfill(bytes32,bytes32)',
- ),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
-
- bigNumEquals(0, await link.balanceOf(maliciousConsumer.address))
- })
-
- it('allows the oracle node to receive their payment', async () => {
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- const mockBalance = await link.balanceOf(maliciousConsumer.address)
- bigNumEquals(mockBalance, 0)
-
- const balance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(balance, 0)
-
- await operator
- .connect(roles.defaultAccount)
- .withdraw(await roles.oracleNode.getAddress(), paymentAmount)
- const newBalance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(paymentAmount, newBalance)
- })
-
- it("can't fulfill the data again", async () => {
- const response2 = 'hack the planet 102'
- const response2Values = [toBytes32String(response2)]
-
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- response2Values,
- ),
- ),
- )
- })
- })
-
- describe('tries to steal funds from node', () => {
- it('is not successful with call', async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('stealEthCall(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
-
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- bigNumEquals(
- 0,
- await ethers.provider.getBalance(maliciousConsumer.address),
- )
- })
-
- it('is not successful with send', async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('stealEthSend(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
-
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
- bigNumEquals(
- 0,
- await ethers.provider.getBalance(maliciousConsumer.address),
- )
- })
-
- it('is not successful with transfer', async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('stealEthTransfer(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
-
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
- bigNumEquals(
- 0,
- await ethers.provider.getBalance(maliciousConsumer.address),
- )
- })
- })
-
- describe('when calling an owned contract', () => {
- beforeEach(async () => {
- forwarder1 = await forwarderFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, link.address, operator.address, '0x')
- })
-
- it('does not allow the contract to callback to owned contracts', async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('whatever(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- let request = decodeRunRequest(receipt.logs?.[3])
- let responseParams = convertFufillParams(request, response)
- // set the params to be the owned address
- responseParams[2] = forwarder1.address
-
- //accept ownership
- await operator
- .connect(roles.defaultAccount)
- .acceptOwnableContracts([forwarder1.address])
-
- // do the thing
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(...responseParams),
- 'Cannot call owned contract',
- )
-
- await operator
- .connect(roles.defaultAccount)
- .transferOwnableContracts([forwarder1.address], link.address)
- //reverts for a different reason after transferring ownership
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...responseParams),
- 'Params do not match request ID',
- )
- })
- })
- })
- })
-
- describe('multi word fulfils', () => {
- describe('one bytes parameter', () => {
- const response =
- 'Lorem ipsum dolor sit amet, consectetur adipiscing elit.\
- Fusce euismod malesuada ligula, eget semper metus ultrices sit amet.'
- const responseTypes = ['bytes']
- const responseValues = [stringToBytes(response)]
- let maliciousRequester: Contract
- let multiConsumer: Contract
- let maliciousConsumer: Contract
- let gasGuzzlingConsumer: Contract
- let request: ReturnType
-
- describe('gas guzzling consumer [ @skip-coverage ]', () => {
- beforeEach(async () => {
- gasGuzzlingConsumer = await gasGuzzlingConsumerFactory
- .connect(roles.consumer)
- .deploy(link.address, operator.address, specId)
- const paymentAmount = toWei('1')
- await link.transfer(gasGuzzlingConsumer.address, paymentAmount)
- const tx =
- await gasGuzzlingConsumer.gassyMultiWordRequest(paymentAmount)
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
- })
-
- it('emits an OracleResponse2 event', async () => {
- const fulfillParams = convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- )
- const tx = await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(...fulfillParams)
- const receipt = await tx.wait()
- assert.equal(receipt.events?.length, 1)
- const responseEvent = receipt.events?.[0]
- assert.equal(responseEvent?.event, 'OracleResponse')
- assert.equal(responseEvent?.args?.[0], request.requestId)
- })
- })
-
- describe('cooperative consumer', () => {
- beforeEach(async () => {
- multiConsumer = await multiWordConsumerFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, operator.address, specId)
- const paymentAmount = toWei('1')
- await link.transfer(multiConsumer.address, paymentAmount)
- const currency = 'USD'
- const tx = await multiConsumer.requestEthereumPrice(
- currency,
- paymentAmount,
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
- })
-
- it("matches the consumer's request ID", async () => {
- const nonce = await multiConsumer.publicGetNextRequestCount()
- const tx = await multiConsumer.requestEthereumPrice('USD', 0)
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
- const packed = ethers.utils.solidityPack(
- ['address', 'uint256'],
- [multiConsumer.address, nonce],
- )
- const expected = ethers.utils.keccak256(packed)
- assert.equal(expected, request.requestId)
- })
-
- describe('when called by an unauthorized node', () => {
- beforeEach(async () => {
- assert.equal(
- false,
- await operator.isAuthorizedSender(
- await roles.stranger.getAddress(),
- ),
- )
- })
-
- it('raises an error', async () => {
- await evmRevert(
- operator
- .connect(roles.stranger)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- ),
- )
- })
- })
-
- describe('when called by an authorized node', () => {
- it('raises an error if the request ID does not exist', async () => {
- request.requestId =
- ethers.utils.formatBytes32String('DOESNOTEXIST')
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- ),
- )
- })
-
- it('sets the value on the requested contract', async () => {
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- const currentValue = await multiConsumer.currentPrice()
- assert.equal(response, ethers.utils.toUtf8String(currentValue))
- })
-
- it('emits an OracleResponse2 event', async () => {
- const fulfillParams = convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- )
- const tx = await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(...fulfillParams)
- const receipt = await tx.wait()
- assert.equal(receipt.events?.length, 3)
- const responseEvent = receipt.events?.[0]
- assert.equal(responseEvent?.event, 'OracleResponse')
- assert.equal(responseEvent?.args?.[0], request.requestId)
- })
-
- it('does not allow a request to be fulfilled twice', async () => {
- const response2 = response + ' && Hello World!!'
- const response2Values = [stringToBytes(response2)]
-
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- response2Values,
- ),
- ),
- )
-
- const currentValue = await multiConsumer.currentPrice()
- assert.equal(response, ethers.utils.toUtf8String(currentValue))
- })
- })
-
- describe('when the oracle does not provide enough gas', () => {
- // if updating this defaultGasLimit, be sure it matches with the
- // defaultGasLimit specified in store/tx_manager.go
- const defaultGasLimit = 500000
-
- beforeEach(async () => {
- bigNumEquals(0, await operator.withdrawable())
- })
-
- it('does not allow the oracle to withdraw the payment', async () => {
- await evmRevert(
- operator.connect(roles.oracleNode).fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- {
- gasLimit: 70000,
- },
- ),
- ),
- )
-
- bigNumEquals(0, await operator.withdrawable())
- })
-
- it(`${defaultGasLimit} is enough to pass the gas requirement`, async () => {
- await operator.connect(roles.oracleNode).fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- {
- gasLimit: defaultGasLimit,
- },
- ),
- )
-
- bigNumEquals(request.payment, await operator.withdrawable())
- })
- })
- })
-
- describe('with a malicious requester', () => {
- beforeEach(async () => {
- const paymentAmount = toWei('1')
- maliciousRequester = await maliciousRequesterFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, operator.address)
- await link.transfer(maliciousRequester.address, paymentAmount)
- })
-
- it('cannot cancel before the expiration', async () => {
- await evmRevert(
- maliciousRequester.maliciousRequestCancel(
- specId,
- ethers.utils.toUtf8Bytes('doesNothing(bytes32,bytes32)'),
- ),
- )
- })
-
- it('cannot call functions on the LINK token through callbacks', async () => {
- await evmRevert(
- maliciousRequester.request(
- specId,
- link.address,
- ethers.utils.toUtf8Bytes('transfer(address,uint256)'),
- ),
- )
- })
-
- describe('requester lies about amount of LINK sent', () => {
- it('the oracle uses the amount of LINK actually paid', async () => {
- const tx = await maliciousRequester.maliciousPrice(specId)
- const receipt = await tx.wait()
- const req = decodeRunRequest(receipt.logs?.[3])
-
- assert(toWei('1').eq(req.payment))
- })
- })
- })
-
- describe('with a malicious consumer', () => {
- const paymentAmount = toWei('1')
-
- beforeEach(async () => {
- maliciousConsumer = await maliciousMultiWordConsumerFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, operator.address)
- await link.transfer(maliciousConsumer.address, paymentAmount)
- })
-
- describe('fails during fulfillment', () => {
- beforeEach(async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('assertFail(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
- })
-
- it('allows the oracle node to receive their payment', async () => {
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- const balance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(balance, 0)
-
- await operator
- .connect(roles.defaultAccount)
- .withdraw(await roles.oracleNode.getAddress(), paymentAmount)
-
- const newBalance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(paymentAmount, newBalance)
- })
-
- it("can't fulfill the data again", async () => {
- const response2 = 'hack the planet 102'
- const response2Values = [stringToBytes(response2)]
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- response2Values,
- ),
- ),
- )
- })
- })
-
- describe('calls selfdestruct', () => {
- beforeEach(async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('doesNothing(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
- await maliciousConsumer.remove()
- })
-
- it('allows the oracle node to receive their payment', async () => {
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- const balance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(balance, 0)
-
- await operator
- .connect(roles.defaultAccount)
- .withdraw(await roles.oracleNode.getAddress(), paymentAmount)
- const newBalance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(paymentAmount, newBalance)
- })
- })
-
- describe('request is canceled during fulfillment', () => {
- beforeEach(async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes(
- 'cancelRequestOnFulfill(bytes32,bytes32)',
- ),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
-
- bigNumEquals(0, await link.balanceOf(maliciousConsumer.address))
- })
-
- it('allows the oracle node to receive their payment', async () => {
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- const mockBalance = await link.balanceOf(
- maliciousConsumer.address,
- )
- bigNumEquals(mockBalance, 0)
-
- const balance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(balance, 0)
-
- await operator
- .connect(roles.defaultAccount)
- .withdraw(await roles.oracleNode.getAddress(), paymentAmount)
- const newBalance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(paymentAmount, newBalance)
- })
-
- it("can't fulfill the data again", async () => {
- const response2 = 'hack the planet 102'
- const response2Values = [stringToBytes(response2)]
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- response2Values,
- ),
- ),
- )
- })
- })
-
- describe('tries to steal funds from node', () => {
- it('is not successful with call', async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('stealEthCall(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
-
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- bigNumEquals(
- 0,
- await ethers.provider.getBalance(maliciousConsumer.address),
- )
- })
-
- it('is not successful with send', async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('stealEthSend(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
-
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
- bigNumEquals(
- 0,
- await ethers.provider.getBalance(maliciousConsumer.address),
- )
- })
-
- it('is not successful with transfer', async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('stealEthTransfer(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
-
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
- bigNumEquals(
- 0,
- await ethers.provider.getBalance(maliciousConsumer.address),
- )
- })
- })
- })
- })
-
- describe('multiple bytes32 parameters', () => {
- const response1 = '100'
- const response2 = '7777777'
- const response3 = 'forty two'
- const responseTypes = ['bytes32', 'bytes32', 'bytes32']
- const responseValues = [
- toBytes32String(response1),
- toBytes32String(response2),
- toBytes32String(response3),
- ]
- let maliciousRequester: Contract
- let multiConsumer: Contract
- let maliciousConsumer: Contract
- let gasGuzzlingConsumer: Contract
- let request: ReturnType
-
- describe('gas guzzling consumer [ @skip-coverage ]', () => {
- beforeEach(async () => {
- gasGuzzlingConsumer = await gasGuzzlingConsumerFactory
- .connect(roles.consumer)
- .deploy(link.address, operator.address, specId)
- const paymentAmount = toWei('1')
- await link.transfer(gasGuzzlingConsumer.address, paymentAmount)
- const tx =
- await gasGuzzlingConsumer.gassyMultiWordRequest(paymentAmount)
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
- })
-
- it('emits an OracleResponse2 event', async () => {
- const fulfillParams = convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- )
- const tx = await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(...fulfillParams)
- const receipt = await tx.wait()
- assert.equal(receipt.events?.length, 1)
- const responseEvent = receipt.events?.[0]
- assert.equal(responseEvent?.event, 'OracleResponse')
- assert.equal(responseEvent?.args?.[0], request.requestId)
- })
- })
-
- describe('cooperative consumer', () => {
- beforeEach(async () => {
- multiConsumer = await multiWordConsumerFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, operator.address, specId)
- const paymentAmount = toWei('1')
- await link.transfer(multiConsumer.address, paymentAmount)
- const currency = 'USD'
- const tx = await multiConsumer.requestMultipleParameters(
- currency,
- paymentAmount,
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
- })
-
- describe('when called by an unauthorized node', () => {
- beforeEach(async () => {
- assert.equal(
- false,
- await operator.isAuthorizedSender(
- await roles.stranger.getAddress(),
- ),
- )
- })
-
- it('raises an error', async () => {
- await evmRevert(
- operator
- .connect(roles.stranger)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- ),
- )
- })
- })
-
- describe('when called by an authorized node', () => {
- it('raises an error if the request ID does not exist', async () => {
- request.requestId =
- ethers.utils.formatBytes32String('DOESNOTEXIST')
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- ),
- )
- })
-
- it('sets the value on the requested contract', async () => {
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- const firstValue = await multiConsumer.usd()
- const secondValue = await multiConsumer.eur()
- const thirdValue = await multiConsumer.jpy()
- assert.equal(
- response1,
- ethers.utils.parseBytes32String(firstValue),
- )
- assert.equal(
- response2,
- ethers.utils.parseBytes32String(secondValue),
- )
- assert.equal(
- response3,
- ethers.utils.parseBytes32String(thirdValue),
- )
- })
-
- it('emits an OracleResponse2 event', async () => {
- const fulfillParams = convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- )
- const tx = await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(...fulfillParams)
- const receipt = await tx.wait()
- assert.equal(receipt.events?.length, 3)
- const responseEvent = receipt.events?.[0]
- assert.equal(responseEvent?.event, 'OracleResponse')
- assert.equal(responseEvent?.args?.[0], request.requestId)
- })
-
- it('does not allow a request to be fulfilled twice', async () => {
- const response4 = response3 + ' && Hello World!!'
- const repeatedResponseValues = [
- toBytes32String(response1),
- toBytes32String(response2),
- toBytes32String(response4),
- ]
-
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- repeatedResponseValues,
- ),
- ),
- )
-
- const firstValue = await multiConsumer.usd()
- const secondValue = await multiConsumer.eur()
- const thirdValue = await multiConsumer.jpy()
- assert.equal(
- response1,
- ethers.utils.parseBytes32String(firstValue),
- )
- assert.equal(
- response2,
- ethers.utils.parseBytes32String(secondValue),
- )
- assert.equal(
- response3,
- ethers.utils.parseBytes32String(thirdValue),
- )
- })
- })
-
- describe('when the oracle does not provide enough gas', () => {
- // if updating this defaultGasLimit, be sure it matches with the
- // defaultGasLimit specified in store/tx_manager.go
- const defaultGasLimit = 500000
-
- beforeEach(async () => {
- bigNumEquals(0, await operator.withdrawable())
- })
-
- it('does not allow the oracle to withdraw the payment', async () => {
- await evmRevert(
- operator.connect(roles.oracleNode).fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- {
- gasLimit: 70000,
- },
- ),
- ),
- )
-
- bigNumEquals(0, await operator.withdrawable())
- })
-
- it(`${defaultGasLimit} is enough to pass the gas requirement`, async () => {
- await operator.connect(roles.oracleNode).fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- {
- gasLimit: defaultGasLimit,
- },
- ),
- )
-
- bigNumEquals(request.payment, await operator.withdrawable())
- })
- })
- })
-
- describe('with a malicious requester', () => {
- beforeEach(async () => {
- const paymentAmount = toWei('1')
- maliciousRequester = await maliciousRequesterFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, operator.address)
- await link.transfer(maliciousRequester.address, paymentAmount)
- })
-
- it('cannot cancel before the expiration', async () => {
- await evmRevert(
- maliciousRequester.maliciousRequestCancel(
- specId,
- ethers.utils.toUtf8Bytes('doesNothing(bytes32,bytes32)'),
- ),
- )
- })
-
- it('cannot call functions on the LINK token through callbacks', async () => {
- await evmRevert(
- maliciousRequester.request(
- specId,
- link.address,
- ethers.utils.toUtf8Bytes('transfer(address,uint256)'),
- ),
- )
- })
-
- describe('requester lies about amount of LINK sent', () => {
- it('the oracle uses the amount of LINK actually paid', async () => {
- const tx = await maliciousRequester.maliciousPrice(specId)
- const receipt = await tx.wait()
- const req = decodeRunRequest(receipt.logs?.[3])
-
- assert(toWei('1').eq(req.payment))
- })
- })
- })
-
- describe('with a malicious consumer', () => {
- const paymentAmount = toWei('1')
-
- beforeEach(async () => {
- maliciousConsumer = await maliciousMultiWordConsumerFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, operator.address)
- await link.transfer(maliciousConsumer.address, paymentAmount)
- })
-
- describe('fails during fulfillment', () => {
- beforeEach(async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('assertFail(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
- })
-
- it('allows the oracle node to receive their payment', async () => {
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- const balance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(balance, 0)
-
- await operator
- .connect(roles.defaultAccount)
- .withdraw(await roles.oracleNode.getAddress(), paymentAmount)
-
- const newBalance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(paymentAmount, newBalance)
- })
-
- it("can't fulfill the data again", async () => {
- const response4 = 'hack the planet 102'
- const repeatedResponseValues = [
- toBytes32String(response1),
- toBytes32String(response2),
- toBytes32String(response4),
- ]
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- repeatedResponseValues,
- ),
- ),
- )
- })
- })
-
- describe('calls selfdestruct', () => {
- beforeEach(async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('doesNothing(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
- await maliciousConsumer.remove()
- })
-
- it('allows the oracle node to receive their payment', async () => {
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- const balance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(balance, 0)
-
- await operator
- .connect(roles.defaultAccount)
- .withdraw(await roles.oracleNode.getAddress(), paymentAmount)
- const newBalance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(paymentAmount, newBalance)
- })
- })
-
- describe('request is canceled during fulfillment', () => {
- beforeEach(async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes(
- 'cancelRequestOnFulfill(bytes32,bytes32)',
- ),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
-
- bigNumEquals(0, await link.balanceOf(maliciousConsumer.address))
- })
-
- it('allows the oracle node to receive their payment', async () => {
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- const mockBalance = await link.balanceOf(
- maliciousConsumer.address,
- )
- bigNumEquals(mockBalance, 0)
-
- const balance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(balance, 0)
-
- await operator
- .connect(roles.defaultAccount)
- .withdraw(await roles.oracleNode.getAddress(), paymentAmount)
- const newBalance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- bigNumEquals(paymentAmount, newBalance)
- })
-
- it("can't fulfill the data again", async () => {
- const response4 = 'hack the planet 102'
- const repeatedResponseValues = [
- toBytes32String(response1),
- toBytes32String(response2),
- toBytes32String(response4),
- ]
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- repeatedResponseValues,
- ),
- ),
- )
- })
- })
-
- describe('tries to steal funds from node', () => {
- it('is not successful with call', async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('stealEthCall(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
-
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
-
- bigNumEquals(
- 0,
- await ethers.provider.getBalance(maliciousConsumer.address),
- )
- })
-
- it('is not successful with send', async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('stealEthSend(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
-
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
- bigNumEquals(
- 0,
- await ethers.provider.getBalance(maliciousConsumer.address),
- )
- })
-
- it('is not successful with transfer', async () => {
- const tx = await maliciousConsumer.requestData(
- specId,
- ethers.utils.toUtf8Bytes('stealEthTransfer(bytes32,bytes32)'),
- )
- const receipt = await tx.wait()
- request = decodeRunRequest(receipt.logs?.[3])
-
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- ),
- )
- bigNumEquals(
- 0,
- await ethers.provider.getBalance(maliciousConsumer.address),
- )
- })
- })
- })
- })
- })
-
- describe('when the response data is too short', () => {
- const response = 'Hi mom!'
- const responseTypes = ['bytes32']
- const responseValues = [toBytes32String(response)]
-
- it('reverts', async () => {
- let basicConsumer = await basicConsumerFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, operator.address, specId)
- const paymentAmount = toWei('1')
- await link.transfer(basicConsumer.address, paymentAmount)
- const tx = await basicConsumer.requestEthereumPrice(
- 'USD',
- paymentAmount,
- )
- const receipt = await tx.wait()
- let request = decodeRunRequest(receipt.logs?.[3])
-
- const fulfillParams = convertFulfill2Params(
- request,
- responseTypes,
- responseValues,
- )
- fulfillParams[5] = '0x' // overwrite the data to be of lenght 0
- await evmRevert(
- operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(...fulfillParams),
- 'Response must be > 32 bytes',
- )
- })
- })
- })
-
- describe('#withdraw', () => {
- describe('without reserving funds via oracleRequest', () => {
- it('does nothing', async () => {
- let balance = await link.balanceOf(await roles.oracleNode.getAddress())
- assert.equal(0, balance.toNumber())
- await evmRevert(
- operator
- .connect(roles.defaultAccount)
- .withdraw(await roles.oracleNode.getAddress(), toWei('1')),
- )
- balance = await link.balanceOf(await roles.oracleNode.getAddress())
- assert.equal(0, balance.toNumber())
- })
-
- describe('recovering funds that were mistakenly sent', () => {
- const paid = 1
- beforeEach(async () => {
- await link.transfer(operator.address, paid)
- })
-
- it('withdraws funds', async () => {
- const operatorBalanceBefore = await link.balanceOf(operator.address)
- const accountBalanceBefore = await link.balanceOf(
- await roles.defaultAccount.getAddress(),
- )
-
- await operator
- .connect(roles.defaultAccount)
- .withdraw(await roles.defaultAccount.getAddress(), paid)
-
- const operatorBalanceAfter = await link.balanceOf(operator.address)
- const accountBalanceAfter = await link.balanceOf(
- await roles.defaultAccount.getAddress(),
- )
-
- const accountDifference =
- accountBalanceAfter.sub(accountBalanceBefore)
- const operatorDifference =
- operatorBalanceBefore.sub(operatorBalanceAfter)
-
- bigNumEquals(operatorDifference, paid)
- bigNumEquals(accountDifference, paid)
- })
- })
- })
-
- describe('reserving funds via oracleRequest', () => {
- const payment = 15
- let request: ReturnType
-
- beforeEach(async () => {
- const requester = await roles.defaultAccount.getAddress()
- const args = encodeOracleRequest(
- specId,
- requester,
- fHash,
- 0,
- constants.HashZero,
- )
- const tx = await link.transferAndCall(operator.address, payment, args)
- const receipt = await tx.wait()
- assert.equal(3, receipt.logs?.length)
- request = decodeRunRequest(receipt.logs?.[2])
- })
-
- describe('but not freeing funds w fulfillOracleRequest', () => {
- it('does not transfer funds', async () => {
- await evmRevert(
- operator
- .connect(roles.defaultAccount)
- .withdraw(await roles.oracleNode.getAddress(), payment),
- )
- const balance = await link.balanceOf(
- await roles.oracleNode.getAddress(),
- )
- assert.equal(0, balance.toNumber())
- })
-
- describe('recovering funds that were mistakenly sent', () => {
- const paid = 1
- beforeEach(async () => {
- await link.transfer(operator.address, paid)
- })
-
- it('withdraws funds', async () => {
- const operatorBalanceBefore = await link.balanceOf(operator.address)
- const accountBalanceBefore = await link.balanceOf(
- await roles.defaultAccount.getAddress(),
- )
-
- await operator
- .connect(roles.defaultAccount)
- .withdraw(await roles.defaultAccount.getAddress(), paid)
-
- const operatorBalanceAfter = await link.balanceOf(operator.address)
- const accountBalanceAfter = await link.balanceOf(
- await roles.defaultAccount.getAddress(),
- )
-
- const accountDifference =
- accountBalanceAfter.sub(accountBalanceBefore)
- const operatorDifference =
- operatorBalanceBefore.sub(operatorBalanceAfter)
-
- bigNumEquals(operatorDifference, paid)
- bigNumEquals(accountDifference, paid)
- })
- })
- })
-
- describe('and freeing funds', () => {
- beforeEach(async () => {
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest(
- ...convertFufillParams(request, 'Hello World!'),
- )
- })
-
- it('does not allow input greater than the balance', async () => {
- const originalOracleBalance = await link.balanceOf(operator.address)
- const originalStrangerBalance = await link.balanceOf(
- await roles.stranger.getAddress(),
- )
- const withdrawalAmount = payment + 1
-
- assert.isAbove(withdrawalAmount, originalOracleBalance.toNumber())
- await evmRevert(
- operator
- .connect(roles.defaultAccount)
- .withdraw(await roles.stranger.getAddress(), withdrawalAmount),
- )
-
- const newOracleBalance = await link.balanceOf(operator.address)
- const newStrangerBalance = await link.balanceOf(
- await roles.stranger.getAddress(),
- )
-
- assert.equal(
- originalOracleBalance.toNumber(),
- newOracleBalance.toNumber(),
- )
- assert.equal(
- originalStrangerBalance.toNumber(),
- newStrangerBalance.toNumber(),
- )
- })
-
- it('allows transfer of partial balance by owner to specified address', async () => {
- const partialAmount = 6
- const difference = payment - partialAmount
- await operator
- .connect(roles.defaultAccount)
- .withdraw(await roles.stranger.getAddress(), partialAmount)
- const strangerBalance = await link.balanceOf(
- await roles.stranger.getAddress(),
- )
- const oracleBalance = await link.balanceOf(operator.address)
- assert.equal(partialAmount, strangerBalance.toNumber())
- assert.equal(difference, oracleBalance.toNumber())
- })
-
- it('allows transfer of entire balance by owner to specified address', async () => {
- await operator
- .connect(roles.defaultAccount)
- .withdraw(await roles.stranger.getAddress(), payment)
- const balance = await link.balanceOf(
- await roles.stranger.getAddress(),
- )
- assert.equal(payment, balance.toNumber())
- })
-
- it('does not allow a transfer of funds by non-owner', async () => {
- await evmRevert(
- operator
- .connect(roles.stranger)
- .withdraw(await roles.stranger.getAddress(), payment),
- )
- const balance = await link.balanceOf(
- await roles.stranger.getAddress(),
- )
- assert.isTrue(ethers.constants.Zero.eq(balance))
- })
-
- describe('recovering funds that were mistakenly sent', () => {
- const paid = 1
- beforeEach(async () => {
- await link.transfer(operator.address, paid)
- })
-
- it('withdraws funds', async () => {
- const operatorBalanceBefore = await link.balanceOf(operator.address)
- const accountBalanceBefore = await link.balanceOf(
- await roles.defaultAccount.getAddress(),
- )
-
- await operator
- .connect(roles.defaultAccount)
- .withdraw(await roles.defaultAccount.getAddress(), paid)
-
- const operatorBalanceAfter = await link.balanceOf(operator.address)
- const accountBalanceAfter = await link.balanceOf(
- await roles.defaultAccount.getAddress(),
- )
-
- const accountDifference =
- accountBalanceAfter.sub(accountBalanceBefore)
- const operatorDifference =
- operatorBalanceBefore.sub(operatorBalanceAfter)
-
- bigNumEquals(operatorDifference, paid)
- bigNumEquals(accountDifference, paid)
- })
- })
- })
- })
- })
-
- describe('#withdrawable', () => {
- let request: ReturnType
- const amount = toWei('1')
-
- beforeEach(async () => {
- const requester = await roles.defaultAccount.getAddress()
- const args = encodeOracleRequest(
- specId,
- requester,
- fHash,
- 0,
- constants.HashZero,
- )
- const tx = await link.transferAndCall(operator.address, amount, args)
- const receipt = await tx.wait()
- assert.equal(3, receipt.logs?.length)
- request = decodeRunRequest(receipt.logs?.[2])
- await operator
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...convertFufillParams(request, 'Hello World!'))
- })
-
- it('returns the correct value', async () => {
- const withdrawAmount = await operator.withdrawable()
- bigNumEquals(withdrawAmount, request.payment)
- })
-
- describe('funds that were mistakenly sent', () => {
- const paid = 1
- beforeEach(async () => {
- await link.transfer(operator.address, paid)
- })
-
- it('returns the correct value', async () => {
- const withdrawAmount = await operator.withdrawable()
-
- const expectedAmount = amount.add(paid)
- bigNumEquals(withdrawAmount, expectedAmount)
- })
- })
- })
-
- describe('#ownerTransferAndCall', () => {
- let operator2: Contract
- let args: string
- let to: string
- const startingBalance = 1000
- const payment = 20
-
- beforeEach(async () => {
- operator2 = await operatorFactory
- .connect(roles.oracleNode2)
- .deploy(link.address, await roles.oracleNode2.getAddress())
- to = operator2.address
- args = encodeOracleRequest(
- specId,
- operator.address,
- operatorFactory.interface.getSighash('fulfillOracleRequest'),
- 1,
- constants.HashZero,
- )
- })
-
- describe('when called by a non-owner', () => {
- it('reverts with owner error message', async () => {
- await link.transfer(operator.address, startingBalance)
- await evmRevert(
- operator
- .connect(roles.stranger)
- .ownerTransferAndCall(to, payment, args),
- 'Only callable by owner',
- )
- })
- })
-
- describe('when called by the owner', () => {
- beforeEach(async () => {
- await link.transfer(operator.address, startingBalance)
- })
-
- describe('without sufficient funds in contract', () => {
- it('reverts with funds message', async () => {
- const tooMuch = startingBalance * 2
- await evmRevert(
- operator
- .connect(roles.defaultAccount)
- .ownerTransferAndCall(to, tooMuch, args),
- 'Amount requested is greater than withdrawable balance',
- )
- })
- })
-
- describe('with sufficient funds', () => {
- let tx: ContractTransaction
- let receipt: ContractReceipt
- let requesterBalanceBefore: BigNumber
- let requesterBalanceAfter: BigNumber
- let receiverBalanceBefore: BigNumber
- let receiverBalanceAfter: BigNumber
-
- before(async () => {
- requesterBalanceBefore = await link.balanceOf(operator.address)
- receiverBalanceBefore = await link.balanceOf(operator2.address)
- tx = await operator
- .connect(roles.defaultAccount)
- .ownerTransferAndCall(to, payment, args)
- receipt = await tx.wait()
- requesterBalanceAfter = await link.balanceOf(operator.address)
- receiverBalanceAfter = await link.balanceOf(operator2.address)
- })
-
- it('emits an event', async () => {
- assert.equal(3, receipt.logs?.length)
- const transferLog = await getLog(tx, 1)
- const parsedLog = link.interface.parseLog({
- data: transferLog.data,
- topics: transferLog.topics,
- })
- await expect(parsedLog.name).to.equal('Transfer')
- })
-
- it('transfers the tokens', async () => {
- bigNumEquals(
- requesterBalanceBefore.sub(requesterBalanceAfter),
- payment,
- )
- bigNumEquals(receiverBalanceAfter.sub(receiverBalanceBefore), payment)
- })
- })
- })
- })
-
- describe('#cancelOracleRequestByRequester', () => {
- const nonce = 17
-
- describe('with no pending requests', () => {
- it('fails', async () => {
- const fakeRequest: RunRequest = {
- requestId: ethers.utils.formatBytes32String('1337'),
- payment: '0',
- callbackFunc:
- getterSetterFactory.interface.getSighash('requestedBytes32'),
- expiration: '999999999999',
-
- callbackAddr: '',
- data: Buffer.from(''),
- dataVersion: 0,
- specId: '',
- requester: '',
- topic: '',
- }
- await increaseTime5Minutes(ethers.provider)
-
- await evmRevert(
- operator
- .connect(roles.stranger)
- .cancelOracleRequestByRequester(
- ...convertCancelByRequesterParams(fakeRequest, nonce),
- ),
- )
- })
- })
-
- describe('with a pending request', () => {
- const startingBalance = 100
- let request: ReturnType
- let receipt: providers.TransactionReceipt
-
- beforeEach(async () => {
- const requestAmount = 20
-
- await link.transfer(await roles.consumer.getAddress(), startingBalance)
-
- const args = encodeOracleRequest(
- specId,
- await roles.consumer.getAddress(),
- fHash,
- nonce,
- constants.HashZero,
- )
- const tx = await link
- .connect(roles.consumer)
- .transferAndCall(operator.address, requestAmount, args)
- receipt = await tx.wait()
-
- assert.equal(3, receipt.logs?.length)
- request = decodeRunRequest(receipt.logs?.[2])
-
- // pre conditions
- const oracleBalance = await link.balanceOf(operator.address)
- bigNumEquals(request.payment, oracleBalance)
-
- const consumerAmount = await link.balanceOf(
- await roles.consumer.getAddress(),
- )
- assert.equal(
- startingBalance - Number(request.payment),
- consumerAmount.toNumber(),
- )
- })
-
- describe('from a stranger', () => {
- it('fails', async () => {
- await evmRevert(
- operator
- .connect(roles.consumer)
- .cancelOracleRequestByRequester(
- ...convertCancelByRequesterParams(request, nonce),
- ),
- )
- })
- })
-
- describe('from the requester', () => {
- it('refunds the correct amount', async () => {
- await increaseTime5Minutes(ethers.provider)
- await operator
- .connect(roles.consumer)
- .cancelOracleRequestByRequester(
- ...convertCancelByRequesterParams(request, nonce),
- )
- const balance = await link.balanceOf(
- await roles.consumer.getAddress(),
- )
-
- assert.equal(startingBalance, balance.toNumber()) // 100
- })
-
- it('triggers a cancellation event', async () => {
- await increaseTime5Minutes(ethers.provider)
- const tx = await operator
- .connect(roles.consumer)
- .cancelOracleRequestByRequester(
- ...convertCancelByRequesterParams(request, nonce),
- )
- const receipt = await tx.wait()
-
- assert.equal(receipt.logs?.length, 2)
- assert.equal(request.requestId, receipt.logs?.[0].topics[1])
- })
-
- it('fails when called twice', async () => {
- await increaseTime5Minutes(ethers.provider)
- await operator
- .connect(roles.consumer)
- .cancelOracleRequestByRequester(
- ...convertCancelByRequesterParams(request, nonce),
- )
-
- await evmRevert(
- operator
- .connect(roles.consumer)
- .cancelOracleRequestByRequester(...convertCancelParams(request)),
- )
- })
- })
- })
- })
-
- describe('#cancelOracleRequest', () => {
- describe('with no pending requests', () => {
- it('fails', async () => {
- const fakeRequest: RunRequest = {
- requestId: ethers.utils.formatBytes32String('1337'),
- payment: '0',
- callbackFunc:
- getterSetterFactory.interface.getSighash('requestedBytes32'),
- expiration: '999999999999',
-
- callbackAddr: '',
- data: Buffer.from(''),
- dataVersion: 0,
- specId: '',
- requester: '',
- topic: '',
- }
- await increaseTime5Minutes(ethers.provider)
-
- await evmRevert(
- operator
- .connect(roles.stranger)
- .cancelOracleRequest(...convertCancelParams(fakeRequest)),
- )
- })
- })
-
- describe('with a pending request', () => {
- const startingBalance = 100
- let request: ReturnType
- let receipt: providers.TransactionReceipt
-
- beforeEach(async () => {
- const requestAmount = 20
-
- await link.transfer(await roles.consumer.getAddress(), startingBalance)
-
- const args = encodeOracleRequest(
- specId,
- await roles.consumer.getAddress(),
- fHash,
- 1,
- constants.HashZero,
- )
- const tx = await link
- .connect(roles.consumer)
- .transferAndCall(operator.address, requestAmount, args)
- receipt = await tx.wait()
-
- assert.equal(3, receipt.logs?.length)
- request = decodeRunRequest(receipt.logs?.[2])
- })
-
- it('has correct initial balances', async () => {
- const oracleBalance = await link.balanceOf(operator.address)
- bigNumEquals(request.payment, oracleBalance)
-
- const consumerAmount = await link.balanceOf(
- await roles.consumer.getAddress(),
- )
- assert.equal(
- startingBalance - Number(request.payment),
- consumerAmount.toNumber(),
- )
- })
-
- describe('from a stranger', () => {
- it('fails', async () => {
- await evmRevert(
- operator
- .connect(roles.consumer)
- .cancelOracleRequest(...convertCancelParams(request)),
- )
- })
- })
-
- describe('from the requester', () => {
- it('refunds the correct amount', async () => {
- await increaseTime5Minutes(ethers.provider)
- await operator
- .connect(roles.consumer)
- .cancelOracleRequest(...convertCancelParams(request))
- const balance = await link.balanceOf(
- await roles.consumer.getAddress(),
- )
-
- assert.equal(startingBalance, balance.toNumber()) // 100
- })
-
- it('triggers a cancellation event', async () => {
- await increaseTime5Minutes(ethers.provider)
- const tx = await operator
- .connect(roles.consumer)
- .cancelOracleRequest(...convertCancelParams(request))
- const receipt = await tx.wait()
-
- assert.equal(receipt.logs?.length, 2)
- assert.equal(request.requestId, receipt.logs?.[0].topics[1])
- })
-
- it('fails when called twice', async () => {
- await increaseTime5Minutes(ethers.provider)
- await operator
- .connect(roles.consumer)
- .cancelOracleRequest(...convertCancelParams(request))
-
- await evmRevert(
- operator
- .connect(roles.consumer)
- .cancelOracleRequest(...convertCancelParams(request)),
- )
- })
- })
- })
- })
-
- describe('#ownerForward', () => {
- let bytes: string
- let payload: string
- let mock: Contract
-
- beforeEach(async () => {
- bytes = ethers.utils.hexlify(ethers.utils.randomBytes(100))
- payload = getterSetterFactory.interface.encodeFunctionData(
- getterSetterFactory.interface.getFunction('setBytes'),
- [bytes],
- )
- mock = await getterSetterFactory.connect(roles.defaultAccount).deploy()
- })
-
- describe('when called by a non-owner', () => {
- it('reverts', async () => {
- await evmRevert(
- operator.connect(roles.stranger).ownerForward(mock.address, payload),
- )
- })
- })
-
- describe('when called by owner', () => {
- describe('when attempting to forward to the link token', () => {
- it('reverts', async () => {
- const sighash = linkTokenFactory.interface.getSighash('name')
- await evmRevert(
- operator
- .connect(roles.defaultAccount)
- .ownerForward(link.address, sighash),
- 'Cannot call to LINK',
- )
- })
- })
-
- describe('when forwarding to any other address', () => {
- it('forwards the data', async () => {
- const tx = await operator
- .connect(roles.defaultAccount)
- .ownerForward(mock.address, payload)
- await tx.wait()
- assert.equal(await mock.getBytes(), bytes)
- })
-
- it('reverts when sending to a non-contract address', async () => {
- await evmRevert(
- operator
- .connect(roles.defaultAccount)
- .ownerForward(zeroAddress, payload),
- 'Must forward to a contract',
- )
- })
-
- it('perceives the message is sent by the Operator', async () => {
- const tx = await operator
- .connect(roles.defaultAccount)
- .ownerForward(mock.address, payload)
- const receipt = await tx.wait()
- const log: any = receipt.logs?.[0]
- const logData = mock.interface.decodeEventLog(
- mock.interface.getEvent('SetBytes'),
- log.data,
- log.topics,
- )
- assert.equal(ethers.utils.getAddress(logData.from), operator.address)
- })
- })
- })
- })
-})
diff --git a/contracts/test/v0.7/OperatorFactory.test.ts b/contracts/test/v0.7/OperatorFactory.test.ts
deleted file mode 100644
index d2a24600e23..00000000000
--- a/contracts/test/v0.7/OperatorFactory.test.ts
+++ /dev/null
@@ -1,293 +0,0 @@
-import { ethers } from 'hardhat'
-import { evmWordToAddress, publicAbi } from '../test-helpers/helpers'
-import { assert } from 'chai'
-import { Contract, ContractFactory, ContractReceipt } from 'ethers'
-import { getUsers, Roles } from '../test-helpers/setup'
-
-let linkTokenFactory: ContractFactory
-let operatorGeneratorFactory: ContractFactory
-let operatorFactory: ContractFactory
-let forwarderFactory: ContractFactory
-
-let roles: Roles
-
-before(async () => {
- const users = await getUsers()
-
- roles = users.roles
- linkTokenFactory = await ethers.getContractFactory(
- 'src/v0.4/LinkToken.sol:LinkToken',
- roles.defaultAccount,
- )
- operatorGeneratorFactory = await ethers.getContractFactory(
- 'src/v0.7/OperatorFactory.sol:OperatorFactory',
- roles.defaultAccount,
- )
- operatorFactory = await ethers.getContractFactory(
- 'src/v0.7/Operator.sol:Operator',
- roles.defaultAccount,
- )
- forwarderFactory = await ethers.getContractFactory(
- 'src/v0.7/AuthorizedForwarder.sol:AuthorizedForwarder',
- roles.defaultAccount,
- )
-})
-
-describe('OperatorFactory', () => {
- let link: Contract
- let operatorGenerator: Contract
- let operator: Contract
- let forwarder: Contract
- let receipt: ContractReceipt
- let emittedOperator: string
- let emittedForwarder: string
-
- beforeEach(async () => {
- link = await linkTokenFactory.connect(roles.defaultAccount).deploy()
- operatorGenerator = await operatorGeneratorFactory
- .connect(roles.defaultAccount)
- .deploy(link.address)
- })
-
- it('has a limited public interface [ @skip-coverage ]', () => {
- publicAbi(operatorGenerator, [
- 'created',
- 'deployNewOperator',
- 'deployNewOperatorAndForwarder',
- 'deployNewForwarder',
- 'deployNewForwarderAndTransferOwnership',
- 'getChainlinkToken',
- 'typeAndVersion',
- ])
- })
-
- describe('#typeAndVersion', () => {
- it('describes the authorized forwarder', async () => {
- assert.equal(
- await operatorGenerator.typeAndVersion(),
- 'OperatorFactory 1.0.0',
- )
- })
- })
-
- describe('#deployNewOperator', () => {
- beforeEach(async () => {
- const tx = await operatorGenerator
- .connect(roles.oracleNode)
- .deployNewOperator()
-
- receipt = await tx.wait()
- emittedOperator = evmWordToAddress(receipt.logs?.[0].topics?.[1])
- })
-
- it('emits an event', async () => {
- assert.equal(receipt?.events?.[0]?.event, 'OperatorCreated')
- assert.equal(emittedOperator, receipt.events?.[0].args?.[0])
- assert.equal(
- await roles.oracleNode.getAddress(),
- receipt.events?.[0].args?.[1],
- )
- assert.equal(
- await roles.oracleNode.getAddress(),
- receipt.events?.[0].args?.[2],
- )
- })
-
- it('sets the correct owner', async () => {
- operator = await operatorFactory
- .connect(roles.defaultAccount)
- .attach(emittedOperator)
- const ownerString = await operator.owner()
- assert.equal(ownerString, await roles.oracleNode.getAddress())
- })
-
- it('records that it deployed that address', async () => {
- assert.isTrue(await operatorGenerator.created(emittedOperator))
- })
- })
-
- describe('#deployNewOperatorAndForwarder', () => {
- beforeEach(async () => {
- const tx = await operatorGenerator
- .connect(roles.oracleNode)
- .deployNewOperatorAndForwarder()
-
- receipt = await tx.wait()
- emittedOperator = evmWordToAddress(receipt.logs?.[0].topics?.[1])
- emittedForwarder = evmWordToAddress(receipt.logs?.[3].topics?.[1])
- })
-
- it('emits an event recording that the operator was deployed', async () => {
- assert.equal(
- await roles.oracleNode.getAddress(),
- receipt.events?.[0].args?.[1],
- )
- assert.equal(receipt?.events?.[0]?.event, 'OperatorCreated')
- assert.equal(receipt?.events?.[0]?.args?.[0], emittedOperator)
- assert.equal(
- receipt?.events?.[0]?.args?.[1],
- await roles.oracleNode.getAddress(),
- )
- assert.equal(
- receipt?.events?.[0]?.args?.[2],
- await roles.oracleNode.getAddress(),
- )
- })
-
- it('proposes the transfer of the forwarder to the operator', async () => {
- assert.equal(
- await roles.oracleNode.getAddress(),
- receipt.events?.[0].args?.[1],
- )
- assert.equal(
- receipt?.events?.[1]?.topics?.[0],
- '0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278', //OwnershipTransferRequested(address,address)
- )
- assert.equal(
- evmWordToAddress(receipt?.events?.[1]?.topics?.[1]),
- operatorGenerator.address,
- )
- assert.equal(
- evmWordToAddress(receipt?.events?.[1]?.topics?.[2]),
- emittedOperator,
- )
-
- assert.equal(
- receipt?.events?.[2]?.topics?.[0],
- '0x4e1e878dc28d5f040db5969163ff1acd75c44c3f655da2dde9c70bbd8e56dc7e', //OwnershipTransferRequestedWithMessage(address,address,bytes)
- )
- assert.equal(
- evmWordToAddress(receipt?.events?.[2]?.topics?.[1]),
- operatorGenerator.address,
- )
- assert.equal(
- evmWordToAddress(receipt?.events?.[2]?.topics?.[2]),
- emittedOperator,
- )
- })
-
- it('emits an event recording that the forwarder was deployed', async () => {
- assert.equal(receipt?.events?.[3]?.event, 'AuthorizedForwarderCreated')
- assert.equal(receipt?.events?.[3]?.args?.[0], emittedForwarder)
- assert.equal(receipt?.events?.[3]?.args?.[1], operatorGenerator.address)
- assert.equal(
- receipt?.events?.[3]?.args?.[2],
- await roles.oracleNode.getAddress(),
- )
- })
-
- it('sets the correct owner on the operator', async () => {
- operator = await operatorFactory
- .connect(roles.defaultAccount)
- .attach(receipt?.events?.[0]?.args?.[0])
- assert.equal(await roles.oracleNode.getAddress(), await operator.owner())
- })
-
- it('sets the operator as the owner of the forwarder', async () => {
- forwarder = await forwarderFactory
- .connect(roles.defaultAccount)
- .attach(emittedForwarder)
- assert.equal(operatorGenerator.address, await forwarder.owner())
- })
-
- it('records that it deployed that address', async () => {
- assert.isTrue(await operatorGenerator.created(emittedOperator))
- assert.isTrue(await operatorGenerator.created(emittedForwarder))
- })
- })
-
- describe('#deployNewForwarder', () => {
- beforeEach(async () => {
- const tx = await operatorGenerator
- .connect(roles.oracleNode)
- .deployNewForwarder()
-
- receipt = await tx.wait()
- emittedForwarder = receipt.events?.[0].args?.[0]
- })
-
- it('emits an event', async () => {
- assert.equal(receipt?.events?.[0]?.event, 'AuthorizedForwarderCreated')
- assert.equal(
- await roles.oracleNode.getAddress(),
- receipt.events?.[0].args?.[1],
- ) // owner
- assert.equal(
- await roles.oracleNode.getAddress(),
- receipt.events?.[0].args?.[2],
- ) // sender
- })
-
- it('sets the caller as the owner', async () => {
- forwarder = await forwarderFactory
- .connect(roles.defaultAccount)
- .attach(emittedForwarder)
- const ownerString = await forwarder.owner()
- assert.equal(ownerString, await roles.oracleNode.getAddress())
- })
-
- it('records that it deployed that address', async () => {
- assert.isTrue(await operatorGenerator.created(emittedForwarder))
- })
- })
-
- describe('#deployNewForwarderAndTransferOwnership', () => {
- const message = '0x42'
-
- beforeEach(async () => {
- const tx = await operatorGenerator
- .connect(roles.oracleNode)
- .deployNewForwarderAndTransferOwnership(
- await roles.stranger.getAddress(),
- message,
- )
- receipt = await tx.wait()
-
- emittedForwarder = evmWordToAddress(receipt.logs?.[2].topics?.[1])
- })
-
- it('emits an event', async () => {
- assert.equal(receipt?.events?.[2]?.event, 'AuthorizedForwarderCreated')
- assert.equal(
- await roles.oracleNode.getAddress(),
- receipt.events?.[2].args?.[1],
- ) // owner
- assert.equal(
- await roles.oracleNode.getAddress(),
- receipt.events?.[2].args?.[2],
- ) // sender
- })
-
- it('sets the caller as the owner', async () => {
- forwarder = await forwarderFactory
- .connect(roles.defaultAccount)
- .attach(emittedForwarder)
- const ownerString = await forwarder.owner()
- assert.equal(ownerString, await roles.oracleNode.getAddress())
- })
-
- it('proposes a transfer to the recipient', async () => {
- const emittedOwner = evmWordToAddress(receipt.logs?.[0].topics?.[1])
- assert.equal(emittedOwner, await roles.oracleNode.getAddress())
- const emittedRecipient = evmWordToAddress(receipt.logs?.[0].topics?.[2])
- assert.equal(emittedRecipient, await roles.stranger.getAddress())
- })
-
- it('proposes a transfer to the recipient with the specified message', async () => {
- const emittedOwner = evmWordToAddress(receipt.logs?.[1].topics?.[1])
- assert.equal(emittedOwner, await roles.oracleNode.getAddress())
- const emittedRecipient = evmWordToAddress(receipt.logs?.[1].topics?.[2])
- assert.equal(emittedRecipient, await roles.stranger.getAddress())
-
- const encodedMessage = ethers.utils.defaultAbiCoder.encode(
- ['bytes'],
- [message],
- )
- assert.equal(receipt?.logs?.[1]?.data, encodedMessage)
- })
-
- it('records that it deployed that address', async () => {
- assert.isTrue(await operatorGenerator.created(emittedForwarder))
- })
- })
-})
diff --git a/contracts/test/v0.7/StalenessFlaggingValidator.test.ts b/contracts/test/v0.7/StalenessFlaggingValidator.test.ts
deleted file mode 100644
index 8a5c4b67632..00000000000
--- a/contracts/test/v0.7/StalenessFlaggingValidator.test.ts
+++ /dev/null
@@ -1,632 +0,0 @@
-import { ethers } from 'hardhat'
-import {
- evmWordToAddress,
- getLog,
- getLogs,
- numToBytes32,
- publicAbi,
-} from '../test-helpers/helpers'
-import { assert, expect } from 'chai'
-import { BigNumber, Contract, ContractFactory } from 'ethers'
-import { Personas, getUsers } from '../test-helpers/setup'
-import { evmRevert } from '../test-helpers/matchers'
-
-let personas: Personas
-let validatorFactory: ContractFactory
-let flagsFactory: ContractFactory
-let acFactory: ContractFactory
-let aggregatorFactory: ContractFactory
-
-before(async () => {
- personas = (await getUsers()).personas
-
- validatorFactory = await ethers.getContractFactory(
- 'src/v0.7/dev/StalenessFlaggingValidator.sol:StalenessFlaggingValidator',
- personas.Carol,
- )
- flagsFactory = await ethers.getContractFactory(
- 'src/v0.6/Flags.sol:Flags',
- personas.Carol,
- )
- acFactory = await ethers.getContractFactory(
- 'src/v0.6/SimpleWriteAccessController.sol:SimpleWriteAccessController',
- personas.Carol,
- )
- aggregatorFactory = await ethers.getContractFactory(
- 'src/v0.7/tests/MockV3Aggregator.sol:MockV3Aggregator',
- personas.Carol,
- )
-})
-
-describe('StalenessFlaggingValidator', () => {
- let validator: Contract
- let flags: Contract
- let ac: Contract
-
- const flaggingThreshold1 = 10000
- const flaggingThreshold2 = 20000
-
- beforeEach(async () => {
- ac = await acFactory.connect(personas.Carol).deploy()
- flags = await flagsFactory.connect(personas.Carol).deploy(ac.address)
- validator = await validatorFactory
- .connect(personas.Carol)
- .deploy(flags.address)
-
- await ac.connect(personas.Carol).addAccess(validator.address)
- })
-
- it('has a limited public interface [ @skip-coverage ]', () => {
- publicAbi(validator, [
- 'update',
- 'check',
- 'setThresholds',
- 'setFlagsAddress',
- 'threshold',
- 'flags',
- // Upkeep methods:
- 'checkUpkeep',
- 'performUpkeep',
- // Owned methods:
- 'acceptOwnership',
- 'owner',
- 'transferOwnership',
- ])
- })
-
- describe('#constructor', () => {
- it('sets the arguments passed in', async () => {
- assert.equal(await validator.flags(), flags.address)
- })
-
- it('sets the owner', async () => {
- assert.equal(await validator.owner(), await personas.Carol.getAddress())
- })
- })
-
- describe('#setFlagsAddress', () => {
- const newFlagsAddress = '0x0123456789012345678901234567890123456789'
-
- it('changes the flags address', async () => {
- assert.equal(flags.address, await validator.flags())
-
- await validator.connect(personas.Carol).setFlagsAddress(newFlagsAddress)
-
- assert.equal(newFlagsAddress, await validator.flags())
- })
-
- it('emits a log event only when actually changed', async () => {
- const tx = await validator
- .connect(personas.Carol)
- .setFlagsAddress(newFlagsAddress)
- await expect(tx)
- .to.emit(validator, 'FlagsAddressUpdated')
- .withArgs(flags.address, newFlagsAddress)
-
- const sameChangeTx = await validator
- .connect(personas.Carol)
- .setFlagsAddress(newFlagsAddress)
-
- await expect(sameChangeTx).to.not.emit(validator, 'FlagsAddressUpdated')
- })
-
- describe('when called by a non-owner', () => {
- it('reverts', async () => {
- await evmRevert(
- validator.connect(personas.Neil).setFlagsAddress(newFlagsAddress),
- 'Only callable by owner',
- )
- })
- })
- })
-
- describe('#setThresholds', () => {
- let agg1: Contract
- let agg2: Contract
- let aggregators: Array
- let thresholds: Array
-
- beforeEach(async () => {
- const decimals = 8
- const initialAnswer = 10000000000
- agg1 = await aggregatorFactory
- .connect(personas.Carol)
- .deploy(decimals, initialAnswer)
- agg2 = await aggregatorFactory
- .connect(personas.Carol)
- .deploy(decimals, initialAnswer)
- })
-
- describe('failure', () => {
- beforeEach(() => {
- aggregators = [agg1.address, agg2.address]
- thresholds = [flaggingThreshold1]
- })
-
- it('reverts when called by a non-owner', async () => {
- await evmRevert(
- validator
- .connect(personas.Neil)
- .setThresholds(aggregators, thresholds),
- 'Only callable by owner',
- )
- })
-
- it('reverts when passed uneven arrays', async () => {
- await evmRevert(
- validator
- .connect(personas.Carol)
- .setThresholds(aggregators, thresholds),
- 'Different sized arrays',
- )
- })
- })
-
- describe('success', () => {
- let tx: any
-
- beforeEach(() => {
- aggregators = [agg1.address, agg2.address]
- thresholds = [flaggingThreshold1, flaggingThreshold2]
- })
-
- describe('when called with 2 new thresholds', () => {
- beforeEach(async () => {
- tx = await validator
- .connect(personas.Carol)
- .setThresholds(aggregators, thresholds)
- })
-
- it('sets the thresholds', async () => {
- const first = await validator.threshold(agg1.address)
- const second = await validator.threshold(agg2.address)
- assert.equal(first.toString(), flaggingThreshold1.toString())
- assert.equal(second.toString(), flaggingThreshold2.toString())
- })
-
- it('emits events', async () => {
- const firstEvent = await getLog(tx, 0)
- assert.equal(evmWordToAddress(firstEvent.topics[1]), agg1.address)
- assert.equal(firstEvent.topics[3], numToBytes32(flaggingThreshold1))
- const secondEvent = await getLog(tx, 1)
- assert.equal(evmWordToAddress(secondEvent.topics[1]), agg2.address)
- assert.equal(secondEvent.topics[3], numToBytes32(flaggingThreshold2))
- })
- })
-
- describe('when called with 2, but 1 has not changed', () => {
- it('emits only 1 event', async () => {
- tx = await validator
- .connect(personas.Carol)
- .setThresholds(aggregators, thresholds)
-
- const newThreshold = flaggingThreshold2 + 1
- tx = await validator
- .connect(personas.Carol)
- .setThresholds(aggregators, [flaggingThreshold1, newThreshold])
- const logs = await getLogs(tx)
- assert.equal(logs.length, 1)
- const log = logs[0]
- assert.equal(evmWordToAddress(log.topics[1]), agg2.address)
- assert.equal(log.topics[2], numToBytes32(flaggingThreshold2))
- assert.equal(log.topics[3], numToBytes32(newThreshold))
- })
- })
- })
- })
-
- describe('#check', () => {
- let agg1: Contract
- let agg2: Contract
- let aggregators: Array
- let thresholds: Array
- const decimals = 8
- const initialAnswer = 10000000000
- beforeEach(async () => {
- agg1 = await aggregatorFactory
- .connect(personas.Carol)
- .deploy(decimals, initialAnswer)
- agg2 = await aggregatorFactory
- .connect(personas.Carol)
- .deploy(decimals, initialAnswer)
- aggregators = [agg1.address, agg2.address]
- thresholds = [flaggingThreshold1, flaggingThreshold2]
- await validator.setThresholds(aggregators, thresholds)
- })
-
- describe('when neither are stale', () => {
- it('returns an empty array', async () => {
- const response = await validator.check(aggregators)
- assert.equal(response.length, 0)
- })
- })
-
- describe('when threshold is not set in the validator', () => {
- it('returns an empty array', async () => {
- const agg3 = await aggregatorFactory
- .connect(personas.Carol)
- .deploy(decimals, initialAnswer)
- const response = await validator.check([agg3.address])
- assert.equal(response.length, 0)
- })
- })
-
- describe('when one of the aggregators is stale', () => {
- it('returns an array with one stale aggregator', async () => {
- const currentTimestamp = await agg1.latestTimestamp()
- const staleTimestamp = currentTimestamp.sub(
- BigNumber.from(flaggingThreshold1 + 1),
- )
- await agg1.updateRoundData(
- 99,
- initialAnswer,
- staleTimestamp,
- staleTimestamp,
- )
- const response = await validator.check(aggregators)
-
- assert.equal(response.length, 1)
- assert.equal(response[0], agg1.address)
- })
- })
-
- describe('When both aggregators are stale', () => {
- it('returns an array with both aggregators', async () => {
- let currentTimestamp = await agg1.latestTimestamp()
- let staleTimestamp = currentTimestamp.sub(
- BigNumber.from(flaggingThreshold1 + 1),
- )
- await agg1.updateRoundData(
- 99,
- initialAnswer,
- staleTimestamp,
- staleTimestamp,
- )
-
- currentTimestamp = await agg2.latestTimestamp()
- staleTimestamp = currentTimestamp.sub(
- BigNumber.from(flaggingThreshold2 + 1),
- )
- await agg2.updateRoundData(
- 99,
- initialAnswer,
- staleTimestamp,
- staleTimestamp,
- )
-
- const response = await validator.check(aggregators)
-
- assert.equal(response.length, 2)
- assert.equal(response[0], agg1.address)
- assert.equal(response[1], agg2.address)
- })
- })
- })
-
- describe('#update', () => {
- let agg1: Contract
- let agg2: Contract
- let aggregators: Array
- let thresholds: Array
- const decimals = 8
- const initialAnswer = 10000000000
- beforeEach(async () => {
- agg1 = await aggregatorFactory
- .connect(personas.Carol)
- .deploy(decimals, initialAnswer)
- agg2 = await aggregatorFactory
- .connect(personas.Carol)
- .deploy(decimals, initialAnswer)
- aggregators = [agg1.address, agg2.address]
- thresholds = [flaggingThreshold1, flaggingThreshold2]
- await validator.setThresholds(aggregators, thresholds)
- })
-
- describe('when neither are stale', () => {
- it('does not raise a flag', async () => {
- const tx = await validator.update(aggregators)
- const logs = await getLogs(tx)
- assert.equal(logs.length, 0)
- })
- })
-
- describe('when threshold is not set in the validator', () => {
- it('does not raise a flag', async () => {
- const agg3 = await aggregatorFactory
- .connect(personas.Carol)
- .deploy(decimals, initialAnswer)
- const tx = await validator.update([agg3.address])
- const logs = await getLogs(tx)
- assert.equal(logs.length, 0)
- })
- })
-
- describe('when one is stale', () => {
- it('raises a flag for that aggregator', async () => {
- const currentTimestamp = await agg1.latestTimestamp()
- const staleTimestamp = currentTimestamp.sub(
- BigNumber.from(flaggingThreshold1 + 1),
- )
- await agg1.updateRoundData(
- 99,
- initialAnswer,
- staleTimestamp,
- staleTimestamp,
- )
-
- const tx = await validator.update(aggregators)
- const logs = await getLogs(tx)
- assert.equal(logs.length, 1)
- assert.equal(evmWordToAddress(logs[0].topics[1]), agg1.address)
- })
- })
-
- describe('when both are stale', () => {
- it('raises 2 flags, one for each aggregator', async () => {
- let currentTimestamp = await agg1.latestTimestamp()
- let staleTimestamp = currentTimestamp.sub(
- BigNumber.from(flaggingThreshold1 + 1),
- )
- await agg1.updateRoundData(
- 99,
- initialAnswer,
- staleTimestamp,
- staleTimestamp,
- )
-
- currentTimestamp = await agg2.latestTimestamp()
- staleTimestamp = currentTimestamp.sub(
- BigNumber.from(flaggingThreshold2 + 1),
- )
- await agg2.updateRoundData(
- 99,
- initialAnswer,
- staleTimestamp,
- staleTimestamp,
- )
-
- const tx = await validator.update(aggregators)
- const logs = await getLogs(tx)
- assert.equal(logs.length, 2)
- assert.equal(evmWordToAddress(logs[0].topics[1]), agg1.address)
- assert.equal(evmWordToAddress(logs[1].topics[1]), agg2.address)
- })
- })
- })
-
- describe('#checkUpkeep', () => {
- let agg1: Contract
- let agg2: Contract
- let aggregators: Array
- let thresholds: Array
- const decimals = 8
- const initialAnswer = 10000000000
- beforeEach(async () => {
- agg1 = await aggregatorFactory
- .connect(personas.Carol)
- .deploy(decimals, initialAnswer)
- agg2 = await aggregatorFactory
- .connect(personas.Carol)
- .deploy(decimals, initialAnswer)
- aggregators = [agg1.address, agg2.address]
- thresholds = [flaggingThreshold1, flaggingThreshold2]
- await validator.setThresholds(aggregators, thresholds)
- })
-
- describe('when neither are stale', () => {
- it('returns false and an empty array', async () => {
- const bytesData = ethers.utils.defaultAbiCoder.encode(
- ['address[]'],
- [aggregators],
- )
- const response = await validator.checkUpkeep(bytesData)
-
- assert.equal(response[0], false)
- const decodedResponse = ethers.utils.defaultAbiCoder.decode(
- ['address[]'],
- response?.[1],
- )
- assert.equal(decodedResponse[0].length, 0)
- })
- })
-
- describe('when threshold is not set in the validator', () => {
- it('returns flase and an empty array', async () => {
- const agg3 = await aggregatorFactory
- .connect(personas.Carol)
- .deploy(decimals, initialAnswer)
- const bytesData = ethers.utils.defaultAbiCoder.encode(
- ['address[]'],
- [[agg3.address]],
- )
- const response = await validator.checkUpkeep(bytesData)
-
- assert.equal(response[0], false)
- const decodedResponse = ethers.utils.defaultAbiCoder.decode(
- ['address[]'],
- response?.[1],
- )
- assert.equal(decodedResponse[0].length, 0)
- })
- })
-
- describe('when one of the aggregators is stale', () => {
- it('returns true with an array with one stale aggregator', async () => {
- const currentTimestamp = await agg1.latestTimestamp()
- const staleTimestamp = currentTimestamp.sub(
- BigNumber.from(flaggingThreshold1 + 1),
- )
- await agg1.updateRoundData(
- 99,
- initialAnswer,
- staleTimestamp,
- staleTimestamp,
- )
-
- const bytesData = ethers.utils.defaultAbiCoder.encode(
- ['address[]'],
- [aggregators],
- )
- const response = await validator.checkUpkeep(bytesData)
-
- assert.equal(response[0], true)
- const decodedResponse = ethers.utils.defaultAbiCoder.decode(
- ['address[]'],
- response?.[1],
- )
- const decodedArray = decodedResponse[0]
- assert.equal(decodedArray.length, 1)
- assert.equal(decodedArray[0], agg1.address)
- })
- })
-
- describe('When both aggregators are stale', () => {
- it('returns true with an array with both aggregators', async () => {
- let currentTimestamp = await agg1.latestTimestamp()
- let staleTimestamp = currentTimestamp.sub(
- BigNumber.from(flaggingThreshold1 + 1),
- )
- await agg1.updateRoundData(
- 99,
- initialAnswer,
- staleTimestamp,
- staleTimestamp,
- )
-
- currentTimestamp = await agg2.latestTimestamp()
- staleTimestamp = currentTimestamp.sub(
- BigNumber.from(flaggingThreshold2 + 1),
- )
- await agg2.updateRoundData(
- 99,
- initialAnswer,
- staleTimestamp,
- staleTimestamp,
- )
-
- const bytesData = ethers.utils.defaultAbiCoder.encode(
- ['address[]'],
- [aggregators],
- )
- const response = await validator.checkUpkeep(bytesData)
-
- assert.equal(response[0], true)
- const decodedResponse = ethers.utils.defaultAbiCoder.decode(
- ['address[]'],
- response?.[1],
- )
- const decodedArray = decodedResponse[0]
- assert.equal(decodedArray.length, 2)
- assert.equal(decodedArray[0], agg1.address)
- assert.equal(decodedArray[1], agg2.address)
- })
- })
- })
-
- describe('#performUpkeep', () => {
- let agg1: Contract
- let agg2: Contract
- let aggregators: Array
- let thresholds: Array
- const decimals = 8
- const initialAnswer = 10000000000
- beforeEach(async () => {
- agg1 = await aggregatorFactory
- .connect(personas.Carol)
- .deploy(decimals, initialAnswer)
- agg2 = await aggregatorFactory
- .connect(personas.Carol)
- .deploy(decimals, initialAnswer)
- aggregators = [agg1.address, agg2.address]
- thresholds = [flaggingThreshold1, flaggingThreshold2]
- await validator.setThresholds(aggregators, thresholds)
- })
-
- describe('when neither are stale', () => {
- it('does not raise a flag', async () => {
- const bytesData = ethers.utils.defaultAbiCoder.encode(
- ['address[]'],
- [aggregators],
- )
- const tx = await validator.performUpkeep(bytesData)
- const logs = await getLogs(tx)
- assert.equal(logs.length, 0)
- })
- })
-
- describe('when threshold is not set in the validator', () => {
- it('does not raise a flag', async () => {
- const agg3 = await aggregatorFactory
- .connect(personas.Carol)
- .deploy(decimals, initialAnswer)
- const bytesData = ethers.utils.defaultAbiCoder.encode(
- ['address[]'],
- [[agg3.address]],
- )
- const tx = await validator.performUpkeep(bytesData)
- const logs = await getLogs(tx)
- assert.equal(logs.length, 0)
- })
- })
-
- describe('when one is stale', () => {
- it('raises a flag for that aggregator', async () => {
- const currentTimestamp = await agg1.latestTimestamp()
- const staleTimestamp = currentTimestamp.sub(
- BigNumber.from(flaggingThreshold1 + 1),
- )
- await agg1.updateRoundData(
- 99,
- initialAnswer,
- staleTimestamp,
- staleTimestamp,
- )
-
- const bytesData = ethers.utils.defaultAbiCoder.encode(
- ['address[]'],
- [aggregators],
- )
- const tx = await validator.performUpkeep(bytesData)
- const logs = await getLogs(tx)
- assert.equal(logs.length, 1)
- assert.equal(evmWordToAddress(logs[0].topics[1]), agg1.address)
- })
- })
-
- describe('when both are stale', () => {
- it('raises 2 flags, one for each aggregator', async () => {
- let currentTimestamp = await agg1.latestTimestamp()
- let staleTimestamp = currentTimestamp.sub(
- BigNumber.from(flaggingThreshold1 + 1),
- )
- await agg1.updateRoundData(
- 99,
- initialAnswer,
- staleTimestamp,
- staleTimestamp,
- )
-
- currentTimestamp = await agg2.latestTimestamp()
- staleTimestamp = currentTimestamp.sub(
- BigNumber.from(flaggingThreshold2 + 1),
- )
- await agg2.updateRoundData(
- 99,
- initialAnswer,
- staleTimestamp,
- staleTimestamp,
- )
-
- const bytesData = ethers.utils.defaultAbiCoder.encode(
- ['address[]'],
- [aggregators],
- )
- const tx = await validator.performUpkeep(bytesData)
- const logs = await getLogs(tx)
- assert.equal(logs.length, 2)
- assert.equal(evmWordToAddress(logs[0].topics[1]), agg1.address)
- assert.equal(evmWordToAddress(logs[1].topics[1]), agg2.address)
- })
- })
- })
-})
diff --git a/contracts/test/v0.7/UpkeepRegistrationRequests.test.ts b/contracts/test/v0.7/UpkeepRegistrationRequests.test.ts
deleted file mode 100644
index 5ec9306c668..00000000000
--- a/contracts/test/v0.7/UpkeepRegistrationRequests.test.ts
+++ /dev/null
@@ -1,603 +0,0 @@
-import { ethers } from 'hardhat'
-import { assert, expect } from 'chai'
-import { evmRevert } from '../test-helpers/matchers'
-import { getUsers, Personas } from '../test-helpers/setup'
-import { BigNumber, Signer } from 'ethers'
-import { LinkToken__factory as LinkTokenFactory } from '../../typechain/factories/LinkToken__factory'
-import { KeeperRegistry1_1__factory as KeeperRegistryFactory } from '../../typechain/factories/KeeperRegistry1_1__factory'
-import { MockV3Aggregator__factory as MockV3AggregatorFactory } from '../../typechain/factories/MockV3Aggregator__factory'
-import { UpkeepRegistrationRequests__factory as UpkeepRegistrationRequestsFactory } from '../../typechain/factories/UpkeepRegistrationRequests__factory'
-import { UpkeepMock__factory as UpkeepMockFactory } from '../../typechain/factories/UpkeepMock__factory'
-import { KeeperRegistry1_1 as KeeperRegistry } from '../../typechain/KeeperRegistry1_1'
-import { UpkeepRegistrationRequests } from '../../typechain/UpkeepRegistrationRequests'
-import { MockV3Aggregator } from '../../typechain/MockV3Aggregator'
-import { LinkToken } from '../../typechain/LinkToken'
-import { UpkeepMock } from '../../typechain/UpkeepMock'
-
-let linkTokenFactory: LinkTokenFactory
-let mockV3AggregatorFactory: MockV3AggregatorFactory
-let keeperRegistryFactory: KeeperRegistryFactory
-let upkeepRegistrationRequestsFactory: UpkeepRegistrationRequestsFactory
-let upkeepMockFactory: UpkeepMockFactory
-
-let personas: Personas
-
-before(async () => {
- personas = (await getUsers()).personas
-
- linkTokenFactory = await ethers.getContractFactory(
- 'src/v0.4/LinkToken.sol:LinkToken',
- )
- mockV3AggregatorFactory = (await ethers.getContractFactory(
- 'src/v0.7/tests/MockV3Aggregator.sol:MockV3Aggregator',
- )) as unknown as MockV3AggregatorFactory
- // @ts-ignore bug in autogen file
- keeperRegistryFactory = await ethers.getContractFactory('KeeperRegistry1_1')
- upkeepRegistrationRequestsFactory = await ethers.getContractFactory(
- 'UpkeepRegistrationRequests',
- )
- upkeepMockFactory = await ethers.getContractFactory('UpkeepMock')
-})
-
-const errorMsgs = {
- onlyOwner: 'revert Only callable by owner',
- onlyAdmin: 'only admin / owner can cancel',
- hashPayload: 'hash and payload do not match',
- requestNotFound: 'request not found',
-}
-
-describe('UpkeepRegistrationRequests', () => {
- const upkeepName = 'SampleUpkeep'
-
- const linkEth = BigNumber.from(300000000)
- const gasWei = BigNumber.from(100)
- const executeGas = BigNumber.from(100000)
- const source = BigNumber.from(100)
- const paymentPremiumPPB = BigNumber.from(250000000)
- const flatFeeMicroLink = BigNumber.from(0)
-
- const window_big = BigNumber.from(1000)
- const window_small = BigNumber.from(2)
- const threshold_big = BigNumber.from(1000)
- const threshold_small = BigNumber.from(5)
-
- const blockCountPerTurn = BigNumber.from(3)
- const emptyBytes = '0x00'
- const stalenessSeconds = BigNumber.from(43820)
- const gasCeilingMultiplier = BigNumber.from(1)
- const maxCheckGas = BigNumber.from(20000000)
- const fallbackGasPrice = BigNumber.from(200)
- const fallbackLinkPrice = BigNumber.from(200000000)
- const minLINKJuels = BigNumber.from('1000000000000000000')
- const amount = BigNumber.from('5000000000000000000')
- const amount1 = BigNumber.from('6000000000000000000')
-
- let owner: Signer
- let admin: Signer
- let someAddress: Signer
- let registrarOwner: Signer
- let stranger: Signer
-
- let linkToken: LinkToken
- let linkEthFeed: MockV3Aggregator
- let gasPriceFeed: MockV3Aggregator
- let registry: KeeperRegistry
- let mock: UpkeepMock
- let registrar: UpkeepRegistrationRequests
-
- beforeEach(async () => {
- owner = personas.Default
- admin = personas.Neil
- someAddress = personas.Ned
- registrarOwner = personas.Nelly
- stranger = personas.Nancy
-
- linkToken = await linkTokenFactory.connect(owner).deploy()
- gasPriceFeed = await mockV3AggregatorFactory
- .connect(owner)
- .deploy(0, gasWei)
- linkEthFeed = await mockV3AggregatorFactory
- .connect(owner)
- .deploy(9, linkEth)
- registry = await keeperRegistryFactory
- .connect(owner)
- .deploy(
- linkToken.address,
- linkEthFeed.address,
- gasPriceFeed.address,
- paymentPremiumPPB,
- flatFeeMicroLink,
- blockCountPerTurn,
- maxCheckGas,
- stalenessSeconds,
- gasCeilingMultiplier,
- fallbackGasPrice,
- fallbackLinkPrice,
- )
-
- mock = await upkeepMockFactory.deploy()
-
- registrar = await upkeepRegistrationRequestsFactory
- .connect(registrarOwner)
- .deploy(linkToken.address, minLINKJuels)
-
- await registry.setRegistrar(registrar.address)
- })
-
- describe('#typeAndVersion', () => {
- it('uses the correct type and version', async () => {
- const typeAndVersion = await registrar.typeAndVersion()
- assert.equal(typeAndVersion, 'UpkeepRegistrationRequests 1.0.0')
- })
- })
-
- describe('#register', () => {
- it('reverts if not called by the LINK token', async () => {
- await evmRevert(
- registrar
- .connect(someAddress)
- .register(
- upkeepName,
- emptyBytes,
- mock.address,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- amount,
- source,
- ),
- 'Must use LINK token',
- )
- })
-
- it('reverts if the amount passed in data mismatches actual amount sent', async () => {
- await registrar
- .connect(registrarOwner)
- .setRegistrationConfig(
- true,
- window_small,
- threshold_big,
- registry.address,
- minLINKJuels,
- )
-
- const abiEncodedBytes = registrar.interface.encodeFunctionData(
- 'register',
- [
- upkeepName,
- emptyBytes,
- mock.address,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- amount1,
- source,
- ],
- )
-
- await evmRevert(
- linkToken.transferAndCall(registrar.address, amount, abiEncodedBytes),
- 'Amount mismatch',
- )
- })
-
- it('reverts if the admin address is 0x0000...', async () => {
- const abiEncodedBytes = registrar.interface.encodeFunctionData(
- 'register',
- [
- upkeepName,
- emptyBytes,
- mock.address,
- executeGas,
- '0x0000000000000000000000000000000000000000',
- emptyBytes,
- amount,
- source,
- ],
- )
-
- await evmRevert(
- linkToken.transferAndCall(registrar.address, amount, abiEncodedBytes),
- 'Unable to create request',
- )
- })
-
- it('Auto Approve ON - registers an upkeep on KeeperRegistry instantly and emits both RegistrationRequested and RegistrationApproved events', async () => {
- //get current upkeep count
- const upkeepCount = await registry.getUpkeepCount()
-
- //set auto approve ON with high threshold limits
- await registrar
- .connect(registrarOwner)
- .setRegistrationConfig(
- true,
- window_small,
- threshold_big,
- registry.address,
- minLINKJuels,
- )
-
- //register with auto approve ON
- const abiEncodedBytes = registrar.interface.encodeFunctionData(
- 'register',
- [
- upkeepName,
- emptyBytes,
- mock.address,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- amount,
- source,
- ],
- )
- const tx = await linkToken.transferAndCall(
- registrar.address,
- amount,
- abiEncodedBytes,
- )
-
- //confirm if a new upkeep has been registered and the details are the same as the one just registered
- const newupkeep = await registry.getUpkeep(upkeepCount)
- assert.equal(newupkeep.target, mock.address)
- assert.equal(newupkeep.admin, await admin.getAddress())
- assert.equal(newupkeep.checkData, emptyBytes)
- assert.equal(newupkeep.balance.toString(), amount.toString())
- assert.equal(newupkeep.executeGas, executeGas.toNumber())
-
- await expect(tx).to.emit(registrar, 'RegistrationRequested')
- await expect(tx).to.emit(registrar, 'RegistrationApproved')
- })
-
- it('Auto Approve OFF - does not registers an upkeep on KeeperRegistry, emits only RegistrationRequested event', async () => {
- //get upkeep count before attempting registration
- const beforeCount = await registry.getUpkeepCount()
-
- //set auto approve OFF, threshold limits dont matter in this case
- await registrar
- .connect(registrarOwner)
- .setRegistrationConfig(
- false,
- window_small,
- threshold_big,
- registry.address,
- minLINKJuels,
- )
-
- //register with auto approve OFF
- const abiEncodedBytes = registrar.interface.encodeFunctionData(
- 'register',
- [
- upkeepName,
- emptyBytes,
- mock.address,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- amount,
- source,
- ],
- )
- const tx = await linkToken.transferAndCall(
- registrar.address,
- amount,
- abiEncodedBytes,
- )
- const receipt = await tx.wait()
-
- //get upkeep count after attempting registration
- const afterCount = await registry.getUpkeepCount()
- //confirm that a new upkeep has NOT been registered and upkeep count is still the same
- assert.deepEqual(beforeCount, afterCount)
-
- //confirm that only RegistrationRequested event is emitted and RegistrationApproved event is not
- await expect(tx).to.emit(registrar, 'RegistrationRequested')
- await expect(tx).not.to.emit(registrar, 'RegistrationApproved')
-
- const hash = receipt.logs[2].topics[1]
- const pendingRequest = await registrar.getPendingRequest(hash)
- assert.equal(await admin.getAddress(), pendingRequest[0])
- assert.ok(amount.eq(pendingRequest[1]))
- })
-
- it('Auto Approve ON - Throttle max approvals - does not registers an upkeep on KeeperRegistry beyond the throttle limit, emits only RegistrationRequested event after throttle starts', async () => {
- //get upkeep count before attempting registration
- const beforeCount = await registry.getUpkeepCount()
-
- //set auto approve on, with low threshold limits
- await registrar
- .connect(registrarOwner)
- .setRegistrationConfig(
- true,
- window_big,
- threshold_small,
- registry.address,
- minLINKJuels,
- )
-
- let abiEncodedBytes = registrar.interface.encodeFunctionData('register', [
- upkeepName,
- emptyBytes,
- mock.address,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- amount,
- source,
- ])
-
- //register within threshold, new upkeep should be registered
- await linkToken.transferAndCall(
- registrar.address,
- amount,
- abiEncodedBytes,
- )
- const intermediateCount = await registry.getUpkeepCount()
- //make sure 1 upkeep was registered
- assert.equal(beforeCount.toNumber() + 1, intermediateCount.toNumber())
-
- //try registering more than threshold(say 2x), new upkeeps should not be registered after the threshold amount is reached
- for (let step = 0; step < threshold_small.toNumber() * 2; step++) {
- abiEncodedBytes = registrar.interface.encodeFunctionData('register', [
- upkeepName,
- emptyBytes,
- mock.address,
- executeGas.toNumber() + step, // make unique hash
- await admin.getAddress(),
- emptyBytes,
- amount,
- source,
- ])
-
- await linkToken.transferAndCall(
- registrar.address,
- amount,
- abiEncodedBytes,
- )
- }
- const afterCount = await registry.getUpkeepCount()
- //count of newly registered upkeeps should be equal to the threshold set for auto approval
- const newRegistrationsCount =
- afterCount.toNumber() - beforeCount.toNumber()
- assert(
- newRegistrationsCount == threshold_small.toNumber(),
- 'Registrations beyond threshold',
- )
- })
- })
-
- describe('#approve', () => {
- let hash: string
-
- beforeEach(async () => {
- await registrar
- .connect(registrarOwner)
- .setRegistrationConfig(
- false,
- window_small,
- threshold_big,
- registry.address,
- minLINKJuels,
- )
-
- //register with auto approve OFF
- const abiEncodedBytes = registrar.interface.encodeFunctionData(
- 'register',
- [
- upkeepName,
- emptyBytes,
- mock.address,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- amount,
- source,
- ],
- )
-
- const tx = await linkToken.transferAndCall(
- registrar.address,
- amount,
- abiEncodedBytes,
- )
- const receipt = await tx.wait()
- hash = receipt.logs[2].topics[1]
- })
-
- it('reverts if not called by the owner', async () => {
- const tx = registrar
- .connect(stranger)
- .approve(
- upkeepName,
- mock.address,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- hash,
- )
- await evmRevert(tx, 'Only callable by owner')
- })
-
- it('reverts if the hash does not exist', async () => {
- const tx = registrar
- .connect(registrarOwner)
- .approve(
- upkeepName,
- mock.address,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- '0x000000000000000000000000322813fd9a801c5507c9de605d63cea4f2ce6c44',
- )
- await evmRevert(tx, errorMsgs.requestNotFound)
- })
-
- it('reverts if any member of the payload changes', async () => {
- let tx = registrar
- .connect(registrarOwner)
- .approve(
- upkeepName,
- ethers.Wallet.createRandom().address,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- hash,
- )
- await evmRevert(tx, errorMsgs.hashPayload)
- tx = registrar
- .connect(registrarOwner)
- .approve(
- upkeepName,
- mock.address,
- 10000,
- await admin.getAddress(),
- emptyBytes,
- hash,
- )
- await evmRevert(tx, errorMsgs.hashPayload)
- tx = registrar
- .connect(registrarOwner)
- .approve(
- upkeepName,
- mock.address,
- executeGas,
- ethers.Wallet.createRandom().address,
- emptyBytes,
- hash,
- )
- await evmRevert(tx, errorMsgs.hashPayload)
- tx = registrar
- .connect(registrarOwner)
- .approve(
- upkeepName,
- mock.address,
- executeGas,
- await admin.getAddress(),
- '0x1234',
- hash,
- )
- await evmRevert(tx, errorMsgs.hashPayload)
- })
-
- it('approves an existing registration request', async () => {
- const tx = await registrar
- .connect(registrarOwner)
- .approve(
- upkeepName,
- mock.address,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- hash,
- )
- await expect(tx).to.emit(registrar, 'RegistrationApproved')
- })
-
- it('deletes the request afterwards / reverts if the request DNE', async () => {
- await registrar
- .connect(registrarOwner)
- .approve(
- upkeepName,
- mock.address,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- hash,
- )
- const tx = registrar
- .connect(registrarOwner)
- .approve(
- upkeepName,
- mock.address,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- hash,
- )
- await evmRevert(tx, errorMsgs.requestNotFound)
- })
- })
-
- describe('#cancel', () => {
- let hash: string
-
- beforeEach(async () => {
- await registrar
- .connect(registrarOwner)
- .setRegistrationConfig(
- false,
- window_small,
- threshold_big,
- registry.address,
- minLINKJuels,
- )
-
- //register with auto approve OFF
- const abiEncodedBytes = registrar.interface.encodeFunctionData(
- 'register',
- [
- upkeepName,
- emptyBytes,
- mock.address,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- amount,
- source,
- ],
- )
- const tx = await linkToken.transferAndCall(
- registrar.address,
- amount,
- abiEncodedBytes,
- )
- const receipt = await tx.wait()
- hash = receipt.logs[2].topics[1]
- // submit duplicate request (increase balance)
- await linkToken.transferAndCall(
- registrar.address,
- amount,
- abiEncodedBytes,
- )
- })
-
- it('reverts if not called by the admin / owner', async () => {
- const tx = registrar.connect(stranger).cancel(hash)
- await evmRevert(tx, errorMsgs.onlyAdmin)
- })
-
- it('reverts if the hash does not exist', async () => {
- const tx = registrar
- .connect(registrarOwner)
- .cancel(
- '0x000000000000000000000000322813fd9a801c5507c9de605d63cea4f2ce6c44',
- )
- await evmRevert(tx, 'request not found')
- })
-
- it('refunds the total request balance to the admin address', async () => {
- const before = await linkToken.balanceOf(await admin.getAddress())
- const tx = await registrar.connect(admin).cancel(hash)
- const after = await linkToken.balanceOf(await admin.getAddress())
- assert.isTrue(after.sub(before).eq(amount.mul(BigNumber.from(2))))
- await expect(tx).to.emit(registrar, 'RegistrationRejected')
- })
-
- it('deletes the request hash', async () => {
- await registrar.connect(registrarOwner).cancel(hash)
- let tx = registrar.connect(registrarOwner).cancel(hash)
- await evmRevert(tx, errorMsgs.requestNotFound)
- tx = registrar
- .connect(registrarOwner)
- .approve(
- upkeepName,
- mock.address,
- executeGas,
- await admin.getAddress(),
- emptyBytes,
- hash,
- )
- await evmRevert(tx, errorMsgs.requestNotFound)
- })
- })
-})
diff --git a/contracts/test/v0.7/VRFD20.test.ts b/contracts/test/v0.7/VRFD20.test.ts
deleted file mode 100644
index f1e0e9ab0a8..00000000000
--- a/contracts/test/v0.7/VRFD20.test.ts
+++ /dev/null
@@ -1,303 +0,0 @@
-import { ethers } from 'hardhat'
-import { assert, expect } from 'chai'
-import {
- BigNumber,
- constants,
- Contract,
- ContractFactory,
- ContractTransaction,
-} from 'ethers'
-import { getUsers, Personas, Roles } from '../test-helpers/setup'
-import {
- evmWordToAddress,
- getLog,
- publicAbi,
- toBytes32String,
- toWei,
- numToBytes32,
- getLogs,
-} from '../test-helpers/helpers'
-
-let roles: Roles
-let personas: Personas
-let linkTokenFactory: ContractFactory
-let vrfCoordinatorMockFactory: ContractFactory
-let vrfD20Factory: ContractFactory
-
-before(async () => {
- const users = await getUsers()
-
- roles = users.roles
- personas = users.personas
- linkTokenFactory = await ethers.getContractFactory(
- 'src/v0.4/LinkToken.sol:LinkToken',
- roles.defaultAccount,
- )
- vrfCoordinatorMockFactory = await ethers.getContractFactory(
- 'src/v0.7/tests/VRFCoordinatorMock.sol:VRFCoordinatorMock',
- roles.defaultAccount,
- )
- vrfD20Factory = await ethers.getContractFactory(
- 'src/v0.6/examples/VRFD20.sol:VRFD20',
- roles.defaultAccount,
- )
-})
-
-describe('VRFD20', () => {
- const deposit = toWei('1')
- const fee = toWei('0.1')
- const keyHash = toBytes32String('keyHash')
-
- let link: Contract
- let vrfCoordinator: Contract
- let vrfD20: Contract
-
- beforeEach(async () => {
- link = await linkTokenFactory.connect(roles.defaultAccount).deploy()
- vrfCoordinator = await vrfCoordinatorMockFactory
- .connect(roles.defaultAccount)
- .deploy(link.address)
- vrfD20 = await vrfD20Factory
- .connect(roles.defaultAccount)
- .deploy(vrfCoordinator.address, link.address, keyHash, fee)
- await link.transfer(vrfD20.address, deposit)
- })
-
- it('has a limited public interface [ @skip-coverage ]', () => {
- publicAbi(vrfD20, [
- // Owned
- 'acceptOwnership',
- 'owner',
- 'transferOwnership',
- //VRFConsumerBase
- 'rawFulfillRandomness',
- // VRFD20
- 'rollDice',
- 'house',
- 'withdrawLINK',
- 'keyHash',
- 'fee',
- 'setKeyHash',
- 'setFee',
- ])
- })
-
- describe('#withdrawLINK', () => {
- describe('failure', () => {
- it('reverts when called by a non-owner', async () => {
- await expect(
- vrfD20
- .connect(roles.stranger)
- .withdrawLINK(await roles.stranger.getAddress(), deposit),
- ).to.be.revertedWith('Only callable by owner')
- })
-
- it('reverts when not enough LINK in the contract', async () => {
- const withdrawAmount = deposit.mul(2)
- await expect(
- vrfD20
- .connect(roles.defaultAccount)
- .withdrawLINK(
- await roles.defaultAccount.getAddress(),
- withdrawAmount,
- ),
- ).to.be.reverted
- })
- })
-
- describe('success', () => {
- it('withdraws LINK', async () => {
- const startingAmount = await link.balanceOf(
- await roles.defaultAccount.getAddress(),
- )
- const expectedAmount = BigNumber.from(startingAmount).add(deposit)
- await vrfD20
- .connect(roles.defaultAccount)
- .withdrawLINK(await roles.defaultAccount.getAddress(), deposit)
- const actualAmount = await link.balanceOf(
- await roles.defaultAccount.getAddress(),
- )
- assert.equal(actualAmount.toString(), expectedAmount.toString())
- })
- })
- })
-
- describe('#setKeyHash', () => {
- const newHash = toBytes32String('newhash')
-
- describe('failure', () => {
- it('reverts when called by a non-owner', async () => {
- await expect(
- vrfD20.connect(roles.stranger).setKeyHash(newHash),
- ).to.be.revertedWith('Only callable by owner')
- })
- })
-
- describe('success', () => {
- it('sets the key hash', async () => {
- await vrfD20.setKeyHash(newHash)
- const actualHash = await vrfD20.keyHash()
- assert.equal(actualHash, newHash)
- })
- })
- })
-
- describe('#setFee', () => {
- const newFee = 1234
-
- describe('failure', () => {
- it('reverts when called by a non-owner', async () => {
- await expect(
- vrfD20.connect(roles.stranger).setFee(newFee),
- ).to.be.revertedWith('Only callable by owner')
- })
- })
-
- describe('success', () => {
- it('sets the fee', async () => {
- await vrfD20.setFee(newFee)
- const actualFee = await vrfD20.fee()
- assert.equal(actualFee.toString(), newFee.toString())
- })
- })
- })
-
- describe('#house', () => {
- describe('failure', () => {
- it('reverts when dice not rolled', async () => {
- await expect(
- vrfD20.house(await personas.Nancy.getAddress()),
- ).to.be.revertedWith('Dice not rolled')
- })
-
- it('reverts when dice roll is in progress', async () => {
- await vrfD20.rollDice(await personas.Nancy.getAddress())
- await expect(
- vrfD20.house(await personas.Nancy.getAddress()),
- ).to.be.revertedWith('Roll in progress')
- })
- })
-
- describe('success', () => {
- it('returns the correct house', async () => {
- const randomness = 98765
- const expectedHouse = 'Martell'
- const tx = await vrfD20.rollDice(await personas.Nancy.getAddress())
- const log = await getLog(tx, 3)
- const eventRequestId = log?.topics?.[1]
- await vrfCoordinator.callBackWithRandomness(
- eventRequestId,
- randomness,
- vrfD20.address,
- )
- const response = await vrfD20.house(await personas.Nancy.getAddress())
- assert.equal(response.toString(), expectedHouse)
- })
- })
- })
-
- describe('#rollDice', () => {
- describe('success', () => {
- let tx: ContractTransaction
- beforeEach(async () => {
- tx = await vrfD20.rollDice(await personas.Nancy.getAddress())
- })
-
- it('emits a RandomnessRequest event from the VRFCoordinator', async () => {
- const log = await getLog(tx, 2)
- const topics = log?.topics
- assert.equal(evmWordToAddress(topics?.[1]), vrfD20.address)
- assert.equal(topics?.[2], keyHash)
- assert.equal(topics?.[3], constants.HashZero)
- })
- })
-
- describe('failure', () => {
- it('reverts when LINK balance is zero', async () => {
- const vrfD202 = await vrfD20Factory
- .connect(roles.defaultAccount)
- .deploy(vrfCoordinator.address, link.address, keyHash, fee)
- await expect(
- vrfD202.rollDice(await personas.Nancy.getAddress()),
- ).to.be.revertedWith('Not enough LINK to pay fee')
- })
-
- it('reverts when called by a non-owner', async () => {
- await expect(
- vrfD20
- .connect(roles.stranger)
- .rollDice(await personas.Nancy.getAddress()),
- ).to.be.revertedWith('Only callable by owner')
- })
-
- it('reverts when the roller rolls more than once', async () => {
- await vrfD20.rollDice(await personas.Nancy.getAddress())
- await expect(
- vrfD20.rollDice(await personas.Nancy.getAddress()),
- ).to.be.revertedWith('Already rolled')
- })
- })
- })
-
- describe('#fulfillRandomness', () => {
- const randomness = 98765
- const expectedModResult = (randomness % 20) + 1
- const expectedHouse = 'Martell'
- let eventRequestId: string
- beforeEach(async () => {
- const tx = await vrfD20.rollDice(await personas.Nancy.getAddress())
- const log = await getLog(tx, 3)
- eventRequestId = log?.topics?.[1]
- })
-
- describe('success', () => {
- let tx: ContractTransaction
- beforeEach(async () => {
- tx = await vrfCoordinator.callBackWithRandomness(
- eventRequestId,
- randomness,
- vrfD20.address,
- )
- })
-
- it('emits a DiceLanded event', async () => {
- const log = await getLog(tx, 0)
- assert.equal(log?.topics[1], eventRequestId)
- assert.equal(log?.topics[2], numToBytes32(expectedModResult))
- })
-
- it('sets the correct dice roll result', async () => {
- const response = await vrfD20.house(await personas.Nancy.getAddress())
- assert.equal(response.toString(), expectedHouse)
- })
-
- it('allows someone else to roll', async () => {
- const secondRandomness = 55555
- tx = await vrfD20.rollDice(await personas.Ned.getAddress())
- const log = await getLog(tx, 3)
- eventRequestId = log?.topics?.[1]
- tx = await vrfCoordinator.callBackWithRandomness(
- eventRequestId,
- secondRandomness,
- vrfD20.address,
- )
- })
- })
-
- describe('failure', () => {
- it('does not fulfill when fulfilled by the wrong VRFcoordinator', async () => {
- const vrfCoordinator2 = await vrfCoordinatorMockFactory
- .connect(roles.defaultAccount)
- .deploy(link.address)
-
- const tx = await vrfCoordinator2.callBackWithRandomness(
- eventRequestId,
- randomness,
- vrfD20.address,
- )
- const logs = await getLogs(tx)
- assert.equal(logs.length, 0)
- })
- })
- })
-})
diff --git a/contracts/test/v0.7/gasUsage.test.ts b/contracts/test/v0.7/gasUsage.test.ts
deleted file mode 100644
index 97146622d06..00000000000
--- a/contracts/test/v0.7/gasUsage.test.ts
+++ /dev/null
@@ -1,178 +0,0 @@
-import { ethers } from 'hardhat'
-import { toBytes32String, toWei } from '../test-helpers/helpers'
-import { Contract, ContractFactory } from 'ethers'
-import { getUsers, Roles } from '../test-helpers/setup'
-import {
- convertFufillParams,
- convertFulfill2Params,
- decodeRunRequest,
-} from '../test-helpers/oracle'
-import { gasDiffLessThan } from '../test-helpers/matchers'
-
-let operatorFactory: ContractFactory
-let oracleFactory: ContractFactory
-let basicConsumerFactory: ContractFactory
-let linkTokenFactory: ContractFactory
-
-let roles: Roles
-
-before(async () => {
- const users = await getUsers()
-
- roles = users.roles
- operatorFactory = await ethers.getContractFactory(
- 'src/v0.7/Operator.sol:Operator',
- roles.defaultAccount,
- )
- oracleFactory = await ethers.getContractFactory(
- 'src/v0.6/Oracle.sol:Oracle',
- roles.defaultAccount,
- )
- basicConsumerFactory = await ethers.getContractFactory(
- 'src/v0.6/tests/BasicConsumer.sol:BasicConsumer',
- roles.defaultAccount,
- )
- linkTokenFactory = await ethers.getContractFactory(
- 'src/v0.4/LinkToken.sol:LinkToken',
- roles.defaultAccount,
- )
-})
-
-describe('Operator Gas Tests [ @skip-coverage ]', () => {
- const specId =
- '0x4c7b7ffb66b344fbaa64995af81e355a00000000000000000000000000000000'
- let link: Contract
- let oracle1: Contract
- let operator1: Contract
- let operator2: Contract
-
- beforeEach(async () => {
- link = await linkTokenFactory.connect(roles.defaultAccount).deploy()
-
- operator1 = await operatorFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, await roles.defaultAccount.getAddress())
- await operator1.setAuthorizedSenders([await roles.oracleNode.getAddress()])
-
- operator2 = await operatorFactory
- .connect(roles.defaultAccount)
- .deploy(link.address, await roles.defaultAccount.getAddress())
- await operator2.setAuthorizedSenders([await roles.oracleNode.getAddress()])
-
- oracle1 = await oracleFactory
- .connect(roles.defaultAccount)
- .deploy(link.address)
- await oracle1.setFulfillmentPermission(
- await roles.oracleNode.getAddress(),
- true,
- )
- })
-
- // Test Oracle.fulfillOracleRequest vs Operator.fulfillOracleRequest
- describe('v0.6/Oracle vs v0.7/Operator #fulfillOracleRequest', () => {
- const response = 'Hi Mom!'
- let basicConsumer1: Contract
- let basicConsumer2: Contract
-
- let request1: ReturnType
- let request2: ReturnType
-
- beforeEach(async () => {
- basicConsumer1 = await basicConsumerFactory
- .connect(roles.consumer)
- .deploy(link.address, oracle1.address, specId)
- basicConsumer2 = await basicConsumerFactory
- .connect(roles.consumer)
- .deploy(link.address, operator1.address, specId)
-
- const paymentAmount = toWei('1')
- const currency = 'USD'
-
- await link.transfer(basicConsumer1.address, paymentAmount)
- const tx1 = await basicConsumer1.requestEthereumPrice(
- currency,
- paymentAmount,
- )
- const receipt1 = await tx1.wait()
- request1 = decodeRunRequest(receipt1.logs?.[3])
-
- await link.transfer(basicConsumer2.address, paymentAmount)
- const tx2 = await basicConsumer2.requestEthereumPrice(
- currency,
- paymentAmount,
- )
- const receipt2 = await tx2.wait()
- request2 = decodeRunRequest(receipt2.logs?.[3])
- })
-
- it('uses acceptable gas', async () => {
- const tx1 = await oracle1
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...convertFufillParams(request1, response))
- const tx2 = await operator1
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...convertFufillParams(request2, response))
- const receipt1 = await tx1.wait()
- const receipt2 = await tx2.wait()
- // 38014 vs 40260
- gasDiffLessThan(3900, receipt1, receipt2)
- })
- })
-
- // Test Operator1.fulfillOracleRequest vs Operator2.fulfillOracleRequest2
- // with single word response
- describe('Operator #fulfillOracleRequest vs #fulfillOracleRequest2', () => {
- const response = 'Hi Mom!'
- let basicConsumer1: Contract
- let basicConsumer2: Contract
-
- let request1: ReturnType
- let request2: ReturnType
-
- beforeEach(async () => {
- basicConsumer1 = await basicConsumerFactory
- .connect(roles.consumer)
- .deploy(link.address, operator1.address, specId)
- basicConsumer2 = await basicConsumerFactory
- .connect(roles.consumer)
- .deploy(link.address, operator2.address, specId)
-
- const paymentAmount = toWei('1')
- const currency = 'USD'
-
- await link.transfer(basicConsumer1.address, paymentAmount)
- const tx1 = await basicConsumer1.requestEthereumPrice(
- currency,
- paymentAmount,
- )
- const receipt1 = await tx1.wait()
- request1 = decodeRunRequest(receipt1.logs?.[3])
-
- await link.transfer(basicConsumer2.address, paymentAmount)
- const tx2 = await basicConsumer2.requestEthereumPrice(
- currency,
- paymentAmount,
- )
- const receipt2 = await tx2.wait()
- request2 = decodeRunRequest(receipt2.logs?.[3])
- })
-
- it('uses acceptable gas', async () => {
- const tx1 = await operator1
- .connect(roles.oracleNode)
- .fulfillOracleRequest(...convertFufillParams(request1, response))
-
- const responseTypes = ['bytes32']
- const responseValues = [toBytes32String(response)]
- const tx2 = await operator2
- .connect(roles.oracleNode)
- .fulfillOracleRequest2(
- ...convertFulfill2Params(request2, responseTypes, responseValues),
- )
-
- const receipt1 = await tx1.wait()
- const receipt2 = await tx2.wait()
- gasDiffLessThan(1240, receipt1, receipt2)
- })
- })
-})
diff --git a/contracts/test/v0.8/automation/LinkAvailableBalanceMonitor.test.ts b/contracts/test/v0.8/automation/LinkAvailableBalanceMonitor.test.ts
index af0063fb503..76a3dcfff1b 100644
--- a/contracts/test/v0.8/automation/LinkAvailableBalanceMonitor.test.ts
+++ b/contracts/test/v0.8/automation/LinkAvailableBalanceMonitor.test.ts
@@ -34,6 +34,7 @@ const PAUSED_ERR = 'Pausable: paused'
const zeroLINK = ethers.utils.parseEther('0')
const oneLINK = ethers.utils.parseEther('1')
const twoLINK = ethers.utils.parseEther('2')
+const fourLINK = ethers.utils.parseEther('4')
const fiveLINK = ethers.utils.parseEther('5')
const tenLINK = ethers.utils.parseEther('10')
const oneHundredLINK = ethers.utils.parseEther('100')
@@ -59,6 +60,7 @@ let directTarget2: MockContract
let watchListAddresses: string[]
let watchListMinBalances: BigNumber[]
+let watchListTopUpAmounts: BigNumber[]
async function assertContractLinkBalances(
balance1: BigNumber,
@@ -120,6 +122,7 @@ const setup = async () => {
directTarget2.address,
]
watchListMinBalances = [oneLINK, oneLINK, oneLINK, twoLINK, twoLINK]
+ watchListTopUpAmounts = [twoLINK, twoLINK, twoLINK, twoLINK, twoLINK]
await proxy1.mock.aggregator.returns(aggregator1.address)
await proxy2.mock.aggregator.returns(aggregator2.address)
@@ -144,9 +147,17 @@ const setup = async () => {
// New parameters needed by the constructor
const maxPerform = 5
const maxCheck = 20
+ const minWaitPeriodSeconds = 0
+ const upkeepInterval = 10
lt = (await ltFactory.deploy()) as LinkToken
- labm = await labmFactory.deploy(lt.address, twoLINK, maxPerform, maxCheck)
+ labm = await labmFactory.deploy(
+ lt.address,
+ minWaitPeriodSeconds,
+ maxPerform,
+ maxCheck,
+ upkeepInterval,
+ )
await labm.deployed()
for (let i = 1; i <= 4; i++) {
@@ -156,7 +167,11 @@ const setup = async () => {
const setTx = await labm
.connect(owner)
- .setWatchList(watchListAddresses, watchListMinBalances)
+ .setWatchList(
+ watchListAddresses,
+ watchListMinBalances,
+ watchListTopUpAmounts,
+ )
await setTx.wait()
}
@@ -174,19 +189,27 @@ describe('LinkAvailableBalanceMonitor', () => {
describe('setTopUpAmount()', () => {
it('configures the top-up amount', async () => {
- await labm.connect(owner).setTopUpAmount(100)
- assert.equal((await labm.getTopUpAmount()).toNumber(), 100)
+ await labm
+ .connect(owner)
+ .setTopUpAmount(directTarget1.address, BigNumber.from(100))
+ const report = await labm.getAccountInfo(directTarget1.address)
+ assert.equal(report.topUpAmount.toString(), '100')
})
it('configuresis only callable by the owner', async () => {
- await expect(labm.connect(stranger).setTopUpAmount(100)).to.be.reverted
+ await expect(
+ labm.connect(stranger).setTopUpAmount(directTarget1.address, 100),
+ ).to.be.reverted
})
})
describe('setMinBalance()', () => {
it('configures the min balance', async () => {
- await labm.connect(owner).setMinBalance(proxy1.address, 100)
- assert.equal((await labm.getMinBalance(proxy1.address)).toNumber(), 100)
+ await labm
+ .connect(owner)
+ .setMinBalance(proxy1.address, BigNumber.from(100))
+ const report = await labm.getAccountInfo(proxy1.address)
+ assert.equal(report.minBalance.toString(), '100')
})
it('reverts if address is not in the watchlist', async () => {
@@ -266,66 +289,29 @@ describe('LinkAvailableBalanceMonitor', () => {
beforeEach(async () => {
// reset watchlist to empty before running these tests
- await labm.connect(owner).setWatchList([], [])
- let watchList = await labm.getWatchList()
- assert.deepEqual(watchList, [[], []])
+ await labm.connect(owner).setWatchList([], [], [])
+ const watchList = await labm.getWatchList()
+ assert.deepEqual(watchList, [])
})
it('Should allow owner to adjust the watchlist', async () => {
// add first watchlist
let tx = await labm
.connect(owner)
- .setWatchList([watchAddress1], [oneLINK])
+ .setWatchList([watchAddress1], [oneLINK], [oneLINK])
let watchList = await labm.getWatchList()
- assert.deepEqual(watchList[0], [watchAddress1])
- assert.deepEqual(
- watchList[1].map((x) => x.toString()),
- [oneLINK].map((x) => x.toString()),
- )
+ assert.deepEqual(watchList[0], watchAddress1)
// add more to watchlist
tx = await labm
.connect(owner)
.setWatchList(
[watchAddress1, watchAddress2, watchAddress3],
[oneLINK, oneLINK, oneLINK],
+ [oneLINK, oneLINK, oneLINK],
)
await tx.wait()
watchList = await labm.getWatchList()
- assert.deepEqual(watchList[0], [
- watchAddress1,
- watchAddress2,
- watchAddress3,
- ])
- assert.deepEqual(
- watchList[1].map((x) => x.toString()),
- [oneLINK, oneLINK, oneLINK].map((x) => x.toString()),
- )
- // remove some from watchlist
- tx = await labm
- .connect(owner)
- .removeFromWatchlist([watchAddress3, watchAddress1])
- await tx.wait()
- watchList = await labm.getWatchList()
- assert.deepEqual(watchList[0], [watchAddress2])
- assert.deepEqual(
- watchList[1].map((x) => x.toString()),
- [oneLINK].map((x) => x.toString()),
- )
- // add some to watchlist
- tx = await labm
- .connect(owner)
- .addToWatchList([watchAddress1, watchAddress3], [twoLINK, twoLINK])
- await tx.wait()
- watchList = await labm.getWatchList()
- assert.deepEqual(watchList[0], [
- watchAddress2,
- watchAddress1,
- watchAddress3,
- ])
- assert.deepEqual(
- watchList[1].map((x) => x.toString()),
- [oneLINK, twoLINK, twoLINK].map((x) => x.toString()),
- )
+ assert.deepEqual(watchList, [watchAddress1, watchAddress2, watchAddress3])
})
it('Should not allow different length arrays in the watchlist', async () => {
@@ -335,6 +321,7 @@ describe('LinkAvailableBalanceMonitor', () => {
.setWatchList(
[watchAddress1, watchAddress2, watchAddress1],
[oneLINK, oneLINK],
+ [oneLINK, oneLINK],
)
await expect(tx).to.be.revertedWith(errMsg)
})
@@ -346,12 +333,6 @@ describe('LinkAvailableBalanceMonitor', () => {
.setWatchList(
[watchAddress1, watchAddress2, watchAddress1],
[oneLINK, oneLINK, oneLINK],
- )
- await expect(tx).to.be.revertedWith(errMsg)
- tx = labm
- .connect(owner)
- .addToWatchList(
- [watchAddress1, watchAddress2, watchAddress1],
[oneLINK, oneLINK, oneLINK],
)
await expect(tx).to.be.revertedWith(errMsg)
@@ -360,14 +341,8 @@ describe('LinkAvailableBalanceMonitor', () => {
it('Should not allow strangers to set the watchlist', async () => {
const setTxStranger = labm
.connect(stranger)
- .setWatchList([watchAddress1], [oneLINK])
+ .setWatchList([watchAddress1], [oneLINK], [oneLINK])
await expect(setTxStranger).to.be.revertedWith(OWNABLE_ERR)
- const addTxStranger = labm
- .connect(stranger)
- .addToWatchList([watchAddress1], [oneLINK])
- await expect(addTxStranger).to.be.revertedWith(OWNABLE_ERR)
- const removeTxStranger = labm.connect(stranger).removeFromWatchlist([])
- await expect(removeTxStranger).to.be.revertedWith(OWNABLE_ERR)
})
it('Should revert if any of the addresses are empty', async () => {
@@ -376,12 +351,6 @@ describe('LinkAvailableBalanceMonitor', () => {
.setWatchList(
[watchAddress1, ethers.constants.AddressZero],
[oneLINK, oneLINK],
- )
- await expect(tx).to.be.revertedWith(INVALID_WATCHLIST_ERR)
- tx = labm
- .connect(owner)
- .addToWatchList(
- [watchAddress1, ethers.constants.AddressZero],
[oneLINK, oneLINK],
)
await expect(tx).to.be.revertedWith(INVALID_WATCHLIST_ERR)
@@ -390,69 +359,68 @@ describe('LinkAvailableBalanceMonitor', () => {
describe('checkUpkeep() / sampleUnderfundedAddresses() [ @skip-coverage ]', () => {
it('Should return list of address that are underfunded', async () => {
- const fundTx = await lt.connect(owner).transfer(
- labm.address,
- tenLINK, // needs 10 total
- )
+ const fundTx = await lt
+ .connect(owner)
+ .transfer(labm.address, oneHundredLINK)
await fundTx.wait()
+
+ await labm.setWatchList(
+ watchListAddresses,
+ watchListMinBalances,
+ watchListTopUpAmounts,
+ )
+
const [should, payload] = await labm.checkUpkeep('0x')
assert.isTrue(should)
let [addresses] = ethers.utils.defaultAbiCoder.decode(
['address[]'],
payload,
)
+
expect(addresses).to.deep.equalInAnyOrder(watchListAddresses)
- // checkUpkeep payload should match sampleUnderfundedAddresses()
addresses = await labm.sampleUnderfundedAddresses()
expect(addresses).to.deep.equalInAnyOrder(watchListAddresses)
})
- it('Should return some results even if contract cannot fund all eligible targets', async () => {
+ it('Should omit aggregators that have sufficient funding', async () => {
const fundTx = await lt.connect(owner).transfer(
labm.address,
- fiveLINK, // needs 2Link per contract, so can fund 2 max
+ oneHundredLINK, // enough for anything that needs funding
)
await fundTx.wait()
- const [should, payload] = await labm.checkUpkeep('0x')
- assert.isTrue(should)
- let [addresses] = ethers.utils.defaultAbiCoder.decode(
- ['address[]'],
- payload,
+
+ await labm.setWatchList(
+ [aggregator2.address, directTarget1.address, directTarget2.address],
+ [oneLINK, twoLINK, twoLINK],
+ [oneLINK, oneLINK, oneLINK],
)
- assert.equal(addresses.length, 2)
- assert.notEqual(addresses[0], addresses[1])
- assert(watchListAddresses.includes(addresses[0]))
- assert(watchListAddresses.includes(addresses[1]))
- // underfunded sample should still match list
- addresses = await labm.sampleUnderfundedAddresses()
- expect(addresses).to.deep.equalInAnyOrder(watchListAddresses)
- })
- it('Should omit aggregators that have sufficient funding', async () => {
+ // all of them are underfunded, return 3
+ await aggregator2.mock.linkAvailableForPayment.returns(zeroLINK)
+ await directTarget1.mock.linkAvailableForPayment.returns(zeroLINK)
+ await directTarget2.mock.linkAvailableForPayment.returns(zeroLINK)
+
let addresses = await labm.sampleUnderfundedAddresses()
- expect(addresses).to.deep.equalInAnyOrder(watchListAddresses)
- await aggregator2.mock.linkAvailableForPayment.returns(oneLINK) // aggregator2 is enough funded
- await directTarget1.mock.linkAvailableForPayment.returns(oneLINK) // directTarget1 is NOT enough funded
- await directTarget2.mock.linkAvailableForPayment.returns(twoLINK) // directTarget2 is enough funded
- addresses = await labm.sampleUnderfundedAddresses()
expect(addresses).to.deep.equalInAnyOrder([
- proxy1.address,
- proxy3.address,
+ aggregator2.address,
directTarget1.address,
+ directTarget2.address,
])
- await aggregator1.mock.linkAvailableForPayment.returns(tenLINK)
+ await aggregator2.mock.linkAvailableForPayment.returns(oneLINK) // aggregator2 is enough funded
+ await directTarget1.mock.linkAvailableForPayment.returns(oneLINK) // directTarget1 is NOT enough funded
+ await directTarget2.mock.linkAvailableForPayment.returns(oneLINK) // directTarget2 is NOT funded
addresses = await labm.sampleUnderfundedAddresses()
expect(addresses).to.deep.equalInAnyOrder([
- proxy3.address,
directTarget1.address,
+ directTarget2.address,
])
- await aggregator3.mock.linkAvailableForPayment.returns(tenLINK)
+ await directTarget1.mock.linkAvailableForPayment.returns(tenLINK)
addresses = await labm.sampleUnderfundedAddresses()
- expect(addresses).to.deep.equalInAnyOrder([directTarget1.address])
+ expect(addresses).to.deep.equalInAnyOrder([directTarget2.address])
- await directTarget1.mock.linkAvailableForPayment.returns(tenLINK)
+ await directTarget2.mock.linkAvailableForPayment.returns(tenLINK)
addresses = await labm.sampleUnderfundedAddresses()
expect(addresses).to.deep.equalInAnyOrder([])
})
@@ -471,6 +439,7 @@ describe('LinkAvailableBalanceMonitor', () => {
let MAX_CHECK: number
let proxyAddresses: string[]
let minBalances: BigNumber[]
+ let topUpAmount: BigNumber[]
let aggregators: MockContract[]
beforeEach(async () => {
@@ -478,6 +447,7 @@ describe('LinkAvailableBalanceMonitor', () => {
MAX_CHECK = await labm.getMaxCheck()
proxyAddresses = []
minBalances = []
+ topUpAmount = []
aggregators = []
const numAggregators = MAX_CHECK + 50
for (let idx = 0; idx < numAggregators; idx++) {
@@ -493,18 +463,18 @@ describe('LinkAvailableBalanceMonitor', () => {
await aggregator.mock.linkAvailableForPayment.returns(0)
proxyAddresses.push(proxy.address)
minBalances.push(oneLINK)
+ topUpAmount.push(oneLINK)
aggregators.push(aggregator)
}
- await labm.setWatchList(proxyAddresses, minBalances)
- expect(await labm.getWatchList()).to.deep.equalInAnyOrder([
- proxyAddresses,
- minBalances,
- ])
+ await labm.setWatchList(proxyAddresses, minBalances, topUpAmount)
+ let watchlist = await labm.getWatchList()
+ expect(watchlist).to.deep.equalInAnyOrder(proxyAddresses)
+ assert.equal(watchlist.length, minBalances.length)
})
it('Should not include more than MAX_PERFORM addresses', async () => {
const addresses = await labm.sampleUnderfundedAddresses()
- assert.equal(addresses.length, MAX_PERFORM)
+ expect(addresses.length).to.be.lessThanOrEqual(MAX_PERFORM)
})
it('Should sample from the list of addresses pseudorandomly', async () => {
@@ -547,7 +517,11 @@ describe('LinkAvailableBalanceMonitor', () => {
)
await labm
.connect(owner)
- .setWatchList(watchListAddresses, watchListMinBalances)
+ .setWatchList(
+ watchListAddresses,
+ watchListMinBalances,
+ watchListTopUpAmounts,
+ )
})
it('Should revert when paused', async () => {
@@ -557,32 +531,38 @@ describe('LinkAvailableBalanceMonitor', () => {
})
it('Should fund the appropriate addresses', async () => {
- await lt.connect(owner).transfer(labm.address, tenLINK)
- await assertContractLinkBalances(
- zeroLINK,
- zeroLINK,
- zeroLINK,
- zeroLINK,
- zeroLINK,
- )
+ await aggregator1.mock.linkAvailableForPayment.returns(zeroLINK)
+ await aggregator2.mock.linkAvailableForPayment.returns(zeroLINK)
+ await aggregator3.mock.linkAvailableForPayment.returns(zeroLINK)
+ await directTarget1.mock.linkAvailableForPayment.returns(zeroLINK)
+ await directTarget2.mock.linkAvailableForPayment.returns(zeroLINK)
+
+ const fundTx = await lt.connect(owner).transfer(labm.address, tenLINK)
+ await fundTx.wait()
+
+ h.assertLinkTokenBalance(lt, aggregator1.address, zeroLINK)
+ h.assertLinkTokenBalance(lt, aggregator2.address, zeroLINK)
+ h.assertLinkTokenBalance(lt, aggregator3.address, zeroLINK)
+ h.assertLinkTokenBalance(lt, directTarget1.address, zeroLINK)
+ h.assertLinkTokenBalance(lt, directTarget2.address, zeroLINK)
+
const performTx = await labm
.connect(keeperRegistry)
- .performUpkeep(validPayload, { gasLimit: 2_500_000 })
+ .performUpkeep(validPayload, { gasLimit: 1_500_000 })
await performTx.wait()
- await assertContractLinkBalances(
- twoLINK,
- twoLINK,
- twoLINK,
- twoLINK,
- twoLINK,
- )
+
+ h.assertLinkTokenBalance(lt, aggregator1.address, twoLINK)
+ h.assertLinkTokenBalance(lt, aggregator2.address, twoLINK)
+ h.assertLinkTokenBalance(lt, aggregator3.address, twoLINK)
+ h.assertLinkTokenBalance(lt, directTarget1.address, twoLINK)
+ h.assertLinkTokenBalance(lt, directTarget2.address, twoLINK)
})
it('Can handle MAX_PERFORM proxies within gas limit', async () => {
- // add MAX_PERFORM number of proxies
const MAX_PERFORM = await labm.getMaxPerform()
const proxyAddresses = []
const minBalances = []
+ const topUpAmount = []
for (let idx = 0; idx < MAX_PERFORM; idx++) {
const proxy = await deployMockContract(
owner,
@@ -596,20 +576,29 @@ describe('LinkAvailableBalanceMonitor', () => {
await aggregator.mock.linkAvailableForPayment.returns(0)
proxyAddresses.push(proxy.address)
minBalances.push(oneLINK)
+ topUpAmount.push(oneLINK)
}
- await labm.setWatchList(proxyAddresses, minBalances)
- expect(await labm.getWatchList()).to.deep.equalInAnyOrder([
- proxyAddresses,
- minBalances,
- ])
+ await labm.setWatchList(proxyAddresses, minBalances, topUpAmount)
+ let watchlist = await labm.getWatchList()
+ expect(watchlist).to.deep.equalInAnyOrder(proxyAddresses)
+ assert.equal(watchlist.length, minBalances.length)
+
// add funds
- const fundsNeeded = (await labm.getTopUpAmount()).mul(MAX_PERFORM)
+ const wl = await labm.getWatchList()
+ let fundsNeeded = BigNumber.from(0)
+ for (let idx = 0; idx < wl.length; idx++) {
+ const targetInfo = await labm.getAccountInfo(wl[idx])
+ const targetTopUpAmount = targetInfo.topUpAmount
+ fundsNeeded.add(targetTopUpAmount)
+ }
await lt.connect(owner).transfer(labm.address, fundsNeeded)
+
// encode payload
const payload = ethers.utils.defaultAbiCoder.encode(
['address[]'],
[proxyAddresses],
)
+
// do the thing
await labm
.connect(keeperRegistry)
@@ -618,6 +607,11 @@ describe('LinkAvailableBalanceMonitor', () => {
})
describe('topUp()', () => {
+ it('Should revert topUp address(0)', async () => {
+ const tx = await labm.connect(owner).topUp([ethers.constants.AddressZero])
+ await expect(tx).to.emit(labm, 'TopUpBlocked')
+ })
+
context('when not paused', () => {
it('Should be callable by anyone', async () => {
const users = [owner, keeperRegistry, stranger]
@@ -654,13 +648,13 @@ describe('LinkAvailableBalanceMonitor', () => {
it('Should fund the appropriate addresses', async () => {
const tx = await labm.connect(keeperRegistry).topUp(watchListAddresses)
- await assertContractLinkBalances(
- twoLINK,
- twoLINK,
- twoLINK,
- twoLINK,
- twoLINK,
- )
+
+ await aggregator1.mock.linkAvailableForPayment.returns(twoLINK)
+ await aggregator2.mock.linkAvailableForPayment.returns(twoLINK)
+ await aggregator3.mock.linkAvailableForPayment.returns(twoLINK)
+ await directTarget1.mock.linkAvailableForPayment.returns(twoLINK)
+ await directTarget2.mock.linkAvailableForPayment.returns(twoLINK)
+
await expect(tx)
.to.emit(labm, 'TopUpSucceeded')
.withArgs(proxy1.address)
@@ -682,13 +676,12 @@ describe('LinkAvailableBalanceMonitor', () => {
await labm
.connect(keeperRegistry)
.topUp([proxy1.address, directTarget1.address])
- await assertContractLinkBalances(
- twoLINK,
- zeroLINK,
- zeroLINK,
- twoLINK,
- zeroLINK,
- )
+
+ await aggregator1.mock.linkAvailableForPayment.returns(twoLINK)
+ await aggregator2.mock.linkAvailableForPayment.returns(zeroLINK)
+ await aggregator3.mock.linkAvailableForPayment.returns(zeroLINK)
+ await directTarget1.mock.linkAvailableForPayment.returns(twoLINK)
+ await directTarget2.mock.linkAvailableForPayment.returns(zeroLINK)
})
it('Should skip un-approved addresses', async () => {
@@ -697,6 +690,7 @@ describe('LinkAvailableBalanceMonitor', () => {
.setWatchList(
[proxy1.address, directTarget1.address],
[oneLINK, oneLINK],
+ [oneLINK, oneLINK],
)
const tx = await labm
.connect(keeperRegistry)
@@ -707,13 +701,13 @@ describe('LinkAvailableBalanceMonitor', () => {
directTarget1.address,
directTarget2.address,
])
- await assertContractLinkBalances(
- twoLINK,
- zeroLINK,
- zeroLINK,
- twoLINK,
- zeroLINK,
- )
+
+ h.assertLinkTokenBalance(lt, aggregator1.address, twoLINK)
+ h.assertLinkTokenBalance(lt, aggregator2.address, zeroLINK)
+ h.assertLinkTokenBalance(lt, aggregator3.address, zeroLINK)
+ h.assertLinkTokenBalance(lt, directTarget1.address, twoLINK)
+ h.assertLinkTokenBalance(lt, directTarget2.address, zeroLINK)
+
await expect(tx)
.to.emit(labm, 'TopUpSucceeded')
.withArgs(proxy1.address)
@@ -730,7 +724,11 @@ describe('LinkAvailableBalanceMonitor', () => {
it('Should skip an address if the proxy is invalid and it is not a direct target', async () => {
await labm
.connect(owner)
- .setWatchList([proxy1.address, proxy4.address], [oneLINK, oneLINK])
+ .setWatchList(
+ [proxy1.address, proxy4.address],
+ [oneLINK, oneLINK],
+ [oneLINK, oneLINK],
+ )
const tx = await labm
.connect(keeperRegistry)
.topUp([proxy1.address, proxy4.address])
@@ -744,7 +742,11 @@ describe('LinkAvailableBalanceMonitor', () => {
await proxy4.mock.aggregator.returns(aggregator4.address)
await labm
.connect(owner)
- .setWatchList([proxy1.address, proxy4.address], [oneLINK, oneLINK])
+ .setWatchList(
+ [proxy1.address, proxy4.address],
+ [oneLINK, oneLINK],
+ [oneLINK, oneLINK],
+ )
const tx = await labm
.connect(keeperRegistry)
.topUp([proxy1.address, proxy4.address])
@@ -759,7 +761,11 @@ describe('LinkAvailableBalanceMonitor', () => {
await aggregator4.mock.linkAvailableForPayment.returns(tenLINK)
await labm
.connect(owner)
- .setWatchList([proxy1.address, proxy4.address], [oneLINK, oneLINK])
+ .setWatchList(
+ [proxy1.address, proxy4.address],
+ [oneLINK, oneLINK],
+ [oneLINK, oneLINK],
+ )
const tx = await labm
.connect(keeperRegistry)
.topUp([proxy1.address, proxy4.address])
@@ -776,6 +782,7 @@ describe('LinkAvailableBalanceMonitor', () => {
.setWatchList(
[proxy1.address, directTarget1.address],
[oneLINK, oneLINK],
+ [oneLINK, oneLINK],
)
const tx = await labm
.connect(keeperRegistry)
@@ -790,25 +797,26 @@ describe('LinkAvailableBalanceMonitor', () => {
})
context('when partially funded', () => {
- it('Should fund as many addresses as possible', async () => {
+ it('Should fund as many addresses as possible T', async () => {
await lt.connect(owner).transfer(
labm.address,
- fiveLINK, // only enough LINK to fund 2 addresses
+ fourLINK, // only enough LINK to fund 2 addresses
)
+
+ await aggregator1.mock.linkAvailableForPayment.returns(twoLINK)
+ await aggregator2.mock.linkAvailableForPayment.returns(twoLINK)
+ await aggregator3.mock.linkAvailableForPayment.returns(zeroLINK)
+ await directTarget1.mock.linkAvailableForPayment.returns(zeroLINK)
+ await directTarget2.mock.linkAvailableForPayment.returns(zeroLINK)
+
+ h.assertLinkTokenBalance(lt, aggregator1.address, twoLINK)
+ h.assertLinkTokenBalance(lt, aggregator2.address, twoLINK)
+ h.assertLinkTokenBalance(lt, aggregator3.address, zeroLINK)
+ h.assertLinkTokenBalance(lt, directTarget1.address, zeroLINK)
+ h.assertLinkTokenBalance(lt, directTarget2.address, zeroLINK)
+
const tx = await labm.connect(keeperRegistry).topUp(watchListAddresses)
- await assertContractLinkBalances(
- twoLINK,
- twoLINK,
- zeroLINK,
- zeroLINK,
- zeroLINK,
- )
- await expect(tx)
- .to.emit(labm, 'TopUpSucceeded')
- .withArgs(proxy1.address)
- await expect(tx)
- .to.emit(labm, 'TopUpSucceeded')
- .withArgs(proxy2.address)
+ await expect(tx).to.emit(labm, 'TopUpSucceeded')
})
})
})
diff --git a/contracts/test/v0.8/dev/ArbitrumValidator.test.ts b/contracts/test/v0.8/dev/ArbitrumValidator.test.ts
index 2f95a6f6fb0..232eea95839 100644
--- a/contracts/test/v0.8/dev/ArbitrumValidator.test.ts
+++ b/contracts/test/v0.8/dev/ArbitrumValidator.test.ts
@@ -12,7 +12,7 @@ import { abi as arbitrumSequencerStatusRecorderAbi } from '../../../artifacts/sr
// @ts-ignore
import { abi as arbitrumInboxAbi } from '../../../artifacts/src/v0.8/vendor/arb-bridge-eth/v0.8.0-custom/contracts/bridge/interfaces/IInbox.sol/IInbox.json'
// @ts-ignore
-import { abi as aggregatorAbi } from '../../../artifacts/src/v0.8/interfaces/AggregatorV2V3Interface.sol/AggregatorV2V3Interface.json'
+import { abi as aggregatorAbi } from '../../../artifacts/src/v0.8/shared/interfaces/AggregatorV2V3Interface.sol/AggregatorV2V3Interface.json'
const truncateBigNumToAddress = (num: BigNumberish) => {
// Pad, then slice off '0x' prefix
diff --git a/contracts/test/v0.8/dev/OptimismValidator.test.ts b/contracts/test/v0.8/dev/OptimismValidator.test.ts
index 120b1057d14..ee69211f56d 100644
--- a/contracts/test/v0.8/dev/OptimismValidator.test.ts
+++ b/contracts/test/v0.8/dev/OptimismValidator.test.ts
@@ -8,7 +8,7 @@ import { abi as optimismSequencerStatusRecorderAbi } from '../../../artifacts/sr
// @ts-ignore
import { abi as optimismL1CrossDomainMessengerAbi } from '@eth-optimism/contracts/artifacts/contracts/L1/messaging/L1CrossDomainMessenger.sol'
// @ts-ignore
-import { abi as aggregatorAbi } from '../../../artifacts/src/v0.8/interfaces/AggregatorV2V3Interface.sol/AggregatorV2V3Interface.json'
+import { abi as aggregatorAbi } from '../../../artifacts/src/v0.8/shared/interfaces/AggregatorV2V3Interface.sol/AggregatorV2V3Interface.json'
describe('OptimismValidator', () => {
const GAS_LIMIT = BigNumber.from(1_900_000)
diff --git a/core/bridges/orm.go b/core/bridges/orm.go
index 96801f4484c..cfad1da836e 100644
--- a/core/bridges/orm.go
+++ b/core/bridges/orm.go
@@ -6,8 +6,8 @@ import (
"sync"
"time"
+ "github.com/jmoiron/sqlx"
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/auth"
"github.com/smartcontractkit/chainlink/v2/core/logger"
diff --git a/core/bridges/orm_test.go b/core/bridges/orm_test.go
index b110b4f519d..0b485764c8b 100644
--- a/core/bridges/orm_test.go
+++ b/core/bridges/orm_test.go
@@ -8,7 +8,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/auth"
"github.com/smartcontractkit/chainlink/v2/core/bridges"
diff --git a/core/cbor/cbor.go b/core/cbor/cbor.go
index 754e5729345..cc3f74e423e 100644
--- a/core/cbor/cbor.go
+++ b/core/cbor/cbor.go
@@ -17,7 +17,7 @@ func ParseDietCBOR(b []byte) (map[string]interface{}, error) {
b = autoAddMapDelimiters(b)
var m map[interface{}]interface{}
- if err := cbor.Unmarshal(b, &m); err != nil {
+ if _, err := cbor.UnmarshalFirst(b, &m); err != nil {
return nil, err
}
@@ -38,7 +38,8 @@ func ParseDietCBOR(b []byte) (map[string]interface{}, error) {
// "top-level map" requirement of "diet" CBOR.
func ParseDietCBORToStruct(b []byte, v interface{}) error {
b = autoAddMapDelimiters(b)
- return cbor.Unmarshal(b, v)
+ _, err := cbor.UnmarshalFirst(b, v)
+ return err
}
// ParseStandardCBOR parses CBOR in "standards compliant" mode.
@@ -49,7 +50,7 @@ func ParseStandardCBOR(b []byte) (a interface{}, err error) {
if len(b) == 0 {
return nil, nil
}
- if err = cbor.Unmarshal(b, &a); err != nil {
+ if _, err = cbor.UnmarshalFirst(b, &a); err != nil {
return nil, err
}
return
diff --git a/core/chains/cosmos/chain.go b/core/chains/cosmos/chain.go
deleted file mode 100644
index e11f95d356e..00000000000
--- a/core/chains/cosmos/chain.go
+++ /dev/null
@@ -1,317 +0,0 @@
-package cosmos
-
-import (
- "context"
- "crypto/rand"
- "fmt"
- "math/big"
- "time"
-
- "github.com/pelletier/go-toml/v2"
- "github.com/pkg/errors"
- "go.uber.org/multierr"
-
- sdk "github.com/cosmos/cosmos-sdk/types"
- bank "github.com/cosmos/cosmos-sdk/x/bank/types"
-
- "github.com/smartcontractkit/sqlx"
-
- relaychains "github.com/smartcontractkit/chainlink-relay/pkg/chains"
- "github.com/smartcontractkit/chainlink-relay/pkg/logger"
- "github.com/smartcontractkit/chainlink-relay/pkg/loop"
- "github.com/smartcontractkit/chainlink-relay/pkg/services"
-
- "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos/adapters"
- "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos/client"
- coscfg "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos/config"
- "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos/db"
- relaytypes "github.com/smartcontractkit/chainlink-relay/pkg/types"
-
- "github.com/smartcontractkit/chainlink/v2/core/chains/cosmos/cosmostxm"
- "github.com/smartcontractkit/chainlink/v2/core/services/pg"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay"
-)
-
-// defaultRequestTimeout is the default Cosmos client timeout.
-// Note that while the cosmos node is processing a heavy block,
-// requests can be delayed significantly (https://github.com/tendermint/tendermint/issues/6899),
-// however there's nothing we can do but wait until the block is processed.
-// So we set a fairly high timeout here.
-// TODO(BCI-979): Remove this, or make this configurable with the updated client.
-const defaultRequestTimeout = 30 * time.Second
-
-var (
- // ErrChainIDEmpty is returned when chain is required but was empty.
- ErrChainIDEmpty = errors.New("chain id empty")
- // ErrChainIDInvalid is returned when a chain id does not match any configured chains.
- ErrChainIDInvalid = errors.New("chain id does not match any local chains")
-)
-
-// Chain is a wrap for easy use in other places in the core node
-type Chain = adapters.Chain
-
-// ChainOpts holds options for configuring a Chain.
-type ChainOpts struct {
- QueryConfig pg.QConfig
- Logger logger.Logger
- DB *sqlx.DB
- KeyStore loop.Keystore
- EventBroadcaster pg.EventBroadcaster
-}
-
-func (o *ChainOpts) Validate() (err error) {
- required := func(s string) error {
- return fmt.Errorf("%s is required", s)
- }
- if o.QueryConfig == nil {
- err = multierr.Append(err, required("Config"))
- }
- if o.Logger == nil {
- err = multierr.Append(err, required("Logger'"))
- }
- if o.DB == nil {
- err = multierr.Append(err, required("DB"))
- }
- if o.KeyStore == nil {
- err = multierr.Append(err, required("KeyStore"))
- }
- if o.EventBroadcaster == nil {
- err = multierr.Append(err, required("EventBroadcaster"))
- }
- return
-}
-
-func NewChain(cfg *coscfg.TOMLConfig, opts ChainOpts) (adapters.Chain, error) {
- if !cfg.IsEnabled() {
- return nil, fmt.Errorf("cannot create new chain with ID %s, the chain is disabled", *cfg.ChainID)
- }
- c, err := newChain(*cfg.ChainID, cfg, opts.DB, opts.KeyStore, opts.QueryConfig, opts.EventBroadcaster, opts.Logger)
- if err != nil {
- return nil, err
- }
- return c, nil
-}
-
-var _ adapters.Chain = (*chain)(nil)
-
-type chain struct {
- services.StateMachine
- id string
- cfg *coscfg.TOMLConfig
- txm *cosmostxm.Txm
- lggr logger.Logger
-}
-
-func newChain(id string, cfg *coscfg.TOMLConfig, db *sqlx.DB, ks loop.Keystore, logCfg pg.QConfig, eb pg.EventBroadcaster, lggr logger.Logger) (*chain, error) {
- lggr = logger.With(lggr, "cosmosChainID", id)
- var ch = chain{
- id: id,
- cfg: cfg,
- lggr: logger.Named(lggr, "Chain"),
- }
- tc := func() (client.ReaderWriter, error) {
- return ch.getClient("")
- }
- gpe := client.NewMustGasPriceEstimator([]client.GasPricesEstimator{
- client.NewClosureGasPriceEstimator(func() (map[string]sdk.DecCoin, error) {
- return map[string]sdk.DecCoin{
- cfg.GasToken(): sdk.NewDecCoinFromDec(cfg.GasToken(), cfg.FallbackGasPrice()),
- }, nil
- }),
- }, lggr)
- ch.txm = cosmostxm.NewTxm(db, tc, *gpe, ch.id, cfg, ks, lggr, logCfg, eb)
-
- return &ch, nil
-}
-
-func (c *chain) Name() string {
- return c.lggr.Name()
-}
-
-func (c *chain) ID() string {
- return c.id
-}
-
-func (c *chain) ChainID() string {
- return c.id
-}
-
-func (c *chain) Config() coscfg.Config {
- return c.cfg
-}
-
-func (c *chain) TxManager() adapters.TxManager {
- return c.txm
-}
-
-func (c *chain) Reader(name string) (client.Reader, error) {
- return c.getClient(name)
-}
-
-// getClient returns a client, optionally requiring a specific node by name.
-func (c *chain) getClient(name string) (client.ReaderWriter, error) {
- var node db.Node
- if name == "" { // Any node
- nodes, err := c.cfg.ListNodes()
- if err != nil {
- return nil, fmt.Errorf("failed to list nodes: %w", err)
- }
- if len(nodes) == 0 {
- return nil, errors.New("no nodes available")
- }
- nodeIndex, err := rand.Int(rand.Reader, big.NewInt(int64(len(nodes))))
- if err != nil {
- return nil, fmt.Errorf("could not generate a random node index: %w", err)
- }
- node = nodes[nodeIndex.Int64()]
- } else { // Named node
- var err error
- node, err = c.cfg.GetNode(name)
- if err != nil {
- return nil, fmt.Errorf("failed to get node named %s: %w", name, err)
- }
- if node.CosmosChainID != c.id {
- return nil, fmt.Errorf("failed to create client for chain %s with node %s: wrong chain id %s", c.id, name, node.CosmosChainID)
- }
- }
- client, err := client.NewClient(c.id, node.TendermintURL, defaultRequestTimeout, logger.Named(c.lggr, "Client."+name))
- if err != nil {
- return nil, fmt.Errorf("failed to create client: %w", err)
- }
- c.lggr.Debugw("Created client", "name", node.Name, "tendermint-url", node.TendermintURL)
- return client, nil
-}
-
-// Start starts cosmos chain.
-func (c *chain) Start(ctx context.Context) error {
- return c.StartOnce("Chain", func() error {
- c.lggr.Debug("Starting")
- return c.txm.Start(ctx)
- })
-}
-
-func (c *chain) Close() error {
- return c.StopOnce("Chain", func() error {
- c.lggr.Debug("Stopping")
- return c.txm.Close()
- })
-}
-
-func (c *chain) Ready() error {
- return multierr.Combine(
- c.StateMachine.Ready(),
- c.txm.Ready(),
- )
-}
-
-func (c *chain) HealthReport() map[string]error {
- m := map[string]error{c.Name(): c.Healthy()}
- services.CopyHealth(m, c.txm.HealthReport())
- return m
-}
-
-// ChainService interface
-func (c *chain) GetChainStatus(ctx context.Context) (relaytypes.ChainStatus, error) {
- toml, err := c.cfg.TOMLString()
- if err != nil {
- return relaytypes.ChainStatus{}, err
- }
- return relaytypes.ChainStatus{
- ID: c.id,
- Enabled: *c.cfg.Enabled,
- Config: toml,
- }, nil
-}
-func (c *chain) ListNodeStatuses(ctx context.Context, pageSize int32, pageToken string) (stats []relaytypes.NodeStatus, nextPageToken string, total int, err error) {
- return relaychains.ListNodeStatuses(int(pageSize), pageToken, c.listNodeStatuses)
-}
-
-func (c *chain) Transact(ctx context.Context, from, to string, amount *big.Int, balanceCheck bool) error {
- fromAcc, err := sdk.AccAddressFromBech32(from)
- if err != nil {
- return fmt.Errorf("failed to parse from account: %s", fromAcc)
- }
- toAcc, err := sdk.AccAddressFromBech32(to)
- if err != nil {
- return fmt.Errorf("failed to parse from account: %s", toAcc)
- }
- coin := sdk.Coin{Amount: sdk.NewIntFromBigInt(amount), Denom: c.Config().GasToken()}
-
- txm := c.TxManager()
-
- if balanceCheck {
- var reader client.Reader
- reader, err = c.Reader("")
- if err != nil {
- return fmt.Errorf("chain unreachable: %v", err)
- }
- gasPrice, err2 := txm.GasPrice()
- if err2 != nil {
- return fmt.Errorf("gas price unavailable: %v", err2)
- }
-
- err = validateBalance(reader, gasPrice, fromAcc, coin)
- if err != nil {
- return fmt.Errorf("failed to validate balance: %v", err)
- }
- }
-
- sendMsg := bank.NewMsgSend(fromAcc, toAcc, sdk.Coins{coin})
- _, err = txm.Enqueue("", sendMsg)
- if err != nil {
- return fmt.Errorf("failed to enqueue tx: %w", err)
- }
- return nil
-}
-
-// TODO BCF-2602 statuses are static for non-evm chain and should be dynamic
-func (c *chain) listNodeStatuses(start, end int) ([]relaytypes.NodeStatus, int, error) {
- stats := make([]relaytypes.NodeStatus, 0)
- total := len(c.cfg.Nodes)
- if start >= total {
- return stats, total, relaychains.ErrOutOfRange
- }
- if end > total {
- end = total
- }
- nodes := c.cfg.Nodes[start:end]
- for _, node := range nodes {
- stat, err := nodeStatus(node, c.ChainID())
- if err != nil {
- return stats, total, err
- }
- stats = append(stats, stat)
- }
- return stats, total, nil
-}
-
-func nodeStatus(n *coscfg.Node, id relay.ChainID) (relaytypes.NodeStatus, error) {
- var s relaytypes.NodeStatus
- s.ChainID = id
- s.Name = *n.Name
- b, err := toml.Marshal(n)
- if err != nil {
- return relaytypes.NodeStatus{}, err
- }
- s.Config = string(b)
- return s, nil
-}
-
-// maxGasUsedTransfer is an upper bound on how much gas we expect a MsgSend for a single coin to use.
-const maxGasUsedTransfer = 100_000
-
-// validateBalance validates that fromAddr's balance can cover coin, including fees at gasPrice.
-func validateBalance(reader client.Reader, gasPrice sdk.DecCoin, fromAddr sdk.AccAddress, coin sdk.Coin) error {
- balance, err := reader.Balance(fromAddr, coin.GetDenom())
- if err != nil {
- return err
- }
-
- fee := gasPrice.Amount.MulInt64(maxGasUsedTransfer).RoundInt()
- need := coin.Amount.Add(fee)
-
- if balance.Amount.LT(need) {
- return errors.Errorf("balance %q is too low for this transaction to be executed: need %s total, including %s fee", balance, need, fee)
- }
- return nil
-}
diff --git a/core/chains/cosmos/cosmostxm/helpers_test.go b/core/chains/cosmos/cosmostxm/helpers_test.go
deleted file mode 100644
index a2dfbbeed84..00000000000
--- a/core/chains/cosmos/cosmostxm/helpers_test.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package cosmostxm
-
-import "golang.org/x/exp/maps"
-
-func (ka *keystoreAdapter) Accounts() ([]string, error) {
- ka.mutex.Lock()
- defer ka.mutex.Unlock()
- err := ka.updateMappingLocked()
- if err != nil {
- return nil, err
- }
- addresses := maps.Keys(ka.addressToPubKey)
-
- return addresses, nil
-}
diff --git a/core/chains/cosmos/cosmostxm/key_wrapper.go b/core/chains/cosmos/cosmostxm/key_wrapper.go
deleted file mode 100644
index e03dfd89b89..00000000000
--- a/core/chains/cosmos/cosmostxm/key_wrapper.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package cosmostxm
-
-import (
- "bytes"
- "context"
-
- "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
- cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
-)
-
-// KeyWrapper uses a keystoreAdapter to implement the cosmos-sdk PrivKey interface for a specific key.
-type KeyWrapper struct {
- adapter *keystoreAdapter
- account string
-}
-
-var _ cryptotypes.PrivKey = &KeyWrapper{}
-
-func NewKeyWrapper(adapter *keystoreAdapter, account string) *KeyWrapper {
- return &KeyWrapper{
- adapter: adapter,
- account: account,
- }
-}
-
-func (a *KeyWrapper) Bytes() []byte {
- // don't expose the private key.
- return nil
-}
-
-func (a *KeyWrapper) Sign(msg []byte) ([]byte, error) {
- return a.adapter.Sign(context.Background(), a.account, msg)
-}
-
-func (a *KeyWrapper) PubKey() cryptotypes.PubKey {
- pubKey, err := a.adapter.PubKey(a.account)
- if err != nil {
- // return an empty pubkey if it's not found.
- return &secp256k1.PubKey{Key: []byte{}}
- }
- return pubKey
-}
-
-func (a *KeyWrapper) Equals(other cryptotypes.LedgerPrivKey) bool {
- return bytes.Equal(a.PubKey().Bytes(), other.PubKey().Bytes())
-}
-
-func (a *KeyWrapper) Type() string {
- return "secp256k1"
-}
-
-func (a *KeyWrapper) Reset() {
- // no-op
-}
-
-func (a *KeyWrapper) String() string {
- return ""
-}
-
-func (a *KeyWrapper) ProtoMessage() {
- // no-op
-}
diff --git a/core/chains/cosmos/cosmostxm/keystore_adapter.go b/core/chains/cosmos/cosmostxm/keystore_adapter.go
deleted file mode 100644
index 6b360dde98c..00000000000
--- a/core/chains/cosmos/cosmostxm/keystore_adapter.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package cosmostxm
-
-import (
- "context"
- "crypto/sha256"
- "encoding/hex"
- "sync"
-
- "github.com/cometbft/cometbft/crypto"
- "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
- cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
- "github.com/cosmos/cosmos-sdk/types/bech32"
- "github.com/pkg/errors"
- "golang.org/x/crypto/ripemd160" //nolint: staticcheck
-
- "github.com/smartcontractkit/chainlink-relay/pkg/loop"
-)
-
-type accountInfo struct {
- Account string
- PubKey *secp256k1.PubKey
-}
-
-// keystoreAdapter adapts a Cosmos loop.Keystore to translate public keys into bech32-prefixed account addresses.
-type keystoreAdapter struct {
- keystore loop.Keystore
- accountPrefix string
- mutex sync.RWMutex
- addressToPubKey map[string]*accountInfo
-}
-
-func newKeystoreAdapter(keystore loop.Keystore, accountPrefix string) *keystoreAdapter {
- return &keystoreAdapter{
- keystore: keystore,
- accountPrefix: accountPrefix,
- addressToPubKey: make(map[string]*accountInfo),
- }
-}
-
-func (ka *keystoreAdapter) updateMappingLocked() error {
- accounts, err := ka.keystore.Accounts(context.Background())
- if err != nil {
- return err
- }
-
- // similar to cosmos-sdk, cache and re-use calculated bech32 addresses to prevent duplicated work.
- // ref: https://github.com/cosmos/cosmos-sdk/blob/3b509c187e1643757f5ef8a0b5ae3decca0c7719/types/address.go#L705
-
- type cacheEntry struct {
- bech32Addr string
- accountInfo *accountInfo
- }
- accountCache := make(map[string]cacheEntry, len(ka.addressToPubKey))
- for bech32Addr, accountInfo := range ka.addressToPubKey {
- accountCache[accountInfo.Account] = cacheEntry{bech32Addr: bech32Addr, accountInfo: accountInfo}
- }
-
- addressToPubKey := make(map[string]*accountInfo, len(accounts))
- for _, account := range accounts {
- if prevEntry, ok := accountCache[account]; ok {
- addressToPubKey[prevEntry.bech32Addr] = prevEntry.accountInfo
- continue
- }
- pubKeyBytes, err := hex.DecodeString(account)
- if err != nil {
- return err
- }
-
- if len(pubKeyBytes) != secp256k1.PubKeySize {
- return errors.New("length of pubkey is incorrect")
- }
-
- sha := sha256.Sum256(pubKeyBytes)
- hasherRIPEMD160 := ripemd160.New()
- _, _ = hasherRIPEMD160.Write(sha[:])
- address := crypto.Address(hasherRIPEMD160.Sum(nil))
-
- bech32Addr, err := bech32.ConvertAndEncode(ka.accountPrefix, address)
- if err != nil {
- return err
- }
-
- addressToPubKey[bech32Addr] = &accountInfo{
- Account: account,
- PubKey: &secp256k1.PubKey{Key: pubKeyBytes},
- }
- }
-
- ka.addressToPubKey = addressToPubKey
- return nil
-}
-
-func (ka *keystoreAdapter) lookup(id string) (*accountInfo, error) {
- ka.mutex.RLock()
- ai, ok := ka.addressToPubKey[id]
- ka.mutex.RUnlock()
- if !ok {
- // try updating the mapping once, incase there was an update on the keystore.
- ka.mutex.Lock()
- err := ka.updateMappingLocked()
- if err != nil {
- ka.mutex.Unlock()
- return nil, err
- }
- ai, ok = ka.addressToPubKey[id]
- ka.mutex.Unlock()
- if !ok {
- return nil, errors.New("No such id")
- }
- }
- return ai, nil
-}
-
-func (ka *keystoreAdapter) Sign(ctx context.Context, id string, hash []byte) ([]byte, error) {
- accountInfo, err := ka.lookup(id)
- if err != nil {
- return nil, err
- }
- return ka.keystore.Sign(ctx, accountInfo.Account, hash)
-}
-
-// Returns the cosmos PubKey associated with the prefixed address.
-func (ka *keystoreAdapter) PubKey(address string) (cryptotypes.PubKey, error) {
- accountInfo, err := ka.lookup(address)
- if err != nil {
- return nil, err
- }
- return accountInfo.PubKey, nil
-}
diff --git a/core/chains/cosmos/cosmostxm/main_test.go b/core/chains/cosmos/cosmostxm/main_test.go
deleted file mode 100644
index bc340afa430..00000000000
--- a/core/chains/cosmos/cosmostxm/main_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package cosmostxm
-
-import (
- "os"
- "testing"
-
- "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos/params"
-)
-
-func TestMain(m *testing.M) {
- params.InitCosmosSdk(
- /* bech32Prefix= */ "wasm",
- /* token= */ "cosm",
- )
- code := m.Run()
- os.Exit(code)
-}
diff --git a/core/chains/cosmos/cosmostxm/orm.go b/core/chains/cosmos/cosmostxm/orm.go
deleted file mode 100644
index cc9b179cce5..00000000000
--- a/core/chains/cosmos/cosmostxm/orm.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package cosmostxm
-
-import (
- "database/sql"
-
- "github.com/pkg/errors"
-
- "github.com/smartcontractkit/sqlx"
-
- "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos/adapters"
- "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos/db"
-
- "github.com/smartcontractkit/chainlink-relay/pkg/logger"
-
- "github.com/smartcontractkit/chainlink/v2/core/services/pg"
-)
-
-// ORM manages the data model for cosmos tx management.
-type ORM struct {
- chainID string
- q pg.Q
-}
-
-// NewORM creates an ORM scoped to chainID.
-func NewORM(chainID string, db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig) *ORM {
- namedLogger := logger.Named(lggr, "Configs")
- q := pg.NewQ(db, namedLogger, cfg)
- return &ORM{
- chainID: chainID,
- q: q,
- }
-}
-
-// InsertMsg inserts a cosmos msg, assumed to be a serialized cosmos ExecuteContractMsg.
-func (o *ORM) InsertMsg(contractID, typeURL string, msg []byte, qopts ...pg.QOpt) (int64, error) {
- var tm adapters.Msg
- q := o.q.WithOpts(qopts...)
- err := q.Get(&tm, `INSERT INTO cosmos_msgs (contract_id, type, raw, state, cosmos_chain_id, created_at, updated_at)
- VALUES ($1, $2, $3, $4, $5, NOW(), NOW()) RETURNING *`, contractID, typeURL, msg, db.Unstarted, o.chainID)
- if err != nil {
- return 0, err
- }
- return tm.ID, nil
-}
-
-// UpdateMsgsContract updates messages for the given contract.
-func (o *ORM) UpdateMsgsContract(contractID string, from, to db.State, qopts ...pg.QOpt) error {
- q := o.q.WithOpts(qopts...)
- _, err := q.Exec(`UPDATE cosmos_msgs SET state = $1, updated_at = NOW()
- WHERE cosmos_chain_id = $2 AND contract_id = $3 AND state = $4`, to, o.chainID, contractID, from)
- if err != nil {
- return err
- }
- return nil
-}
-
-// GetMsgsState returns the oldest messages with a given state up to limit.
-func (o *ORM) GetMsgsState(state db.State, limit int64, qopts ...pg.QOpt) (adapters.Msgs, error) {
- if limit < 1 {
- return adapters.Msgs{}, errors.New("limit must be greater than 0")
- }
- q := o.q.WithOpts(qopts...)
- var msgs adapters.Msgs
- if err := q.Select(&msgs, `SELECT * FROM cosmos_msgs WHERE state = $1 AND cosmos_chain_id = $2 ORDER BY id ASC LIMIT $3`, state, o.chainID, limit); err != nil {
- return nil, err
- }
- return msgs, nil
-}
-
-// GetMsgs returns any messages matching ids.
-func (o *ORM) GetMsgs(ids ...int64) (adapters.Msgs, error) {
- var msgs adapters.Msgs
- if err := o.q.Select(&msgs, `SELECT * FROM cosmos_msgs WHERE id = ANY($1)`, ids); err != nil {
- return nil, err
- }
- return msgs, nil
-}
-
-// UpdateMsgs updates msgs with the given ids.
-// Note state transitions are validated at the db level.
-func (o *ORM) UpdateMsgs(ids []int64, state db.State, txHash *string, qopts ...pg.QOpt) error {
- if state == db.Broadcasted && txHash == nil {
- return errors.New("txHash is required when updating to broadcasted")
- }
- q := o.q.WithOpts(qopts...)
- var res sql.Result
- var err error
- if state == db.Broadcasted {
- res, err = q.Exec(`UPDATE cosmos_msgs SET state = $1, updated_at = NOW(), tx_hash = $2 WHERE id = ANY($3)`, state, *txHash, ids)
- } else {
- res, err = q.Exec(`UPDATE cosmos_msgs SET state = $1, updated_at = NOW() WHERE id = ANY($2)`, state, ids)
- }
- if err != nil {
- return err
- }
- count, err := res.RowsAffected()
- if err != nil {
- return err
- }
- if int(count) != len(ids) {
- return errors.Errorf("expected %d records updated, got %d", len(ids), count)
- }
- return nil
-}
diff --git a/core/chains/cosmos/cosmostxm/orm_test.go b/core/chains/cosmos/cosmostxm/orm_test.go
deleted file mode 100644
index 3cee25bac12..00000000000
--- a/core/chains/cosmos/cosmostxm/orm_test.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package cosmostxm
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- cosmosdb "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos/db"
-
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/cosmostest"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
- "github.com/smartcontractkit/chainlink/v2/core/logger"
-)
-
-func TestORM(t *testing.T) {
- db := pgtest.NewSqlxDB(t)
- lggr := logger.TestLogger(t)
- logCfg := pgtest.NewQConfig(true)
- chainID := cosmostest.RandomChainID()
- o := NewORM(chainID, db, lggr, logCfg)
-
- // Create
- mid, err := o.InsertMsg("0x123", "", []byte("hello"))
- require.NoError(t, err)
- assert.NotEqual(t, 0, int(mid))
-
- // Read
- unstarted, err := o.GetMsgsState(cosmosdb.Unstarted, 5)
- require.NoError(t, err)
- require.Equal(t, 1, len(unstarted))
- assert.Equal(t, "hello", string(unstarted[0].Raw))
- assert.Equal(t, chainID, unstarted[0].ChainID)
- t.Log(unstarted[0].UpdatedAt, unstarted[0].CreatedAt)
-
- // Limit
- unstarted, err = o.GetMsgsState(cosmosdb.Unstarted, 0)
- assert.Error(t, err)
- assert.Empty(t, unstarted)
- unstarted, err = o.GetMsgsState(cosmosdb.Unstarted, -1)
- assert.Error(t, err)
- assert.Empty(t, unstarted)
- mid2, err := o.InsertMsg("0xabc", "", []byte("test"))
- require.NoError(t, err)
- assert.NotEqual(t, 0, int(mid2))
- unstarted, err = o.GetMsgsState(cosmosdb.Unstarted, 1)
- require.NoError(t, err)
- require.Equal(t, 1, len(unstarted))
- assert.Equal(t, "hello", string(unstarted[0].Raw))
- assert.Equal(t, chainID, unstarted[0].ChainID)
- unstarted, err = o.GetMsgsState(cosmosdb.Unstarted, 2)
- require.NoError(t, err)
- require.Equal(t, 2, len(unstarted))
- assert.Equal(t, "test", string(unstarted[1].Raw))
- assert.Equal(t, chainID, unstarted[1].ChainID)
-
- // Update
- txHash := "123"
- err = o.UpdateMsgs([]int64{mid}, cosmosdb.Started, &txHash)
- require.NoError(t, err)
- err = o.UpdateMsgs([]int64{mid}, cosmosdb.Broadcasted, &txHash)
- require.NoError(t, err)
- broadcasted, err := o.GetMsgsState(cosmosdb.Broadcasted, 5)
- require.NoError(t, err)
- require.Equal(t, 1, len(broadcasted))
- assert.Equal(t, broadcasted[0].Raw, unstarted[0].Raw)
- require.NotNil(t, broadcasted[0].TxHash)
- assert.Equal(t, *broadcasted[0].TxHash, txHash)
- assert.Equal(t, chainID, broadcasted[0].ChainID)
-
- err = o.UpdateMsgs([]int64{mid}, cosmosdb.Confirmed, nil)
- require.NoError(t, err)
- confirmed, err := o.GetMsgsState(cosmosdb.Confirmed, 5)
- require.NoError(t, err)
- require.Equal(t, 1, len(confirmed))
-}
diff --git a/core/chains/cosmos/cosmostxm/txm.go b/core/chains/cosmos/cosmostxm/txm.go
deleted file mode 100644
index 712e1b8fc73..00000000000
--- a/core/chains/cosmos/cosmostxm/txm.go
+++ /dev/null
@@ -1,542 +0,0 @@
-package cosmostxm
-
-import (
- "cmp"
- "context"
- "encoding/hex"
- "fmt"
- "slices"
- "strings"
- "time"
-
- "github.com/gogo/protobuf/proto"
- "github.com/pkg/errors"
-
- "github.com/smartcontractkit/sqlx"
-
- wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types"
- "github.com/cometbft/cometbft/crypto/tmhash"
- sdk "github.com/cosmos/cosmos-sdk/types"
- txtypes "github.com/cosmos/cosmos-sdk/types/tx"
- "github.com/cosmos/cosmos-sdk/x/bank/types"
-
- "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos"
- "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos/adapters"
- cosmosclient "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos/client"
- coscfg "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos/config"
- "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos/db"
-
- "github.com/smartcontractkit/chainlink-relay/pkg/logger"
- "github.com/smartcontractkit/chainlink-relay/pkg/loop"
- "github.com/smartcontractkit/chainlink-relay/pkg/services"
-
- "github.com/smartcontractkit/chainlink/v2/core/services/pg"
- "github.com/smartcontractkit/chainlink/v2/core/utils"
-)
-
-var (
- _ services.Service = (*Txm)(nil)
- _ adapters.TxManager = (*Txm)(nil)
-)
-
-// Txm manages transactions for the cosmos blockchain.
-type Txm struct {
- services.StateMachine
- eb pg.EventBroadcaster
- sub pg.Subscription
- orm *ORM
- lggr logger.Logger
- tc func() (cosmosclient.ReaderWriter, error)
- keystoreAdapter *keystoreAdapter
- stop, done chan struct{}
- cfg coscfg.Config
- gpe cosmosclient.ComposedGasPriceEstimator
-}
-
-// NewTxm creates a txm. Uses simulation so should only be used to send txes to trusted contracts i.e. OCR.
-func NewTxm(db *sqlx.DB, tc func() (cosmosclient.ReaderWriter, error), gpe cosmosclient.ComposedGasPriceEstimator, chainID string, cfg coscfg.Config, ks loop.Keystore, lggr logger.Logger, logCfg pg.QConfig, eb pg.EventBroadcaster) *Txm {
- lggr = logger.Named(lggr, "Txm")
- keystoreAdapter := newKeystoreAdapter(ks, cfg.Bech32Prefix())
- return &Txm{
- eb: eb,
- orm: NewORM(chainID, db, lggr, logCfg),
- lggr: lggr,
- tc: tc,
- keystoreAdapter: keystoreAdapter,
- stop: make(chan struct{}),
- done: make(chan struct{}),
- cfg: cfg,
- gpe: gpe,
- }
-}
-
-// Start subscribes to pg notifications about cosmos msg inserts and processes them.
-func (txm *Txm) Start(context.Context) error {
- return txm.StartOnce("Txm", func() error {
- sub, err := txm.eb.Subscribe(pg.ChannelInsertOnCosmosMsg, "")
- if err != nil {
- return err
- }
- txm.sub = sub
- go txm.run()
- return nil
- })
-}
-
-func (txm *Txm) confirmAnyUnconfirmed(ctx context.Context) {
- // Confirm any broadcasted but not confirmed txes.
- // This is an edge case if we crash after having broadcasted but before we confirm.
- for {
- broadcasted, err := txm.orm.GetMsgsState(db.Broadcasted, txm.cfg.MaxMsgsPerBatch())
- if err != nil {
- // Should never happen but if so, theoretically can retry with a reboot
- logger.Criticalw(txm.lggr, "unable to look for broadcasted but unconfirmed txes", "err", err)
- return
- }
- if len(broadcasted) == 0 {
- return
- }
- tc, err := txm.tc()
- if err != nil {
- logger.Criticalw(txm.lggr, "unable to get client for handling broadcasted but unconfirmed txes", "count", len(broadcasted), "err", err)
- return
- }
- msgsByTxHash := make(map[string]adapters.Msgs)
- for _, msg := range broadcasted {
- msgsByTxHash[*msg.TxHash] = append(msgsByTxHash[*msg.TxHash], msg)
- }
- for txHash, msgs := range msgsByTxHash {
- maxPolls, pollPeriod := txm.confirmPollConfig()
- err := txm.confirmTx(ctx, tc, txHash, msgs.GetIDs(), maxPolls, pollPeriod)
- if err != nil {
- txm.lggr.Errorw("unable to confirm broadcasted but unconfirmed txes", "err", err, "txhash", txHash)
- if ctx.Err() != nil {
- return
- }
- }
- }
- }
-}
-
-func (txm *Txm) run() {
- defer close(txm.done)
- ctx, cancel := utils.StopChan(txm.stop).NewCtx()
- defer cancel()
- txm.confirmAnyUnconfirmed(ctx)
- // Jitter in case we have multiple cosmos chains each with their own client.
- tick := time.After(utils.WithJitter(txm.cfg.BlockRate()))
- for {
- select {
- case <-txm.sub.Events():
- txm.sendMsgBatch(ctx)
- case <-tick:
- txm.sendMsgBatch(ctx)
- tick = time.After(utils.WithJitter(txm.cfg.BlockRate()))
- case <-txm.stop:
- return
- }
- }
-}
-
-var (
- typeMsgSend = sdk.MsgTypeURL(&types.MsgSend{})
- typeMsgExecuteContract = sdk.MsgTypeURL(&wasmtypes.MsgExecuteContract{})
-)
-
-func unmarshalMsg(msgType string, raw []byte) (sdk.Msg, string, error) {
- switch msgType {
- case typeMsgSend:
- var ms types.MsgSend
- err := ms.Unmarshal(raw)
- if err != nil {
- return nil, "", err
- }
- return &ms, ms.FromAddress, nil
- case typeMsgExecuteContract:
- var ms wasmtypes.MsgExecuteContract
- err := ms.Unmarshal(raw)
- if err != nil {
- return nil, "", err
- }
- return &ms, ms.Sender, nil
- }
- return nil, "", errors.Errorf("unrecognized message type: %s", msgType)
-}
-
-type msgValidator struct {
- cutoff time.Time
- expired, valid adapters.Msgs
-}
-
-func (e *msgValidator) add(msg adapters.Msg) {
- if msg.CreatedAt.Before(e.cutoff) {
- e.expired = append(e.expired, msg)
- } else {
- e.valid = append(e.valid, msg)
- }
-}
-
-func (e *msgValidator) sortValid() {
- slices.SortFunc(e.valid, func(a, b adapters.Msg) int {
- ac, bc := a.CreatedAt, b.CreatedAt
- if ac.Equal(bc) {
- return cmp.Compare(a.ID, b.ID)
- }
- if ac.After(bc) {
- return 1
- }
- return -1 // ac.Before(bc)
- })
-}
-
-func (txm *Txm) sendMsgBatch(ctx context.Context) {
- msgs := msgValidator{cutoff: time.Now().Add(-txm.cfg.TxMsgTimeout())}
- err := txm.orm.q.Transaction(func(tx pg.Queryer) error {
- // There may be leftover Started messages after a crash or failed send attempt.
- started, err := txm.orm.GetMsgsState(db.Started, txm.cfg.MaxMsgsPerBatch(), pg.WithQueryer(tx))
- if err != nil {
- txm.lggr.Errorw("unable to read unstarted msgs", "err", err)
- return err
- }
- if limit := txm.cfg.MaxMsgsPerBatch() - int64(len(started)); limit > 0 {
- // Use the remaining batch budget for Unstarted
- unstarted, err := txm.orm.GetMsgsState(db.Unstarted, limit, pg.WithQueryer(tx)) //nolint
- if err != nil {
- txm.lggr.Errorw("unable to read unstarted msgs", "err", err)
- return err
- }
- for _, msg := range unstarted {
- msgs.add(msg)
- }
- // Update valid, Unstarted messages to Started
- err = txm.orm.UpdateMsgs(msgs.valid.GetIDs(), db.Started, nil, pg.WithQueryer(tx))
- if err != nil {
- // Assume transient db error retry
- txm.lggr.Errorw("unable to mark unstarted txes as started", "err", err)
- return err
- }
- }
- for _, msg := range started {
- msgs.add(msg)
- }
- // Update expired messages (Unstarted or Started) to Errored
- err = txm.orm.UpdateMsgs(msgs.expired.GetIDs(), db.Errored, nil, pg.WithQueryer(tx))
- if err != nil {
- // Assume transient db error retry
- txm.lggr.Errorw("unable to mark expired txes as errored", "err", err)
- return err
- }
- return nil
- })
- if err != nil {
- return
- }
- if len(msgs.valid) == 0 {
- return
- }
- msgs.sortValid()
- txm.lggr.Debugw("building a batch", "not expired", msgs.valid, "marked expired", msgs.expired)
- var msgsByFrom = make(map[string]adapters.Msgs)
- for _, m := range msgs.valid {
- msg, sender, err2 := unmarshalMsg(m.Type, m.Raw)
- if err2 != nil {
- // Should be impossible given the check in Enqueue
- logger.Criticalw(txm.lggr, "Failed to unmarshal msg, skipping", "err", err2, "msg", m)
- continue
- }
- m.DecodedMsg = msg
- _, err2 = sdk.AccAddressFromBech32(sender)
- if err2 != nil {
- // Should never happen, we parse sender on Enqueue
- logger.Criticalw(txm.lggr, "Unable to parse sender", "err", err2, "sender", sender)
- continue
- }
- msgsByFrom[sender] = append(msgsByFrom[sender], m)
- }
-
- txm.lggr.Debugw("msgsByFrom", "msgsByFrom", msgsByFrom)
- gasPrice, err := txm.GasPrice()
- if err != nil {
- // Should be impossible
- logger.Criticalw(txm.lggr, "Failed to get gas price", "err", err)
- return
- }
- for s, msgs := range msgsByFrom {
- sender, _ := sdk.AccAddressFromBech32(s) // Already checked validity above
- err := txm.sendMsgBatchFromAddress(ctx, gasPrice, sender, msgs)
- if err != nil {
- txm.lggr.Errorw("Could not send message batch", "err", err, "from", sender.String())
- continue
- }
- if ctx.Err() != nil {
- return
- }
- }
-
-}
-
-func (txm *Txm) sendMsgBatchFromAddress(ctx context.Context, gasPrice sdk.DecCoin, sender sdk.AccAddress, msgs adapters.Msgs) error {
- tc, err := txm.tc()
- if err != nil {
- logger.Criticalw(txm.lggr, "unable to get client", "err", err)
- return err
- }
- an, sn, err := tc.Account(sender)
- if err != nil {
- txm.lggr.Warnw("unable to read account", "err", err, "from", sender.String())
- // If we can't read the account, assume transient api issues and leave msgs unstarted
- // to retry on next poll.
- return err
- }
-
- txm.lggr.Debugw("simulating batch", "from", sender, "msgs", msgs, "seqnum", sn)
- simResults, err := tc.BatchSimulateUnsigned(msgs.GetSimMsgs(), sn)
- if err != nil {
- txm.lggr.Warnw("unable to simulate", "err", err, "from", sender.String())
- // If we can't simulate assume transient api issue and retry on next poll.
- // Note one rare scenario in which this can happen: the cosmos node misbehaves
- // in that it confirms a txhash is present but still gives an old seq num.
- // This is benign as the next retry will succeeds.
- return err
- }
- txm.lggr.Debugw("simulation results", "from", sender, "succeeded", simResults.Succeeded, "failed", simResults.Failed)
- err = txm.orm.UpdateMsgs(simResults.Failed.GetSimMsgsIDs(), db.Errored, nil)
- if err != nil {
- txm.lggr.Errorw("unable to mark failed sim txes as errored", "err", err, "from", sender.String())
- // If we can't mark them as failed retry on next poll. Presumably same ones will fail.
- return err
- }
-
- // Continue if there are no successful txes
- if len(simResults.Succeeded) == 0 {
- txm.lggr.Warnw("all sim msgs errored, not sending tx", "from", sender.String())
- return errors.New("all sim msgs errored")
- }
- // Get the gas limit for the successful batch
- s, err := tc.SimulateUnsigned(simResults.Succeeded.GetMsgs(), sn)
- if err != nil {
- // In the OCR context this should only happen upon stale report
- txm.lggr.Warnw("unexpected failure after successful simulation", "err", err)
- return err
- }
- gasLimit := s.GasInfo.GasUsed
-
- lb, err := tc.LatestBlock()
- if err != nil {
- txm.lggr.Warnw("unable to get latest block", "err", err, "from", sender.String())
- // Assume transient api issue and retry.
- return err
- }
- header, timeout := lb.SdkBlock.Header.Height, txm.cfg.BlocksUntilTxTimeout()
- if header < 0 {
- return fmt.Errorf("invalid negative header height: %d", header)
- } else if timeout < 0 {
- return fmt.Errorf("invalid negative blocks until tx timeout: %d", timeout)
- }
- timeoutHeight := uint64(header) + uint64(timeout)
- signedTx, err := tc.CreateAndSign(simResults.Succeeded.GetMsgs(), an, sn, gasLimit, txm.cfg.GasLimitMultiplier(),
- gasPrice, NewKeyWrapper(txm.keystoreAdapter, sender.String()), timeoutHeight)
- if err != nil {
- txm.lggr.Errorw("unable to sign tx", "err", err, "from", sender.String())
- return err
- }
-
- // We need to ensure that we either broadcast successfully and mark the tx as
- // broadcasted OR we do not broadcast successfully and we do not mark it as broadcasted.
- // We do this by first marking it broadcasted then rolling back if the broadcast api call fails.
- // There is still a small chance of network failure or node/db crash after broadcasting but before committing the tx,
- // in which case the msgs would be picked up again and re-broadcast, ensuring at-least once delivery.
- var resp *txtypes.BroadcastTxResponse
- err = txm.orm.q.Transaction(func(tx pg.Queryer) error {
- txHash := strings.ToUpper(hex.EncodeToString(tmhash.Sum(signedTx)))
- err = txm.orm.UpdateMsgs(simResults.Succeeded.GetSimMsgsIDs(), db.Broadcasted, &txHash, pg.WithQueryer(tx))
- if err != nil {
- return err
- }
-
- txm.lggr.Infow("broadcasting tx", "from", sender, "msgs", simResults.Succeeded, "gasLimit", gasLimit, "gasPrice", gasPrice.String(), "timeoutHeight", timeoutHeight, "hash", txHash)
- resp, err = tc.Broadcast(signedTx, txtypes.BroadcastMode_BROADCAST_MODE_SYNC)
- if err != nil {
- // Rollback marking as broadcasted
- // Note can happen if the node's mempool is full, where we expect errCode 20.
- return err
- }
- if resp.TxResponse == nil {
- // Rollback marking as broadcasted
- return errors.New("unexpected nil tx response")
- }
- if resp.TxResponse.TxHash != txHash {
- // Should never happen
- logger.Criticalw(txm.lggr, "txhash mismatch", "got", resp.TxResponse.TxHash, "want", txHash)
- }
- return nil
- })
- if err != nil {
- txm.lggr.Errorw("error broadcasting tx", "err", err, "from", sender.String())
- // Was unable to broadcast, retry on next poll
- return err
- }
-
- maxPolls, pollPeriod := txm.confirmPollConfig()
- if err := txm.confirmTx(ctx, tc, resp.TxResponse.TxHash, simResults.Succeeded.GetSimMsgsIDs(), maxPolls, pollPeriod); err != nil {
- txm.lggr.Errorw("error confirming tx", "err", err, "hash", resp.TxResponse.TxHash)
- return err
- }
-
- return nil
-}
-
-func (txm *Txm) confirmPollConfig() (maxPolls int, pollPeriod time.Duration) {
- blocks := txm.cfg.BlocksUntilTxTimeout()
- blockPeriod := txm.cfg.BlockRate()
- pollPeriod = txm.cfg.ConfirmPollPeriod()
- if pollPeriod == 0 {
- // don't divide by zero
- maxPolls = 1
- } else {
- maxPolls = int((time.Duration(blocks) * blockPeriod) / pollPeriod)
- }
- return
-}
-
-func (txm *Txm) confirmTx(ctx context.Context, tc cosmosclient.Reader, txHash string, broadcasted []int64, maxPolls int, pollPeriod time.Duration) error {
- // We either mark these broadcasted txes as confirmed or errored.
- // Confirmed: we see the txhash onchain. There are no reorgs in cosmos chains.
- // Errored: we do not see the txhash onchain after waiting for N blocks worth
- // of time (plus a small buffer to account for block time variance) where N
- // is TimeoutHeight - HeightAtBroadcast. In other words, if we wait for that long
- // and the tx is not confirmed, we know it has timed out.
- for tries := 0; tries < maxPolls; tries++ {
- // Jitter in-case we're confirming multiple txes in parallel for different keys
- select {
- case <-ctx.Done():
- return ctx.Err()
- case <-time.After(utils.WithJitter(pollPeriod)):
- }
- // Confirm that this tx is onchain, ensuring the sequence number has incremented
- // so we can build a new batch
- tx, err := tc.Tx(txHash)
- if err != nil {
- if strings.Contains(err.Error(), "not found") {
- txm.lggr.Infow("txhash not found yet, still confirming", "hash", txHash)
- } else {
- txm.lggr.Errorw("error looking for hash of tx", "err", err, "hash", txHash)
- }
- continue
- }
- // Sanity check
- if tx.TxResponse == nil || tx.TxResponse.TxHash != txHash {
- txm.lggr.Errorw("error looking for hash of tx, unexpected response", "tx", tx, "hash", txHash)
- continue
- }
-
- txm.lggr.Infow("successfully sent batch", "hash", txHash, "msgs", broadcasted)
- // If confirmed mark these as completed.
- err = txm.orm.UpdateMsgs(broadcasted, db.Confirmed, nil)
- if err != nil {
- return err
- }
- return nil
- }
- txm.lggr.Errorw("unable to confirm tx after timeout period, marking errored", "hash", txHash)
- // If we are unable to confirm the tx after the timeout period
- // mark these msgs as errored
- err := txm.orm.UpdateMsgs(broadcasted, db.Errored, nil)
- if err != nil {
- txm.lggr.Errorw("unable to mark timed out txes as errored", "err", err, "txes", broadcasted, "num", len(broadcasted))
- return err
- }
- return nil
-}
-
-// Enqueue enqueue a msg destined for the cosmos chain.
-func (txm *Txm) Enqueue(contractID string, msg sdk.Msg) (int64, error) {
- typeURL, raw, err := txm.marshalMsg(msg)
- if err != nil {
- return 0, err
- }
-
- // We could consider simulating here too, but that would
- // introduce another network call and essentially double
- // the enqueue time. Enqueue is used in the context of OCRs Transmit
- // and must be fast, so we do the minimum.
-
- var id int64
- err = txm.orm.q.Transaction(func(tx pg.Queryer) (err error) {
- // cancel any unstarted msgs (normally just one)
- err = txm.orm.UpdateMsgsContract(contractID, db.Unstarted, db.Errored, pg.WithQueryer(tx))
- if err != nil {
- return err
- }
- id, err = txm.orm.InsertMsg(contractID, typeURL, raw, pg.WithQueryer(tx))
- return err
- })
- return id, err
-}
-
-func (txm *Txm) marshalMsg(msg sdk.Msg) (string, []byte, error) {
- switch ms := msg.(type) {
- case *wasmtypes.MsgExecuteContract:
- _, err := sdk.AccAddressFromBech32(ms.Sender)
- if err != nil {
- txm.lggr.Errorw("failed to parse sender, skipping", "err", err, "sender", ms.Sender)
- return "", nil, err
- }
-
- case *types.MsgSend:
- _, err := sdk.AccAddressFromBech32(ms.FromAddress)
- if err != nil {
- txm.lggr.Errorw("failed to parse sender, skipping", "err", err, "sender", ms.FromAddress)
- return "", nil, err
- }
-
- default:
- return "", nil, &cosmos.ErrMsgUnsupported{Msg: msg}
- }
- typeURL := sdk.MsgTypeURL(msg)
- raw, err := proto.Marshal(msg)
- if err != nil {
- txm.lggr.Errorw("failed to marshal msg, skipping", "err", err, "msg", msg)
- return "", nil, err
- }
- return typeURL, raw, nil
-}
-
-// GetMsgs returns any messages matching ids.
-func (txm *Txm) GetMsgs(ids ...int64) (adapters.Msgs, error) {
- return txm.orm.GetMsgs(ids...)
-}
-
-// GasPrice returns the gas price from the estimator in the configured fee token.
-func (txm *Txm) GasPrice() (sdk.DecCoin, error) {
- prices := txm.gpe.GasPrices()
- gasPrice, ok := prices[txm.cfg.GasToken()]
- if !ok {
- return sdk.DecCoin{}, errors.New("unexpected empty gas price")
- }
- return gasPrice, nil
-}
-
-// Close close service
-func (txm *Txm) Close() error {
- return txm.StopOnce("Txm", func() error {
- txm.sub.Close()
- close(txm.stop)
- <-txm.done
- return nil
- })
-}
-
-func (txm *Txm) Name() string { return txm.lggr.Name() }
-
-// Healthy service is healthy
-func (txm *Txm) Healthy() error {
- return nil
-}
-
-// Ready service is ready
-func (txm *Txm) Ready() error {
- return nil
-}
-
-func (txm *Txm) HealthReport() map[string]error { return map[string]error{txm.Name(): txm.Healthy()} }
diff --git a/core/chains/cosmos/cosmostxm/txm_internal_test.go b/core/chains/cosmos/cosmostxm/txm_internal_test.go
deleted file mode 100644
index f29f130cae4..00000000000
--- a/core/chains/cosmos/cosmostxm/txm_internal_test.go
+++ /dev/null
@@ -1,426 +0,0 @@
-package cosmostxm
-
-import (
- "fmt"
- "testing"
- "time"
-
- wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types"
- tmservicetypes "github.com/cosmos/cosmos-sdk/client/grpc/tmservice"
- cosmostypes "github.com/cosmos/cosmos-sdk/types"
- txtypes "github.com/cosmos/cosmos-sdk/types/tx"
- "github.com/pkg/errors"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/mock"
- "github.com/stretchr/testify/require"
- "go.uber.org/zap/zapcore"
-
- cosmosclient "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos/client"
- tcmocks "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos/client/mocks"
- coscfg "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos/config"
- cosmosdb "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos/db"
- relayutils "github.com/smartcontractkit/chainlink-relay/pkg/utils"
-
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/cosmostest"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
- "github.com/smartcontractkit/chainlink/v2/core/logger"
- "github.com/smartcontractkit/chainlink/v2/core/services/keystore"
- "github.com/smartcontractkit/chainlink/v2/core/utils"
-)
-
-func generateExecuteMsg(msg []byte, from, to cosmostypes.AccAddress) cosmostypes.Msg {
- return &wasmtypes.MsgExecuteContract{
- Sender: from.String(),
- Contract: to.String(),
- Msg: msg,
- Funds: cosmostypes.Coins{},
- }
-}
-
-func newReaderWriterMock(t *testing.T) *tcmocks.ReaderWriter {
- tc := new(tcmocks.ReaderWriter)
- tc.Test(t)
- t.Cleanup(func() { tc.AssertExpectations(t) })
- return tc
-}
-
-func TestTxm(t *testing.T) {
- db := pgtest.NewSqlxDB(t)
- lggr := testutils.LoggerAssertMaxLevel(t, zapcore.ErrorLevel)
- ks := keystore.NewInMemory(db, utils.FastScryptParams, lggr, pgtest.NewQConfig(true))
- require.NoError(t, ks.Unlock("blah"))
-
- for i := 0; i < 4; i++ {
- _, err := ks.Cosmos().Create()
- require.NoError(t, err)
- }
-
- loopKs := &keystore.CosmosLoopKeystore{Cosmos: ks.Cosmos()}
- adapter := newKeystoreAdapter(loopKs, "wasm")
- accounts, err := adapter.Accounts()
- require.NoError(t, err)
- require.Equal(t, len(accounts), 4)
-
- sender1, err := cosmostypes.AccAddressFromBech32(accounts[0])
- require.NoError(t, err)
- sender2, err := cosmostypes.AccAddressFromBech32(accounts[1])
- require.NoError(t, err)
- contract, err := cosmostypes.AccAddressFromBech32(accounts[2])
- require.NoError(t, err)
- contract2, err := cosmostypes.AccAddressFromBech32(accounts[3])
- require.NoError(t, err)
-
- logCfg := pgtest.NewQConfig(true)
- chainID := cosmostest.RandomChainID()
- two := int64(2)
- gasToken := "ucosm"
- cfg := &coscfg.TOMLConfig{Chain: coscfg.Chain{
- MaxMsgsPerBatch: &two,
- GasToken: &gasToken,
- }}
- cfg.SetDefaults()
- gpe := cosmosclient.NewMustGasPriceEstimator([]cosmosclient.GasPricesEstimator{
- cosmosclient.NewFixedGasPriceEstimator(map[string]cosmostypes.DecCoin{
- cfg.GasToken(): cosmostypes.NewDecCoinFromDec(cfg.GasToken(), cosmostypes.MustNewDecFromStr("0.01")),
- },
- lggr.(logger.SugaredLogger),
- ),
- }, lggr)
-
- t.Run("single msg", func(t *testing.T) {
- tc := newReaderWriterMock(t)
- tcFn := func() (cosmosclient.ReaderWriter, error) { return tc, nil }
- loopKs := &keystore.CosmosLoopKeystore{Cosmos: ks.Cosmos()}
- txm := NewTxm(db, tcFn, *gpe, chainID, cfg, loopKs, lggr, logCfg, nil)
-
- // Enqueue a single msg, then send it in a batch
- id1, err := txm.Enqueue(contract.String(), generateExecuteMsg([]byte(`1`), sender1, contract))
- require.NoError(t, err)
- tc.On("Account", mock.Anything).Return(uint64(0), uint64(0), nil)
- tc.On("BatchSimulateUnsigned", mock.Anything, mock.Anything).Return(&cosmosclient.BatchSimResults{
- Failed: nil,
- Succeeded: cosmosclient.SimMsgs{{ID: id1, Msg: &wasmtypes.MsgExecuteContract{
- Sender: sender1.String(),
- Msg: []byte(`1`),
- }}},
- }, nil)
- tc.On("SimulateUnsigned", mock.Anything, mock.Anything).Return(&txtypes.SimulateResponse{GasInfo: &cosmostypes.GasInfo{
- GasUsed: 1_000_000,
- }}, nil)
- tc.On("LatestBlock").Return(&tmservicetypes.GetLatestBlockResponse{SdkBlock: &tmservicetypes.Block{
- Header: tmservicetypes.Header{Height: 1},
- }}, nil)
- tc.On("CreateAndSign", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]byte{0x01}, nil)
-
- txResp := &cosmostypes.TxResponse{TxHash: "4BF5122F344554C53BDE2EBB8CD2B7E3D1600AD631C385A5D7CCE23C7785459A"}
- tc.On("Broadcast", mock.Anything, mock.Anything).Return(&txtypes.BroadcastTxResponse{TxResponse: txResp}, nil)
- tc.On("Tx", mock.Anything).Return(&txtypes.GetTxResponse{Tx: &txtypes.Tx{}, TxResponse: txResp}, nil)
- txm.sendMsgBatch(testutils.Context(t))
-
- // Should be in completed state
- completed, err := txm.orm.GetMsgs(id1)
- require.NoError(t, err)
- require.Equal(t, 1, len(completed))
- assert.Equal(t, completed[0].State, cosmosdb.Confirmed)
- })
-
- t.Run("two msgs different accounts", func(t *testing.T) {
- tc := newReaderWriterMock(t)
- tcFn := func() (cosmosclient.ReaderWriter, error) { return tc, nil }
- loopKs := &keystore.CosmosLoopKeystore{Cosmos: ks.Cosmos()}
- txm := NewTxm(db, tcFn, *gpe, chainID, cfg, loopKs, lggr, pgtest.NewQConfig(true), nil)
-
- id1, err := txm.Enqueue(contract.String(), generateExecuteMsg([]byte(`0`), sender1, contract))
- require.NoError(t, err)
- id2, err := txm.Enqueue(contract.String(), generateExecuteMsg([]byte(`1`), sender2, contract))
- require.NoError(t, err)
-
- tc.On("Account", mock.Anything).Return(uint64(0), uint64(0), nil).Once()
- // Note this must be arg dependent, we don't know which order
- // the procesing will happen in (map iteration by from address).
- tc.On("BatchSimulateUnsigned", cosmosclient.SimMsgs{
- {
- ID: id2,
- Msg: &wasmtypes.MsgExecuteContract{
- Sender: sender2.String(),
- Msg: []byte(`1`),
- Contract: contract.String(),
- },
- },
- }, mock.Anything).Return(&cosmosclient.BatchSimResults{
- Failed: nil,
- Succeeded: cosmosclient.SimMsgs{
- {
- ID: id2,
- Msg: &wasmtypes.MsgExecuteContract{
- Sender: sender2.String(),
- Msg: []byte(`1`),
- Contract: contract.String(),
- },
- },
- },
- }, nil).Once()
- tc.On("SimulateUnsigned", mock.Anything, mock.Anything).Return(&txtypes.SimulateResponse{GasInfo: &cosmostypes.GasInfo{
- GasUsed: 1_000_000,
- }}, nil).Once()
- tc.On("LatestBlock").Return(&tmservicetypes.GetLatestBlockResponse{SdkBlock: &tmservicetypes.Block{
- Header: tmservicetypes.Header{Height: 1},
- }}, nil).Once()
- tc.On("CreateAndSign", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]byte{0x01}, nil).Once()
- txResp := &cosmostypes.TxResponse{TxHash: "4BF5122F344554C53BDE2EBB8CD2B7E3D1600AD631C385A5D7CCE23C7785459A"}
- tc.On("Broadcast", mock.Anything, mock.Anything).Return(&txtypes.BroadcastTxResponse{TxResponse: txResp}, nil).Once()
- tc.On("Tx", mock.Anything).Return(&txtypes.GetTxResponse{Tx: &txtypes.Tx{}, TxResponse: txResp}, nil).Once()
- txm.sendMsgBatch(testutils.Context(t))
-
- // Should be in completed state
- completed, err := txm.orm.GetMsgs(id1, id2)
- require.NoError(t, err)
- require.Equal(t, 2, len(completed))
- assert.Equal(t, cosmosdb.Errored, completed[0].State) // cancelled
- assert.Equal(t, cosmosdb.Confirmed, completed[1].State)
- })
-
- t.Run("two msgs different contracts", func(t *testing.T) {
- tc := newReaderWriterMock(t)
- tcFn := func() (cosmosclient.ReaderWriter, error) { return tc, nil }
- loopKs := &keystore.CosmosLoopKeystore{Cosmos: ks.Cosmos()}
- txm := NewTxm(db, tcFn, *gpe, chainID, cfg, loopKs, lggr, pgtest.NewQConfig(true), nil)
-
- id1, err := txm.Enqueue(contract.String(), generateExecuteMsg([]byte(`0`), sender1, contract))
- require.NoError(t, err)
- id2, err := txm.Enqueue(contract2.String(), generateExecuteMsg([]byte(`1`), sender2, contract2))
- require.NoError(t, err)
- ids := []int64{id1, id2}
- senders := []string{sender1.String(), sender2.String()}
- contracts := []string{contract.String(), contract2.String()}
- for i := 0; i < 2; i++ {
- tc.On("Account", mock.Anything).Return(uint64(0), uint64(0), nil).Once()
- // Note this must be arg dependent, we don't know which order
- // the procesing will happen in (map iteration by from address).
- tc.On("BatchSimulateUnsigned", cosmosclient.SimMsgs{
- {
- ID: ids[i],
- Msg: &wasmtypes.MsgExecuteContract{
- Sender: senders[i],
- Msg: []byte(fmt.Sprintf(`%d`, i)),
- Contract: contracts[i],
- },
- },
- }, mock.Anything).Return(&cosmosclient.BatchSimResults{
- Failed: nil,
- Succeeded: cosmosclient.SimMsgs{
- {
- ID: ids[i],
- Msg: &wasmtypes.MsgExecuteContract{
- Sender: senders[i],
- Msg: []byte(fmt.Sprintf(`%d`, i)),
- Contract: contracts[i],
- },
- },
- },
- }, nil).Once()
- tc.On("SimulateUnsigned", mock.Anything, mock.Anything).Return(&txtypes.SimulateResponse{GasInfo: &cosmostypes.GasInfo{
- GasUsed: 1_000_000,
- }}, nil).Once()
- tc.On("LatestBlock").Return(&tmservicetypes.GetLatestBlockResponse{SdkBlock: &tmservicetypes.Block{
- Header: tmservicetypes.Header{Height: 1},
- }}, nil).Once()
- tc.On("CreateAndSign", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]byte{0x01}, nil).Once()
- }
- txResp := &cosmostypes.TxResponse{TxHash: "4BF5122F344554C53BDE2EBB8CD2B7E3D1600AD631C385A5D7CCE23C7785459A"}
- tc.On("Broadcast", mock.Anything, mock.Anything).Return(&txtypes.BroadcastTxResponse{TxResponse: txResp}, nil).Twice()
- tc.On("Tx", mock.Anything).Return(&txtypes.GetTxResponse{Tx: &txtypes.Tx{}, TxResponse: txResp}, nil).Twice()
- txm.sendMsgBatch(testutils.Context(t))
-
- // Should be in completed state
- completed, err := txm.orm.GetMsgs(id1, id2)
- require.NoError(t, err)
- require.Equal(t, 2, len(completed))
- assert.Equal(t, cosmosdb.Confirmed, completed[0].State)
- assert.Equal(t, cosmosdb.Confirmed, completed[1].State)
- })
-
- t.Run("failed to confirm", func(t *testing.T) {
- tc := newReaderWriterMock(t)
- tc.On("Tx", mock.Anything).Return(&txtypes.GetTxResponse{
- Tx: &txtypes.Tx{},
- TxResponse: &cosmostypes.TxResponse{TxHash: "0x123"},
- }, errors.New("not found")).Twice()
- tcFn := func() (cosmosclient.ReaderWriter, error) { return tc, nil }
- loopKs := &keystore.CosmosLoopKeystore{Cosmos: ks.Cosmos()}
- txm := NewTxm(db, tcFn, *gpe, chainID, cfg, loopKs, lggr, pgtest.NewQConfig(true), nil)
- i, err := txm.orm.InsertMsg("blah", "", []byte{0x01})
- require.NoError(t, err)
- txh := "0x123"
- require.NoError(t, txm.orm.UpdateMsgs([]int64{i}, cosmosdb.Started, &txh))
- require.NoError(t, txm.orm.UpdateMsgs([]int64{i}, cosmosdb.Broadcasted, &txh))
- err = txm.confirmTx(testutils.Context(t), tc, txh, []int64{i}, 2, 1*time.Millisecond)
- require.NoError(t, err)
- m, err := txm.orm.GetMsgs(i)
- require.NoError(t, err)
- require.Equal(t, 1, len(m))
- assert.Equal(t, cosmosdb.Errored, m[0].State)
- })
-
- t.Run("confirm any unconfirmed", func(t *testing.T) {
- require.Equal(t, int64(2), cfg.MaxMsgsPerBatch())
- txHash1 := "0x1234"
- txHash2 := "0x1235"
- txHash3 := "0xabcd"
- tc := newReaderWriterMock(t)
- tc.On("Tx", txHash1).Return(&txtypes.GetTxResponse{
- TxResponse: &cosmostypes.TxResponse{TxHash: txHash1},
- }, nil).Once()
- tc.On("Tx", txHash2).Return(&txtypes.GetTxResponse{
- TxResponse: &cosmostypes.TxResponse{TxHash: txHash2},
- }, nil).Once()
- tc.On("Tx", txHash3).Return(&txtypes.GetTxResponse{
- TxResponse: &cosmostypes.TxResponse{TxHash: txHash3},
- }, nil).Once()
- tcFn := func() (cosmosclient.ReaderWriter, error) { return tc, nil }
- loopKs := &keystore.CosmosLoopKeystore{Cosmos: ks.Cosmos()}
- txm := NewTxm(db, tcFn, *gpe, chainID, cfg, loopKs, lggr, pgtest.NewQConfig(true), nil)
-
- // Insert and broadcast 3 msgs with different txhashes.
- id1, err := txm.orm.InsertMsg("blah", "", []byte{0x01})
- require.NoError(t, err)
- id2, err := txm.orm.InsertMsg("blah", "", []byte{0x02})
- require.NoError(t, err)
- id3, err := txm.orm.InsertMsg("blah", "", []byte{0x03})
- require.NoError(t, err)
- err = txm.orm.UpdateMsgs([]int64{id1}, cosmosdb.Started, &txHash1)
- require.NoError(t, err)
- err = txm.orm.UpdateMsgs([]int64{id2}, cosmosdb.Started, &txHash2)
- require.NoError(t, err)
- err = txm.orm.UpdateMsgs([]int64{id3}, cosmosdb.Started, &txHash3)
- require.NoError(t, err)
- err = txm.orm.UpdateMsgs([]int64{id1}, cosmosdb.Broadcasted, &txHash1)
- require.NoError(t, err)
- err = txm.orm.UpdateMsgs([]int64{id2}, cosmosdb.Broadcasted, &txHash2)
- require.NoError(t, err)
- err = txm.orm.UpdateMsgs([]int64{id3}, cosmosdb.Broadcasted, &txHash3)
- require.NoError(t, err)
-
- // Confirm them as in a restart while confirming scenario
- txm.confirmAnyUnconfirmed(testutils.Context(t))
- msgs, err := txm.orm.GetMsgs(id1, id2, id3)
- require.NoError(t, err)
- require.Equal(t, 3, len(msgs))
- assert.Equal(t, cosmosdb.Confirmed, msgs[0].State)
- assert.Equal(t, cosmosdb.Confirmed, msgs[1].State)
- assert.Equal(t, cosmosdb.Confirmed, msgs[2].State)
- })
-
- t.Run("expired msgs", func(t *testing.T) {
- tc := new(tcmocks.ReaderWriter)
- timeout, err := relayutils.NewDuration(1 * time.Millisecond)
- require.NoError(t, err)
- tcFn := func() (cosmosclient.ReaderWriter, error) { return tc, nil }
- two := int64(2)
- cfgShortExpiry := &coscfg.TOMLConfig{Chain: coscfg.Chain{
- MaxMsgsPerBatch: &two,
- TxMsgTimeout: &timeout,
- }}
- cfgShortExpiry.SetDefaults()
- loopKs := &keystore.CosmosLoopKeystore{Cosmos: ks.Cosmos()}
- txm := NewTxm(db, tcFn, *gpe, chainID, cfgShortExpiry, loopKs, lggr, pgtest.NewQConfig(true), nil)
-
- // Send a single one expired
- id1, err := txm.orm.InsertMsg("blah", "", []byte{0x03})
- require.NoError(t, err)
- time.Sleep(1 * time.Millisecond)
- txm.sendMsgBatch(testutils.Context(t))
- // Should be marked errored
- m, err := txm.orm.GetMsgs(id1)
- require.NoError(t, err)
- assert.Equal(t, cosmosdb.Errored, m[0].State)
-
- // Send a batch which is all expired
- id2, err := txm.orm.InsertMsg("blah", "", []byte{0x03})
- require.NoError(t, err)
- id3, err := txm.orm.InsertMsg("blah", "", []byte{0x03})
- require.NoError(t, err)
- time.Sleep(1 * time.Millisecond)
- txm.sendMsgBatch(testutils.Context(t))
- require.NoError(t, err)
- ms, err := txm.orm.GetMsgs(id2, id3)
- require.NoError(t, err)
- assert.Equal(t, cosmosdb.Errored, ms[0].State)
- assert.Equal(t, cosmosdb.Errored, ms[1].State)
- })
-
- t.Run("started msgs", func(t *testing.T) {
- tc := new(tcmocks.ReaderWriter)
- tc.On("Account", mock.Anything).Return(uint64(0), uint64(0), nil)
- tc.On("SimulateUnsigned", mock.Anything, mock.Anything).Return(&txtypes.SimulateResponse{GasInfo: &cosmostypes.GasInfo{
- GasUsed: 1_000_000,
- }}, nil)
- tc.On("LatestBlock").Return(&tmservicetypes.GetLatestBlockResponse{SdkBlock: &tmservicetypes.Block{
- Header: tmservicetypes.Header{Height: 1},
- }}, nil)
- tc.On("CreateAndSign", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]byte{0x01}, nil)
- txResp := &cosmostypes.TxResponse{TxHash: "4BF5122F344554C53BDE2EBB8CD2B7E3D1600AD631C385A5D7CCE23C7785459A"}
- tc.On("Broadcast", mock.Anything, mock.Anything).Return(&txtypes.BroadcastTxResponse{TxResponse: txResp}, nil)
- tc.On("Tx", mock.Anything).Return(&txtypes.GetTxResponse{Tx: &txtypes.Tx{}, TxResponse: txResp}, nil)
- tcFn := func() (cosmosclient.ReaderWriter, error) { return tc, nil }
- two := int64(2)
- cfgMaxMsgs := &coscfg.TOMLConfig{Chain: coscfg.Chain{
- MaxMsgsPerBatch: &two,
- }}
- cfgMaxMsgs.SetDefaults()
- loopKs := &keystore.CosmosLoopKeystore{Cosmos: ks.Cosmos()}
- txm := NewTxm(db, tcFn, *gpe, chainID, cfgMaxMsgs, loopKs, lggr, pgtest.NewQConfig(true), nil)
-
- // Leftover started is processed
- msg1 := generateExecuteMsg([]byte{0x03}, sender1, contract)
- id1 := mustInsertMsg(t, txm, contract.String(), msg1)
- require.NoError(t, txm.orm.UpdateMsgs([]int64{id1}, cosmosdb.Started, nil))
- msgs := cosmosclient.SimMsgs{{ID: id1, Msg: &wasmtypes.MsgExecuteContract{
- Sender: sender1.String(),
- Msg: []byte{0x03},
- Contract: contract.String(),
- }}}
- tc.On("BatchSimulateUnsigned", msgs, mock.Anything).
- Return(&cosmosclient.BatchSimResults{Failed: nil, Succeeded: msgs}, nil).Once()
- time.Sleep(1 * time.Millisecond)
- txm.sendMsgBatch(testutils.Context(t))
- m, err := txm.orm.GetMsgs(id1)
- require.NoError(t, err)
- assert.Equal(t, cosmosdb.Confirmed, m[0].State)
-
- // Leftover started is not cancelled
- msg2 := generateExecuteMsg([]byte{0x04}, sender1, contract)
- msg3 := generateExecuteMsg([]byte{0x05}, sender1, contract)
- id2 := mustInsertMsg(t, txm, contract.String(), msg2)
- require.NoError(t, txm.orm.UpdateMsgs([]int64{id2}, cosmosdb.Started, nil))
- time.Sleep(time.Millisecond) // ensure != CreatedAt
- id3 := mustInsertMsg(t, txm, contract.String(), msg3)
- msgs = cosmosclient.SimMsgs{{ID: id2, Msg: &wasmtypes.MsgExecuteContract{
- Sender: sender1.String(),
- Msg: []byte{0x04},
- Contract: contract.String(),
- }}, {ID: id3, Msg: &wasmtypes.MsgExecuteContract{
- Sender: sender1.String(),
- Msg: []byte{0x05},
- Contract: contract.String(),
- }}}
- tc.On("BatchSimulateUnsigned", msgs, mock.Anything).
- Return(&cosmosclient.BatchSimResults{Failed: nil, Succeeded: msgs}, nil).Once()
- time.Sleep(1 * time.Millisecond)
- txm.sendMsgBatch(testutils.Context(t))
- require.NoError(t, err)
- ms, err := txm.orm.GetMsgs(id2, id3)
- require.NoError(t, err)
- assert.Equal(t, cosmosdb.Confirmed, ms[0].State)
- assert.Equal(t, cosmosdb.Confirmed, ms[1].State)
- })
-}
-
-func mustInsertMsg(t *testing.T, txm *Txm, contractID string, msg cosmostypes.Msg) int64 {
- typeURL, raw, err := txm.marshalMsg(msg)
- require.NoError(t, err)
- id, err := txm.orm.InsertMsg(contractID, typeURL, raw)
- require.NoError(t, err)
- return id
-}
diff --git a/core/chains/cosmos/cosmostxm/txm_test.go b/core/chains/cosmos/cosmostxm/txm_test.go
deleted file mode 100644
index 25ac9e8d9ec..00000000000
--- a/core/chains/cosmos/cosmostxm/txm_test.go
+++ /dev/null
@@ -1,121 +0,0 @@
-//go:build integration
-
-package cosmostxm_test
-
-// TestTxm_Integration is disabled in order to be moved to chainlink-cosmos before DB testing is available
-//func TestTxm_Integration(t *testing.T) {
-// chainID := cosmostest.RandomChainID()
-// cosmosChain := coscfg.Chain{}
-// cosmosChain.SetDefaults()
-// fallbackGasPrice := sdk.NewDecCoinFromDec(*cosmosChain.GasToken, sdk.MustNewDecFromStr("0.01"))
-// chainConfig := cosmos.CosmosConfig{ChainID: &chainID, Enabled: ptr(true), Chain: cosmosChain}
-// cfg, db := heavyweight.FullTestDBNoFixturesV2(t, "cosmos_txm", func(c *chainlink.Config, s *chainlink.Secrets) {
-// c.Cosmos = cosmos.CosmosConfigs{&chainConfig}
-// })
-// lggr := logger.TestLogger(t)
-// logCfg := pgtest.NewQConfig(true)
-// gpe := cosmosclient.NewMustGasPriceEstimator([]cosmosclient.GasPricesEstimator{
-// cosmosclient.NewFixedGasPriceEstimator(map[string]sdk.DecCoin{
-// *cosmosChain.GasToken: fallbackGasPrice,
-// },
-// lggr.(logger.SugaredLogger),
-// ),
-// }, lggr)
-// orm := cosmostxm.NewORM(chainID, db, lggr, logCfg)
-// eb := pg.NewEventBroadcaster(cfg.Database().URL(), 0, 0, lggr, uuid.New())
-// require.NoError(t, eb.Start(testutils.Context(t)))
-// t.Cleanup(func() { require.NoError(t, eb.Close()) })
-// ks := keystore.NewInMemory(db, utils.FastScryptParams, lggr, pgtest.NewQConfig(true))
-// zeConfig := sdk.GetConfig()
-// fmt.Println(zeConfig)
-// accounts, testdir, tendermintURL := cosmosclient.SetupLocalCosmosNode(t, chainID, *cosmosChain.GasToken)
-// tc, err := cosmosclient.NewClient(chainID, tendermintURL, 0, lggr)
-// require.NoError(t, err)
-//
-// loopKs := &keystore.CosmosLoopKeystore{Cosmos: ks.Cosmos()}
-// keystoreAdapter := cosmostxm.NewKeystoreAdapter(loopKs, *cosmosChain.Bech32Prefix)
-//
-// // First create a transmitter key and fund it with 1k native tokens
-// require.NoError(t, ks.Unlock("blah"))
-// err = ks.Cosmos().EnsureKey()
-// require.NoError(t, err)
-// ksAccounts, err := keystoreAdapter.Accounts()
-// require.NoError(t, err)
-// transmitterAddress := ksAccounts[0]
-// transmitterID, err := sdk.AccAddressFromBech32(transmitterAddress)
-// require.NoError(t, err)
-// an, sn, err := tc.Account(accounts[0].Address)
-// require.NoError(t, err)
-// resp, err := tc.SignAndBroadcast([]sdk.Msg{banktypes.NewMsgSend(accounts[0].Address, transmitterID, sdk.NewCoins(sdk.NewInt64Coin(*cosmosChain.GasToken, 100000)))},
-// an, sn, gpe.GasPrices()[*cosmosChain.GasToken], accounts[0].PrivateKey, txtypes.BroadcastMode_BROADCAST_MODE_SYNC)
-// tx, success := cosmosclient.AwaitTxCommitted(t, tc, resp.TxResponse.TxHash)
-// require.True(t, success)
-// require.Equal(t, types.CodeTypeOK, tx.TxResponse.Code)
-// require.NoError(t, err)
-//
-// // TODO: find a way to pull this test artifact from
-// // the chainlink-cosmos repo instead of copying it to cores testdata
-// contractID := cosmosclient.DeployTestContract(t, tendermintURL, chainID, *cosmosChain.GasToken, accounts[0], cosmosclient.Account{
-// Name: "transmitter",
-// PrivateKey: cosmostxm.NewKeyWrapper(keystoreAdapter, transmitterAddress),
-// Address: transmitterID,
-// }, tc, testdir, "../../../testdata/cosmos/my_first_contract.wasm")
-//
-// tcFn := func() (cosmosclient.ReaderWriter, error) { return tc, nil }
-// // Start txm
-// txm := cosmostxm.NewTxm(db, tcFn, *gpe, chainID, &chainConfig, loopKs, lggr, pgtest.NewQConfig(true), eb)
-// require.NoError(t, txm.Start(testutils.Context(t)))
-//
-// // Change the contract state
-// setMsg := &wasmtypes.MsgExecuteContract{
-// Sender: transmitterID.String(),
-// Contract: contractID.String(),
-// Msg: []byte(`{"reset":{"count":5}}`),
-// Funds: sdk.Coins{},
-// }
-// _, err = txm.Enqueue(contractID.String(), setMsg)
-// require.NoError(t, err)
-//
-// // Observe the counter gets set eventually
-// gomega.NewWithT(t).Eventually(func() bool {
-// d, err := tc.ContractState(contractID, []byte(`{"get_count":{}}`))
-// require.NoError(t, err)
-// t.Log("contract value", string(d))
-// return string(d) == `{"count":5}`
-// }, 20*time.Second, time.Second).Should(gomega.BeTrue())
-// // Ensure messages are completed
-// gomega.NewWithT(t).Eventually(func() bool {
-// msgs, err := orm.GetMsgsState(Confirmed, 5)
-// require.NoError(t, err)
-// return 1 == len(msgs)
-// }, 5*time.Second, time.Second).Should(gomega.BeTrue())
-//
-// // Ensure invalid msgs are marked as errored
-// invalidMsg := &wasmtypes.MsgExecuteContract{
-// Sender: transmitterID.String(),
-// Contract: contractID.String(),
-// Msg: []byte(`{"blah":{"blah":5}}`),
-// Funds: sdk.Coins{},
-// }
-// _, err = txm.Enqueue(contractID.String(), invalidMsg)
-// require.NoError(t, err)
-// _, err = txm.Enqueue(contractID.String(), invalidMsg)
-// require.NoError(t, err)
-// _, err = txm.Enqueue(contractID.String(), setMsg)
-// require.NoError(t, err)
-//
-// // Ensure messages are completed
-// gomega.NewWithT(t).Eventually(func() bool {
-// succeeded, err := orm.GetMsgsState(Confirmed, 5)
-// require.NoError(t, err)
-// errored, err := orm.GetMsgsState(Errored, 5)
-// require.NoError(t, err)
-// t.Log("errored", len(errored), "succeeded", len(succeeded))
-// return 2 == len(succeeded) && 2 == len(errored)
-// }, 20*time.Second, time.Second).Should(gomega.BeTrue())
-//
-// // Observe the messages have been marked as completed
-// require.NoError(t, txm.Close())
-//}
-//
-//func ptr[T any](t T) *T { return &t }
diff --git a/core/chains/cosmos/relayer_adapter.go b/core/chains/cosmos/relayer_adapter.go
deleted file mode 100644
index ace441c2bb5..00000000000
--- a/core/chains/cosmos/relayer_adapter.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package cosmos
-
-import (
- "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos/adapters"
-
- "github.com/smartcontractkit/chainlink-relay/pkg/loop"
-
- pkgcosmos "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos"
- "github.com/smartcontractkit/chainlink/v2/core/chains"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay"
-)
-
-// LegacyChainContainer is container interface for Cosmos chains
-type LegacyChainContainer interface {
- Get(id string) (adapters.Chain, error)
- Len() int
- List(ids ...string) ([]adapters.Chain, error)
- Slice() []adapters.Chain
-}
-
-type LegacyChains = chains.ChainsKV[adapters.Chain]
-
-var _ LegacyChainContainer = &LegacyChains{}
-
-func NewLegacyChains(m map[string]adapters.Chain) *LegacyChains {
- return chains.NewChainsKV[adapters.Chain](m)
-}
-
-type LoopRelayerChainer interface {
- loop.Relayer
- Chain() adapters.Chain
-}
-
-type LoopRelayerChain struct {
- loop.Relayer
- chain adapters.Chain
-}
-
-func NewLoopRelayerChain(r *pkgcosmos.Relayer, s adapters.Chain) *LoopRelayerChain {
- ra := relay.NewServerAdapter(r, s)
- return &LoopRelayerChain{
- Relayer: ra,
- chain: s,
- }
-}
-func (r *LoopRelayerChain) Chain() adapters.Chain {
- return r.chain
-}
-
-var _ LoopRelayerChainer = &LoopRelayerChain{}
diff --git a/core/chains/evm/chain.go b/core/chains/evm/chain.go
index 6eed13271e3..b5896393d3c 100644
--- a/core/chains/evm/chain.go
+++ b/core/chains/evm/chain.go
@@ -11,7 +11,7 @@ import (
gotoml "github.com/pelletier/go-toml/v2"
"go.uber.org/multierr"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
relaychains "github.com/smartcontractkit/chainlink-relay/pkg/chains"
"github.com/smartcontractkit/chainlink-relay/pkg/services"
@@ -498,3 +498,21 @@ func newPrimary(cfg evmconfig.NodePool, noNewHeadsThreshold time.Duration, lggr
return evmclient.NewNode(cfg, noNewHeadsThreshold, lggr, (url.URL)(*n.WSURL), (*url.URL)(n.HTTPURL), *n.Name, id, chainID, *n.Order), nil
}
+
+// TODO-1663: replace newEthClientFromChain with the function below once client.go is deprecated.
+//func newEthClientFromChain(cfg evmconfig.NodePool, noNewHeadsThreshold time.Duration, lggr logger.Logger, chainID *big.Int, chainType config.ChainType, nodes []*toml.Node) evmclient.Client {
+// var empty url.URL
+// var primaries []commonclient.Node[*big.Int, *evmtypes.Head, evmclient.RPCCLient]
+// var sendonlys []commonclient.SendOnlyNode[*big.Int, evmclient.RPCCLient]
+// for i, node := range nodes {
+// if node.SendOnly != nil && *node.SendOnly {
+// rpc := evmclient.NewRPCClient(lggr, empty, (*url.URL)(node.HTTPURL), fmt.Sprintf("eth-sendonly-rpc-%d", i), int32(i), chainID, commontypes.Primary)
+// sendonly := commonclient.NewSendOnlyNode[*big.Int, evmclient.RPCCLient](lggr, (url.URL)(*node.HTTPURL), *node.Name, chainID, rpc)
+// sendonlys = append(sendonlys, sendonly)
+// } else {
+// rpc := evmclient.NewRPCClient(lggr, (url.URL)(*node.WSURL), (*url.URL)(node.HTTPURL), fmt.Sprintf("eth-sendonly-rpc-%d", i), int32(i), chainID, commontypes.Primary)
+// primaries = append(primaries, commonclient.NewNode[*big.Int, *evmtypes.Head, evmclient.RPCCLient](cfg, noNewHeadsThreshold, lggr, (url.URL)(*node.WSURL), (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, *node.Order, rpc, "EVM"))
+// }
+// }
+// return evmclient.NewChainClient(lggr, cfg.SelectionMode(), cfg.LeaseDuration(), noNewHeadsThreshold, primaries, sendonlys, chainID, chainType)
+//}
diff --git a/core/chains/evm/chain_test.go b/core/chains/evm/chain_test.go
index ba24598ef73..f25af87a35b 100644
--- a/core/chains/evm/chain_test.go
+++ b/core/chains/evm/chain_test.go
@@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/assert"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/mocks"
diff --git a/core/chains/evm/client/chain_client.go b/core/chains/evm/client/chain_client.go
new file mode 100644
index 00000000000..4c5108745c5
--- /dev/null
+++ b/core/chains/evm/client/chain_client.go
@@ -0,0 +1,273 @@
+package client
+
+import (
+ "context"
+ "math/big"
+ "time"
+
+ "github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/rpc"
+
+ commonclient "github.com/smartcontractkit/chainlink/v2/common/client"
+ "github.com/smartcontractkit/chainlink/v2/core/assets"
+ evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+ "github.com/smartcontractkit/chainlink/v2/core/config"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+)
+
+var _ Client = (*chainClient)(nil)
+
+// TODO-1663: rename this to client, once the client.go file is deprecated.
+type chainClient struct {
+ multiNode commonclient.MultiNode[
+ *big.Int,
+ evmtypes.Nonce,
+ common.Address,
+ common.Hash,
+ *types.Transaction,
+ common.Hash,
+ types.Log,
+ ethereum.FilterQuery,
+ *evmtypes.Receipt,
+ *assets.Wei,
+ *evmtypes.Head,
+ RPCCLient,
+ ]
+ logger logger.Logger
+}
+
+func NewChainClient(
+ logger logger.Logger,
+ selectionMode string,
+ leaseDuration time.Duration,
+ noNewHeadsThreshold time.Duration,
+ nodes []commonclient.Node[*big.Int, *evmtypes.Head, RPCCLient],
+ sendonlys []commonclient.SendOnlyNode[*big.Int, RPCCLient],
+ chainID *big.Int,
+ chainType config.ChainType,
+) Client {
+ multiNode := commonclient.NewMultiNode[
+ *big.Int,
+ evmtypes.Nonce,
+ common.Address,
+ common.Hash,
+ *types.Transaction,
+ common.Hash,
+ types.Log,
+ ethereum.FilterQuery,
+ *evmtypes.Receipt,
+ *assets.Wei,
+ *evmtypes.Head,
+ RPCCLient,
+ ](
+ logger,
+ selectionMode,
+ leaseDuration,
+ noNewHeadsThreshold,
+ nodes,
+ sendonlys,
+ chainID,
+ chainType,
+ "EVM",
+ ClassifySendOnlyError,
+ )
+ return &chainClient{
+ multiNode: multiNode,
+ logger: logger,
+ }
+}
+
+func (c *chainClient) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) {
+ return c.multiNode.BalanceAt(ctx, account, blockNumber)
+}
+
+func (c *chainClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error {
+ batch := make([]any, len(b))
+ for i, arg := range b {
+ batch[i] = any(arg)
+ }
+ return c.multiNode.BatchCallContext(ctx, batch)
+}
+
+func (c *chainClient) BatchCallContextAll(ctx context.Context, b []rpc.BatchElem) error {
+ batch := make([]any, len(b))
+ for i, arg := range b {
+ batch[i] = any(arg)
+ }
+ return c.multiNode.BatchCallContextAll(ctx, batch)
+}
+
+// TODO-1663: return custom Block type instead of geth's once client.go is deprecated.
+func (c *chainClient) BlockByHash(ctx context.Context, hash common.Hash) (b *types.Block, err error) {
+ rpc, err := c.multiNode.SelectNodeRPC()
+ if err != nil {
+ return b, err
+ }
+ return rpc.BlockByHashGeth(ctx, hash)
+}
+
+// TODO-1663: return custom Block type instead of geth's once client.go is deprecated.
+func (c *chainClient) BlockByNumber(ctx context.Context, number *big.Int) (b *types.Block, err error) {
+ rpc, err := c.multiNode.SelectNodeRPC()
+ if err != nil {
+ return b, err
+ }
+ return rpc.BlockByNumberGeth(ctx, number)
+}
+
+func (c *chainClient) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error {
+ return c.multiNode.CallContext(ctx, result, method)
+}
+
+func (c *chainClient) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
+ return c.multiNode.CallContract(ctx, msg, blockNumber)
+}
+
+// TODO-1663: change this to actual ChainID() call once client.go is deprecated.
+func (c *chainClient) ChainID() (*big.Int, error) {
+ //return c.multiNode.ChainID(ctx), nil
+ return c.multiNode.ConfiguredChainID(), nil
+}
+
+func (c *chainClient) Close() {
+ c.multiNode.Close()
+}
+
+func (c *chainClient) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) {
+ return c.multiNode.CodeAt(ctx, account, blockNumber)
+}
+
+func (c *chainClient) ConfiguredChainID() *big.Int {
+ return c.multiNode.ConfiguredChainID()
+}
+
+func (c *chainClient) Dial(ctx context.Context) error {
+ return c.multiNode.Dial(ctx)
+}
+
+func (c *chainClient) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) {
+ return c.multiNode.EstimateGas(ctx, call)
+}
+func (c *chainClient) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) {
+ return c.multiNode.FilterEvents(ctx, q)
+}
+
+func (c *chainClient) HeaderByHash(ctx context.Context, h common.Hash) (head *types.Header, err error) {
+ rpc, err := c.multiNode.SelectNodeRPC()
+ if err != nil {
+ return head, err
+ }
+ return rpc.HeaderByHash(ctx, h)
+}
+
+func (c *chainClient) HeaderByNumber(ctx context.Context, n *big.Int) (head *types.Header, err error) {
+ rpc, err := c.multiNode.SelectNodeRPC()
+ if err != nil {
+ return head, err
+ }
+ return rpc.HeaderByNumber(ctx, n)
+}
+
+func (c *chainClient) HeadByHash(ctx context.Context, h common.Hash) (*evmtypes.Head, error) {
+ return c.multiNode.BlockByHash(ctx, h)
+}
+
+func (c *chainClient) HeadByNumber(ctx context.Context, n *big.Int) (*evmtypes.Head, error) {
+ return c.multiNode.BlockByNumber(ctx, n)
+}
+
+func (c *chainClient) IsL2() bool {
+ return c.multiNode.IsL2()
+}
+
+func (c *chainClient) LINKBalance(ctx context.Context, address common.Address, linkAddress common.Address) (*assets.Link, error) {
+ return c.multiNode.LINKBalance(ctx, address, linkAddress)
+}
+
+func (c *chainClient) LatestBlockHeight(ctx context.Context) (*big.Int, error) {
+ return c.multiNode.LatestBlockHeight(ctx)
+}
+
+func (c *chainClient) NodeStates() map[string]string {
+ return c.multiNode.NodeStates()
+}
+
+func (c *chainClient) PendingCodeAt(ctx context.Context, account common.Address) (b []byte, err error) {
+ rpc, err := c.multiNode.SelectNodeRPC()
+ if err != nil {
+ return b, err
+ }
+ return rpc.PendingCodeAt(ctx, account)
+}
+
+// TODO-1663: change this to evmtypes.Nonce(int64) once client.go is deprecated.
+func (c *chainClient) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) {
+ n, err := c.multiNode.PendingSequenceAt(ctx, account)
+ return uint64(n), err
+}
+
+func (c *chainClient) SendTransaction(ctx context.Context, tx *types.Transaction) error {
+ return c.multiNode.SendTransaction(ctx, tx)
+}
+
+func (c *chainClient) SendTransactionReturnCode(ctx context.Context, tx *types.Transaction, fromAddress common.Address) (commonclient.SendTxReturnCode, error) {
+ err := c.SendTransaction(ctx, tx)
+ return ClassifySendError(err, c.logger, tx, fromAddress, c.IsL2())
+}
+
+func (c *chainClient) SequenceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (evmtypes.Nonce, error) {
+ return c.multiNode.SequenceAt(ctx, account, blockNumber)
+}
+
+func (c *chainClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (s ethereum.Subscription, err error) {
+ rpc, err := c.multiNode.SelectNodeRPC()
+ if err != nil {
+ return s, err
+ }
+ return rpc.SubscribeFilterLogs(ctx, q, ch)
+}
+
+func (c *chainClient) SubscribeNewHead(ctx context.Context, ch chan<- *evmtypes.Head) (ethereum.Subscription, error) {
+ csf := newChainIDSubForwarder(c.ConfiguredChainID(), ch)
+ err := csf.start(c.multiNode.Subscribe(ctx, csf.srcCh, "newHeads"))
+ if err != nil {
+ return nil, err
+ }
+ return csf, nil
+}
+
+func (c *chainClient) SuggestGasPrice(ctx context.Context) (p *big.Int, err error) {
+ rpc, err := c.multiNode.SelectNodeRPC()
+ if err != nil {
+ return p, err
+ }
+ return rpc.SuggestGasPrice(ctx)
+}
+
+func (c *chainClient) SuggestGasTipCap(ctx context.Context) (t *big.Int, err error) {
+ rpc, err := c.multiNode.SelectNodeRPC()
+ if err != nil {
+ return t, err
+ }
+ return rpc.SuggestGasTipCap(ctx)
+}
+
+func (c *chainClient) TokenBalance(ctx context.Context, address common.Address, contractAddress common.Address) (*big.Int, error) {
+ return c.multiNode.TokenBalance(ctx, address, contractAddress)
+}
+
+func (c *chainClient) TransactionByHash(ctx context.Context, txHash common.Hash) (*types.Transaction, error) {
+ return c.multiNode.TransactionByHash(ctx, txHash)
+}
+
+// TODO-1663: return custom Receipt type instead of geth's once client.go is deprecated.
+func (c *chainClient) TransactionReceipt(ctx context.Context, txHash common.Hash) (r *types.Receipt, err error) {
+ rpc, err := c.multiNode.SelectNodeRPC()
+ if err != nil {
+ return r, err
+ }
+ //return rpc.TransactionReceipt(ctx, txHash)
+ return rpc.TransactionReceiptGeth(ctx, txHash)
+}
diff --git a/core/chains/evm/client/client.go b/core/chains/evm/client/client.go
index 3a3b8b23a92..fb8a39f3798 100644
--- a/core/chains/evm/client/client.go
+++ b/core/chains/evm/client/client.go
@@ -6,7 +6,7 @@ import (
"strings"
"time"
- clienttypes "github.com/smartcontractkit/chainlink/v2/common/chains/client"
+ commonclient "github.com/smartcontractkit/chainlink/v2/common/client"
htrktypes "github.com/smartcontractkit/chainlink/v2/common/headtracker/types"
"github.com/smartcontractkit/chainlink/v2/core/assets"
evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
@@ -61,7 +61,7 @@ type Client interface {
HeadByHash(ctx context.Context, n common.Hash) (*evmtypes.Head, error)
SubscribeNewHead(ctx context.Context, ch chan<- *evmtypes.Head) (ethereum.Subscription, error)
- SendTransactionReturnCode(ctx context.Context, tx *types.Transaction, fromAddress common.Address) (clienttypes.SendTxReturnCode, error)
+ SendTransactionReturnCode(ctx context.Context, tx *types.Transaction, fromAddress common.Address) (commonclient.SendTxReturnCode, error)
// Wrapped Geth client methods
// blockNumber can be specified as `nil` to imply latest block
@@ -211,9 +211,9 @@ func (client *client) HeaderByHash(ctx context.Context, h common.Hash) (*types.H
return client.pool.HeaderByHash(ctx, h)
}
-func (client *client) SendTransactionReturnCode(ctx context.Context, tx *types.Transaction, fromAddress common.Address) (clienttypes.SendTxReturnCode, error) {
+func (client *client) SendTransactionReturnCode(ctx context.Context, tx *types.Transaction, fromAddress common.Address) (commonclient.SendTxReturnCode, error) {
err := client.SendTransaction(ctx, tx)
- return NewSendErrorReturnCode(err, client.logger, tx, fromAddress, client.pool.ChainType().IsL2())
+ return ClassifySendError(err, client.logger, tx, fromAddress, client.pool.ChainType().IsL2())
}
// SendTransaction also uses the sendonly HTTP RPC URLs if set
diff --git a/core/chains/evm/client/client_test.go b/core/chains/evm/client/client_test.go
index 88bc37411c6..673fe044afe 100644
--- a/core/chains/evm/client/client_test.go
+++ b/core/chains/evm/client/client_test.go
@@ -22,27 +22,55 @@ import (
"github.com/stretchr/testify/require"
"github.com/tidwall/gjson"
- clienttypes "github.com/smartcontractkit/chainlink/v2/common/chains/client"
- evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ commonclient "github.com/smartcontractkit/chainlink/v2/common/client"
+
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
"github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
"github.com/smartcontractkit/chainlink/v2/core/utils"
)
-func mustNewClient(t *testing.T, wsURL string, sendonlys ...url.URL) evmclient.Client {
+func mustNewClient(t *testing.T, wsURL string, sendonlys ...url.URL) client.Client {
return mustNewClientWithChainID(t, wsURL, testutils.FixtureChainID, sendonlys...)
}
-func mustNewClientWithChainID(t *testing.T, wsURL string, chainID *big.Int, sendonlys ...url.URL) evmclient.Client {
- cfg := evmclient.TestNodePoolConfig{
- NodeSelectionMode: evmclient.NodeSelectionMode_RoundRobin,
+func mustNewClientWithChainID(t *testing.T, wsURL string, chainID *big.Int, sendonlys ...url.URL) client.Client {
+ cfg := client.TestNodePoolConfig{
+ NodeSelectionMode: client.NodeSelectionMode_RoundRobin,
+ }
+ c, err := client.NewClientWithTestNode(t, cfg, time.Second*0, wsURL, nil, sendonlys, 42, chainID)
+ require.NoError(t, err)
+ return c
+}
+
+func mustNewChainClient(t *testing.T, wsURL string, sendonlys ...url.URL) client.Client {
+ return mustNewChainClientWithChainID(t, wsURL, testutils.FixtureChainID, sendonlys...)
+}
+
+func mustNewChainClientWithChainID(t *testing.T, wsURL string, chainID *big.Int, sendonlys ...url.URL) client.Client {
+ cfg := client.TestNodePoolConfig{
+ NodeSelectionMode: client.NodeSelectionMode_RoundRobin,
}
- c, err := evmclient.NewClientWithTestNode(t, cfg, time.Second*0, wsURL, nil, sendonlys, 42, chainID)
+ c, err := client.NewChainClientWithTestNode(t, cfg, time.Second*0, cfg.NodeLeaseDuration, wsURL, nil, sendonlys, 42, chainID)
require.NoError(t, err)
return c
}
+func mustNewClients(t *testing.T, wsURL string, sendonlys ...url.URL) []client.Client {
+ var clients []client.Client
+ clients = append(clients, mustNewClient(t, wsURL, sendonlys...))
+ clients = append(clients, mustNewChainClient(t, wsURL, sendonlys...))
+ return clients
+}
+
+func mustNewClientsWithChainID(t *testing.T, wsURL string, chainID *big.Int, sendonlys ...url.URL) []client.Client {
+ var clients []client.Client
+ clients = append(clients, mustNewClientWithChainID(t, wsURL, chainID, sendonlys...))
+ clients = append(clients, mustNewChainClientWithChainID(t, wsURL, chainID, sendonlys...))
+ return clients
+}
+
func TestEthClient_TransactionReceipt(t *testing.T) {
t.Parallel()
@@ -78,15 +106,17 @@ func TestEthClient_TransactionReceipt(t *testing.T) {
return
})
- ethClient := mustNewClient(t, wsURL)
- err := ethClient.Dial(testutils.Context(t))
- require.NoError(t, err)
+ clients := mustNewClients(t, wsURL)
+ for _, ethClient := range clients {
+ err := ethClient.Dial(testutils.Context(t))
+ require.NoError(t, err)
- hash := common.HexToHash(txHash)
- receipt, err := ethClient.TransactionReceipt(testutils.Context(t), hash)
- require.NoError(t, err)
- assert.Equal(t, hash, receipt.TxHash)
- assert.Equal(t, big.NewInt(11), receipt.BlockNumber)
+ hash := common.HexToHash(txHash)
+ receipt, err := ethClient.TransactionReceipt(testutils.Context(t), hash)
+ require.NoError(t, err)
+ assert.Equal(t, hash, receipt.TxHash)
+ assert.Equal(t, big.NewInt(11), receipt.BlockNumber)
+ }
})
t.Run("no tx hash, returns ethereum.NotFound", func(t *testing.T) {
@@ -108,13 +138,15 @@ func TestEthClient_TransactionReceipt(t *testing.T) {
return
})
- ethClient := mustNewClient(t, wsURL)
- err := ethClient.Dial(testutils.Context(t))
- require.NoError(t, err)
+ clients := mustNewClients(t, wsURL)
+ for _, ethClient := range clients {
+ err := ethClient.Dial(testutils.Context(t))
+ require.NoError(t, err)
- hash := common.HexToHash(txHash)
- _, err = ethClient.TransactionReceipt(testutils.Context(t), hash)
- require.Equal(t, ethereum.NotFound, errors.Cause(err))
+ hash := common.HexToHash(txHash)
+ _, err = ethClient.TransactionReceipt(testutils.Context(t), hash)
+ require.Equal(t, ethereum.NotFound, errors.Cause(err))
+ }
})
}
@@ -144,15 +176,17 @@ func TestEthClient_PendingNonceAt(t *testing.T) {
return
})
- ethClient := mustNewClient(t, wsURL)
- err := ethClient.Dial(testutils.Context(t))
- require.NoError(t, err)
+ clients := mustNewClients(t, wsURL)
+ for _, ethClient := range clients {
+ err := ethClient.Dial(testutils.Context(t))
+ require.NoError(t, err)
- result, err := ethClient.PendingNonceAt(testutils.Context(t), address)
- require.NoError(t, err)
+ result, err := ethClient.PendingNonceAt(testutils.Context(t), address)
+ require.NoError(t, err)
- var expected uint64 = 256
- require.Equal(t, result, expected)
+ var expected uint64 = 256
+ require.Equal(t, result, expected)
+ }
}
func TestEthClient_BalanceAt(t *testing.T) {
@@ -189,13 +223,15 @@ func TestEthClient_BalanceAt(t *testing.T) {
return
})
- ethClient := mustNewClient(t, wsURL)
- err := ethClient.Dial(testutils.Context(t))
- require.NoError(t, err)
+ clients := mustNewClients(t, wsURL)
+ for _, ethClient := range clients {
+ err := ethClient.Dial(testutils.Context(t))
+ require.NoError(t, err)
- result, err := ethClient.BalanceAt(testutils.Context(t), address, nil)
- require.NoError(t, err)
- assert.Equal(t, test.balance, result)
+ result, err := ethClient.BalanceAt(testutils.Context(t), address, nil)
+ require.NoError(t, err)
+ assert.Equal(t, test.balance, result)
+ }
})
}
}
@@ -220,13 +256,15 @@ func TestEthClient_LatestBlockHeight(t *testing.T) {
return
})
- ethClient := mustNewClient(t, wsURL)
- err := ethClient.Dial(testutils.Context(t))
- require.NoError(t, err)
+ clients := mustNewClients(t, wsURL)
+ for _, ethClient := range clients {
+ err := ethClient.Dial(testutils.Context(t))
+ require.NoError(t, err)
- result, err := ethClient.LatestBlockHeight(testutils.Context(t))
- require.NoError(t, err)
- require.Equal(t, big.NewInt(256), result)
+ result, err := ethClient.LatestBlockHeight(testutils.Context(t))
+ require.NoError(t, err)
+ require.Equal(t, big.NewInt(256), result)
+ }
}
func TestEthClient_GetERC20Balance(t *testing.T) {
@@ -248,7 +286,7 @@ func TestEthClient_GetERC20Balance(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
contractAddress := testutils.NewAddress()
userAddress := testutils.NewAddress()
- functionSelector := evmtypes.HexToFunctionSelector(evmclient.BALANCE_OF_ADDRESS_FUNCTION_SELECTOR) // balanceOf(address)
+ functionSelector := evmtypes.HexToFunctionSelector(client.BALANCE_OF_ADDRESS_FUNCTION_SELECTOR) // balanceOf(address)
txData := utils.ConcatBytes(functionSelector.Bytes(), common.LeftPadBytes(userAddress.Bytes(), utils.EVMWordByteLen))
wsURL := cltest.NewWSServer(t, &cltest.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) {
@@ -277,13 +315,15 @@ func TestEthClient_GetERC20Balance(t *testing.T) {
})
- ethClient := mustNewClient(t, wsURL)
- err := ethClient.Dial(testutils.Context(t))
- require.NoError(t, err)
+ clients := mustNewClients(t, wsURL)
+ for _, ethClient := range clients {
+ err := ethClient.Dial(testutils.Context(t))
+ require.NoError(t, err)
- result, err := ethClient.TokenBalance(ctx, userAddress, contractAddress)
- require.NoError(t, err)
- assert.Equal(t, test.balance, result)
+ result, err := ethClient.TokenBalance(ctx, userAddress, contractAddress)
+ require.NoError(t, err)
+ assert.Equal(t, test.balance, result)
+ }
})
}
}
@@ -354,20 +394,22 @@ func TestEthClient_HeaderByNumber(t *testing.T) {
return
})
- ethClient := mustNewClient(t, wsURL)
- err := ethClient.Dial(testutils.Context(t))
- require.NoError(t, err)
-
- ctx, cancel := context.WithTimeout(testutils.Context(t), 5*time.Second)
- defer cancel()
- result, err := ethClient.HeadByNumber(ctx, expectedBlockNum)
- if test.error != nil {
- require.Error(t, err, test.error)
- } else {
+ clients := mustNewClients(t, wsURL)
+ for _, ethClient := range clients {
+ err := ethClient.Dial(testutils.Context(t))
require.NoError(t, err)
- require.Equal(t, expectedBlockHash, result.Hash.Hex())
- require.Equal(t, test.expectedResponseBlock, result.Number)
- require.Zero(t, cltest.FixtureChainID.Cmp(result.EVMChainID.ToInt()))
+
+ ctx, cancel := context.WithTimeout(testutils.Context(t), 5*time.Second)
+ result, err := ethClient.HeadByNumber(ctx, expectedBlockNum)
+ if test.error != nil {
+ require.Error(t, err, test.error)
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, expectedBlockHash, result.Hash.Hex())
+ require.Equal(t, test.expectedResponseBlock, result.Number)
+ require.Zero(t, cltest.FixtureChainID.Cmp(result.EVMChainID.ToInt()))
+ }
+ cancel()
}
})
}
@@ -395,12 +437,14 @@ func TestEthClient_SendTransaction_NoSecondaryURL(t *testing.T) {
return
})
- ethClient := mustNewClient(t, wsURL)
- err := ethClient.Dial(testutils.Context(t))
- require.NoError(t, err)
+ clients := mustNewClients(t, wsURL)
+ for _, ethClient := range clients {
+ err := ethClient.Dial(testutils.Context(t))
+ require.NoError(t, err)
- err = ethClient.SendTransaction(testutils.Context(t), tx)
- assert.NoError(t, err)
+ err = ethClient.SendTransaction(testutils.Context(t), tx)
+ assert.NoError(t, err)
+ }
}
func TestEthClient_SendTransaction_WithSecondaryURLs(t *testing.T) {
@@ -432,16 +476,19 @@ func TestEthClient_SendTransaction_WithSecondaryURLs(t *testing.T) {
t.Cleanup(ts.Close)
sendonlyURL := *cltest.MustParseURL(t, ts.URL)
- ethClient := mustNewClient(t, wsURL, sendonlyURL, sendonlyURL)
- err = ethClient.Dial(testutils.Context(t))
- require.NoError(t, err)
- err = ethClient.SendTransaction(testutils.Context(t), tx)
- require.NoError(t, err)
+ clients := mustNewClients(t, wsURL, sendonlyURL, sendonlyURL)
+ for _, ethClient := range clients {
+ err = ethClient.Dial(testutils.Context(t))
+ require.NoError(t, err)
+
+ err = ethClient.SendTransaction(testutils.Context(t), tx)
+ require.NoError(t, err)
+ }
// Unfortunately it's a bit tricky to test this, since there is no
// synchronization. We have to rely on timing instead.
- require.Eventually(t, func() bool { return service.sentCount.Load() == int32(2) }, testutils.WaitTimeout(t), 500*time.Millisecond)
+ require.Eventually(t, func() bool { return service.sentCount.Load() == int32(len(clients)*2) }, testutils.WaitTimeout(t), 500*time.Millisecond)
}
func TestEthClient_SendTransactionReturnCode(t *testing.T) {
@@ -467,13 +514,15 @@ func TestEthClient_SendTransactionReturnCode(t *testing.T) {
return
})
- ethClient := mustNewClient(t, wsURL)
- err := ethClient.Dial(testutils.Context(t))
- require.NoError(t, err)
+ clients := mustNewClients(t, wsURL)
+ for _, ethClient := range clients {
+ err := ethClient.Dial(testutils.Context(t))
+ require.NoError(t, err)
- errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress)
- assert.Error(t, err)
- assert.Equal(t, errType, clienttypes.Fatal)
+ errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress)
+ assert.Error(t, err)
+ assert.Equal(t, errType, commonclient.Fatal)
+ }
})
t.Run("returns TransactionAlreadyKnown error type when error message is nonce too low", func(t *testing.T) {
@@ -493,13 +542,15 @@ func TestEthClient_SendTransactionReturnCode(t *testing.T) {
return
})
- ethClient := mustNewClient(t, wsURL)
- err := ethClient.Dial(testutils.Context(t))
- require.NoError(t, err)
+ clients := mustNewClients(t, wsURL)
+ for _, ethClient := range clients {
+ err := ethClient.Dial(testutils.Context(t))
+ require.NoError(t, err)
- errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress)
- assert.Error(t, err)
- assert.Equal(t, errType, clienttypes.TransactionAlreadyKnown)
+ errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress)
+ assert.Error(t, err)
+ assert.Equal(t, errType, commonclient.TransactionAlreadyKnown)
+ }
})
t.Run("returns Successful error type when there is no error message", func(t *testing.T) {
@@ -518,13 +569,15 @@ func TestEthClient_SendTransactionReturnCode(t *testing.T) {
return
})
- ethClient := mustNewClient(t, wsURL)
- err := ethClient.Dial(testutils.Context(t))
- require.NoError(t, err)
+ clients := mustNewClients(t, wsURL)
+ for _, ethClient := range clients {
+ err := ethClient.Dial(testutils.Context(t))
+ require.NoError(t, err)
- errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress)
- assert.NoError(t, err)
- assert.Equal(t, errType, clienttypes.Successful)
+ errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress)
+ assert.NoError(t, err)
+ assert.Equal(t, errType, commonclient.Successful)
+ }
})
t.Run("returns Underpriced error type when transaction is terminally underpriced", func(t *testing.T) {
@@ -544,13 +597,15 @@ func TestEthClient_SendTransactionReturnCode(t *testing.T) {
return
})
- ethClient := mustNewClient(t, wsURL)
- err := ethClient.Dial(testutils.Context(t))
- require.NoError(t, err)
+ clients := mustNewClients(t, wsURL)
+ for _, ethClient := range clients {
+ err := ethClient.Dial(testutils.Context(t))
+ require.NoError(t, err)
- errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress)
- assert.Error(t, err)
- assert.Equal(t, errType, clienttypes.Underpriced)
+ errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress)
+ assert.Error(t, err)
+ assert.Equal(t, errType, commonclient.Underpriced)
+ }
})
t.Run("returns Unsupported error type when error message is queue full", func(t *testing.T) {
@@ -570,13 +625,15 @@ func TestEthClient_SendTransactionReturnCode(t *testing.T) {
return
})
- ethClient := mustNewClient(t, wsURL)
- err := ethClient.Dial(testutils.Context(t))
- require.NoError(t, err)
+ clients := mustNewClients(t, wsURL)
+ for _, ethClient := range clients {
+ err := ethClient.Dial(testutils.Context(t))
+ require.NoError(t, err)
- errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress)
- assert.Error(t, err)
- assert.Equal(t, errType, clienttypes.Unsupported)
+ errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress)
+ assert.Error(t, err)
+ assert.Equal(t, errType, commonclient.Unsupported)
+ }
})
t.Run("returns Retryable error type when there is a transaction gap", func(t *testing.T) {
@@ -596,13 +653,15 @@ func TestEthClient_SendTransactionReturnCode(t *testing.T) {
return
})
- ethClient := mustNewClient(t, wsURL)
- err := ethClient.Dial(testutils.Context(t))
- require.NoError(t, err)
+ clients := mustNewClients(t, wsURL)
+ for _, ethClient := range clients {
+ err := ethClient.Dial(testutils.Context(t))
+ require.NoError(t, err)
- errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress)
- assert.Error(t, err)
- assert.Equal(t, errType, clienttypes.Retryable)
+ errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress)
+ assert.Error(t, err)
+ assert.Equal(t, errType, commonclient.Retryable)
+ }
})
t.Run("returns InsufficientFunds error type when the sender address doesn't have enough funds", func(t *testing.T) {
@@ -622,13 +681,15 @@ func TestEthClient_SendTransactionReturnCode(t *testing.T) {
return
})
- ethClient := mustNewClient(t, wsURL)
- err := ethClient.Dial(testutils.Context(t))
- require.NoError(t, err)
+ clients := mustNewClients(t, wsURL)
+ for _, ethClient := range clients {
+ err := ethClient.Dial(testutils.Context(t))
+ require.NoError(t, err)
- errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress)
- assert.Error(t, err)
- assert.Equal(t, errType, clienttypes.InsufficientFunds)
+ errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress)
+ assert.Error(t, err)
+ assert.Equal(t, errType, commonclient.InsufficientFunds)
+ }
})
t.Run("returns ExceedsFeeCap error type when gas price is too high for the node", func(t *testing.T) {
@@ -648,13 +709,15 @@ func TestEthClient_SendTransactionReturnCode(t *testing.T) {
return
})
- ethClient := mustNewClient(t, wsURL)
- err := ethClient.Dial(testutils.Context(t))
- require.NoError(t, err)
+ clients := mustNewClients(t, wsURL)
+ for _, ethClient := range clients {
+ err := ethClient.Dial(testutils.Context(t))
+ require.NoError(t, err)
- errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress)
- assert.Error(t, err)
- assert.Equal(t, errType, clienttypes.ExceedsMaxFee)
+ errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress)
+ assert.Error(t, err)
+ assert.Equal(t, errType, commonclient.ExceedsMaxFee)
+ }
})
t.Run("returns Unknown error type when the error can't be categorized", func(t *testing.T) {
@@ -674,13 +737,15 @@ func TestEthClient_SendTransactionReturnCode(t *testing.T) {
return
})
- ethClient := mustNewClient(t, wsURL)
- err := ethClient.Dial(testutils.Context(t))
- require.NoError(t, err)
+ clients := mustNewClients(t, wsURL)
+ for _, ethClient := range clients {
+ err := ethClient.Dial(testutils.Context(t))
+ require.NoError(t, err)
- errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress)
- assert.Error(t, err)
- assert.Equal(t, errType, clienttypes.Unknown)
+ errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress)
+ assert.Error(t, err)
+ assert.Equal(t, errType, commonclient.Unknown)
+ }
})
}
@@ -718,24 +783,132 @@ func TestEthClient_SubscribeNewHead(t *testing.T) {
return
})
- ethClient := mustNewClientWithChainID(t, wsURL, chainId)
- err := ethClient.Dial(testutils.Context(t))
- require.NoError(t, err)
+ clients := mustNewClientsWithChainID(t, wsURL, chainId)
+ for _, ethClient := range clients {
+ err := ethClient.Dial(testutils.Context(t))
+ require.NoError(t, err)
- headCh := make(chan *evmtypes.Head)
- sub, err := ethClient.SubscribeNewHead(ctx, headCh)
- require.NoError(t, err)
- defer sub.Unsubscribe()
-
- select {
- case err := <-sub.Err():
- t.Fatal(err)
- case <-ctx.Done():
- t.Fatal(ctx.Err())
- case h := <-headCh:
- require.NotNil(t, h.EVMChainID)
- require.Zero(t, chainId.Cmp(h.EVMChainID.ToInt()))
+ headCh := make(chan *evmtypes.Head)
+ sub, err := ethClient.SubscribeNewHead(ctx, headCh)
+ require.NoError(t, err)
+
+ select {
+ case err := <-sub.Err():
+ t.Fatal(err)
+ case <-ctx.Done():
+ t.Fatal(ctx.Err())
+ case h := <-headCh:
+ require.NotNil(t, h.EVMChainID)
+ require.Zero(t, chainId.Cmp(h.EVMChainID.ToInt()))
+ }
+ sub.Unsubscribe()
}
}
-const headResult = evmclient.HeadResult
+func TestEthClient_ErroringClient(t *testing.T) {
+ t.Parallel()
+ ctx := testutils.Context(t)
+
+ // Empty node means there are no active nodes to select from, causing client to always return error.
+ erroringClient := client.NewChainClientWithEmptyNode(t, commonclient.NodeSelectionModeRoundRobin, time.Second*0, time.Second*0, testutils.FixtureChainID)
+
+ _, err := erroringClient.BalanceAt(ctx, common.Address{}, nil)
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ err = erroringClient.BatchCallContext(ctx, nil)
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ err = erroringClient.BatchCallContextAll(ctx, nil)
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ _, err = erroringClient.BlockByHash(ctx, common.Hash{})
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ _, err = erroringClient.BlockByNumber(ctx, nil)
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ err = erroringClient.CallContext(ctx, nil, "")
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ _, err = erroringClient.CallContract(ctx, ethereum.CallMsg{}, nil)
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ // TODO-1663: test actual ChainID() call once client.go is deprecated.
+ id, err := erroringClient.ChainID()
+ require.Equal(t, id, testutils.FixtureChainID)
+ //require.Equal(t, err, commonclient.ErroringNodeError)
+ require.Equal(t, err, nil)
+
+ _, err = erroringClient.CodeAt(ctx, common.Address{}, nil)
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ id = erroringClient.ConfiguredChainID()
+ require.Equal(t, id, testutils.FixtureChainID)
+
+ err = erroringClient.Dial(ctx)
+ require.ErrorContains(t, err, "no available nodes for chain")
+
+ _, err = erroringClient.EstimateGas(ctx, ethereum.CallMsg{})
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ _, err = erroringClient.FilterLogs(ctx, ethereum.FilterQuery{})
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ _, err = erroringClient.HeaderByHash(ctx, common.Hash{})
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ _, err = erroringClient.HeaderByNumber(ctx, nil)
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ _, err = erroringClient.HeadByHash(ctx, common.Hash{})
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ _, err = erroringClient.HeadByNumber(ctx, nil)
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ _, err = erroringClient.LINKBalance(ctx, common.Address{}, common.Address{})
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ _, err = erroringClient.LatestBlockHeight(ctx)
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ _, err = erroringClient.PendingCodeAt(ctx, common.Address{})
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ _, err = erroringClient.PendingNonceAt(ctx, common.Address{})
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ err = erroringClient.SendTransaction(ctx, nil)
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ code, err := erroringClient.SendTransactionReturnCode(ctx, nil, common.Address{})
+ require.Equal(t, code, commonclient.Unknown)
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ _, err = erroringClient.SequenceAt(ctx, common.Address{}, nil)
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ _, err = erroringClient.SubscribeFilterLogs(ctx, ethereum.FilterQuery{}, nil)
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ _, err = erroringClient.SubscribeNewHead(ctx, nil)
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ _, err = erroringClient.SuggestGasPrice(ctx)
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ _, err = erroringClient.SuggestGasTipCap(ctx)
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ _, err = erroringClient.TokenBalance(ctx, common.Address{}, common.Address{})
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ _, err = erroringClient.TransactionByHash(ctx, common.Hash{})
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+ _, err = erroringClient.TransactionReceipt(ctx, common.Hash{})
+ require.Equal(t, err, commonclient.ErroringNodeError)
+
+}
+
+const headResult = client.HeadResult
diff --git a/core/chains/evm/client/errors.go b/core/chains/evm/client/errors.go
index 7b89e7b92d1..4cb505dc9eb 100644
--- a/core/chains/evm/client/errors.go
+++ b/core/chains/evm/client/errors.go
@@ -10,7 +10,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/pkg/errors"
- clienttypes "github.com/smartcontractkit/chainlink/v2/common/chains/client"
+ commonclient "github.com/smartcontractkit/chainlink/v2/common/client"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/label"
"github.com/smartcontractkit/chainlink/v2/core/logger"
)
@@ -207,7 +207,23 @@ var harmony = ClientErrors{
Fatal: harmonyFatal,
}
-var clients = []ClientErrors{parity, geth, arbitrum, metis, substrate, avalanche, nethermind, harmony, besu, erigon, klaytn, celo}
+var zkSync = ClientErrors{
+ NonceTooLow: regexp.MustCompile(`(?:: |^)nonce too low\..+actual: \d*$`),
+ NonceTooHigh: regexp.MustCompile(`(?:: |^)nonce too high\..+actual: \d*$`),
+ TerminallyUnderpriced: regexp.MustCompile(`(?:: |^)max fee per gas less than block base fee$`),
+ InsufficientEth: regexp.MustCompile(`(?:: |^)(?:insufficient balance for transfer$|insufficient funds for gas + value)`),
+ TxFeeExceedsCap: regexp.MustCompile(`(?:: |^)max priority fee per gas higher than max fee per gas$`),
+ // intrinsic gas too low - gas limit less than 14700
+ // Not enough gas for transaction validation - gas limit less than L2 fee
+ // Failed to pay the fee to the operator - gas limit less than L2+L1 fee
+ // Error function_selector = 0x, data = 0x - contract call with gas limit of 0
+ // can't start a transaction from a non-account - trying to send from an invalid address, e.g. estimating a contract -> contract tx
+ // max fee per gas higher than 2^64-1 - uint64 overflow
+ // oversized data - data too large
+ Fatal: regexp.MustCompile(`(?:: |^)(?:exceeds block gas limit|intrinsic gas too low|Not enough gas for transaction validation|Failed to pay the fee to the operator|Error function_selector = 0x, data = 0x|invalid sender. can't start a transaction from a non-account|max(?: priority)? fee per (?:gas|pubdata byte) higher than 2\^64-1|oversized data. max: \d+; actual: \d+)$`),
+}
+
+var clients = []ClientErrors{parity, geth, arbitrum, metis, substrate, avalanche, nethermind, harmony, besu, erigon, klaytn, celo, zkSync}
func (s *SendError) is(errorType int) bool {
if s == nil || s.err == nil {
@@ -397,20 +413,20 @@ func ExtractRPCError(baseErr error) (*JsonError, error) {
return &jErr, nil
}
-func NewSendErrorReturnCode(err error, lggr logger.Logger, tx *types.Transaction, fromAddress common.Address, isL2 bool) (clienttypes.SendTxReturnCode, error) {
+func ClassifySendError(err error, lggr logger.Logger, tx *types.Transaction, fromAddress common.Address, isL2 bool) (commonclient.SendTxReturnCode, error) {
sendError := NewSendError(err)
if sendError == nil {
- return clienttypes.Successful, err
+ return commonclient.Successful, err
}
if sendError.Fatal() {
lggr.Criticalw("Fatal error sending transaction", "err", sendError, "etx", tx)
// Attempt is thrown away in this case; we don't need it since it never got accepted by a node
- return clienttypes.Fatal, err
+ return commonclient.Fatal, err
}
if sendError.IsNonceTooLowError() || sendError.IsTransactionAlreadyMined() {
// Nonce too low indicated that a transaction at this nonce was confirmed already.
// Mark it as TransactionAlreadyKnown.
- return clienttypes.TransactionAlreadyKnown, err
+ return commonclient.TransactionAlreadyKnown, err
}
if sendError.IsReplacementUnderpriced() {
lggr.Errorw(fmt.Sprintf("Replacement transaction underpriced for eth_tx %x. "+
@@ -419,41 +435,41 @@ func NewSendErrorReturnCode(err error, lggr logger.Logger, tx *types.Transaction
tx.Hash(), err), "gasPrice", tx.GasPrice, "gasTipCap", tx.GasTipCap, "gasFeeCap", tx.GasFeeCap)
// Assume success and hand off to the next cycle.
- return clienttypes.Successful, err
+ return commonclient.Successful, err
}
if sendError.IsTransactionAlreadyInMempool() {
lggr.Debugw("Transaction already in mempool", "txHash", tx.Hash, "nodeErr", sendError.Error())
- return clienttypes.Successful, err
+ return commonclient.Successful, err
}
if sendError.IsTemporarilyUnderpriced() {
lggr.Infow("Transaction temporarily underpriced", "err", sendError.Error())
- return clienttypes.Successful, err
+ return commonclient.Successful, err
}
if sendError.IsTerminallyUnderpriced() {
- return clienttypes.Underpriced, err
+ return commonclient.Underpriced, err
}
if sendError.L2FeeTooLow() || sendError.IsL2FeeTooHigh() || sendError.IsL2Full() {
if isL2 {
- return clienttypes.FeeOutOfValidRange, err
+ return commonclient.FeeOutOfValidRange, err
}
- return clienttypes.Unsupported, errors.Wrap(sendError, "this error type only handled for L2s")
+ return commonclient.Unsupported, errors.Wrap(sendError, "this error type only handled for L2s")
}
if sendError.IsNonceTooHighError() {
// This error occurs when the tx nonce is greater than current_nonce + tx_count_in_mempool,
// instead of keeping the tx in mempool. This can happen if previous transactions haven't
// reached the client yet. The correct thing to do is to mark it as retryable.
lggr.Warnw("Transaction has a nonce gap.", "err", err)
- return clienttypes.Retryable, err
+ return commonclient.Retryable, err
}
if sendError.IsInsufficientEth() {
lggr.Criticalw(fmt.Sprintf("Tx %x with type 0x%d was rejected due to insufficient eth: %s\n"+
"ACTION REQUIRED: Chainlink wallet with address 0x%x is OUT OF FUNDS",
tx.Hash(), tx.Type(), sendError.Error(), fromAddress,
), "err", sendError)
- return clienttypes.InsufficientFunds, err
+ return commonclient.InsufficientFunds, err
}
if sendError.IsTimeout() {
- return clienttypes.Retryable, errors.Wrapf(sendError, "timeout while sending transaction %s", tx.Hash().Hex())
+ return commonclient.Retryable, errors.Wrapf(sendError, "timeout while sending transaction %s", tx.Hash().Hex())
}
if sendError.IsTxFeeExceedsCap() {
lggr.Criticalw(fmt.Sprintf("Sending transaction failed: %s", label.RPCTxFeeCapConfiguredIncorrectlyWarning),
@@ -461,7 +477,19 @@ func NewSendErrorReturnCode(err error, lggr logger.Logger, tx *types.Transaction
"err", sendError,
"id", "RPCTxFeeCapExceeded",
)
- return clienttypes.ExceedsMaxFee, err
+ return commonclient.ExceedsMaxFee, err
+ }
+ return commonclient.Unknown, err
+}
+
+// ClassifySendOnlyError handles SendOnly nodes error codes. In that case, we don't assume there is another transaction that will be correctly
+// priced.
+func ClassifySendOnlyError(err error) commonclient.SendTxReturnCode {
+ sendError := NewSendError(err)
+ if sendError == nil || sendError.IsNonceTooLowError() || sendError.IsTransactionAlreadyMined() || sendError.IsTransactionAlreadyInMempool() {
+ // Nonce too low or transaction known errors are expected since
+ // the primary SendTransaction may well have succeeded already
+ return commonclient.Successful
}
- return clienttypes.Unknown, err
+ return commonclient.Fatal
}
diff --git a/core/chains/evm/client/errors_test.go b/core/chains/evm/client/errors_test.go
index a5a3cc15eb6..ad8079824ab 100644
--- a/core/chains/evm/client/errors_test.go
+++ b/core/chains/evm/client/errors_test.go
@@ -40,6 +40,7 @@ func Test_Eth_Errors(t *testing.T) {
{"call failed: nonce too low: address 0x0499BEA33347cb62D79A9C0b1EDA01d8d329894c current nonce (5833) > tx nonce (5511)", true, "Avalanche"},
{"call failed: OldNonce", true, "Nethermind"},
{"call failed: OldNonce, Current nonce: 22, nonce of rejected tx: 17", true, "Nethermind"},
+ {"nonce too low. allowed nonce range: 427 - 447, actual: 426", true, "zkSync"},
}
for _, test := range tests {
@@ -60,6 +61,7 @@ func Test_Eth_Errors(t *testing.T) {
{"nonce too high: address 0x336394A3219e71D9d9bd18201d34E95C1Bb7122C, tx: 8089 state: 8090", true, "Arbitrum"},
{"nonce too high", true, "Geth"},
{"nonce too high", true, "Erigon"},
+ {"nonce too high. allowed nonce range: 427 - 477, actual: 527", true, "zkSync"},
}
for _, test := range tests {
@@ -152,6 +154,7 @@ func Test_Eth_Errors(t *testing.T) {
{"FeeTooLowToCompete", true, "Nethermind"},
{"transaction underpriced", true, "Klaytn"},
{"intrinsic gas too low", true, "Klaytn"},
+ {"max fee per gas less than block base fee", true, "zkSync"},
}
for _, test := range tests {
@@ -194,6 +197,8 @@ func Test_Eth_Errors(t *testing.T) {
{"call failed: InsufficientFunds, Account balance: 4740799397601480913, cumulative cost: 22019342038993800000", true, "Nethermind"},
{"insufficient funds", true, "Klaytn"},
{"insufficient funds for gas * price + value + gatewayFee", true, "celo"},
+ {"insufficient balance for transfer", true, "zkSync"},
+ {"insufficient funds for gas + value. balance: 42719769622667482000, fee: 48098250000000, value: 42719769622667482000", true, "celo"},
}
for _, test := range tests {
err = evmclient.NewSendErrorS(test.message)
@@ -213,6 +218,7 @@ func Test_Eth_Errors(t *testing.T) {
{"invalid gas fee cap", true, "Klaytn"},
{"max fee per gas higher than max priority fee per gas", true, "Klaytn"},
{"tx fee (1.10 of currency celo) exceeds the configured cap (1.00 celo)", true, "celo"},
+ {"max priority fee per gas higher than max fee per gas", true, "zkSync"},
}
for _, test := range tests {
err = evmclient.NewSendErrorS(test.message)
@@ -329,6 +335,16 @@ func Test_Eth_Errors_Fatal(t *testing.T) {
{"`to` address of transaction in blacklist", true, "Harmony"},
{"`from` address of transaction in blacklist", true, "Harmony"},
{"staking message does not match directive message", true, "Harmony"},
+
+ {"intrinsic gas too low", true, "zkSync"},
+ {"failed to validate the transaction. reason: Validation revert: Account validation error: Not enough gas for transaction validation", true, "zkSync"},
+ {"failed to validate the transaction. reason: Validation revert: Failed to pay for the transaction: Failed to pay the fee to the operator", true, "zkSync"},
+ {"failed to validate the transaction. reason: Validation revert: Account validation error: Error function_selector = 0x, data = 0x", true, "zkSync"},
+ {"invalid sender. can't start a transaction from a non-account", true, "zkSync"},
+ {"Failed to serialize transaction: max fee per gas higher than 2^64-1", true, "zkSync"},
+ {"Failed to serialize transaction: max fee per pubdata byte higher than 2^64-1", true, "zkSync"},
+ {"Failed to serialize transaction: max priority fee per gas higher than 2^64-1", true, "zkSync"},
+ {"Failed to serialize transaction: oversized data. max: 1000000; actual: 1000000", true, "zkSync"},
}
for _, test := range tests {
diff --git a/core/chains/evm/client/helpers_test.go b/core/chains/evm/client/helpers_test.go
index 342a9143432..2820ba992c3 100644
--- a/core/chains/evm/client/helpers_test.go
+++ b/core/chains/evm/client/helpers_test.go
@@ -9,7 +9,10 @@ import (
"github.com/pkg/errors"
+ commonclient "github.com/smartcontractkit/chainlink/v2/common/client"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/config"
+ evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+ commonconfig "github.com/smartcontractkit/chainlink/v2/core/config"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/utils"
)
@@ -64,6 +67,67 @@ func Wrap(err error, s string) error {
return wrap(err, s)
}
+func NewChainClientWithTestNode(
+ t *testing.T,
+ nodeCfg commonclient.NodeConfig,
+ noNewHeadsThreshold time.Duration,
+ leaseDuration time.Duration,
+ rpcUrl string,
+ rpcHTTPURL *url.URL,
+ sendonlyRPCURLs []url.URL,
+ id int32,
+ chainID *big.Int,
+) (Client, error) {
+ parsed, err := url.ParseRequestURI(rpcUrl)
+ if err != nil {
+ return nil, err
+ }
+
+ if parsed.Scheme != "ws" && parsed.Scheme != "wss" {
+ return nil, errors.Errorf("ethereum url scheme must be websocket: %s", parsed.String())
+ }
+
+ lggr := logger.TestLogger(t)
+ rpc := NewRPCClient(lggr, *parsed, rpcHTTPURL, "eth-primary-rpc-0", id, chainID, commonclient.Primary)
+
+ n := commonclient.NewNode[*big.Int, *evmtypes.Head, RPCCLient](
+ nodeCfg, noNewHeadsThreshold, lggr, *parsed, rpcHTTPURL, "eth-primary-node-0", id, chainID, 1, rpc, "EVM")
+ primaries := []commonclient.Node[*big.Int, *evmtypes.Head, RPCCLient]{n}
+
+ var sendonlys []commonclient.SendOnlyNode[*big.Int, RPCCLient]
+ for i, u := range sendonlyRPCURLs {
+ if u.Scheme != "http" && u.Scheme != "https" {
+ return nil, errors.Errorf("sendonly ethereum rpc url scheme must be http(s): %s", u.String())
+ }
+ var empty url.URL
+ rpc := NewRPCClient(lggr, empty, &sendonlyRPCURLs[i], fmt.Sprintf("eth-sendonly-rpc-%d", i), id, chainID, commonclient.Secondary)
+ s := commonclient.NewSendOnlyNode[*big.Int, RPCCLient](
+ lggr, u, fmt.Sprintf("eth-sendonly-%d", i), chainID, rpc)
+ sendonlys = append(sendonlys, s)
+ }
+
+ var chainType commonconfig.ChainType
+ c := NewChainClient(lggr, nodeCfg.SelectionMode(), leaseDuration, noNewHeadsThreshold, primaries, sendonlys, chainID, chainType)
+ t.Cleanup(c.Close)
+ return c, nil
+}
+
+func NewChainClientWithEmptyNode(
+ t *testing.T,
+ selectionMode string,
+ leaseDuration time.Duration,
+ noNewHeadsThreshold time.Duration,
+ chainID *big.Int,
+) Client {
+
+ lggr := logger.TestLogger(t)
+
+ var chainType commonconfig.ChainType
+ c := NewChainClient(lggr, selectionMode, leaseDuration, noNewHeadsThreshold, nil, nil, chainID, chainType)
+ t.Cleanup(c.Close)
+ return c
+}
+
type TestableSendOnlyNode interface {
SendOnlyNode
SetEthClient(newBatchSender BatchSender, newSender TxSender)
diff --git a/core/chains/evm/client/mocks/client.go b/core/chains/evm/client/mocks/client.go
index fdcb15d6a63..7617a7c05f9 100644
--- a/core/chains/evm/client/mocks/client.go
+++ b/core/chains/evm/client/mocks/client.go
@@ -7,10 +7,10 @@ import (
assets "github.com/smartcontractkit/chainlink/v2/core/assets"
- chainsclient "github.com/smartcontractkit/chainlink/v2/common/chains/client"
-
common "github.com/ethereum/go-ethereum/common"
+ commonclient "github.com/smartcontractkit/chainlink/v2/common/client"
+
context "context"
ethereum "github.com/ethereum/go-ethereum"
@@ -566,18 +566,18 @@ func (_m *Client) SendTransaction(ctx context.Context, tx *types.Transaction) er
}
// SendTransactionReturnCode provides a mock function with given fields: ctx, tx, fromAddress
-func (_m *Client) SendTransactionReturnCode(ctx context.Context, tx *types.Transaction, fromAddress common.Address) (chainsclient.SendTxReturnCode, error) {
+func (_m *Client) SendTransactionReturnCode(ctx context.Context, tx *types.Transaction, fromAddress common.Address) (commonclient.SendTxReturnCode, error) {
ret := _m.Called(ctx, tx, fromAddress)
- var r0 chainsclient.SendTxReturnCode
+ var r0 commonclient.SendTxReturnCode
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, *types.Transaction, common.Address) (chainsclient.SendTxReturnCode, error)); ok {
+ if rf, ok := ret.Get(0).(func(context.Context, *types.Transaction, common.Address) (commonclient.SendTxReturnCode, error)); ok {
return rf(ctx, tx, fromAddress)
}
- if rf, ok := ret.Get(0).(func(context.Context, *types.Transaction, common.Address) chainsclient.SendTxReturnCode); ok {
+ if rf, ok := ret.Get(0).(func(context.Context, *types.Transaction, common.Address) commonclient.SendTxReturnCode); ok {
r0 = rf(ctx, tx, fromAddress)
} else {
- r0 = ret.Get(0).(chainsclient.SendTxReturnCode)
+ r0 = ret.Get(0).(commonclient.SendTxReturnCode)
}
if rf, ok := ret.Get(1).(func(context.Context, *types.Transaction, common.Address) error); ok {
diff --git a/core/chains/evm/client/null_client.go b/core/chains/evm/client/null_client.go
index 8e271aea1e7..286f62b3b8b 100644
--- a/core/chains/evm/client/null_client.go
+++ b/core/chains/evm/client/null_client.go
@@ -9,7 +9,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rpc"
- clienttypes "github.com/smartcontractkit/chainlink/v2/common/chains/client"
+ commonclient "github.com/smartcontractkit/chainlink/v2/common/client"
"github.com/smartcontractkit/chainlink/v2/core/assets"
evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
"github.com/smartcontractkit/chainlink/v2/core/logger"
@@ -121,9 +121,9 @@ func (nc *NullClient) HeaderByHash(ctx context.Context, h common.Hash) (*types.H
return nil, nil
}
-func (nc *NullClient) SendTransactionReturnCode(ctx context.Context, tx *types.Transaction, sender common.Address) (clienttypes.SendTxReturnCode, error) {
+func (nc *NullClient) SendTransactionReturnCode(ctx context.Context, tx *types.Transaction, sender common.Address) (commonclient.SendTxReturnCode, error) {
nc.lggr.Debug("SendTransactionReturnCode")
- return clienttypes.Successful, nil
+ return commonclient.Successful, nil
}
func (nc *NullClient) SendTransaction(ctx context.Context, tx *types.Transaction) error {
diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go
new file mode 100644
index 00000000000..04b9fad1fcd
--- /dev/null
+++ b/core/chains/evm/client/rpc_client.go
@@ -0,0 +1,1045 @@
+package client
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "net/url"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethclient"
+ "github.com/ethereum/go-ethereum/rpc"
+ "github.com/google/uuid"
+ "github.com/pkg/errors"
+
+ commonclient "github.com/smartcontractkit/chainlink/v2/common/client"
+ commontypes "github.com/smartcontractkit/chainlink/v2/common/types"
+ "github.com/smartcontractkit/chainlink/v2/core/assets"
+ evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
+)
+
+// RPCCLient includes all the necessary generalized RPC methods along with any additional chain-specific methods.
+type RPCCLient interface {
+ commonclient.RPC[
+ *big.Int,
+ evmtypes.Nonce,
+ common.Address,
+ common.Hash,
+ *types.Transaction,
+ common.Hash,
+ types.Log,
+ ethereum.FilterQuery,
+ *evmtypes.Receipt,
+ *assets.Wei,
+ *evmtypes.Head,
+ ]
+ BlockByHashGeth(ctx context.Context, hash common.Hash) (b *types.Block, err error)
+ BlockByNumberGeth(ctx context.Context, number *big.Int) (b *types.Block, err error)
+ HeaderByHash(ctx context.Context, h common.Hash) (head *types.Header, err error)
+ HeaderByNumber(ctx context.Context, n *big.Int) (head *types.Header, err error)
+ PendingCodeAt(ctx context.Context, account common.Address) (b []byte, err error)
+ SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (s ethereum.Subscription, err error)
+ SuggestGasPrice(ctx context.Context) (p *big.Int, err error)
+ SuggestGasTipCap(ctx context.Context) (t *big.Int, err error)
+ TransactionReceiptGeth(ctx context.Context, txHash common.Hash) (r *types.Receipt, err error)
+}
+
+type rpcClient struct {
+ rpcLog logger.Logger
+ name string
+ id int32
+ chainID *big.Int
+ tier commonclient.NodeTier
+
+ ws rawclient
+ http *rawclient
+
+ stateMu sync.RWMutex // protects state* fields
+
+ // Need to track subscriptions because closing the RPC does not (always?)
+ // close the underlying subscription
+ subs []ethereum.Subscription
+
+ // Need to track the aliveLoop subscription, so we do not cancel it when checking lease on the MultiNode
+ aliveLoopSub ethereum.Subscription
+
+ // chStopInFlight can be closed to immediately cancel all in-flight requests on
+ // this rpcClient. Closing and replacing should be serialized through
+ // stateMu since it can happen on state transitions as well as rpcClient Close.
+ chStopInFlight chan struct{}
+}
+
+// NewRPCClient returns a new *rpcClient as commonclient.RPC
+func NewRPCClient(
+ lggr logger.Logger,
+ wsuri url.URL,
+ httpuri *url.URL,
+ name string,
+ id int32,
+ chainID *big.Int,
+ tier commonclient.NodeTier,
+) RPCCLient {
+ r := new(rpcClient)
+ r.name = name
+ r.id = id
+ r.chainID = chainID
+ r.tier = tier
+ r.ws.uri = wsuri
+ if httpuri != nil {
+ r.http = &rawclient{uri: *httpuri}
+ }
+ r.chStopInFlight = make(chan struct{})
+ lggr = lggr.Named("Client").With(
+ "clientTier", tier.String(),
+ "clientName", name,
+ "client", r.String(),
+ "evmChainID", chainID,
+ )
+ r.rpcLog = lggr.Named("RPC")
+
+ return r
+}
+
+// Not thread-safe, pure dial.
+func (r *rpcClient) Dial(callerCtx context.Context) error {
+ ctx, cancel := r.makeQueryCtx(callerCtx)
+ defer cancel()
+
+ promEVMPoolRPCNodeDials.WithLabelValues(r.chainID.String(), r.name).Inc()
+ lggr := r.rpcLog.With("wsuri", r.ws.uri.Redacted())
+ if r.http != nil {
+ lggr = lggr.With("httpuri", r.http.uri.Redacted())
+ }
+ lggr.Debugw("RPC dial: evmclient.Client#dial")
+
+ wsrpc, err := rpc.DialWebsocket(ctx, r.ws.uri.String(), "")
+ if err != nil {
+ promEVMPoolRPCNodeDialsFailed.WithLabelValues(r.chainID.String(), r.name).Inc()
+ return errors.Wrapf(err, "error while dialing websocket: %v", r.ws.uri.Redacted())
+ }
+
+ r.ws.rpc = wsrpc
+ r.ws.geth = ethclient.NewClient(wsrpc)
+
+ if r.http != nil {
+ if err := r.DialHTTP(); err != nil {
+ return err
+ }
+ }
+
+ promEVMPoolRPCNodeDialsSuccess.WithLabelValues(r.chainID.String(), r.name).Inc()
+
+ return nil
+}
+
+// Not thread-safe, pure dial.
+// DialHTTP doesn't actually make any external HTTP calls
+// It can only return error if the URL is malformed.
+func (r *rpcClient) DialHTTP() error {
+ promEVMPoolRPCNodeDials.WithLabelValues(r.chainID.String(), r.name).Inc()
+	lggr := r.rpcLog.With("httpuri", r.http.uri.Redacted())
+ lggr.Debugw("RPC dial: evmclient.Client#dial")
+
+ var httprpc *rpc.Client
+ httprpc, err := rpc.DialHTTP(r.http.uri.String())
+ if err != nil {
+ promEVMPoolRPCNodeDialsFailed.WithLabelValues(r.chainID.String(), r.name).Inc()
+ return errors.Wrapf(err, "error while dialing HTTP: %v", r.http.uri.Redacted())
+ }
+
+ r.http.rpc = httprpc
+ r.http.geth = ethclient.NewClient(httprpc)
+
+ promEVMPoolRPCNodeDialsSuccess.WithLabelValues(r.chainID.String(), r.name).Inc()
+
+ return nil
+}
+
+func (r *rpcClient) Close() {
+ defer func() {
+ if r.ws.rpc != nil {
+ r.ws.rpc.Close()
+ }
+ }()
+
+ r.stateMu.Lock()
+ defer r.stateMu.Unlock()
+ r.cancelInflightRequests()
+}
+
+// cancelInflightRequests closes and replaces the chStopInFlight
+// WARNING: NOT THREAD-SAFE
+// This must be called from within the r.stateMu lock
+func (r *rpcClient) cancelInflightRequests() {
+ close(r.chStopInFlight)
+ r.chStopInFlight = make(chan struct{})
+}
+
+func (r *rpcClient) String() string {
+ s := fmt.Sprintf("(%s)%s:%s", r.tier.String(), r.name, r.ws.uri.Redacted())
+ if r.http != nil {
+ s = s + fmt.Sprintf(":%s", r.http.uri.Redacted())
+ }
+ return s
+}
+
+func (r *rpcClient) logResult(
+ lggr logger.Logger,
+ err error,
+ callDuration time.Duration,
+ rpcDomain,
+ callName string,
+ results ...interface{},
+) {
+ lggr = lggr.With("duration", callDuration, "rpcDomain", rpcDomain, "callName", callName)
+ promEVMPoolRPCNodeCalls.WithLabelValues(r.chainID.String(), r.name).Inc()
+ if err == nil {
+ promEVMPoolRPCNodeCallsSuccess.WithLabelValues(r.chainID.String(), r.name).Inc()
+ lggr.Tracew(
+ fmt.Sprintf("evmclient.Client#%s RPC call success", callName),
+ results...,
+ )
+ } else {
+ promEVMPoolRPCNodeCallsFailed.WithLabelValues(r.chainID.String(), r.name).Inc()
+ lggr.Debugw(
+ fmt.Sprintf("evmclient.Client#%s RPC call failure", callName),
+ append(results, "err", err)...,
+ )
+ }
+ promEVMPoolRPCCallTiming.
+ WithLabelValues(
+ r.chainID.String(), // chain id
+ r.name, // rpcClient name
+ rpcDomain, // rpc domain
+ "false", // is send only
+ strconv.FormatBool(err == nil), // is successful
+ callName, // rpc call name
+ ).
+ Observe(float64(callDuration))
+}
+
+func (r *rpcClient) getRPCDomain() string {
+ if r.http != nil {
+ return r.http.uri.Host
+ }
+ return r.ws.uri.Host
+}
+
+// registerSub adds the sub to the rpcClient list
+func (r *rpcClient) registerSub(sub ethereum.Subscription) {
+ r.stateMu.Lock()
+ defer r.stateMu.Unlock()
+ r.subs = append(r.subs, sub)
+}
+
+// DisconnectAll disconnects all clients connected to the rpcClient
+// WARNING: NOT THREAD-SAFE
+// This must be called from within the r.stateMu lock
+func (r *rpcClient) DisconnectAll() {
+ if r.ws.rpc != nil {
+ r.ws.rpc.Close()
+ }
+ r.cancelInflightRequests()
+ r.unsubscribeAll()
+}
+
+// unsubscribeAll unsubscribes all subscriptions
+// WARNING: NOT THREAD-SAFE
+// This must be called from within the r.stateMu lock
+func (r *rpcClient) unsubscribeAll() {
+ for _, sub := range r.subs {
+ sub.Unsubscribe()
+ }
+ r.subs = nil
+}
+func (r *rpcClient) SetAliveLoopSub(sub commontypes.Subscription) {
+ r.stateMu.Lock()
+ defer r.stateMu.Unlock()
+
+ r.aliveLoopSub = sub
+}
+
+// SubscribersCount returns the number of clients subscribed to the node
+func (r *rpcClient) SubscribersCount() int32 {
+ r.stateMu.RLock()
+ defer r.stateMu.RUnlock()
+ return int32(len(r.subs))
+}
+
+// UnsubscribeAllExceptAliveLoop disconnects all subscriptions to the node except the alive loop subscription
+// while holding the n.stateMu lock
+func (r *rpcClient) UnsubscribeAllExceptAliveLoop() {
+ r.stateMu.Lock()
+ defer r.stateMu.Unlock()
+
+ for _, s := range r.subs {
+ if s != r.aliveLoopSub {
+ s.Unsubscribe()
+ }
+ }
+}
+
+// RPC wrappers
+
+// CallContext implementation
+func (r *rpcClient) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error {
+ ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+ if err != nil {
+ return err
+ }
+ defer cancel()
+ lggr := r.newRqLggr().With(
+ "method", method,
+ "args", args,
+ )
+
+ lggr.Debug("RPC call: evmclient.Client#CallContext")
+ start := time.Now()
+ if http != nil {
+ err = r.wrapHTTP(http.rpc.CallContext(ctx, result, method, args...))
+ } else {
+ err = r.wrapWS(ws.rpc.CallContext(ctx, result, method, args...))
+ }
+ duration := time.Since(start)
+
+ r.logResult(lggr, err, duration, r.getRPCDomain(), "CallContext")
+
+ return err
+}
+
+func (r *rpcClient) BatchCallContext(ctx context.Context, b []any) error {
+ ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+ if err != nil {
+ return err
+ }
+ batch := make([]rpc.BatchElem, len(b))
+ for i, arg := range b {
+ batch[i] = arg.(rpc.BatchElem)
+ }
+ defer cancel()
+ lggr := r.newRqLggr().With("nBatchElems", len(b), "batchElems", b)
+
+ lggr.Trace("RPC call: evmclient.Client#BatchCallContext")
+ start := time.Now()
+ if http != nil {
+ err = r.wrapHTTP(http.rpc.BatchCallContext(ctx, batch))
+ } else {
+ err = r.wrapWS(ws.rpc.BatchCallContext(ctx, batch))
+ }
+ duration := time.Since(start)
+
+ r.logResult(lggr, err, duration, r.getRPCDomain(), "BatchCallContext")
+
+ return err
+}
+
+func (r *rpcClient) Subscribe(ctx context.Context, channel chan<- *evmtypes.Head, args ...interface{}) (commontypes.Subscription, error) {
+ ctx, cancel, ws, _, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer cancel()
+ lggr := r.newRqLggr().With("args", args)
+
+ lggr.Debug("RPC call: evmclient.Client#EthSubscribe")
+ start := time.Now()
+ sub, err := ws.rpc.EthSubscribe(ctx, channel, args...)
+ if err == nil {
+ r.registerSub(sub)
+ }
+ duration := time.Since(start)
+
+ r.logResult(lggr, err, duration, r.getRPCDomain(), "EthSubscribe")
+
+ return sub, err
+}
+
+// GethClient wrappers
+
+func (r *rpcClient) TransactionReceipt(ctx context.Context, txHash common.Hash) (receipt *evmtypes.Receipt, err error) {
+ err = r.CallContext(ctx, &receipt, "eth_getTransactionReceipt", txHash, false)
+ if err != nil {
+ return nil, err
+ }
+ if receipt == nil {
+ err = ethereum.NotFound
+ return
+ }
+ return
+}
+
+func (r *rpcClient) TransactionReceiptGeth(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) {
+ ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer cancel()
+ lggr := r.newRqLggr().With("txHash", txHash)
+
+ lggr.Debug("RPC call: evmclient.Client#TransactionReceipt")
+
+ start := time.Now()
+ if http != nil {
+ receipt, err = http.geth.TransactionReceipt(ctx, txHash)
+ err = r.wrapHTTP(err)
+ } else {
+ receipt, err = ws.geth.TransactionReceipt(ctx, txHash)
+ err = r.wrapWS(err)
+ }
+ duration := time.Since(start)
+
+ r.logResult(lggr, err, duration, r.getRPCDomain(), "TransactionReceipt",
+ "receipt", receipt,
+ )
+
+ return
+}
+func (r *rpcClient) TransactionByHash(ctx context.Context, txHash common.Hash) (tx *types.Transaction, err error) {
+ ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer cancel()
+ lggr := r.newRqLggr().With("txHash", txHash)
+
+ lggr.Debug("RPC call: evmclient.Client#TransactionByHash")
+
+ start := time.Now()
+ if http != nil {
+ tx, _, err = http.geth.TransactionByHash(ctx, txHash)
+ err = r.wrapHTTP(err)
+ } else {
+ tx, _, err = ws.geth.TransactionByHash(ctx, txHash)
+ err = r.wrapWS(err)
+ }
+ duration := time.Since(start)
+
+ r.logResult(lggr, err, duration, r.getRPCDomain(), "TransactionByHash",
+ "receipt", tx,
+ )
+
+ return
+}
+
+func (r *rpcClient) HeaderByNumber(ctx context.Context, number *big.Int) (header *types.Header, err error) {
+ ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer cancel()
+ lggr := r.newRqLggr().With("number", number)
+
+ lggr.Debug("RPC call: evmclient.Client#HeaderByNumber")
+ start := time.Now()
+ if http != nil {
+ header, err = http.geth.HeaderByNumber(ctx, number)
+ err = r.wrapHTTP(err)
+ } else {
+ header, err = ws.geth.HeaderByNumber(ctx, number)
+ err = r.wrapWS(err)
+ }
+ duration := time.Since(start)
+
+ r.logResult(lggr, err, duration, r.getRPCDomain(), "HeaderByNumber", "header", header)
+
+ return
+}
+
+func (r *rpcClient) HeaderByHash(ctx context.Context, hash common.Hash) (header *types.Header, err error) {
+ ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer cancel()
+ lggr := r.newRqLggr().With("hash", hash)
+
+ lggr.Debug("RPC call: evmclient.Client#HeaderByHash")
+ start := time.Now()
+ if http != nil {
+ header, err = http.geth.HeaderByHash(ctx, hash)
+ err = r.wrapHTTP(err)
+ } else {
+ header, err = ws.geth.HeaderByHash(ctx, hash)
+ err = r.wrapWS(err)
+ }
+ duration := time.Since(start)
+
+ r.logResult(lggr, err, duration, r.getRPCDomain(), "HeaderByHash",
+ "header", header,
+ )
+
+ return
+}
+
+func (r *rpcClient) BlockByNumber(ctx context.Context, number *big.Int) (head *evmtypes.Head, err error) {
+ hex := ToBlockNumArg(number)
+ err = r.CallContext(ctx, &head, "eth_getBlockByNumber", hex, false)
+ if err != nil {
+ return nil, err
+ }
+ if head == nil {
+ err = ethereum.NotFound
+ return
+ }
+ head.EVMChainID = utils.NewBig(r.chainID)
+ return
+}
+
+func (r *rpcClient) BlockByHash(ctx context.Context, hash common.Hash) (head *evmtypes.Head, err error) {
+ err = r.CallContext(ctx, &head, "eth_getBlockByHash", hash.Hex(), false)
+ if err != nil {
+ return nil, err
+ }
+ if head == nil {
+ err = ethereum.NotFound
+ return
+ }
+ head.EVMChainID = utils.NewBig(r.chainID)
+ return
+}
+
+func (r *rpcClient) BlockByHashGeth(ctx context.Context, hash common.Hash) (block *types.Block, err error) {
+ ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer cancel()
+ lggr := r.newRqLggr().With("hash", hash)
+
+ lggr.Debug("RPC call: evmclient.Client#BlockByHash")
+ start := time.Now()
+ if http != nil {
+ block, err = http.geth.BlockByHash(ctx, hash)
+ err = r.wrapHTTP(err)
+ } else {
+ block, err = ws.geth.BlockByHash(ctx, hash)
+ err = r.wrapWS(err)
+ }
+ duration := time.Since(start)
+
+ r.logResult(lggr, err, duration, r.getRPCDomain(), "BlockByHash",
+ "block", block,
+ )
+
+ return
+}
+
+func (r *rpcClient) BlockByNumberGeth(ctx context.Context, number *big.Int) (block *types.Block, err error) {
+ ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer cancel()
+ lggr := r.newRqLggr().With("number", number)
+
+ lggr.Debug("RPC call: evmclient.Client#BlockByNumber")
+ start := time.Now()
+ if http != nil {
+ block, err = http.geth.BlockByNumber(ctx, number)
+ err = r.wrapHTTP(err)
+ } else {
+ block, err = ws.geth.BlockByNumber(ctx, number)
+ err = r.wrapWS(err)
+ }
+ duration := time.Since(start)
+
+ r.logResult(lggr, err, duration, r.getRPCDomain(), "BlockByNumber",
+ "block", block,
+ )
+
+ return
+}
+
+func (r *rpcClient) SendTransaction(ctx context.Context, tx *types.Transaction) error {
+ ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+ if err != nil {
+ return err
+ }
+ defer cancel()
+ lggr := r.newRqLggr().With("tx", tx)
+
+ lggr.Debug("RPC call: evmclient.Client#SendTransaction")
+ start := time.Now()
+ if http != nil {
+ err = r.wrapHTTP(http.geth.SendTransaction(ctx, tx))
+ } else {
+ err = r.wrapWS(ws.geth.SendTransaction(ctx, tx))
+ }
+ duration := time.Since(start)
+
+ r.logResult(lggr, err, duration, r.getRPCDomain(), "SendTransaction")
+
+ return err
+}
+
+func (r *rpcClient) SimulateTransaction(ctx context.Context, tx *types.Transaction) error {
+ // Not Implemented
+ return errors.New("SimulateTransaction not implemented")
+}
+
+func (r *rpcClient) SendEmptyTransaction(
+ ctx context.Context,
+ newTxAttempt func(nonce evmtypes.Nonce, feeLimit uint32, fee *assets.Wei, fromAddress common.Address) (attempt any, err error),
+ nonce evmtypes.Nonce,
+ gasLimit uint32,
+ fee *assets.Wei,
+ fromAddress common.Address,
+) (txhash string, err error) {
+ // Not Implemented
+ return "", errors.New("SendEmptyTransaction not implemented")
+}
+
+// PendingSequenceAt returns one higher than the highest nonce from both mempool and mined transactions
+func (r *rpcClient) PendingSequenceAt(ctx context.Context, account common.Address) (nonce evmtypes.Nonce, err error) {
+ ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer cancel()
+ lggr := r.newRqLggr().With("account", account)
+
+ lggr.Debug("RPC call: evmclient.Client#PendingNonceAt")
+ start := time.Now()
+ var n uint64
+ if http != nil {
+ n, err = http.geth.PendingNonceAt(ctx, account)
+ nonce = evmtypes.Nonce(int64(n))
+ err = r.wrapHTTP(err)
+ } else {
+ n, err = ws.geth.PendingNonceAt(ctx, account)
+ nonce = evmtypes.Nonce(int64(n))
+ err = r.wrapWS(err)
+ }
+ duration := time.Since(start)
+
+ r.logResult(lggr, err, duration, r.getRPCDomain(), "PendingNonceAt",
+ "nonce", nonce,
+ )
+
+ return
+}
+
+// SequenceAt is a bit of a misnomer. You might expect it to return the highest
+// mined nonce at the given block number, but it actually returns the total
+// transaction count which is the highest mined nonce + 1
+func (r *rpcClient) SequenceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (nonce evmtypes.Nonce, err error) {
+ ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer cancel()
+ lggr := r.newRqLggr().With("account", account, "blockNumber", blockNumber)
+
+ lggr.Debug("RPC call: evmclient.Client#NonceAt")
+ start := time.Now()
+ var n uint64
+ if http != nil {
+ n, err = http.geth.NonceAt(ctx, account, blockNumber)
+ nonce = evmtypes.Nonce(int64(n))
+ err = r.wrapHTTP(err)
+ } else {
+ n, err = ws.geth.NonceAt(ctx, account, blockNumber)
+ nonce = evmtypes.Nonce(int64(n))
+ err = r.wrapWS(err)
+ }
+ duration := time.Since(start)
+
+ r.logResult(lggr, err, duration, r.getRPCDomain(), "NonceAt",
+ "nonce", nonce,
+ )
+
+ return
+}
+
+func (r *rpcClient) PendingCodeAt(ctx context.Context, account common.Address) (code []byte, err error) {
+ ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer cancel()
+ lggr := r.newRqLggr().With("account", account)
+
+ lggr.Debug("RPC call: evmclient.Client#PendingCodeAt")
+ start := time.Now()
+ if http != nil {
+ code, err = http.geth.PendingCodeAt(ctx, account)
+ err = r.wrapHTTP(err)
+ } else {
+ code, err = ws.geth.PendingCodeAt(ctx, account)
+ err = r.wrapWS(err)
+ }
+ duration := time.Since(start)
+
+ r.logResult(lggr, err, duration, r.getRPCDomain(), "PendingCodeAt",
+ "code", code,
+ )
+
+ return
+}
+
+func (r *rpcClient) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) (code []byte, err error) {
+ ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer cancel()
+ lggr := r.newRqLggr().With("account", account, "blockNumber", blockNumber)
+
+ lggr.Debug("RPC call: evmclient.Client#CodeAt")
+ start := time.Now()
+ if http != nil {
+ code, err = http.geth.CodeAt(ctx, account, blockNumber)
+ err = r.wrapHTTP(err)
+ } else {
+ code, err = ws.geth.CodeAt(ctx, account, blockNumber)
+ err = r.wrapWS(err)
+ }
+ duration := time.Since(start)
+
+ r.logResult(lggr, err, duration, r.getRPCDomain(), "CodeAt",
+ "code", code,
+ )
+
+ return
+}
+
+func (r *rpcClient) EstimateGas(ctx context.Context, c interface{}) (gas uint64, err error) {
+ ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer cancel()
+ call := c.(ethereum.CallMsg)
+ lggr := r.newRqLggr().With("call", call)
+
+ lggr.Debug("RPC call: evmclient.Client#EstimateGas")
+ start := time.Now()
+ if http != nil {
+ gas, err = http.geth.EstimateGas(ctx, call)
+ err = r.wrapHTTP(err)
+ } else {
+ gas, err = ws.geth.EstimateGas(ctx, call)
+ err = r.wrapWS(err)
+ }
+ duration := time.Since(start)
+
+ r.logResult(lggr, err, duration, r.getRPCDomain(), "EstimateGas",
+ "gas", gas,
+ )
+
+ return
+}
+
+func (r *rpcClient) SuggestGasPrice(ctx context.Context) (price *big.Int, err error) {
+ ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer cancel()
+ lggr := r.newRqLggr()
+
+ lggr.Debug("RPC call: evmclient.Client#SuggestGasPrice")
+ start := time.Now()
+ if http != nil {
+ price, err = http.geth.SuggestGasPrice(ctx)
+ err = r.wrapHTTP(err)
+ } else {
+ price, err = ws.geth.SuggestGasPrice(ctx)
+ err = r.wrapWS(err)
+ }
+ duration := time.Since(start)
+
+ r.logResult(lggr, err, duration, r.getRPCDomain(), "SuggestGasPrice",
+ "price", price,
+ )
+
+ return
+}
+
+func (r *rpcClient) CallContract(ctx context.Context, msg interface{}, blockNumber *big.Int) (val []byte, err error) {
+ ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer cancel()
+ lggr := r.newRqLggr().With("callMsg", msg, "blockNumber", blockNumber)
+ message := msg.(ethereum.CallMsg)
+
+ lggr.Debug("RPC call: evmclient.Client#CallContract")
+ start := time.Now()
+ if http != nil {
+ val, err = http.geth.CallContract(ctx, message, blockNumber)
+ err = r.wrapHTTP(err)
+ } else {
+ val, err = ws.geth.CallContract(ctx, message, blockNumber)
+ err = r.wrapWS(err)
+ }
+ duration := time.Since(start)
+
+ r.logResult(lggr, err, duration, r.getRPCDomain(), "CallContract",
+ "val", val,
+ )
+
+ return
+
+}
+
+func (r *rpcClient) LatestBlockHeight(ctx context.Context) (*big.Int, error) {
+ var height big.Int
+ h, err := r.BlockNumber(ctx)
+ return height.SetUint64(h), err
+}
+
+func (r *rpcClient) BlockNumber(ctx context.Context) (height uint64, err error) {
+ ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer cancel()
+ lggr := r.newRqLggr()
+
+ lggr.Debug("RPC call: evmclient.Client#BlockNumber")
+ start := time.Now()
+ if http != nil {
+ height, err = http.geth.BlockNumber(ctx)
+ err = r.wrapHTTP(err)
+ } else {
+ height, err = ws.geth.BlockNumber(ctx)
+ err = r.wrapWS(err)
+ }
+ duration := time.Since(start)
+
+ r.logResult(lggr, err, duration, r.getRPCDomain(), "BlockNumber",
+ "height", height,
+ )
+
+ return
+}
+
+func (r *rpcClient) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (balance *big.Int, err error) {
+ ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer cancel()
+ lggr := r.newRqLggr().With("account", account.Hex(), "blockNumber", blockNumber)
+
+ lggr.Debug("RPC call: evmclient.Client#BalanceAt")
+ start := time.Now()
+ if http != nil {
+ balance, err = http.geth.BalanceAt(ctx, account, blockNumber)
+ err = r.wrapHTTP(err)
+ } else {
+ balance, err = ws.geth.BalanceAt(ctx, account, blockNumber)
+ err = r.wrapWS(err)
+ }
+ duration := time.Since(start)
+
+ r.logResult(lggr, err, duration, r.getRPCDomain(), "BalanceAt",
+ "balance", balance,
+ )
+
+ return
+}
+
+// TokenBalance returns the balance of the given address for the token contract address.
+func (r *rpcClient) TokenBalance(ctx context.Context, address common.Address, contractAddress common.Address) (*big.Int, error) {
+ result := ""
+ numLinkBigInt := new(big.Int)
+ functionSelector := evmtypes.HexToFunctionSelector(BALANCE_OF_ADDRESS_FUNCTION_SELECTOR) // balanceOf(address)
+ data := utils.ConcatBytes(functionSelector.Bytes(), common.LeftPadBytes(address.Bytes(), utils.EVMWordByteLen))
+ args := CallArgs{
+ To: contractAddress,
+ Data: data,
+ }
+ err := r.CallContext(ctx, &result, "eth_call", args, "latest")
+ if err != nil {
+ return numLinkBigInt, err
+ }
+ numLinkBigInt.SetString(result, 0)
+ return numLinkBigInt, nil
+}
+
+// LINKBalance returns the balance of LINK at the given address
+func (r *rpcClient) LINKBalance(ctx context.Context, address common.Address, linkAddress common.Address) (*assets.Link, error) {
+ balance, err := r.TokenBalance(ctx, address, linkAddress)
+ if err != nil {
+ return assets.NewLinkFromJuels(0), err
+ }
+ return (*assets.Link)(balance), nil
+}
+
+func (r *rpcClient) FilterEvents(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) {
+ return r.FilterLogs(ctx, q)
+}
+
+func (r *rpcClient) FilterLogs(ctx context.Context, q ethereum.FilterQuery) (l []types.Log, err error) {
+ ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer cancel()
+ lggr := r.newRqLggr().With("q", q)
+
+ lggr.Debug("RPC call: evmclient.Client#FilterLogs")
+ start := time.Now()
+ if http != nil {
+ l, err = http.geth.FilterLogs(ctx, q)
+ err = r.wrapHTTP(err)
+ } else {
+ l, err = ws.geth.FilterLogs(ctx, q)
+ err = r.wrapWS(err)
+ }
+ duration := time.Since(start)
+
+ r.logResult(lggr, err, duration, r.getRPCDomain(), "FilterLogs",
+ "log", l,
+ )
+
+ return
+}
+
+func (r *rpcClient) ClientVersion(ctx context.Context) (version string, err error) {
+ err = r.CallContext(ctx, &version, "web3_clientVersion")
+ return
+}
+
+func (r *rpcClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (sub ethereum.Subscription, err error) {
+ ctx, cancel, ws, _, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer cancel()
+ lggr := r.newRqLggr().With("q", q)
+
+ lggr.Debug("RPC call: evmclient.Client#SubscribeFilterLogs")
+ start := time.Now()
+ sub, err = ws.geth.SubscribeFilterLogs(ctx, q, ch)
+ if err == nil {
+ r.registerSub(sub)
+ }
+ err = r.wrapWS(err)
+ duration := time.Since(start)
+
+ r.logResult(lggr, err, duration, r.getRPCDomain(), "SubscribeFilterLogs")
+
+ return
+}
+
+func (r *rpcClient) SuggestGasTipCap(ctx context.Context) (tipCap *big.Int, err error) {
+ ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer cancel()
+ lggr := r.newRqLggr()
+
+ lggr.Debug("RPC call: evmclient.Client#SuggestGasTipCap")
+ start := time.Now()
+ if http != nil {
+ tipCap, err = http.geth.SuggestGasTipCap(ctx)
+ err = r.wrapHTTP(err)
+ } else {
+ tipCap, err = ws.geth.SuggestGasTipCap(ctx)
+ err = r.wrapWS(err)
+ }
+ duration := time.Since(start)
+
+ r.logResult(lggr, err, duration, r.getRPCDomain(), "SuggestGasTipCap",
+ "tipCap", tipCap,
+ )
+
+ return
+}
+
+// ChainID returns the chain ID according to the geth client. This is useful
+// for functions like verify() in the common node.
+func (r *rpcClient) ChainID(ctx context.Context) (chainID *big.Int, err error) {
+ ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
+
+ defer cancel()
+
+ if http != nil {
+ chainID, err = http.geth.ChainID(ctx)
+ err = r.wrapHTTP(err)
+ } else {
+ chainID, err = ws.geth.ChainID(ctx)
+ err = r.wrapWS(err)
+ }
+ return
+}
+
+// newRqLggr generates a new logger with a unique request ID
+func (r *rpcClient) newRqLggr() logger.Logger {
+ return r.rpcLog.With(
+ "requestID", uuid.New(),
+ )
+}
+
+func wrapCallError(err error, tp string) error {
+ if err == nil {
+ return nil
+ }
+ if errors.Cause(err).Error() == "context deadline exceeded" {
+ err = errors.Wrap(err, "remote node timed out")
+ }
+ return errors.Wrapf(err, "%s call failed", tp)
+}
+
+func (r *rpcClient) wrapWS(err error) error {
+ err = wrapCallError(err, fmt.Sprintf("%s websocket (%s)", r.tier.String(), r.ws.uri.Redacted()))
+ return err
+}
+
+func (r *rpcClient) wrapHTTP(err error) error {
+ err = wrapCallError(err, fmt.Sprintf("%s http (%s)", r.tier.String(), r.http.uri.Redacted()))
+ if err != nil {
+ r.rpcLog.Debugw("Call failed", "err", err)
+ } else {
+ r.rpcLog.Trace("Call succeeded")
+ }
+ return err
+}
+
+// makeLiveQueryCtxAndSafeGetClients wraps makeQueryCtx
+func (r *rpcClient) makeLiveQueryCtxAndSafeGetClients(parentCtx context.Context) (ctx context.Context, cancel context.CancelFunc, ws rawclient, http *rawclient, err error) {
+ // Need to wrap in mutex because state transition can cancel and replace the
+ // context
+ r.stateMu.RLock()
+ cancelCh := r.chStopInFlight
+ ws = r.ws
+ if r.http != nil {
+ cp := *r.http
+ http = &cp
+ }
+ r.stateMu.RUnlock()
+ ctx, cancel = makeQueryCtx(parentCtx, cancelCh)
+ return
+}
+
+func (r *rpcClient) makeQueryCtx(ctx context.Context) (context.Context, context.CancelFunc) {
+ return makeQueryCtx(ctx, r.getChStopInflight())
+}
+
+// getChStopInflight provides a convenience helper that mutex wraps a
+// read to the chStopInFlight
+func (r *rpcClient) getChStopInflight() chan struct{} {
+ r.stateMu.RLock()
+ defer r.stateMu.RUnlock()
+ return r.chStopInFlight
+}
+
+func (r *rpcClient) Name() string {
+ return r.name
+}
+
+func Name(r *rpcClient) string {
+ return r.name
+}
diff --git a/core/chains/evm/client/simulated_backend_client.go b/core/chains/evm/client/simulated_backend_client.go
index abab2046620..f4ad6a65a1a 100644
--- a/core/chains/evm/client/simulated_backend_client.go
+++ b/core/chains/evm/client/simulated_backend_client.go
@@ -18,13 +18,48 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rpc"
- clienttypes "github.com/smartcontractkit/chainlink/v2/common/chains/client"
+ commonclient "github.com/smartcontractkit/chainlink/v2/common/client"
"github.com/smartcontractkit/chainlink/v2/core/assets"
evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/utils"
)
+func init() {
+ var err error
+
+ balanceOfABI, err = abi.JSON(strings.NewReader(balanceOfABIString))
+ if err != nil {
+ panic(fmt.Errorf("%w: while parsing erc20ABI", err))
+ }
+}
+
+var (
+ balanceOfABIString = `[
+ {
+ "constant": true,
+ "inputs": [
+ {
+ "name": "_owner",
+ "type": "address"
+ }
+ ],
+ "name": "balanceOf",
+ "outputs": [
+ {
+ "name": "balance",
+ "type": "uint256"
+ }
+ ],
+ "payable": false,
+ "stateMutability": "view",
+ "type": "function"
+ }
+]`
+
+ balanceOfABI abi.ABI
+)
+
// SimulatedBackendClient is an Client implementation using a simulated
// blockchain backend. Note that not all RPC methods are implemented here.
type SimulatedBackendClient struct {
@@ -51,69 +86,6 @@ func (c *SimulatedBackendClient) Dial(context.Context) error {
// other simulated clients might still be using it
func (c *SimulatedBackendClient) Close() {}
-// checkEthCallArgs extracts and verifies the arguments for an eth_call RPC
-func (c *SimulatedBackendClient) checkEthCallArgs(
- args []interface{}) (*CallArgs, *big.Int, error) {
- if len(args) != 2 {
- return nil, nil, fmt.Errorf(
- "should have two arguments after \"eth_call\", got %d", len(args))
- }
- callArgs, ok := args[0].(map[string]interface{})
- if !ok {
- return nil, nil, fmt.Errorf("third arg to SimulatedBackendClient.Call "+
- "must be an eth.CallArgs, got %+#v", args[0])
- }
- blockNumber, err := c.blockNumber(args[1])
- if err != nil {
- return nil, nil, fmt.Errorf("fourth arg to SimulatedBackendClient.Call "+
- "must be the string \"latest\", or a *big.Int, got %#+v", args[1])
- }
-
- // to and from need to map to a common.Address but could come in as a string
- var (
- toAddr common.Address
- frmAddr common.Address
- )
-
- toAddr, err = interfaceToAddress(callArgs["to"])
- if err != nil {
- return nil, nil, err
- }
-
- // from is optional in the standard client; default to 0x when missing
- if value, ok := callArgs["from"]; ok {
- addr, err := interfaceToAddress(value)
- if err != nil {
- return nil, nil, err
- }
-
- frmAddr = addr
- } else {
- frmAddr = common.HexToAddress("0x")
- }
-
- ca := CallArgs{
- To: toAddr,
- From: frmAddr,
- Data: callArgs["data"].(hexutil.Bytes),
- }
-
- return &ca, blockNumber, nil
-}
-
-func interfaceToAddress(value interface{}) (common.Address, error) {
- switch v := value.(type) {
- case common.Address:
- return v, nil
- case string:
- return common.HexToAddress(v), nil
- case *big.Int:
- return common.BigToAddress(v), nil
- default:
- return common.HexToAddress("0x"), fmt.Errorf("unrecognized value type for converting value to common.Address; try string, *big.Int, or common.Address")
- }
-}
-
// CallContext mocks the ethereum client RPC calls used by chainlink, copying the
// return value into result.
// The simulated client avoids the old block error from the simulated backend by
@@ -121,41 +93,16 @@ func interfaceToAddress(value interface{}) (common.Address, error) {
// and will not return an error when an old block is used.
func (c *SimulatedBackendClient) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error {
switch method {
+ case "eth_getTransactionReceipt":
+ return c.ethGetTransactionReceipt(ctx, result, args...)
+ case "eth_getBlockByNumber":
+ return c.ethGetBlockByNumber(ctx, result, args...)
case "eth_call":
- var (
- callArgs *CallArgs
- b []byte
- err error
- )
-
- if callArgs, _, err = c.checkEthCallArgs(args); err != nil {
- return err
- }
-
- callMsg := ethereum.CallMsg{From: callArgs.From, To: &callArgs.To, Data: callArgs.Data}
-
- if b, err = c.b.CallContract(ctx, callMsg, nil /* always latest block */); err != nil {
- return fmt.Errorf("%w: while calling contract at address %x with "+
- "data %x", err, callArgs.To, callArgs.Data)
- }
-
- switch r := result.(type) {
- case *hexutil.Bytes:
- *r = append(*r, b...)
-
- if !bytes.Equal(*r, b) {
- return fmt.Errorf("was passed a non-empty array, or failed to copy "+
- "answer. Expected %x = %x", *r, b)
- }
- return nil
- default:
- return fmt.Errorf("first arg to SimulatedBackendClient.Call is an "+
- "unrecognized type: %T; add processing logic for it here", result)
- }
+ return c.ethCall(ctx, result, args...)
+ case "eth_getHeaderByNumber":
+ return c.ethGetHeaderByNumber(ctx, result, args...)
default:
- return fmt.Errorf("second arg to SimulatedBackendClient.Call is an RPC "+
- "API method which has not yet been implemented: %s. Add processing for "+
- "it here", method)
+ return fmt.Errorf("second arg to SimulatedBackendClient.Call is an RPC API method which has not yet been implemented: %s. Add processing for it here", method)
}
}
@@ -175,38 +122,6 @@ func (c *SimulatedBackendClient) currentBlockNumber() *big.Int {
return c.b.Blockchain().CurrentBlock().Number
}
-var balanceOfABIString = `[
- {
- "constant": true,
- "inputs": [
- {
- "name": "_owner",
- "type": "address"
- }
- ],
- "name": "balanceOf",
- "outputs": [
- {
- "name": "balance",
- "type": "uint256"
- }
- ],
- "payable": false,
- "stateMutability": "view",
- "type": "function"
- }
-]`
-
-var balanceOfABI abi.ABI
-
-func init() {
- var err error
- balanceOfABI, err = abi.JSON(strings.NewReader(balanceOfABIString))
- if err != nil {
- panic(fmt.Errorf("%w: while parsing erc20ABI", err))
- }
-}
-
func (c *SimulatedBackendClient) TokenBalance(ctx context.Context, address common.Address, contractAddress common.Address) (balance *big.Int, err error) {
callData, err := balanceOfABI.Pack("balanceOf", address)
if err != nil {
@@ -251,13 +166,12 @@ func (c *SimulatedBackendClient) blockNumber(number interface{}) (blockNumber *b
case "earliest":
return big.NewInt(0), nil
case "pending":
- panic("not implemented") // I don't understand the semantics of this.
+ panic("pending block not supported by simulated backend client") // I don't understand the semantics of this.
// return big.NewInt(0).Add(c.currentBlockNumber(), big.NewInt(1)), nil
default:
- blockNumber, err = utils.HexToUint256(n)
+ blockNumber, err := hexutil.DecodeBig(n)
if err != nil {
- return nil, fmt.Errorf("%w: while parsing '%s' as hex-encoded"+
- "block number", err, n)
+ return nil, fmt.Errorf("%w: while parsing '%s' as hex-encoded block number", err, n)
}
return blockNumber, nil
}
@@ -318,7 +232,8 @@ func (c *SimulatedBackendClient) BlockByHash(ctx context.Context, hash common.Ha
}
func (c *SimulatedBackendClient) LatestBlockHeight(ctx context.Context) (*big.Int, error) {
- panic("not implemented")
+ header, err := c.b.HeaderByNumber(ctx, nil)
+ return header.Number, err
}
// ChainID returns the ethereum ChainID.
@@ -386,7 +301,7 @@ func (c *SimulatedBackendClient) SubscribeNewHead(
case h := <-ch:
var head *evmtypes.Head
if h != nil {
- head = &evmtypes.Head{Number: h.Number.Int64(), Hash: h.Hash(), ParentHash: h.ParentHash, Parent: lastHead, EVMChainID: utils.NewBig(c.chainId)}
+ head = &evmtypes.Head{Difficulty: (*utils.Big)(h.Difficulty), Timestamp: time.Unix(int64(h.Time), 0), Number: h.Number.Int64(), Hash: h.Hash(), ParentHash: h.ParentHash, Parent: lastHead, EVMChainID: utils.NewBig(c.chainId)}
lastHead = head
}
select {
@@ -416,16 +331,16 @@ func (c *SimulatedBackendClient) HeaderByHash(ctx context.Context, h common.Hash
return c.b.HeaderByHash(ctx, h)
}
-func (c *SimulatedBackendClient) SendTransactionReturnCode(ctx context.Context, tx *types.Transaction, fromAddress common.Address) (clienttypes.SendTxReturnCode, error) {
+func (c *SimulatedBackendClient) SendTransactionReturnCode(ctx context.Context, tx *types.Transaction, fromAddress common.Address) (commonclient.SendTxReturnCode, error) {
err := c.SendTransaction(ctx, tx)
if err == nil {
- return clienttypes.Successful, nil
+ return commonclient.Successful, nil
}
if strings.Contains(err.Error(), "could not fetch parent") || strings.Contains(err.Error(), "invalid transaction") {
- return clienttypes.Fatal, err
+ return commonclient.Fatal, err
}
// All remaining error messages returned from SendTransaction are considered Unknown.
- return clienttypes.Unknown, err
+ return commonclient.Unknown, err
}
// SendTransaction sends a transaction.
@@ -520,114 +435,18 @@ func (c *SimulatedBackendClient) BatchCallContext(ctx context.Context, b []rpc.B
for i, elem := range b {
switch elem.Method {
case "eth_getTransactionReceipt":
- if _, ok := elem.Result.(*evmtypes.Receipt); !ok {
- return fmt.Errorf("SimulatedBackendClient expected return type of *evmtypes.Receipt for eth_getTransactionReceipt, got type %T", elem.Result)
- }
- if len(elem.Args) != 1 {
- return fmt.Errorf("SimulatedBackendClient expected 1 arg, got %d for eth_getTransactionReceipt", len(elem.Args))
- }
- hash, is := elem.Args[0].(common.Hash)
- if !is {
- return fmt.Errorf("SimulatedBackendClient expected arg to be a hash, got: %T", elem.Args[0])
- }
- receipt, err := c.b.TransactionReceipt(ctx, hash)
- if receipt != nil {
- *(b[i].Result.(*evmtypes.Receipt)) = *evmtypes.FromGethReceipt(receipt)
- }
- b[i].Error = err
+ b[i].Error = c.ethGetTransactionReceipt(ctx, b[i].Result, b[i].Args...)
case "eth_getBlockByNumber":
- switch v := elem.Result.(type) {
- case *evmtypes.Head:
- case *evmtypes.Block:
- default:
- return fmt.Errorf("SimulatedBackendClient expected return type of [*evmtypes.Head] or [*evmtypes.Block] for eth_getBlockByNumber, got type %T", v)
- }
- if len(elem.Args) != 2 {
- return fmt.Errorf("SimulatedBackendClient expected 2 args, got %d for eth_getBlockByNumber", len(elem.Args))
- }
- blockNumOrTag, is := elem.Args[0].(string)
- if !is {
- return fmt.Errorf("SimulatedBackendClient expected first arg to be a string for eth_getBlockByNumber, got: %T", elem.Args[0])
- }
- _, is = elem.Args[1].(bool)
- if !is {
- return fmt.Errorf("SimulatedBackendClient expected second arg to be a boolean for eth_getBlockByNumber, got: %T", elem.Args[1])
- }
- header, err := c.fetchHeader(ctx, blockNumOrTag)
- if err != nil {
- return err
- }
- switch res := elem.Result.(type) {
- case *evmtypes.Head:
- res.Number = header.Number.Int64()
- res.Hash = header.Hash()
- res.ParentHash = header.ParentHash
- res.Timestamp = time.Unix(int64(header.Time), 0).UTC()
- case *evmtypes.Block:
- res.Number = header.Number.Int64()
- res.Hash = header.Hash()
- res.ParentHash = header.ParentHash
- res.Timestamp = time.Unix(int64(header.Time), 0).UTC()
- default:
- return fmt.Errorf("SimulatedBackendClient Unexpected Type %T", elem.Result)
- }
- b[i].Error = err
+ b[i].Error = c.ethGetBlockByNumber(ctx, b[i].Result, b[i].Args...)
case "eth_call":
- if len(elem.Args) != 2 {
- return fmt.Errorf("SimulatedBackendClient expected 2 args, got %d for eth_call", len(elem.Args))
- }
-
- _, ok := elem.Result.(*string)
- if !ok {
- return fmt.Errorf("SimulatedBackendClient expected result to be *string for eth_call, got: %T", elem.Result)
- }
-
- params, ok := elem.Args[0].(map[string]interface{})
- if !ok {
- return fmt.Errorf("SimulatedBackendClient expected first arg to be map[string]interface{} for eth_call, got: %T", elem.Args[0])
- }
-
- blockNum, ok := elem.Args[1].(string)
- if !ok {
- return fmt.Errorf("SimulatedBackendClient expected second arg to be a string for eth_call, got: %T", elem.Args[1])
- }
-
- if blockNum != "" {
- if _, ok = new(big.Int).SetString(blockNum, 0); !ok {
- return fmt.Errorf("error while converting block number string: %s to big.Int ", blockNum)
- }
- }
-
- callMsg := toCallMsg(params)
- resp, err := c.b.CallContract(ctx, callMsg, nil)
- *(b[i].Result.(*string)) = hexutil.Encode(resp)
- b[i].Error = err
+ b[i].Error = c.ethCall(ctx, b[i].Result, b[i].Args...)
case "eth_getHeaderByNumber":
- if len(elem.Args) != 1 {
- return fmt.Errorf("SimulatedBackendClient expected 2 args, got %d for eth_getHeaderByNumber", len(elem.Args))
- }
- blockNum, is := elem.Args[0].(string)
- if !is {
- return fmt.Errorf("SimulatedBackendClient expected first arg to be a string for eth_getHeaderByNumber, got: %T", elem.Args[0])
- }
- n, err := hexutil.DecodeBig(blockNum)
- if err != nil {
- return fmt.Errorf("error while converting hex block number %s to big.Int ", blockNum)
- }
- header, err := c.b.HeaderByNumber(ctx, n)
- if err != nil {
- return err
- }
- switch v := elem.Result.(type) {
- case *types.Header:
- b[i].Result = header
- default:
- return fmt.Errorf("SimulatedBackendClient Unexpected Type %T", v)
- }
+ b[i].Error = c.ethGetHeaderByNumber(ctx, b[i].Result, b[i].Args...)
default:
return fmt.Errorf("SimulatedBackendClient got unsupported method %s", elem.Method)
}
}
+
return nil
}
@@ -654,32 +473,175 @@ func (c *SimulatedBackendClient) Commit() common.Hash {
return c.b.Commit()
}
-func toCallMsg(params map[string]interface{}) ethereum.CallMsg {
- var callMsg ethereum.CallMsg
+func (c *SimulatedBackendClient) IsL2() bool {
+ return false
+}
- switch to := params["to"].(type) {
- case string:
- toAddr := common.HexToAddress(to)
- callMsg.To = &toAddr
- case common.Address:
- callMsg.To = &to
- case *common.Address:
- callMsg.To = to
+func (c *SimulatedBackendClient) fetchHeader(ctx context.Context, blockNumOrTag string) (*types.Header, error) {
+ switch blockNumOrTag {
+ case rpc.SafeBlockNumber.String():
+ return c.b.Blockchain().CurrentSafeBlock(), nil
+ case rpc.LatestBlockNumber.String():
+ return c.b.Blockchain().CurrentHeader(), nil
+ case rpc.FinalizedBlockNumber.String():
+ return c.b.Blockchain().CurrentFinalBlock(), nil
default:
- panic("unexpected type of 'to' parameter")
+ blockNum, ok := new(big.Int).SetString(blockNumOrTag, 0)
+ if !ok {
+ return nil, fmt.Errorf("error while converting block number string: %s to big.Int ", blockNumOrTag)
+ }
+ return c.b.HeaderByNumber(ctx, blockNum)
}
+}
- switch from := params["from"].(type) {
- case nil:
- // This parameter is not required so nil is acceptable
- case string:
- callMsg.From = common.HexToAddress(from)
- case common.Address:
- callMsg.From = from
- case *common.Address:
- callMsg.From = *from
+func (c *SimulatedBackendClient) ethGetTransactionReceipt(ctx context.Context, result interface{}, args ...interface{}) error {
+ if len(args) != 1 {
+ return fmt.Errorf("SimulatedBackendClient expected 1 arg, got %d for eth_getTransactionReceipt", len(args))
+ }
+
+ hash, is := args[0].(common.Hash)
+ if !is {
+ return fmt.Errorf("SimulatedBackendClient expected arg to be a hash, got: %T", args[0])
+ }
+
+ receipt, err := c.b.TransactionReceipt(ctx, hash)
+ if err != nil {
+ return err
+ }
+
+ // strongly typing the result here has the consequence of not being flexible in
+ // custom types where a real-world RPC client would allow for custom types with
+ // custom marshalling.
+ switch typed := result.(type) {
+ case *types.Receipt:
+ *typed = *receipt
+ case *evmtypes.Receipt:
+ *typed = *evmtypes.FromGethReceipt(receipt)
+ default:
+ return fmt.Errorf("SimulatedBackendClient expected return type of *evmtypes.Receipt for eth_getTransactionReceipt, got type %T", result)
+ }
+
+ return nil
+}
+
+func (c *SimulatedBackendClient) ethGetBlockByNumber(ctx context.Context, result interface{}, args ...interface{}) error {
+ if len(args) != 2 {
+ return fmt.Errorf("SimulatedBackendClient expected 2 args, got %d for eth_getBlockByNumber", len(args))
+ }
+
+ blockNumOrTag, is := args[0].(string)
+ if !is {
+ return fmt.Errorf("SimulatedBackendClient expected first arg to be a string for eth_getBlockByNumber, got: %T", args[0])
+ }
+
+ _, is = args[1].(bool)
+ if !is {
+ return fmt.Errorf("SimulatedBackendClient expected second arg to be a boolean for eth_getBlockByNumber, got: %T", args[1])
+ }
+
+ header, err := c.fetchHeader(ctx, blockNumOrTag)
+ if err != nil {
+ return err
+ }
+
+ switch res := result.(type) {
+ case *evmtypes.Head:
+ res.Number = header.Number.Int64()
+ res.Hash = header.Hash()
+ res.ParentHash = header.ParentHash
+ res.Timestamp = time.Unix(int64(header.Time), 0).UTC()
+ case *evmtypes.Block:
+ res.Number = header.Number.Int64()
+ res.Hash = header.Hash()
+ res.ParentHash = header.ParentHash
+ res.Timestamp = time.Unix(int64(header.Time), 0).UTC()
+ default:
+ return fmt.Errorf("SimulatedBackendClient Unexpected Type %T", res)
+ }
+
+ return nil
+}
+
+func (c *SimulatedBackendClient) ethCall(ctx context.Context, result interface{}, args ...interface{}) error {
+ if len(args) != 2 {
+ return fmt.Errorf("SimulatedBackendClient expected 2 args, got %d for eth_call", len(args))
+ }
+
+ params, ok := args[0].(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("SimulatedBackendClient expected first arg to be map[string]interface{} for eth_call, got: %T", args[0])
+ }
+
+ if _, err := c.blockNumber(args[1]); err != nil {
+ return fmt.Errorf("SimulatedBackendClient expected second arg to be the string 'latest' or a *big.Int for eth_call, got: %T", args[1])
+ }
+
+ resp, err := c.b.CallContract(ctx, toCallMsg(params), nil /* always latest block on simulated backend */)
+ if err != nil {
+ return err
+ }
+
+ switch typedResult := result.(type) {
+ case *hexutil.Bytes:
+ *typedResult = append(*typedResult, resp...)
+
+ if !bytes.Equal(*typedResult, resp) {
+ return fmt.Errorf("SimulatedBackendClient was passed a non-empty array, or failed to copy answer. Expected %x = %x", *typedResult, resp)
+ }
+ case *string:
+ *typedResult = hexutil.Encode(resp)
default:
- panic("unexpected type of 'from' parameter")
+ return fmt.Errorf("SimulatedBackendClient unexpected type %T", result)
+ }
+
+ return nil
+}
+
+func (c *SimulatedBackendClient) ethGetHeaderByNumber(ctx context.Context, result interface{}, args ...interface{}) error {
+ if len(args) != 1 {
+ return fmt.Errorf("SimulatedBackendClient expected 1 arg, got %d for eth_getHeaderByNumber", len(args))
+ }
+
+ blockNumber, err := c.blockNumber(args[0])
+ if err != nil {
+ return fmt.Errorf("SimulatedBackendClient expected first arg to be a string for eth_getHeaderByNumber: %w", err)
+ }
+
+ header, err := c.b.HeaderByNumber(ctx, blockNumber)
+ if err != nil {
+ return err
+ }
+
+ switch typedResult := result.(type) {
+ case *types.Header:
+ *typedResult = *header
+ default:
+ return fmt.Errorf("SimulatedBackendClient unexpected Type %T", typedResult)
+ }
+
+ return nil
+}
+
+func toCallMsg(params map[string]interface{}) ethereum.CallMsg {
+ var callMsg ethereum.CallMsg
+
+ toAddr, err := interfaceToAddress(params["to"])
+ if err != nil {
+ panic(fmt.Errorf("unexpected 'to' parameter: %s", err))
+ }
+
+ callMsg.To = &toAddr
+
+ // from is optional in the standard client; default to 0x when missing
+ if value, ok := params["from"]; ok {
+ addr, err := interfaceToAddress(value)
+ if err != nil {
+ panic(fmt.Errorf("unexpected 'from' parameter: %s", err))
+ }
+
+ callMsg.From = addr
+ } else {
+ callMsg.From = common.HexToAddress("0x")
}
switch data := params["data"].(type) {
@@ -690,7 +652,7 @@ func toCallMsg(params map[string]interface{}) ethereum.CallMsg {
case []byte:
callMsg.Data = data
default:
- panic("unexpected type of 'data' parameter")
+ panic("unexpected type of 'data' parameter; try hexutil.Bytes, []byte, or nil")
}
if value, ok := params["value"].(*big.Int); ok {
@@ -708,23 +670,23 @@ func toCallMsg(params map[string]interface{}) ethereum.CallMsg {
return callMsg
}
-func (c *SimulatedBackendClient) IsL2() bool {
- return false
-}
+func interfaceToAddress(value interface{}) (common.Address, error) {
+ switch v := value.(type) {
+ case common.Address:
+ return v, nil
+ case string:
+ if ok := common.IsHexAddress(v); !ok {
+ return common.Address{}, fmt.Errorf("string not formatted as a hex encoded evm address")
+ }
-func (c *SimulatedBackendClient) fetchHeader(ctx context.Context, blockNumOrTag string) (*types.Header, error) {
- switch blockNumOrTag {
- case rpc.SafeBlockNumber.String():
- return c.b.Blockchain().CurrentSafeBlock(), nil
- case rpc.LatestBlockNumber.String():
- return c.b.Blockchain().CurrentHeader(), nil
- case rpc.FinalizedBlockNumber.String():
- return c.b.Blockchain().CurrentFinalBlock(), nil
- default:
- blockNum, ok := new(big.Int).SetString(blockNumOrTag, 0)
- if !ok {
- return nil, fmt.Errorf("error while converting block number string: %s to big.Int ", blockNumOrTag)
+ return common.HexToAddress(v), nil
+ case *big.Int:
+ if v.Uint64() > 0 || len(v.Bytes()) > 20 {
+ return common.Address{}, fmt.Errorf("invalid *big.Int; value must be larger than 0 with a byte length <= 20")
}
- return c.b.HeaderByNumber(ctx, blockNum)
+
+ return common.BigToAddress(v), nil
+ default:
+ return common.Address{}, fmt.Errorf("unrecognized value type for converting value to common.Address; use hex encoded string, *big.Int, or common.Address")
}
}
diff --git a/core/chains/evm/config/config_test.go b/core/chains/evm/config/config_test.go
index d0f9e846e37..0a3fc5f41e6 100644
--- a/core/chains/evm/config/config_test.go
+++ b/core/chains/evm/config/config_test.go
@@ -423,7 +423,7 @@ func Test_chainScopedConfig_Validate(t *testing.T) {
t.Run("testnet", func(t *testing.T) {
cfg := configWithChains(t, 421611, &toml.Chain{
GasEstimator: toml.GasEstimator{
- Mode: ptr("L2Suggested"),
+ Mode: ptr("SuggestedPrice"),
},
})
assert.NoError(t, cfg.Validate())
diff --git a/core/chains/evm/config/toml/defaults/Fantom_Mainnet.toml b/core/chains/evm/config/toml/defaults/Fantom_Mainnet.toml
index 7046642bb93..c7fb6ba4736 100644
--- a/core/chains/evm/config/toml/defaults/Fantom_Mainnet.toml
+++ b/core/chains/evm/config/toml/defaults/Fantom_Mainnet.toml
@@ -9,9 +9,8 @@ RPCBlockQueryDelay = 2
Enabled = true
[GasEstimator]
-# Fantom network has been slow to include txs at times when using the BlockHistory estimator, and the recommendation is to use L2Suggested mode.
-# There is work under way to improve L2Suggested mode's name so that its use on non-L2 chains will be less confusing in the future.
-Mode = 'L2Suggested'
+# Fantom network has been slow to include txs at times when using the BlockHistory estimator, and the recommendation is to use SuggestedPrice mode.
+Mode = 'SuggestedPrice'
[OCR2.Automation]
GasLimit = 3800000
\ No newline at end of file
diff --git a/core/chains/evm/config/toml/defaults/Fantom_Testnet.toml b/core/chains/evm/config/toml/defaults/Fantom_Testnet.toml
index 0292ed5b743..1e1aab14681 100644
--- a/core/chains/evm/config/toml/defaults/Fantom_Testnet.toml
+++ b/core/chains/evm/config/toml/defaults/Fantom_Testnet.toml
@@ -7,7 +7,7 @@ NoNewHeadsThreshold = '0'
RPCBlockQueryDelay = 2
[GasEstimator]
-Mode = 'L2Suggested'
+Mode = 'SuggestedPrice'
[OCR2.Automation]
GasLimit = 3800000
\ No newline at end of file
diff --git a/core/chains/evm/config/toml/defaults/Klaytn_Mainnet.toml b/core/chains/evm/config/toml/defaults/Klaytn_Mainnet.toml
index 36dc04ae96b..c68f03b0446 100644
--- a/core/chains/evm/config/toml/defaults/Klaytn_Mainnet.toml
+++ b/core/chains/evm/config/toml/defaults/Klaytn_Mainnet.toml
@@ -5,6 +5,6 @@ NoNewHeadsThreshold = '30s'
OCR.ContractConfirmations = 1
[GasEstimator]
-Mode = 'L2Suggested'
+Mode = 'SuggestedPrice'
PriceDefault = '750 gwei' # gwei = ston
BumpThreshold = 0
diff --git a/core/chains/evm/config/toml/defaults/Klaytn_Testnet.toml b/core/chains/evm/config/toml/defaults/Klaytn_Testnet.toml
index 34b15ca74b1..864aa0fa72a 100644
--- a/core/chains/evm/config/toml/defaults/Klaytn_Testnet.toml
+++ b/core/chains/evm/config/toml/defaults/Klaytn_Testnet.toml
@@ -5,6 +5,6 @@ NoNewHeadsThreshold = '30s'
OCR.ContractConfirmations = 1
[GasEstimator]
-Mode = 'L2Suggested'
+Mode = 'SuggestedPrice'
PriceDefault = '750 gwei' # gwei = ston
BumpThreshold = 0
diff --git a/core/chains/evm/config/toml/defaults/Kroma_Mainnet.toml b/core/chains/evm/config/toml/defaults/Kroma_Mainnet.toml
new file mode 100644
index 00000000000..55154bf766c
--- /dev/null
+++ b/core/chains/evm/config/toml/defaults/Kroma_Mainnet.toml
@@ -0,0 +1,26 @@
+ChainID = '255'
+ChainType = 'kroma' # Kroma is based on the Optimism Bedrock architecture
+FinalityDepth = 400
+LogPollInterval = '2s'
+NoNewHeadsThreshold = '40s'
+MinIncomingConfirmations = 1
+
+[GasEstimator]
+EIP1559DynamicFees = true
+PriceMin = '1 wei'
+BumpMin = '100 wei'
+
+[GasEstimator.BlockHistory]
+BlockHistorySize = 24
+
+[Transactions]
+ResendAfterThreshold = '30s'
+
+[HeadTracker]
+HistoryDepth = 400
+
+[NodePool]
+SyncThreshold = 10
+
+[OCR]
+ContractConfirmations = 1
diff --git a/core/chains/evm/config/toml/defaults/Kroma_Sepolia.toml b/core/chains/evm/config/toml/defaults/Kroma_Sepolia.toml
new file mode 100644
index 00000000000..643b0556b32
--- /dev/null
+++ b/core/chains/evm/config/toml/defaults/Kroma_Sepolia.toml
@@ -0,0 +1,26 @@
+ChainID = '2358'
+ChainType = 'kroma' # Kroma is based on the Optimism Bedrock architecture
+FinalityDepth = 400
+LogPollInterval = '2s'
+NoNewHeadsThreshold = '40s'
+MinIncomingConfirmations = 1
+
+[GasEstimator]
+EIP1559DynamicFees = true
+PriceMin = '1 wei'
+BumpMin = '100 wei'
+
+[GasEstimator.BlockHistory]
+BlockHistorySize = 24
+
+[Transactions]
+ResendAfterThreshold = '30s'
+
+[HeadTracker]
+HistoryDepth = 400
+
+[NodePool]
+SyncThreshold = 10
+
+[OCR]
+ContractConfirmations = 1
diff --git a/core/chains/evm/config/toml/defaults/Metis_Mainnet.toml b/core/chains/evm/config/toml/defaults/Metis_Mainnet.toml
index 855fef55a75..3e8efa531cc 100644
--- a/core/chains/evm/config/toml/defaults/Metis_Mainnet.toml
+++ b/core/chains/evm/config/toml/defaults/Metis_Mainnet.toml
@@ -8,8 +8,8 @@ NoNewHeadsThreshold = '0'
OCR.ContractConfirmations = 1
[GasEstimator]
-Mode = 'L2Suggested'
-# Metis uses the L2Suggested estimator; we don't want to place any limits on the minimum gas price
+Mode = 'SuggestedPrice'
+# Metis uses the SuggestedPrice estimator; we don't want to place any limits on the minimum gas price
PriceMin = '0'
# Never bump gas on metis
BumpThreshold = 0
diff --git a/core/chains/evm/config/toml/defaults/Metis_Rinkeby.toml b/core/chains/evm/config/toml/defaults/Metis_Rinkeby.toml
index 487cc224852..7d9fec9076f 100644
--- a/core/chains/evm/config/toml/defaults/Metis_Rinkeby.toml
+++ b/core/chains/evm/config/toml/defaults/Metis_Rinkeby.toml
@@ -9,7 +9,7 @@ OCR.ContractConfirmations = 1
Enabled = true
[GasEstimator]
-Mode = 'L2Suggested'
+Mode = 'SuggestedPrice'
PriceMin = '0'
BumpThreshold = 0
diff --git a/core/chains/evm/config/toml/defaults/Scroll_Mainnet.toml b/core/chains/evm/config/toml/defaults/Scroll_Mainnet.toml
index 63c08559016..56ed84c7f38 100644
--- a/core/chains/evm/config/toml/defaults/Scroll_Mainnet.toml
+++ b/core/chains/evm/config/toml/defaults/Scroll_Mainnet.toml
@@ -7,8 +7,8 @@ NoNewHeadsThreshold = '0'
OCR.ContractConfirmations = 1
[GasEstimator]
-Mode = 'L2Suggested'
-# Scroll uses the L2Suggested estimator; we don't want to place any limits on the minimum gas price
+Mode = 'SuggestedPrice'
+# Scroll uses the SuggestedPrice estimator; we don't want to place any limits on the minimum gas price
PriceMin = '0'
# Never bump gas on Scroll
BumpThreshold = 0
diff --git a/core/chains/evm/config/toml/defaults/Scroll_Sepolia.toml b/core/chains/evm/config/toml/defaults/Scroll_Sepolia.toml
index 5a1a0f9ba7d..af17c4d485e 100644
--- a/core/chains/evm/config/toml/defaults/Scroll_Sepolia.toml
+++ b/core/chains/evm/config/toml/defaults/Scroll_Sepolia.toml
@@ -7,8 +7,8 @@ NoNewHeadsThreshold = '0'
OCR.ContractConfirmations = 1
[GasEstimator]
-Mode = 'L2Suggested'
-# Scroll uses the L2Suggested estimator; we don't want to place any limits on the minimum gas price
+Mode = 'SuggestedPrice'
+# Scroll uses the SuggestedPrice estimator; we don't want to place any limits on the minimum gas price
PriceMin = '0'
# Never bump gas on Scroll
BumpThreshold = 0
diff --git a/core/chains/evm/config/toml/defaults/WeMix_Mainnet.toml b/core/chains/evm/config/toml/defaults/WeMix_Mainnet.toml
new file mode 100644
index 00000000000..ee50a9844a4
--- /dev/null
+++ b/core/chains/evm/config/toml/defaults/WeMix_Mainnet.toml
@@ -0,0 +1,14 @@
+ChainID = '1111'
+ChainType = 'wemix'
+FinalityDepth = 1
+MinIncomingConfirmations = 1
+# WeMix emits a block every 1 second, regardless of transactions
+LogPollInterval = '3s'
+NoNewHeadsThreshold = '30s'
+
+[OCR]
+ContractConfirmations = 1
+
+[GasEstimator]
+EIP1559DynamicFees = true
+TipCapDefault = '100 gwei'
diff --git a/core/chains/evm/config/toml/defaults/WeMix_Testnet.toml b/core/chains/evm/config/toml/defaults/WeMix_Testnet.toml
new file mode 100644
index 00000000000..6cdb451eb1d
--- /dev/null
+++ b/core/chains/evm/config/toml/defaults/WeMix_Testnet.toml
@@ -0,0 +1,14 @@
+ChainID = '1112'
+ChainType = 'wemix'
+FinalityDepth = 1
+MinIncomingConfirmations = 1
+# WeMix emits a block every 1 second, regardless of transactions
+LogPollInterval = '3s'
+NoNewHeadsThreshold = '30s'
+
+[OCR]
+ContractConfirmations = 1
+
+[GasEstimator]
+EIP1559DynamicFees = true
+TipCapDefault = '100 gwei'
diff --git a/core/chains/evm/config/toml/defaults/zkSync_Goerli.toml b/core/chains/evm/config/toml/defaults/zkSync_Goerli.toml
new file mode 100644
index 00000000000..04529a41b81
--- /dev/null
+++ b/core/chains/evm/config/toml/defaults/zkSync_Goerli.toml
@@ -0,0 +1,14 @@
+ChainID = '280'
+ChainType = 'zksync'
+FinalityDepth = 1
+LogPollInterval = '5s'
+MinIncomingConfirmations = 1
+NoNewHeadsThreshold = '1m'
+
+[GasEstimator]
+LimitDefault = 3_500_000
+PriceMax = 18446744073709551615
+PriceMin = 0
+
+[HeadTracker]
+HistoryDepth = 5
diff --git a/core/chains/evm/config/toml/defaults/zkSync_Mainnet.toml b/core/chains/evm/config/toml/defaults/zkSync_Mainnet.toml
new file mode 100644
index 00000000000..d7808edd15f
--- /dev/null
+++ b/core/chains/evm/config/toml/defaults/zkSync_Mainnet.toml
@@ -0,0 +1,14 @@
+ChainID = '324'
+ChainType = 'zksync'
+FinalityDepth = 1
+LogPollInterval = '5s'
+MinIncomingConfirmations = 1
+NoNewHeadsThreshold = '1m'
+
+[GasEstimator]
+LimitDefault = 3_500_000
+PriceMax = 18446744073709551615
+PriceMin = 0
+
+[HeadTracker]
+HistoryDepth = 5
diff --git a/core/chains/evm/evm_txm.go b/core/chains/evm/evm_txm.go
index d2f4178c7d9..bfc0f6378bf 100644
--- a/core/chains/evm/evm_txm.go
+++ b/core/chains/evm/evm_txm.go
@@ -3,7 +3,7 @@ package evm
import (
"fmt"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
evmconfig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config"
@@ -61,7 +61,6 @@ func newEvmTxm(
lggr,
logPoller,
opts.KeyStore,
- opts.EventBroadcaster,
estimator)
} else {
txm = opts.GenTxManager(chainID)
diff --git a/core/chains/evm/forwarders/forwarder_manager.go b/core/chains/evm/forwarders/forwarder_manager.go
index 46bca95ba30..934da487fd7 100644
--- a/core/chains/evm/forwarders/forwarder_manager.go
+++ b/core/chains/evm/forwarders/forwarder_manager.go
@@ -10,9 +10,10 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink-relay/pkg/services"
+
evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
evmlogpoller "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
diff --git a/core/chains/evm/forwarders/orm.go b/core/chains/evm/forwarders/orm.go
index 287698d22f6..df89dbe29e9 100644
--- a/core/chains/evm/forwarders/orm.go
+++ b/core/chains/evm/forwarders/orm.go
@@ -4,8 +4,8 @@ import (
"database/sql"
"github.com/ethereum/go-ethereum/common"
+ "github.com/jmoiron/sqlx"
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
diff --git a/core/chains/evm/forwarders/orm_test.go b/core/chains/evm/forwarders/orm_test.go
index a3d5c2831fe..f6d63dc574f 100644
--- a/core/chains/evm/forwarders/orm_test.go
+++ b/core/chains/evm/forwarders/orm_test.go
@@ -15,7 +15,7 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
"github.com/smartcontractkit/chainlink/v2/core/utils"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
)
type TestORM struct {
diff --git a/core/chains/evm/gas/arbitrum_estimator.go b/core/chains/evm/gas/arbitrum_estimator.go
index 17934bfa070..78d93243bbe 100644
--- a/core/chains/evm/gas/arbitrum_estimator.go
+++ b/core/chains/evm/gas/arbitrum_estimator.go
@@ -31,12 +31,12 @@ type ethClient interface {
CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error)
}
-// arbitrumEstimator is an Estimator which extends l2SuggestedPriceEstimator to use getPricesInArbGas() for gas limit estimation.
+// arbitrumEstimator is an Estimator which extends SuggestedPriceEstimator to use getPricesInArbGas() for gas limit estimation.
type arbitrumEstimator struct {
services.StateMachine
cfg ArbConfig
- EvmEstimator // *l2SuggestedPriceEstimator
+ EvmEstimator // *SuggestedPriceEstimator
client ethClient
pollPeriod time.Duration
@@ -56,7 +56,7 @@ func NewArbitrumEstimator(lggr logger.Logger, cfg ArbConfig, rpcClient rpcClient
lggr = lggr.Named("ArbitrumEstimator")
return &arbitrumEstimator{
cfg: cfg,
- EvmEstimator: NewL2SuggestedPriceEstimator(lggr, rpcClient),
+ EvmEstimator: NewSuggestedPriceEstimator(lggr, rpcClient),
client: ethClient,
pollPeriod: 10 * time.Second,
logger: lggr,
@@ -99,7 +99,7 @@ func (a *arbitrumEstimator) HealthReport() map[string]error {
}
// GetLegacyGas estimates both the gas price and the gas limit.
-// - Price is delegated to the embedded l2SuggestedPriceEstimator.
+// - Price is delegated to the embedded SuggestedPriceEstimator.
// - Limit is computed from the dynamic values perL2Tx and perL1CalldataUnit, provided by the getPricesInArbGas() method
// of the precompilie contract at ArbGasInfoAddress. perL2Tx is a constant amount of gas, and perL1CalldataUnit is
// multiplied by the length of the tx calldata. The sum of these two values plus the original l2GasLimit is returned.
diff --git a/core/chains/evm/gas/arbitrum_estimator_test.go b/core/chains/evm/gas/arbitrum_estimator_test.go
index b6e299190c5..a226368edf2 100644
--- a/core/chains/evm/gas/arbitrum_estimator_test.go
+++ b/core/chains/evm/gas/arbitrum_estimator_test.go
@@ -131,7 +131,7 @@ func TestArbitrumEstimator(t *testing.T) {
ethClient := mocks.NewETHClient(t)
o := gas.NewArbitrumEstimator(logger.TestLogger(t), &arbConfig{}, rpcClient, ethClient)
_, _, err := o.BumpLegacyGas(testutils.Context(t), assets.NewWeiI(42), gasLimit, assets.NewWeiI(10), nil)
- assert.EqualError(t, err, "bump gas is not supported for this l2")
+ assert.EqualError(t, err, "bump gas is not supported for this chain")
})
t.Run("calling GetLegacyGas on started estimator if initial call failed returns error", func(t *testing.T) {
@@ -152,7 +152,7 @@ func TestArbitrumEstimator(t *testing.T) {
t.Cleanup(func() { assert.NoError(t, o.Close()) })
_, _, err := o.GetLegacyGas(testutils.Context(t), calldata, gasLimit, maxGasPrice)
- assert.EqualError(t, err, "failed to estimate l2 gas; gas price not set")
+ assert.EqualError(t, err, "failed to estimate gas; gas price not set")
})
t.Run("limit computes", func(t *testing.T) {
diff --git a/core/chains/evm/gas/block_history_estimator.go b/core/chains/evm/gas/block_history_estimator.go
index 42c8f051535..80ae19f109f 100644
--- a/core/chains/evm/gas/block_history_estimator.go
+++ b/core/chains/evm/gas/block_history_estimator.go
@@ -163,7 +163,7 @@ func (b *BlockHistoryEstimator) setLatest(head *evmtypes.Head) {
if baseFee := head.BaseFeePerGas; baseFee != nil {
promBlockHistoryEstimatorCurrentBaseFee.WithLabelValues(b.chainID.String()).Set(float64(baseFee.Int64()))
}
- b.logger.Debugw("Set latest block", "blockNum", head.Number, "blockHash", head.Hash, "baseFee", head.BaseFeePerGas)
+ b.logger.Debugw("Set latest block", "blockNum", head.Number, "blockHash", head.Hash, "baseFee", head.BaseFeePerGas, "baseFeeWei", head.BaseFeePerGas.ToInt())
b.latestMu.Lock()
defer b.latestMu.Unlock()
b.latest = head
diff --git a/core/chains/evm/gas/block_history_estimator_test.go b/core/chains/evm/gas/block_history_estimator_test.go
index 7f4d157e37a..c8b193c4435 100644
--- a/core/chains/evm/gas/block_history_estimator_test.go
+++ b/core/chains/evm/gas/block_history_estimator_test.go
@@ -23,6 +23,7 @@ import (
evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+ "github.com/smartcontractkit/chainlink/v2/core/config"
"github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/evmtest"
@@ -1329,6 +1330,12 @@ func TestBlockHistoryEstimator_IsUsable(t *testing.T) {
assert.Equal(t, true, bhe.IsUsable(tx2, block, cfg.ChainType(), geCfg.PriceMin(), logger.TestLogger(t)))
})
+ t.Run("returns false if transaction is of type 0x16 only on WeMix", func(t *testing.T) {
+ cfg.ChainTypeF = "wemix"
+ tx := evmtypes.Transaction{Type: 0x16, GasPrice: assets.NewWeiI(10), GasLimit: 42, Hash: utils.NewHash()}
+ assert.Equal(t, false, bhe.IsUsable(tx, block, cfg.ChainType(), geCfg.PriceMin(), logger.TestLogger(t)))
+ })
+
t.Run("returns false if transaction has base fee higher than the gas price only on Celo", func(t *testing.T) {
cfg.ChainTypeF = "celo"
tx := evmtypes.Transaction{Type: 0x0, GasPrice: assets.NewWeiI(10), GasLimit: 42, Hash: utils.NewHash()}
@@ -1342,6 +1349,21 @@ func TestBlockHistoryEstimator_IsUsable(t *testing.T) {
assert.Equal(t, true, bhe.IsUsable(tx, block, cfg.ChainType(), geCfg.PriceMin(), logger.TestLogger(t)))
assert.Equal(t, true, bhe.IsUsable(tx2, block, cfg.ChainType(), geCfg.PriceMin(), logger.TestLogger(t)))
})
+
+ t.Run("returns false if transaction is of type 0x71 or 0xff only on zkSync", func(t *testing.T) {
+ cfg.ChainTypeF = string(config.ChainZkSync)
+ tx := evmtypes.Transaction{Type: 0x71, GasPrice: assets.NewWeiI(10), GasLimit: 42, Hash: utils.NewHash()}
+ assert.Equal(t, false, bhe.IsUsable(tx, block, cfg.ChainType(), geCfg.PriceMin(), logger.TestLogger(t)))
+
+ tx.Type = 0x02
+ assert.Equal(t, true, bhe.IsUsable(tx, block, cfg.ChainType(), geCfg.PriceMin(), logger.TestLogger(t)))
+
+ tx.Type = 0xff
+ assert.Equal(t, false, bhe.IsUsable(tx, block, cfg.ChainType(), geCfg.PriceMin(), logger.TestLogger(t)))
+
+ cfg.ChainTypeF = ""
+ assert.Equal(t, true, bhe.IsUsable(tx, block, cfg.ChainType(), geCfg.PriceMin(), logger.TestLogger(t)))
+ })
}
func TestBlockHistoryEstimator_EffectiveTipCap(t *testing.T) {
diff --git a/core/chains/evm/gas/chain_specific.go b/core/chains/evm/gas/chain_specific.go
index cd38f49ee0b..4f0d2e6b2f8 100644
--- a/core/chains/evm/gas/chain_specific.go
+++ b/core/chains/evm/gas/chain_specific.go
@@ -19,7 +19,7 @@ func chainSpecificIsUsable(tx evmtypes.Transaction, baseFee *assets.Wei, chainTy
return false
}
}
- if chainType == config.ChainOptimismBedrock {
+ if chainType == config.ChainOptimismBedrock || chainType == config.ChainKroma {
// This is a special deposit transaction type introduced in Bedrock upgrade.
// This is a system transaction that it will occur at least one time per block.
// We should discard this type before even processing it to avoid flooding the
@@ -42,5 +42,19 @@ func chainSpecificIsUsable(tx evmtypes.Transaction, baseFee *assets.Wei, chainTy
return false
}
}
+ if chainType == config.ChainWeMix {
+ // WeMix specific transaction type that enables fee delegation.
+ // https://docs.wemix.com/v/en/design/fee-delegation
+ if tx.Type == 0x16 {
+ return false
+ }
+ }
+ if chainType == config.ChainZkSync {
+ // zkSync specific types for contract deployment & priority transactions
+ // https://era.zksync.io/docs/reference/concepts/transactions.html#eip-712-0x71
+ if tx.Type == 0x71 || tx.Type == 0xff {
+ return false
+ }
+ }
return true
}
diff --git a/core/chains/evm/gas/models.go b/core/chains/evm/gas/models.go
index 7bd88d75433..299d7d54734 100644
--- a/core/chains/evm/gas/models.go
+++ b/core/chains/evm/gas/models.go
@@ -78,8 +78,8 @@ func NewEstimator(lggr logger.Logger, ethClient evmclient.Client, cfg Config, ge
return NewWrappedEvmEstimator(NewBlockHistoryEstimator(lggr, ethClient, cfg, geCfg, bh, *ethClient.ConfiguredChainID()), df, l1Oracle)
case "FixedPrice":
return NewWrappedEvmEstimator(NewFixedPriceEstimator(geCfg, bh, lggr), df, l1Oracle)
- case "Optimism2", "L2Suggested":
- return NewWrappedEvmEstimator(NewL2SuggestedPriceEstimator(lggr, ethClient), df, l1Oracle)
+ case "L2Suggested", "SuggestedPrice":
+ return NewWrappedEvmEstimator(NewSuggestedPriceEstimator(lggr, ethClient), df, l1Oracle)
default:
lggr.Warnf("GasEstimator: unrecognised mode '%s', falling back to FixedPriceEstimator", s)
return NewWrappedEvmEstimator(NewFixedPriceEstimator(geCfg, bh, lggr), df, l1Oracle)
diff --git a/core/chains/evm/gas/rollups/l1_gas_price_oracle.go b/core/chains/evm/gas/rollups/l1_gas_price_oracle.go
index c15aa23c792..d990017bd0f 100644
--- a/core/chains/evm/gas/rollups/l1_gas_price_oracle.go
+++ b/core/chains/evm/gas/rollups/l1_gas_price_oracle.go
@@ -57,11 +57,18 @@ const (
// `function l1BaseFee() external view returns (uint256);`
OPGasOracle_l1BaseFee = "519b4bd3"
+ // KromaGasOracleAddress is the address of the precompiled contract that exists on Kroma chain.
+ // This is the case for Kroma.
+ KromaGasOracleAddress = "0x4200000000000000000000000000000000000005"
+ // KromaGasOracle_l1BaseFee is a hex encoded call to:
+ // `function l1BaseFee() external view returns (uint256);`
+ KromaGasOracle_l1BaseFee = "519b4bd3"
+
// Interval at which to poll for L1BaseFee. A good starting point is the L1 block time.
PollPeriod = 12 * time.Second
)
-var supportedChainTypes = []config.ChainType{config.ChainArbitrum, config.ChainOptimismBedrock}
+var supportedChainTypes = []config.ChainType{config.ChainArbitrum, config.ChainOptimismBedrock, config.ChainKroma}
func IsRollupWithL1Support(chainType config.ChainType) bool {
return slices.Contains(supportedChainTypes, chainType)
@@ -76,6 +83,9 @@ func NewL1GasPriceOracle(lggr logger.Logger, ethClient ethClient, chainType conf
case config.ChainOptimismBedrock:
address = OPGasOracleAddress
callArgs = OPGasOracle_l1BaseFee
+ case config.ChainKroma:
+ address = KromaGasOracleAddress
+ callArgs = KromaGasOracle_l1BaseFee
default:
panic(fmt.Sprintf("Received unspported chaintype %s", chainType))
}
diff --git a/core/chains/evm/gas/rollups/l1_gas_price_oracle_test.go b/core/chains/evm/gas/rollups/l1_gas_price_oracle_test.go
index 9fd2a66201c..320c9cb71da 100644
--- a/core/chains/evm/gas/rollups/l1_gas_price_oracle_test.go
+++ b/core/chains/evm/gas/rollups/l1_gas_price_oracle_test.go
@@ -59,6 +59,28 @@ func TestL1GasPriceOracle(t *testing.T) {
assert.Equal(t, assets.NewWei(l1BaseFee), gasPrice)
})
+ t.Run("Calling GasPrice on started Kroma L1Oracle returns Kroma l1GasPrice", func(t *testing.T) {
+ l1BaseFee := big.NewInt(200)
+
+ ethClient := mocks.NewETHClient(t)
+ ethClient.On("CallContract", mock.Anything, mock.IsType(ethereum.CallMsg{}), mock.IsType(&big.Int{})).Run(func(args mock.Arguments) {
+ callMsg := args.Get(1).(ethereum.CallMsg)
+ blockNumber := args.Get(2).(*big.Int)
+ assert.Equal(t, KromaGasOracleAddress, callMsg.To.String())
+ assert.Equal(t, KromaGasOracle_l1BaseFee, fmt.Sprintf("%x", callMsg.Data))
+ assert.Nil(t, blockNumber)
+ }).Return(common.BigToHash(l1BaseFee).Bytes(), nil)
+
+ oracle := NewL1GasPriceOracle(logger.TestLogger(t), ethClient, config.ChainKroma)
+ require.NoError(t, oracle.Start(testutils.Context(t)))
+ t.Cleanup(func() { assert.NoError(t, oracle.Close()) })
+
+ gasPrice, err := oracle.GasPrice(testutils.Context(t))
+ require.NoError(t, err)
+
+ assert.Equal(t, assets.NewWei(l1BaseFee), gasPrice)
+ })
+
t.Run("Calling GasPrice on started OPStack L1Oracle returns OPStack l1GasPrice", func(t *testing.T) {
l1BaseFee := big.NewInt(200)
diff --git a/core/chains/evm/gas/l2_suggested_estimator.go b/core/chains/evm/gas/suggested_price_estimator.go
similarity index 60%
rename from core/chains/evm/gas/l2_suggested_estimator.go
rename to core/chains/evm/gas/suggested_price_estimator.go
index 8e6c06a128d..a4ffb80997e 100644
--- a/core/chains/evm/gas/l2_suggested_estimator.go
+++ b/core/chains/evm/gas/suggested_price_estimator.go
@@ -19,7 +19,7 @@ import (
)
var (
- _ EvmEstimator = &l2SuggestedPriceEstimator{}
+ _ EvmEstimator = &SuggestedPriceEstimator{}
)
//go:generate mockery --quiet --name rpcClient --output ./mocks/ --case=underscore --structname RPCClient
@@ -27,8 +27,8 @@ type rpcClient interface {
CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error
}
-// l2SuggestedPriceEstimator is an Estimator which uses the L2 suggested gas price from eth_gasPrice.
-type l2SuggestedPriceEstimator struct {
+// SuggestedPriceEstimator is an Estimator which uses the suggested gas price from eth_gasPrice.
+type SuggestedPriceEstimator struct {
services.StateMachine
client rpcClient
@@ -36,7 +36,7 @@ type l2SuggestedPriceEstimator struct {
logger logger.Logger
gasPriceMu sync.RWMutex
- l2GasPrice *assets.Wei
+ GasPrice *assets.Wei
chForceRefetch chan (chan struct{})
chInitialised chan struct{}
@@ -44,12 +44,12 @@ type l2SuggestedPriceEstimator struct {
chDone chan struct{}
}
-// NewL2SuggestedPriceEstimator returns a new Estimator which uses the L2 suggested gas price.
-func NewL2SuggestedPriceEstimator(lggr logger.Logger, client rpcClient) EvmEstimator {
- return &l2SuggestedPriceEstimator{
+// NewSuggestedPriceEstimator returns a new Estimator which uses the suggested gas price.
+func NewSuggestedPriceEstimator(lggr logger.Logger, client rpcClient) EvmEstimator {
+ return &SuggestedPriceEstimator{
client: client,
pollPeriod: 10 * time.Second,
- logger: lggr.Named("L2SuggestedEstimator"),
+ logger: lggr.Named("SuggestedPriceEstimator"),
chForceRefetch: make(chan (chan struct{})),
chInitialised: make(chan struct{}),
chStop: make(chan struct{}),
@@ -57,30 +57,30 @@ func NewL2SuggestedPriceEstimator(lggr logger.Logger, client rpcClient) EvmEstim
}
}
-func (o *l2SuggestedPriceEstimator) Name() string {
+func (o *SuggestedPriceEstimator) Name() string {
return o.logger.Name()
}
-func (o *l2SuggestedPriceEstimator) Start(context.Context) error {
- return o.StartOnce("L2SuggestedEstimator", func() error {
+func (o *SuggestedPriceEstimator) Start(context.Context) error {
+ return o.StartOnce("SuggestedPriceEstimator", func() error {
go o.run()
<-o.chInitialised
return nil
})
}
-func (o *l2SuggestedPriceEstimator) Close() error {
- return o.StopOnce("L2SuggestedEstimator", func() error {
+func (o *SuggestedPriceEstimator) Close() error {
+ return o.StopOnce("SuggestedPriceEstimator", func() error {
close(o.chStop)
<-o.chDone
return nil
})
}
-func (o *l2SuggestedPriceEstimator) HealthReport() map[string]error {
+func (o *SuggestedPriceEstimator) HealthReport() map[string]error {
return map[string]error{o.Name(): o.Healthy()}
}
-func (o *l2SuggestedPriceEstimator) run() {
+func (o *SuggestedPriceEstimator) run() {
defer close(o.chDone)
t := o.refreshPrice()
@@ -100,7 +100,7 @@ func (o *l2SuggestedPriceEstimator) run() {
}
}
-func (o *l2SuggestedPriceEstimator) refreshPrice() (t *time.Timer) {
+func (o *SuggestedPriceEstimator) refreshPrice() (t *time.Timer) {
t = time.NewTimer(utils.WithJitter(o.pollPeriod))
var res hexutil.Big
@@ -113,28 +113,28 @@ func (o *l2SuggestedPriceEstimator) refreshPrice() (t *time.Timer) {
}
bi := (*assets.Wei)(&res)
- o.logger.Debugw("refreshPrice", "l2GasPrice", bi)
+ o.logger.Debugw("refreshPrice", "GasPrice", bi)
o.gasPriceMu.Lock()
defer o.gasPriceMu.Unlock()
- o.l2GasPrice = bi
+ o.GasPrice = bi
return
}
-func (o *l2SuggestedPriceEstimator) OnNewLongestChain(context.Context, *evmtypes.Head) {}
+func (o *SuggestedPriceEstimator) OnNewLongestChain(context.Context, *evmtypes.Head) {}
-func (*l2SuggestedPriceEstimator) GetDynamicFee(_ context.Context, _ uint32, _ *assets.Wei) (fee DynamicFee, chainSpecificGasLimit uint32, err error) {
+func (*SuggestedPriceEstimator) GetDynamicFee(_ context.Context, _ uint32, _ *assets.Wei) (fee DynamicFee, chainSpecificGasLimit uint32, err error) {
err = errors.New("dynamic fees are not implemented for this layer 2")
return
}
-func (*l2SuggestedPriceEstimator) BumpDynamicFee(_ context.Context, _ DynamicFee, _ uint32, _ *assets.Wei, _ []EvmPriorAttempt) (bumped DynamicFee, chainSpecificGasLimit uint32, err error) {
+func (*SuggestedPriceEstimator) BumpDynamicFee(_ context.Context, _ DynamicFee, _ uint32, _ *assets.Wei, _ []EvmPriorAttempt) (bumped DynamicFee, chainSpecificGasLimit uint32, err error) {
err = errors.New("dynamic fees are not implemented for this layer 2")
return
}
-func (o *l2SuggestedPriceEstimator) GetLegacyGas(ctx context.Context, _ []byte, l2GasLimit uint32, maxGasPriceWei *assets.Wei, opts ...feetypes.Opt) (gasPrice *assets.Wei, chainSpecificGasLimit uint32, err error) {
- chainSpecificGasLimit = l2GasLimit
+func (o *SuggestedPriceEstimator) GetLegacyGas(ctx context.Context, _ []byte, GasLimit uint32, maxGasPriceWei *assets.Wei, opts ...feetypes.Opt) (gasPrice *assets.Wei, chainSpecificGasLimit uint32, err error) {
+ chainSpecificGasLimit = GasLimit
ok := o.IfStarted(func() {
if slices.Contains(opts, feetypes.OptForceRefetch) {
@@ -159,10 +159,10 @@ func (o *l2SuggestedPriceEstimator) GetLegacyGas(ctx context.Context, _ []byte,
}
}
if gasPrice = o.getGasPrice(); gasPrice == nil {
- err = errors.New("failed to estimate l2 gas; gas price not set")
+ err = errors.New("failed to estimate gas; gas price not set")
return
}
- o.logger.Debugw("GetLegacyGas", "l2GasPrice", gasPrice, "l2GasLimit", l2GasLimit)
+ o.logger.Debugw("GetLegacyGas", "GasPrice", gasPrice, "GasLimit", GasLimit)
})
if !ok {
return nil, 0, errors.New("estimator is not started")
@@ -176,12 +176,12 @@ func (o *l2SuggestedPriceEstimator) GetLegacyGas(ctx context.Context, _ []byte,
return
}
-func (o *l2SuggestedPriceEstimator) BumpLegacyGas(_ context.Context, _ *assets.Wei, _ uint32, _ *assets.Wei, _ []EvmPriorAttempt) (bumpedGasPrice *assets.Wei, chainSpecificGasLimit uint32, err error) {
- return nil, 0, errors.New("bump gas is not supported for this l2")
+func (o *SuggestedPriceEstimator) BumpLegacyGas(_ context.Context, _ *assets.Wei, _ uint32, _ *assets.Wei, _ []EvmPriorAttempt) (bumpedGasPrice *assets.Wei, chainSpecificGasLimit uint32, err error) {
+ return nil, 0, errors.New("bump gas is not supported for this chain")
}
-func (o *l2SuggestedPriceEstimator) getGasPrice() (l2GasPrice *assets.Wei) {
+func (o *SuggestedPriceEstimator) getGasPrice() (GasPrice *assets.Wei) {
o.gasPriceMu.RLock()
defer o.gasPriceMu.RUnlock()
- return o.l2GasPrice
+ return o.GasPrice
}
diff --git a/core/chains/evm/gas/l2_suggested_estimator_test.go b/core/chains/evm/gas/suggested_price_estimator_test.go
similarity index 87%
rename from core/chains/evm/gas/l2_suggested_estimator_test.go
rename to core/chains/evm/gas/suggested_price_estimator_test.go
index 69b36033024..808b28a3a6b 100644
--- a/core/chains/evm/gas/l2_suggested_estimator_test.go
+++ b/core/chains/evm/gas/suggested_price_estimator_test.go
@@ -17,7 +17,7 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/logger"
)
-func TestL2SuggestedEstimator(t *testing.T) {
+func TestSuggestedPriceEstimator(t *testing.T) {
t.Parallel()
maxGasPrice := assets.NewWeiI(100)
@@ -27,7 +27,7 @@ func TestL2SuggestedEstimator(t *testing.T) {
t.Run("calling GetLegacyGas on unstarted estimator returns error", func(t *testing.T) {
client := mocks.NewRPCClient(t)
- o := gas.NewL2SuggestedPriceEstimator(logger.TestLogger(t), client)
+ o := gas.NewSuggestedPriceEstimator(logger.TestLogger(t), client)
_, _, err := o.GetLegacyGas(testutils.Context(t), calldata, gasLimit, maxGasPrice)
assert.EqualError(t, err, "estimator is not started")
})
@@ -39,7 +39,7 @@ func TestL2SuggestedEstimator(t *testing.T) {
(*big.Int)(res).SetInt64(42)
})
- o := gas.NewL2SuggestedPriceEstimator(logger.TestLogger(t), client)
+ o := gas.NewSuggestedPriceEstimator(logger.TestLogger(t), client)
require.NoError(t, o.Start(testutils.Context(t)))
t.Cleanup(func() { assert.NoError(t, o.Close()) })
gasPrice, chainSpecificGasLimit, err := o.GetLegacyGas(testutils.Context(t), calldata, gasLimit, maxGasPrice)
@@ -50,7 +50,7 @@ func TestL2SuggestedEstimator(t *testing.T) {
t.Run("gas price is lower than user specified max gas price", func(t *testing.T) {
client := mocks.NewRPCClient(t)
- o := gas.NewL2SuggestedPriceEstimator(logger.TestLogger(t), client)
+ o := gas.NewSuggestedPriceEstimator(logger.TestLogger(t), client)
client.On("CallContext", mock.Anything, mock.Anything, "eth_gasPrice").Return(nil).Run(func(args mock.Arguments) {
res := args.Get(1).(*hexutil.Big)
@@ -68,7 +68,7 @@ func TestL2SuggestedEstimator(t *testing.T) {
t.Run("gas price is lower than global max gas price", func(t *testing.T) {
client := mocks.NewRPCClient(t)
- o := gas.NewL2SuggestedPriceEstimator(logger.TestLogger(t), client)
+ o := gas.NewSuggestedPriceEstimator(logger.TestLogger(t), client)
client.On("CallContext", mock.Anything, mock.Anything, "eth_gasPrice").Return(nil).Run(func(args mock.Arguments) {
res := args.Get(1).(*hexutil.Big)
@@ -85,14 +85,14 @@ func TestL2SuggestedEstimator(t *testing.T) {
t.Run("calling BumpLegacyGas always returns error", func(t *testing.T) {
client := mocks.NewRPCClient(t)
- o := gas.NewL2SuggestedPriceEstimator(logger.TestLogger(t), client)
+ o := gas.NewSuggestedPriceEstimator(logger.TestLogger(t), client)
_, _, err := o.BumpLegacyGas(testutils.Context(t), assets.NewWeiI(42), gasLimit, assets.NewWeiI(10), nil)
- assert.EqualError(t, err, "bump gas is not supported for this l2")
+ assert.EqualError(t, err, "bump gas is not supported for this chain")
})
t.Run("calling GetLegacyGas on started estimator if initial call failed returns error", func(t *testing.T) {
client := mocks.NewRPCClient(t)
- o := gas.NewL2SuggestedPriceEstimator(logger.TestLogger(t), client)
+ o := gas.NewSuggestedPriceEstimator(logger.TestLogger(t), client)
client.On("CallContext", mock.Anything, mock.Anything, "eth_gasPrice").Return(errors.New("kaboom"))
@@ -100,6 +100,6 @@ func TestL2SuggestedEstimator(t *testing.T) {
t.Cleanup(func() { assert.NoError(t, o.Close()) })
_, _, err := o.GetLegacyGas(testutils.Context(t), calldata, gasLimit, maxGasPrice)
- assert.EqualError(t, err, "failed to estimate l2 gas; gas price not set")
+ assert.EqualError(t, err, "failed to estimate gas; gas price not set")
})
}
diff --git a/core/chains/evm/headtracker/head_tracker_test.go b/core/chains/evm/headtracker/head_tracker_test.go
index 502aa4ae6db..8af344098f8 100644
--- a/core/chains/evm/headtracker/head_tracker_test.go
+++ b/core/chains/evm/headtracker/head_tracker_test.go
@@ -18,7 +18,7 @@ import (
"github.com/stretchr/testify/require"
"golang.org/x/exp/maps"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink-relay/pkg/services"
diff --git a/core/chains/evm/headtracker/orm.go b/core/chains/evm/headtracker/orm.go
index 426df68b301..34f46ce44de 100644
--- a/core/chains/evm/headtracker/orm.go
+++ b/core/chains/evm/headtracker/orm.go
@@ -8,7 +8,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
"github.com/smartcontractkit/chainlink/v2/core/logger"
diff --git a/core/chains/evm/log/helpers_test.go b/core/chains/evm/log/helpers_test.go
index 688757a3e96..f787002578e 100644
--- a/core/chains/evm/log/helpers_test.go
+++ b/core/chains/evm/log/helpers_test.go
@@ -18,7 +18,7 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
evmclimocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks"
diff --git a/core/chains/evm/log/orm.go b/core/chains/evm/log/orm.go
index 4e51940f344..d383419d728 100644
--- a/core/chains/evm/log/orm.go
+++ b/core/chains/evm/log/orm.go
@@ -9,7 +9,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
diff --git a/core/chains/evm/logpoller/log_poller.go b/core/chains/evm/logpoller/log_poller.go
index 4cd2804d9f3..b86ede5dbcb 100644
--- a/core/chains/evm/logpoller/log_poller.go
+++ b/core/chains/evm/logpoller/log_poller.go
@@ -466,6 +466,7 @@ func (lp *logPoller) run() {
// Serially process replay requests.
lp.lggr.Infow("Executing replay", "fromBlock", fromBlock, "requested", fromBlockReq)
lp.PollAndSaveLogs(lp.ctx, fromBlock)
+ lp.lggr.Infow("Executing replay finished", "fromBlock", fromBlock, "requested", fromBlockReq)
}
} else {
lp.lggr.Errorw("Error executing replay, could not get fromBlock", "err", err)
@@ -574,13 +575,14 @@ func (lp *logPoller) BackupPollAndSaveLogs(ctx context.Context, backupPollerBloc
lastSafeBackfillBlock := latestFinalizedBlockNumber - 1
if lastSafeBackfillBlock >= lp.backupPollerNextBlock {
- lp.lggr.Infow("Backup poller backfilling logs", "start", lp.backupPollerNextBlock, "end", lastSafeBackfillBlock)
+ lp.lggr.Infow("Backup poller started backfilling logs", "start", lp.backupPollerNextBlock, "end", lastSafeBackfillBlock)
if err = lp.backfill(ctx, lp.backupPollerNextBlock, lastSafeBackfillBlock); err != nil {
// If there's an error backfilling, we can just return and retry from the last block saved
// since we don't save any blocks on backfilling. We may re-insert the same logs but thats ok.
lp.lggr.Warnw("Backup poller failed", "err", err)
return
}
+ lp.lggr.Infow("Backup poller finished backfilling", "start", lp.backupPollerNextBlock, "end", lastSafeBackfillBlock)
lp.backupPollerNextBlock = lastSafeBackfillBlock + 1
}
}
@@ -677,9 +679,7 @@ func (lp *logPoller) backfill(ctx context.Context, start, end int64) error {
}
lp.lggr.Debugw("Backfill found logs", "from", from, "to", to, "logs", len(gethLogs), "blocks", blocks)
- err = lp.orm.Q().WithOpts(pg.WithParentCtx(ctx)).Transaction(func(tx pg.Queryer) error {
- return lp.orm.InsertLogs(convertLogs(gethLogs, blocks, lp.lggr, lp.ec.ConfiguredChainID()), pg.WithQueryer(tx))
- })
+ err = lp.orm.InsertLogs(convertLogs(gethLogs, blocks, lp.lggr, lp.ec.ConfiguredChainID()), pg.WithParentCtx(ctx))
if err != nil {
lp.lggr.Warnw("Unable to insert logs, retrying", "err", err, "from", from, "to", to)
return err
@@ -748,21 +748,7 @@ func (lp *logPoller) getCurrentBlockMaybeHandleReorg(ctx context.Context, curren
// the canonical set per read. Typically, if an application took action on a log
// it would be saved elsewhere e.g. evm.txes, so it seems better to just support the fast reads.
// Its also nicely analogous to reading from the chain itself.
- err2 = lp.orm.Q().WithOpts(pg.WithParentCtx(ctx)).Transaction(func(tx pg.Queryer) error {
- // These deletes are bounded by reorg depth, so they are
- // fast and should not slow down the log readers.
- err3 := lp.orm.DeleteBlocksAfter(blockAfterLCA.Number, pg.WithQueryer(tx))
- if err3 != nil {
- lp.lggr.Warnw("Unable to clear reorged blocks, retrying", "err", err3)
- return err3
- }
- err3 = lp.orm.DeleteLogsAfter(blockAfterLCA.Number, pg.WithQueryer(tx))
- if err3 != nil {
- lp.lggr.Warnw("Unable to clear reorged logs, retrying", "err", err3)
- return err3
- }
- return nil
- })
+ err2 = lp.orm.DeleteLogsAndBlocksAfter(blockAfterLCA.Number, pg.WithParentCtx(ctx))
if err2 != nil {
// If we error on db commit, we can't know if the tx went through or not.
// We return an error here which will cause us to restart polling from lastBlockSaved + 1
@@ -847,20 +833,11 @@ func (lp *logPoller) PollAndSaveLogs(ctx context.Context, currentBlockNumber int
return
}
lp.lggr.Debugw("Unfinalized log query", "logs", len(logs), "currentBlockNumber", currentBlockNumber, "blockHash", currentBlock.Hash, "timestamp", currentBlock.Timestamp.Unix())
- err = lp.orm.Q().WithOpts(pg.WithParentCtx(ctx)).Transaction(func(tx pg.Queryer) error {
- if err2 := lp.orm.InsertBlock(h, currentBlockNumber, currentBlock.Timestamp, latestFinalizedBlockNumber, pg.WithQueryer(tx)); err2 != nil {
- return err2
- }
- if len(logs) == 0 {
- return nil
- }
- return lp.orm.InsertLogs(convertLogs(logs,
- []LogPollerBlock{{BlockNumber: currentBlockNumber,
- BlockTimestamp: currentBlock.Timestamp}},
- lp.lggr,
- lp.ec.ConfiguredChainID(),
- ), pg.WithQueryer(tx))
- })
+ block := NewLogPollerBlock(h, currentBlockNumber, currentBlock.Timestamp, latestFinalizedBlockNumber)
+ err = lp.orm.InsertLogsWithBlock(
+ convertLogs(logs, []LogPollerBlock{block}, lp.lggr, lp.ec.ConfiguredChainID()),
+ block,
+ )
if err != nil {
lp.lggr.Warnw("Unable to save logs resuming from last saved block + 1", "err", err, "block", currentBlockNumber)
return
diff --git a/core/chains/evm/logpoller/log_poller_test.go b/core/chains/evm/logpoller/log_poller_test.go
index 471c728cdd6..5f013ca9140 100644
--- a/core/chains/evm/logpoller/log_poller_test.go
+++ b/core/chains/evm/logpoller/log_poller_test.go
@@ -85,7 +85,7 @@ func populateDatabase(t testing.TB, o *logpoller.DbORM, chainID *big.Int) (commo
func BenchmarkSelectLogsCreatedAfter(b *testing.B) {
chainId := big.NewInt(137)
- _, db := heavyweight.FullTestDBV2(b, "logs_scale", nil)
+ _, db := heavyweight.FullTestDBV2(b, nil)
o := logpoller.NewORM(chainId, db, logger.TestLogger(b), pgtest.NewQConfig(false))
event, address, _ := populateDatabase(b, o, chainId)
@@ -103,7 +103,7 @@ func BenchmarkSelectLogsCreatedAfter(b *testing.B) {
func TestPopulateLoadedDB(t *testing.T) {
t.Skip("Only for local load testing and query analysis")
- _, db := heavyweight.FullTestDBV2(t, "logs_scale", nil)
+ _, db := heavyweight.FullTestDBV2(t, nil)
chainID := big.NewInt(137)
o := logpoller.NewORM(big.NewInt(137), db, logger.TestLogger(t), pgtest.NewQConfig(true))
@@ -1328,7 +1328,7 @@ func TestNotifyAfterInsert(t *testing.T) {
// Use a non-transactional db for this test because notify events
// are not delivered until the transaction is committed.
var dbURL string
- _, sqlxDB := heavyweight.FullTestDBV2(t, "notify_after_insert_log", func(c *chainlink.Config, s *chainlink.Secrets) {
+ _, sqlxDB := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
dbURL = s.Database.URL.URL().String()
})
diff --git a/core/chains/evm/logpoller/models.go b/core/chains/evm/logpoller/models.go
index 9c55786777c..87ddd079a5b 100644
--- a/core/chains/evm/logpoller/models.go
+++ b/core/chains/evm/logpoller/models.go
@@ -56,3 +56,12 @@ func (l *Log) ToGethLog() types.Log {
Index: uint(l.LogIndex),
}
}
+
+func NewLogPollerBlock(blockHash common.Hash, blockNumber int64, timestamp time.Time, finalizedBlockNumber int64) LogPollerBlock {
+ return LogPollerBlock{
+ BlockHash: blockHash,
+ BlockNumber: blockNumber,
+ BlockTimestamp: timestamp,
+ FinalizedBlockNumber: finalizedBlockNumber,
+ }
+}
diff --git a/core/chains/evm/logpoller/observability.go b/core/chains/evm/logpoller/observability.go
index 7f54fa9f09a..03f4b77be25 100644
--- a/core/chains/evm/logpoller/observability.go
+++ b/core/chains/evm/logpoller/observability.go
@@ -5,9 +5,9 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/jmoiron/sqlx"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
- "github.com/smartcontractkit/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/logger"
@@ -68,19 +68,15 @@ func NewObservedORM(chainID *big.Int, db *sqlx.DB, lggr logger.Logger, cfg pg.QC
}
}
-func (o *ObservedORM) Q() pg.Q {
- return o.ORM.Q()
-}
-
func (o *ObservedORM) InsertLogs(logs []Log, qopts ...pg.QOpt) error {
return withObservedExec(o, "InsertLogs", func() error {
return o.ORM.InsertLogs(logs, qopts...)
})
}
-func (o *ObservedORM) InsertBlock(hash common.Hash, blockNumber int64, blockTimestamp time.Time, lastFinalizedBlock int64, qopts ...pg.QOpt) error {
- return withObservedExec(o, "InsertBlock", func() error {
- return o.ORM.InsertBlock(hash, blockNumber, blockTimestamp, lastFinalizedBlock, qopts...)
+func (o *ObservedORM) InsertLogsWithBlock(logs []Log, block LogPollerBlock, qopts ...pg.QOpt) error {
+ return withObservedExec(o, "InsertLogsWithBlock", func() error {
+ return o.ORM.InsertLogsWithBlock(logs, block, qopts...)
})
}
@@ -102,21 +98,15 @@ func (o *ObservedORM) DeleteFilter(name string, qopts ...pg.QOpt) error {
})
}
-func (o *ObservedORM) DeleteBlocksAfter(start int64, qopts ...pg.QOpt) error {
- return withObservedExec(o, "DeleteBlocksAfter", func() error {
- return o.ORM.DeleteBlocksAfter(start, qopts...)
- })
-}
-
func (o *ObservedORM) DeleteBlocksBefore(end int64, qopts ...pg.QOpt) error {
return withObservedExec(o, "DeleteBlocksBefore", func() error {
return o.ORM.DeleteBlocksBefore(end, qopts...)
})
}
-func (o *ObservedORM) DeleteLogsAfter(start int64, qopts ...pg.QOpt) error {
- return withObservedExec(o, "DeleteLogsAfter", func() error {
- return o.ORM.DeleteLogsAfter(start, qopts...)
+func (o *ObservedORM) DeleteLogsAndBlocksAfter(start int64, qopts ...pg.QOpt) error {
+ return withObservedExec(o, "DeleteLogsAndBlocksAfter", func() error {
+ return o.ORM.DeleteLogsAndBlocksAfter(start, qopts...)
})
}
diff --git a/core/chains/evm/logpoller/observability_test.go b/core/chains/evm/logpoller/observability_test.go
index 0d3eadf47d7..ded3d7854dd 100644
--- a/core/chains/evm/logpoller/observability_test.go
+++ b/core/chains/evm/logpoller/observability_test.go
@@ -38,7 +38,7 @@ func TestMultipleMetricsArePublished(t *testing.T) {
_, _ = orm.SelectLatestLogEventSigsAddrsWithConfs(0, []common.Address{{}}, []common.Hash{{}}, 1, pg.WithParentCtx(ctx))
_, _ = orm.SelectIndexedLogsCreatedAfter(common.Address{}, common.Hash{}, 1, []common.Hash{}, time.Now(), 0, pg.WithParentCtx(ctx))
_ = orm.InsertLogs([]Log{}, pg.WithParentCtx(ctx))
- _ = orm.InsertBlock(common.Hash{}, 1, time.Now(), 0, pg.WithParentCtx(ctx))
+ _ = orm.InsertLogsWithBlock([]Log{}, NewLogPollerBlock(common.Hash{}, 1, time.Now(), 0), pg.WithParentCtx(ctx))
require.Equal(t, 13, testutil.CollectAndCount(orm.queryDuration))
require.Equal(t, 10, testutil.CollectAndCount(orm.datasetSize))
diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go
index 06f4acbb4f1..a1b86d2cb2c 100644
--- a/core/chains/evm/logpoller/orm.go
+++ b/core/chains/evm/logpoller/orm.go
@@ -8,8 +8,8 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/jmoiron/sqlx"
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
@@ -20,17 +20,15 @@ import (
// it exposes some of the database implementation details (e.g. pg.Q). Ideally it should be agnostic and could be applied to any persistence layer.
// What is more, LogPoller should not be aware of the underlying database implementation and delegate all the queries to the ORM.
type ORM interface {
- Q() pg.Q
InsertLogs(logs []Log, qopts ...pg.QOpt) error
- InsertBlock(blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, lastFinalizedBlockNumber int64, qopts ...pg.QOpt) error
+ InsertLogsWithBlock(logs []Log, block LogPollerBlock, qopts ...pg.QOpt) error
InsertFilter(filter Filter, qopts ...pg.QOpt) error
LoadFilters(qopts ...pg.QOpt) (map[string]Filter, error)
DeleteFilter(name string, qopts ...pg.QOpt) error
- DeleteBlocksAfter(start int64, qopts ...pg.QOpt) error
DeleteBlocksBefore(end int64, qopts ...pg.QOpt) error
- DeleteLogsAfter(start int64, qopts ...pg.QOpt) error
+ DeleteLogsAndBlocksAfter(start int64, qopts ...pg.QOpt) error
DeleteExpiredLogs(qopts ...pg.QOpt) error
GetBlocksRange(start int64, end int64, qopts ...pg.QOpt) ([]LogPollerBlock, error)
@@ -58,6 +56,7 @@ type ORM interface {
type DbORM struct {
chainID *big.Int
q pg.Q
+ lggr logger.Logger
}
// NewORM creates a DbORM scoped to chainID.
@@ -67,13 +66,10 @@ func NewORM(chainID *big.Int, db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig) *
return &DbORM{
chainID: chainID,
q: q,
+ lggr: lggr,
}
}
-func (o *DbORM) Q() pg.Q {
- return o.q
-}
-
// InsertBlock is idempotent to support replays.
func (o *DbORM) InsertBlock(blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, finalizedBlock int64, qopts ...pg.QOpt) error {
args, err := newQueryArgs(o.chainID).
@@ -191,12 +187,6 @@ func (o *DbORM) SelectLatestLogByEventSigWithConfs(eventSig common.Hash, address
return &l, nil
}
-// DeleteBlocksAfter delete all blocks after and including start.
-func (o *DbORM) DeleteBlocksAfter(start int64, qopts ...pg.QOpt) error {
- q := o.q.WithOpts(qopts...)
- return q.ExecQ(`DELETE FROM evm.log_poller_blocks WHERE block_number >= $1 AND evm_chain_id = $2`, start, utils.NewBig(o.chainID))
-}
-
// DeleteBlocksBefore delete all blocks before and including end.
func (o *DbORM) DeleteBlocksBefore(end int64, qopts ...pg.QOpt) error {
q := o.q.WithOpts(qopts...)
@@ -204,9 +194,31 @@ func (o *DbORM) DeleteBlocksBefore(end int64, qopts ...pg.QOpt) error {
return err
}
-func (o *DbORM) DeleteLogsAfter(start int64, qopts ...pg.QOpt) error {
- q := o.q.WithOpts(qopts...)
- return q.ExecQ(`DELETE FROM evm.logs WHERE block_number >= $1 AND evm_chain_id = $2`, start, utils.NewBig(o.chainID))
+func (o *DbORM) DeleteLogsAndBlocksAfter(start int64, qopts ...pg.QOpt) error {
+ // These deletes are bounded by reorg depth, so they are
+ // fast and should not slow down the log readers.
+ return o.q.WithOpts(qopts...).Transaction(func(tx pg.Queryer) error {
+ args, err := newQueryArgs(o.chainID).
+ withStartBlock(start).
+ toArgs()
+ if err != nil {
+ o.lggr.Error("Cant build args for DeleteLogsAndBlocksAfter queries", "err", err)
+ return err
+ }
+
+ _, err = tx.NamedExec(`DELETE FROM evm.log_poller_blocks WHERE block_number >= :start_block AND evm_chain_id = :evm_chain_id`, args)
+ if err != nil {
+ o.lggr.Warnw("Unable to clear reorged blocks, retrying", "err", err)
+ return err
+ }
+
+ _, err = tx.NamedExec(`DELETE FROM evm.logs WHERE block_number >= :start_block AND evm_chain_id = :evm_chain_id`, args)
+ if err != nil {
+ o.lggr.Warnw("Unable to clear reorged logs, retrying", "err", err)
+ return err
+ }
+ return nil
+ })
}
type Exp struct {
@@ -233,13 +245,35 @@ func (o *DbORM) DeleteExpiredLogs(qopts ...pg.QOpt) error {
// InsertLogs is idempotent to support replays.
func (o *DbORM) InsertLogs(logs []Log, qopts ...pg.QOpt) error {
- for _, log := range logs {
- if o.chainID.Cmp(log.EvmChainId.ToInt()) != 0 {
- return errors.Errorf("invalid chainID in log got %v want %v", log.EvmChainId.ToInt(), o.chainID)
- }
+ if err := o.validateLogs(logs); err != nil {
+ return err
}
- q := o.q.WithOpts(qopts...)
+ return o.q.WithOpts(qopts...).Transaction(func(tx pg.Queryer) error {
+ return o.insertLogsWithinTx(logs, tx)
+ })
+}
+
+func (o *DbORM) InsertLogsWithBlock(logs []Log, block LogPollerBlock, qopts ...pg.QOpt) error {
+ // Optimization, don't open TX when there is only a block to be persisted
+ if len(logs) == 0 {
+ return o.InsertBlock(block.BlockHash, block.BlockNumber, block.BlockTimestamp, block.FinalizedBlockNumber, qopts...)
+ }
+
+ if err := o.validateLogs(logs); err != nil {
+ return err
+ }
+
+ // Block and logs goes with the same TX to ensure atomicity
+ return o.q.WithOpts(qopts...).Transaction(func(tx pg.Queryer) error {
+ if err := o.InsertBlock(block.BlockHash, block.BlockNumber, block.BlockTimestamp, block.FinalizedBlockNumber, pg.WithQueryer(tx)); err != nil {
+ return err
+ }
+ return o.insertLogsWithinTx(logs, tx)
+ })
+}
+
+func (o *DbORM) insertLogsWithinTx(logs []Log, tx pg.Queryer) error {
batchInsertSize := 4000
for i := 0; i < len(logs); i += batchInsertSize {
start, end := i, i+batchInsertSize
@@ -247,12 +281,14 @@ func (o *DbORM) InsertLogs(logs []Log, qopts ...pg.QOpt) error {
end = len(logs)
}
- err := q.ExecQNamed(`
- INSERT INTO evm.logs
- (evm_chain_id, log_index, block_hash, block_number, block_timestamp, address, event_sig, topics, tx_hash, data, created_at)
+ _, err := tx.NamedExec(`
+ INSERT INTO evm.logs
+ (evm_chain_id, log_index, block_hash, block_number, block_timestamp, address, event_sig, topics, tx_hash, data, created_at)
VALUES
- (:evm_chain_id, :log_index, :block_hash, :block_number, :block_timestamp, :address, :event_sig, :topics, :tx_hash, :data, NOW())
- ON CONFLICT DO NOTHING`, logs[start:end])
+ (:evm_chain_id, :log_index, :block_hash, :block_number, :block_timestamp, :address, :event_sig, :topics, :tx_hash, :data, NOW())
+ ON CONFLICT DO NOTHING`,
+ logs[start:end],
+ )
if err != nil {
if errors.Is(err, context.DeadlineExceeded) && batchInsertSize > 500 {
@@ -267,6 +303,15 @@ func (o *DbORM) InsertLogs(logs []Log, qopts ...pg.QOpt) error {
return nil
}
+func (o *DbORM) validateLogs(logs []Log) error {
+ for _, log := range logs {
+ if o.chainID.Cmp(log.EvmChainId.ToInt()) != 0 {
+ return errors.Errorf("invalid chainID in log got %v want %v", log.EvmChainId.ToInt(), o.chainID)
+ }
+ }
+ return nil
+}
+
func (o *DbORM) SelectLogsByBlockRange(start, end int64) ([]Log, error) {
args, err := newQueryArgs(o.chainID).
withStartBlock(start).
diff --git a/core/chains/evm/logpoller/orm_test.go b/core/chains/evm/logpoller/orm_test.go
index 66e1afdc939..887984055ef 100644
--- a/core/chains/evm/logpoller/orm_test.go
+++ b/core/chains/evm/logpoller/orm_test.go
@@ -4,6 +4,7 @@ import (
"bytes"
"database/sql"
"fmt"
+ "math"
"math/big"
"testing"
"time"
@@ -15,7 +16,10 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/cltest/heavyweight"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
"github.com/smartcontractkit/chainlink/v2/core/utils"
)
@@ -179,13 +183,13 @@ func TestORM(t *testing.T) {
assert.Equal(t, int64(12), latest.BlockNumber)
// Delete a block (only 10 on chain).
- require.NoError(t, o1.DeleteBlocksAfter(10))
+ require.NoError(t, o1.DeleteLogsAndBlocksAfter(10))
_, err = o1.SelectBlockByHash(common.HexToHash("0x1234"))
require.Error(t, err)
assert.True(t, errors.Is(err, sql.ErrNoRows))
// Delete blocks from another chain.
- require.NoError(t, o2.DeleteBlocksAfter(11))
+ require.NoError(t, o2.DeleteLogsAndBlocksAfter(11))
_, err = o2.SelectBlockByHash(common.HexToHash("0x1234"))
require.Error(t, err)
assert.True(t, errors.Is(err, sql.ErrNoRows))
@@ -318,7 +322,6 @@ func TestORM(t *testing.T) {
require.Error(t, err)
assert.True(t, errors.Is(err, sql.ErrNoRows))
// With block 12, anything <=2 should work
- require.NoError(t, o1.DeleteBlocksAfter(10))
require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 11, time.Now(), 0))
require.NoError(t, o1.InsertBlock(common.HexToHash("0x1235"), 12, time.Now(), 0))
_, err = o1.SelectLatestLogByEventSigWithConfs(topic, common.HexToAddress("0x1234"), 0)
@@ -421,7 +424,7 @@ func TestORM(t *testing.T) {
assert.Len(t, logs, 7)
// Delete logs after should delete all logs.
- err = o1.DeleteLogsAfter(1)
+ err = o1.DeleteLogsAndBlocksAfter(1)
require.NoError(t, err)
logs, err = o1.SelectLogsByBlockRange(1, latest.BlockNumber)
require.NoError(t, err)
@@ -1301,3 +1304,133 @@ func TestNestedLogPollerBlocksQuery(t *testing.T) {
require.NoError(t, err)
require.Len(t, logs, 0)
}
+
+func TestInsertLogsWithBlock(t *testing.T) {
+ chainID := testutils.NewRandomEVMChainID()
+ event := utils.RandomBytes32()
+ address := utils.RandomAddress()
+
+ // We need full db here, because we want to test transaction rollbacks.
+ // Using pgtest.NewSqlxDB(t) will run all tests in TXs which is not desired for this type of test
+ // (inner tx rollback will rollback outer tx, blocking rest of execution)
+ _, db := heavyweight.FullTestDBV2(t, nil)
+ o := logpoller.NewORM(chainID, db, logger.TestLogger(t), pgtest.NewQConfig(true))
+
+ correctLog := GenLog(chainID, 1, 1, utils.RandomAddress().String(), event[:], address)
+ invalidLog := GenLog(chainID, -10, -10, utils.RandomAddress().String(), event[:], address)
+ correctBlock := logpoller.NewLogPollerBlock(utils.RandomBytes32(), 20, time.Now(), 10)
+ invalidBlock := logpoller.NewLogPollerBlock(utils.RandomBytes32(), -10, time.Now(), -10)
+
+ tests := []struct {
+ name string
+ logs []logpoller.Log
+ block logpoller.LogPollerBlock
+ shouldRollback bool
+ }{
+ {
+ name: "properly persist all data",
+ logs: []logpoller.Log{correctLog},
+ block: correctBlock,
+ shouldRollback: false,
+ },
+ {
+ name: "rollbacks transaction when block is invalid",
+ logs: []logpoller.Log{correctLog},
+ block: invalidBlock,
+ shouldRollback: true,
+ },
+ {
+ name: "rollbacks transaction when log is invalid",
+ logs: []logpoller.Log{invalidLog},
+ block: correctBlock,
+ shouldRollback: true,
+ },
+ {
+ name: "rollback when only some logs are invalid",
+ logs: []logpoller.Log{correctLog, invalidLog},
+ block: correctBlock,
+ shouldRollback: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // clean all logs and blocks between test cases
+ defer func() { _ = o.DeleteLogsAndBlocksAfter(0) }()
+ insertError := o.InsertLogsWithBlock(tt.logs, tt.block)
+
+ logs, logsErr := o.SelectLogs(0, math.MaxInt, address, event)
+ block, blockErr := o.SelectLatestBlock()
+
+ if tt.shouldRollback {
+ assert.Error(t, insertError)
+
+ assert.NoError(t, logsErr)
+ assert.Len(t, logs, 0)
+
+ assert.Error(t, blockErr)
+ } else {
+ assert.NoError(t, insertError)
+
+ assert.NoError(t, logsErr)
+ assert.Len(t, logs, len(tt.logs))
+
+ assert.NoError(t, blockErr)
+ assert.Equal(t, block.BlockNumber, tt.block.BlockNumber)
+ }
+ })
+ }
+}
+
+func TestInsertLogsInTx(t *testing.T) {
+ chainID := testutils.NewRandomEVMChainID()
+ event := utils.RandomBytes32()
+ address := utils.RandomAddress()
+ maxLogsSize := 9000
+
+ // We need full db here, because we want to test transaction rollbacks.
+ _, db := heavyweight.FullTestDBV2(t, nil)
+ o := logpoller.NewORM(chainID, db, logger.TestLogger(t), pgtest.NewQConfig(true))
+
+ logs := make([]logpoller.Log, maxLogsSize, maxLogsSize+1)
+ for i := 0; i < maxLogsSize; i++ {
+ logs[i] = GenLog(chainID, int64(i+1), int64(i+1), utils.RandomAddress().String(), event[:], address)
+ }
+ invalidLog := GenLog(chainID, -10, -10, utils.RandomAddress().String(), event[:], address)
+
+ tests := []struct {
+ name string
+ logs []logpoller.Log
+ shouldRollback bool
+ }{
+ {
+ name: "all logs persisted",
+ logs: logs,
+ shouldRollback: false,
+ },
+ {
+ name: "rollback when invalid log is passed",
+ logs: append(logs, invalidLog),
+ shouldRollback: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // clean all logs and blocks between test cases
+ defer func() { _ = o.DeleteLogsAndBlocksAfter(0) }()
+
+ insertErr := o.InsertLogs(tt.logs)
+ logsFromDb, err := o.SelectLogs(0, math.MaxInt, address, event)
+ assert.NoError(t, err)
+
+ if tt.shouldRollback {
+ assert.Error(t, insertErr)
+ assert.Len(t, logsFromDb, 0)
+ } else {
+ assert.NoError(t, insertErr)
+ assert.Len(t, logsFromDb, len(tt.logs))
+ }
+ })
+ }
+}
diff --git a/core/chains/evm/txmgr/broadcaster_test.go b/core/chains/evm/txmgr/broadcaster_test.go
index 3901da59eeb..460f9629fb8 100644
--- a/core/chains/evm/txmgr/broadcaster_test.go
+++ b/core/chains/evm/txmgr/broadcaster_test.go
@@ -15,7 +15,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
gethTypes "github.com/ethereum/go-ethereum/core/types"
"github.com/google/uuid"
- "github.com/onsi/gomega"
"github.com/shopspring/decimal"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
@@ -23,11 +22,11 @@ import (
"go.uber.org/zap/zapcore"
"gopkg.in/guregu/null.v4"
- clienttypes "github.com/smartcontractkit/chainlink/v2/common/chains/client"
+ commonclient "github.com/smartcontractkit/chainlink/v2/common/client"
txmgrcommon "github.com/smartcontractkit/chainlink/v2/common/txmgr"
txmgrtypes "github.com/smartcontractkit/chainlink/v2/common/txmgr/types"
"github.com/smartcontractkit/chainlink/v2/core/assets"
- evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
evmconfig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
gasmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas/mocks"
@@ -44,9 +43,7 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore"
ksmocks "github.com/smartcontractkit/chainlink/v2/core/services/keystore/mocks"
- "github.com/smartcontractkit/chainlink/v2/core/services/pg"
"github.com/smartcontractkit/chainlink/v2/core/services/pg/datatypes"
- pgmocks "github.com/smartcontractkit/chainlink/v2/core/services/pg/mocks"
"github.com/smartcontractkit/chainlink/v2/core/utils"
)
@@ -54,24 +51,21 @@ import (
func NewTestEthBroadcaster(
t testing.TB,
txStore txmgr.TestEvmTxStore,
- ethClient evmclient.Client,
+ ethClient client.Client,
keyStore keystore.Eth,
config evmconfig.ChainScopedConfig,
checkerFactory txmgr.TransmitCheckerFactory,
nonceAutoSync bool,
) *txmgr.Broadcaster {
t.Helper()
- eb := cltest.NewEventBroadcaster(t, config.Database().URL())
ctx := testutils.Context(t)
- require.NoError(t, eb.Start(ctx))
- t.Cleanup(func() { assert.NoError(t, eb.Close()) })
lggr := logger.TestLogger(t)
ge := config.EVM().GasEstimator()
estimator := gas.NewWrappedEvmEstimator(gas.NewFixedPriceEstimator(config.EVM().GasEstimator(), ge.BlockHistory(), lggr), ge.EIP1559DynamicFees(), nil)
txBuilder := txmgr.NewEvmTxAttemptBuilder(*ethClient.ConfiguredChainID(), ge, keyStore, estimator)
txNonceSyncer := txmgr.NewNonceSyncer(txStore, lggr, ethClient)
- ethBroadcaster := txmgr.NewEvmBroadcaster(txStore, txmgr.NewEvmTxmClient(ethClient), txmgr.NewEvmTxmConfig(config.EVM()), txmgr.NewEvmTxmFeeConfig(config.EVM().GasEstimator()), config.EVM().Transactions(), config.Database().Listener(), keyStore, eb, txBuilder, txNonceSyncer, lggr, checkerFactory, nonceAutoSync)
+ ethBroadcaster := txmgr.NewEvmBroadcaster(txStore, txmgr.NewEvmTxmClient(ethClient), txmgr.NewEvmTxmConfig(config.EVM()), txmgr.NewEvmTxmFeeConfig(config.EVM().GasEstimator()), config.EVM().Transactions(), config.Database().Listener(), keyStore, txBuilder, txNonceSyncer, lggr, checkerFactory, nonceAutoSync)
// Mark instance as test
ethBroadcaster.XXXTestDisableUnstartedTxAutoProcessing()
@@ -81,11 +75,7 @@ func NewTestEthBroadcaster(
}
func TestEthBroadcaster_Lifecycle(t *testing.T) {
- cfg, db := heavyweight.FullTestDBV2(t, "eth_broadcaster_optimistic_locking", nil)
- eventBroadcaster := cltest.NewEventBroadcaster(t, cfg.Database().URL())
- err := eventBroadcaster.Start(testutils.Context(t))
- require.NoError(t, err)
- t.Cleanup(func() { assert.NoError(t, eventBroadcaster.Close()) })
+ cfg, db := heavyweight.FullTestDBV2(t, nil)
txStore := cltest.NewTestTxStore(t, db, cfg.Database())
evmcfg := evmtest.NewChainScopedConfig(t, cfg)
ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
@@ -102,7 +92,6 @@ func TestEthBroadcaster_Lifecycle(t *testing.T) {
evmcfg.EVM().Transactions(),
evmcfg.Database().Listener(),
ethKeyStore,
- eventBroadcaster,
txBuilder,
nil,
logger.TestLogger(t),
@@ -111,7 +100,7 @@ func TestEthBroadcaster_Lifecycle(t *testing.T) {
)
// Can't close an unstarted instance
- err = eb.Close()
+ err := eb.Close()
require.Error(t, err)
ctx := testutils.Context(t)
@@ -228,7 +217,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Success(t *testing.T) {
}
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == uint64(2) && tx.Value().Cmp(big.NewInt(242)) == 0
- }), fromAddress).Return(clienttypes.Successful, nil).Once()
+ }), fromAddress).Return(commonclient.Successful, nil).Once()
// Earlier
tr := int32(99)
@@ -256,7 +245,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Success(t *testing.T) {
require.Equal(t, value.String(), tx.Value().String())
require.Equal(t, earlierEthTx.EncodedPayload, tx.Data())
return true
- }), fromAddress).Return(clienttypes.Successful, nil).Once()
+ }), fromAddress).Return(commonclient.Successful, nil).Once()
// Later
laterEthTx := txmgr.Tx{
@@ -279,7 +268,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Success(t *testing.T) {
require.Equal(t, value.String(), tx.Value().String())
require.Equal(t, laterEthTx.EncodedPayload, tx.Data())
return true
- }), fromAddress).Return(clienttypes.Successful, nil).Once()
+ }), fromAddress).Return(commonclient.Successful, nil).Once()
// Insertion order deliberately reversed to test ordering
require.NoError(t, txStore.InsertTx(&expensiveEthTx))
@@ -360,7 +349,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Success(t *testing.T) {
t.Run("sends transactions with type 0x2 in EIP-1559 mode", func(t *testing.T) {
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == uint64(343) && tx.Value().Cmp(big.NewInt(242)) == 0
- }), fromAddress).Return(clienttypes.Successful, nil).Once()
+ }), fromAddress).Return(commonclient.Successful, nil).Once()
etx := cltest.MustCreateUnstartedTx(t, txStore, fromAddress, toAddress, []byte{42, 42, 0}, gasLimit, big.Int(assets.NewEthValue(242)), &cltest.FixtureChainID)
// Do the thing
@@ -411,7 +400,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Success(t *testing.T) {
}
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == uint64(344) && tx.Value().Cmp(big.NewInt(442)) == 0
- }), fromAddress).Return(clienttypes.Successful, nil).Once()
+ }), fromAddress).Return(commonclient.Successful, nil).Once()
ethClient.On("CallContext", mock.Anything, mock.AnythingOfType("*hexutil.Bytes"), "eth_call", mock.MatchedBy(func(callarg map[string]interface{}) bool {
if fmt.Sprintf("%s", callarg["value"]) == "0x1ba" { // 442
assert.Equal(t, txRequest.FromAddress, callarg["from"])
@@ -444,7 +433,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Success(t *testing.T) {
t.Run("with unknown error, sends tx as normal", func(t *testing.T) {
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == uint64(345) && tx.Value().Cmp(big.NewInt(542)) == 0
- }), fromAddress).Return(clienttypes.Successful, nil).Once()
+ }), fromAddress).Return(commonclient.Successful, nil).Once()
ethClient.On("CallContext", mock.Anything, mock.AnythingOfType("*hexutil.Bytes"), "eth_call", mock.MatchedBy(func(callarg map[string]interface{}) bool {
return fmt.Sprintf("%s", callarg["value"]) == "0x21e" // 542
}), "latest").Return(errors.New("this is not a revert, something unexpected went wrong")).Once()
@@ -465,7 +454,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Success(t *testing.T) {
})
t.Run("on revert, marks tx as fatally errored and does not send", func(t *testing.T) {
- jerr := evmclient.JsonError{
+ jerr := client.JsonError{
Code: 42,
Message: "oh no, it reverted",
Data: []byte{42, 166, 34},
@@ -514,7 +503,7 @@ func TestEthBroadcaster_TransmitChecking(t *testing.T) {
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == 0 && tx.Value().Cmp(big.NewInt(442)) == 0
- }), fromAddress).Return(clienttypes.Successful, nil).Once()
+ }), fromAddress).Return(commonclient.Successful, nil).Once()
ethTx := cltest.MustCreateUnstartedGeneratedTx(t, txStore, fromAddress, &cltest.FixtureChainID,
cltest.EvmTxRequestWithValue(big.Int(assets.NewEthValue(442))),
@@ -537,7 +526,7 @@ func TestEthBroadcaster_TransmitChecking(t *testing.T) {
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == 1 && tx.Value().Cmp(big.NewInt(442)) == 0
- }), fromAddress).Return(clienttypes.Successful, nil).Once()
+ }), fromAddress).Return(commonclient.Successful, nil).Once()
ethTx := cltest.MustCreateUnstartedGeneratedTx(t, txStore, fromAddress, &cltest.FixtureChainID,
cltest.EvmTxRequestWithValue(big.Int(assets.NewEthValue(442))),
@@ -576,10 +565,7 @@ func TestEthBroadcaster_TransmitChecking(t *testing.T) {
func TestEthBroadcaster_ProcessUnstartedEthTxs_OptimisticLockingOnEthTx(t *testing.T) {
// non-transactional DB needed because we deliberately test for FK violation
- cfg, db := heavyweight.FullTestDBV2(t, "eth_broadcaster_optimistic_locking", nil)
- eventBroadcaster := cltest.NewEventBroadcaster(t, cfg.Database().URL())
- require.NoError(t, eventBroadcaster.Start(testutils.Context(t)))
- t.Cleanup(func() { assert.NoError(t, eventBroadcaster.Close()) })
+ cfg, db := heavyweight.FullTestDBV2(t, nil)
txStore := cltest.NewTestTxStore(t, db, cfg.Database())
ccfg := evmtest.NewChainScopedConfig(t, cfg)
evmcfg := txmgr.NewEvmTxmConfig(ccfg.EVM())
@@ -605,7 +591,6 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_OptimisticLockingOnEthTx(t *testi
ccfg.EVM().Transactions(),
cfg.Database().Listener(),
ethKeyStore,
- eventBroadcaster,
txBuilder,
nil,
logger.TestLogger(t),
@@ -661,7 +646,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Success_WithMultiplier(t *testing
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
assert.Equal(t, int(1600), int(tx.Gas()))
return true
- }), fromAddress).Return(clienttypes.Successful, nil).Once()
+ }), fromAddress).Return(commonclient.Successful, nil).Once()
txRequest := txmgr.TxRequest{
FromAddress: fromAddress,
@@ -745,7 +730,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_ResumingFromCrash(t *testing.T) {
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == uint64(firstNonce)
- }), fromAddress).Return(clienttypes.Successful, nil).Once()
+ }), fromAddress).Return(commonclient.Successful, nil).Once()
// Do the thing
{
@@ -781,7 +766,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_ResumingFromCrash(t *testing.T) {
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == uint64(firstNonce)
- }), fromAddress).Return(clienttypes.Fatal, errors.New("exceeds block gas limit")).Once()
+ }), fromAddress).Return(commonclient.Fatal, errors.New("exceeds block gas limit")).Once()
// Do the thing
{
@@ -817,7 +802,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_ResumingFromCrash(t *testing.T) {
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == uint64(firstNonce)
- }), fromAddress).Return(clienttypes.Successful, errors.New("known transaction: a1313bd99a81fb4d8ad1d2e90b67c6b3fa77545c990d6251444b83b70b6f8980")).Once()
+ }), fromAddress).Return(commonclient.Successful, errors.New("known transaction: a1313bd99a81fb4d8ad1d2e90b67c6b3fa77545c990d6251444b83b70b6f8980")).Once()
// Do the thing
{
@@ -852,7 +837,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_ResumingFromCrash(t *testing.T) {
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == uint64(firstNonce)
- }), fromAddress).Return(clienttypes.TransactionAlreadyKnown, errors.New("nonce too low")).Once()
+ }), fromAddress).Return(commonclient.TransactionAlreadyKnown, errors.New("nonce too low")).Once()
// Do the thing
{
@@ -889,7 +874,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_ResumingFromCrash(t *testing.T) {
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == uint64(firstNonce)
- }), fromAddress).Return(clienttypes.Retryable, failedToReachNodeError).Once()
+ }), fromAddress).Return(commonclient.Retryable, failedToReachNodeError).Once()
// Do the thing
retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
@@ -935,7 +920,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_ResumingFromCrash(t *testing.T) {
s, e := txmgr.GetGethSignedTx(attempt.SignedRawTx)
require.NoError(t, e)
return tx.Nonce() == uint64(firstNonce) && tx.GasPrice().Int64() == s.GasPrice().Int64()
- }), fromAddress).Return(clienttypes.Successful, errors.New("known transaction: a1313bd99a81fb4d8ad1d2e90b67c6b3fa77545c990d6251444b83b70b6f8980")).Once()
+ }), fromAddress).Return(commonclient.Successful, errors.New("known transaction: a1313bd99a81fb4d8ad1d2e90b67c6b3fa77545c990d6251444b83b70b6f8980")).Once()
// Do the thing
{
@@ -995,7 +980,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
// First send, replacement underpriced
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == uint64(0)
- }), fromAddress).Return(clienttypes.Successful, errors.New("replacement transaction underpriced")).Once()
+ }), fromAddress).Return(commonclient.Successful, errors.New("replacement transaction underpriced")).Once()
// Do the thing
retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
@@ -1032,7 +1017,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
etx := cltest.MustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, gasLimit, value, &cltest.FixtureChainID)
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == localNextNonce
- }), fromAddress).Return(clienttypes.Fatal, errors.New(fatalErrorExample)).Once()
+ }), fromAddress).Return(commonclient.Fatal, errors.New(fatalErrorExample)).Once()
retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
assert.NoError(t, err)
@@ -1070,6 +1055,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
FeeLimit: gasLimit,
State: txmgrcommon.TxUnstarted,
PipelineTaskRunID: uuid.NullUUID{UUID: tr.ID, Valid: true},
+ SignalCallback: true,
}
t.Run("with erroring callback bails out", func(t *testing.T) {
@@ -1082,7 +1068,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == localNextNonce
- }), fromAddress).Return(clienttypes.Fatal, errors.New(fatalErrorExample)).Once()
+ }), fromAddress).Return(commonclient.Fatal, errors.New(fatalErrorExample)).Once()
retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
require.Error(t, err)
@@ -1103,7 +1089,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == localNextNonce
- }), fromAddress).Return(clienttypes.Fatal, errors.New(fatalErrorExample)).Once()
+ }), fromAddress).Return(commonclient.Fatal, errors.New(fatalErrorExample)).Once()
{
retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
@@ -1113,17 +1099,12 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
// same as the parent test, but callback is set by ctor
t.Run("callback set by ctor", func(t *testing.T) {
- eventBroadcaster := pg.NewEventBroadcaster(cfg.Database().URL(), 0, 0, logger.TestLogger(t), uuid.New())
- err := eventBroadcaster.Start(testutils.Context(t))
- require.NoError(t, err)
- t.Cleanup(func() { assert.NoError(t, eventBroadcaster.Close()) })
lggr := logger.TestLogger(t)
estimator := gas.NewWrappedEvmEstimator(gas.NewFixedPriceEstimator(evmcfg.EVM().GasEstimator(), evmcfg.EVM().GasEstimator().BlockHistory(), lggr), evmcfg.EVM().GasEstimator().EIP1559DynamicFees(), nil)
txBuilder := txmgr.NewEvmTxAttemptBuilder(*ethClient.ConfiguredChainID(), evmcfg.EVM().GasEstimator(), ethKeyStore, estimator)
localNextNonce = getLocalNextNonce(t, eb, fromAddress)
ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(localNextNonce), nil).Once()
- eb2 := txmgr.NewEvmBroadcaster(txStore, txmgr.NewEvmTxmClient(ethClient), txmgr.NewEvmTxmConfig(evmcfg.EVM()), txmgr.NewEvmTxmFeeConfig(evmcfg.EVM().GasEstimator()), evmcfg.EVM().Transactions(), evmcfg.Database().Listener(), ethKeyStore, eventBroadcaster, txBuilder, nil, lggr, &testCheckerFactory{}, false)
- require.NoError(t, err)
+ eb2 := txmgr.NewEvmBroadcaster(txStore, txmgr.NewEvmTxmClient(ethClient), txmgr.NewEvmTxmConfig(evmcfg.EVM()), txmgr.NewEvmTxmFeeConfig(evmcfg.EVM().GasEstimator()), evmcfg.EVM().Transactions(), evmcfg.Database().Listener(), ethKeyStore, txBuilder, nil, lggr, &testCheckerFactory{}, false)
retryable, err := eb2.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
assert.NoError(t, err)
assert.False(t, retryable)
@@ -1140,7 +1121,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
etx := cltest.MustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, gasLimit, value, &cltest.FixtureChainID)
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == localNextNonce
- }), fromAddress).Return(clienttypes.ExceedsMaxFee, errors.New(TxFeeExceedsCapError)).Twice()
+ }), fromAddress).Return(commonclient.ExceedsMaxFee, errors.New(TxFeeExceedsCapError)).Twice()
// In the first case, the tx was NOT accepted into the mempool. In the case
// of multiple RPC nodes, it is possible that it can be accepted by
// another node even if the primary one returns "exceeds the configured
@@ -1198,7 +1179,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
etx := cltest.MustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, gasLimit, value, &cltest.FixtureChainID)
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == uint64(localNextNonce)
- }), fromAddress).Return(clienttypes.Unknown, errors.New(retryableErrorExample)).Once()
+ }), fromAddress).Return(commonclient.Unknown, errors.New(retryableErrorExample)).Once()
// Nonce is the same as localNextNonce, implying that this sent transaction has not been accepted
ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(localNextNonce), nil).Once()
@@ -1224,7 +1205,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
// Now on the second run, it is successful
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == localNextNonce
- }), fromAddress).Return(clienttypes.Successful, nil).Once()
+ }), fromAddress).Return(commonclient.Successful, nil).Once()
retryable, err = eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
assert.NoError(t, err)
@@ -1250,7 +1231,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
etx := cltest.MustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, gasLimit, value, &cltest.FixtureChainID)
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == uint64(localNextNonce)
- }), fromAddress).Return(clienttypes.Unknown, errors.New(retryableErrorExample)).Once()
+ }), fromAddress).Return(commonclient.Unknown, errors.New(retryableErrorExample)).Once()
ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), errors.New("pending nonce fetch failed")).Once()
// Do the thing
@@ -1276,7 +1257,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
// Now on the second run, it is successful
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == localNextNonce
- }), fromAddress).Return(clienttypes.Successful, nil).Once()
+ }), fromAddress).Return(commonclient.Successful, nil).Once()
retryable, err = eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
assert.NoError(t, err)
@@ -1302,7 +1283,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
etx := cltest.MustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, gasLimit, value, &cltest.FixtureChainID)
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == localNextNonce
- }), fromAddress).Return(clienttypes.Unknown, errors.New(retryableErrorExample)).Once()
+ }), fromAddress).Return(commonclient.Unknown, errors.New(retryableErrorExample)).Once()
// Nonce is one higher than localNextNonce, implying that despite the error, this sent transaction has been accepted into the mempool
ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(localNextNonce+1), nil).Once()
@@ -1336,17 +1317,17 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
// First was underpriced
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == localNextNonce && tx.GasPrice().Cmp(evmcfg.EVM().GasEstimator().PriceDefault().ToInt()) == 0
- }), fromAddress).Return(clienttypes.Underpriced, errors.New(underpricedError)).Once()
+ }), fromAddress).Return(commonclient.Underpriced, errors.New(underpricedError)).Once()
// Second with gas bump was still underpriced
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == localNextNonce && tx.GasPrice().Cmp(big.NewInt(25000000000)) == 0
- }), fromAddress).Return(clienttypes.Underpriced, errors.New(underpricedError)).Once()
+ }), fromAddress).Return(commonclient.Underpriced, errors.New(underpricedError)).Once()
// Third succeeded
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == localNextNonce && tx.GasPrice().Cmp(big.NewInt(30000000000)) == 0
- }), fromAddress).Return(clienttypes.Successful, nil).Once()
+ }), fromAddress).Return(commonclient.Successful, nil).Once()
// Do the thing
retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
@@ -1382,7 +1363,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == localNextNonce
- }), fromAddress).Return(clienttypes.Retryable, failedToReachNodeError).Once()
+ }), fromAddress).Return(commonclient.Retryable, failedToReachNodeError).Once()
// Do the thing
retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
@@ -1413,7 +1394,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == localNextNonce
- }), fromAddress).Return(clienttypes.Successful, errors.New(temporarilyUnderpricedError)).Once()
+ }), fromAddress).Return(commonclient.Successful, errors.New(temporarilyUnderpricedError)).Once()
// Do the thing
retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
@@ -1453,7 +1434,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
// First was underpriced
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == localNextNonce && tx.GasPrice().Cmp(evmcfg2.EVM().GasEstimator().PriceDefault().ToInt()) == 0
- }), fromAddress).Return(clienttypes.Underpriced, errors.New(underpricedError)).Once()
+ }), fromAddress).Return(commonclient.Underpriced, errors.New(underpricedError)).Once()
// Do the thing
retryable, err := eb2.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
@@ -1471,7 +1452,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
etx := cltest.MustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, gasLimit, value, &cltest.FixtureChainID)
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == localNextNonce
- }), fromAddress).Return(clienttypes.InsufficientFunds, errors.New(insufficientEthError)).Once()
+ }), fromAddress).Return(commonclient.InsufficientFunds, errors.New(insufficientEthError)).Once()
retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
require.Error(t, err)
@@ -1501,7 +1482,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
etx := cltest.MustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, gasLimit, value, &cltest.FixtureChainID)
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == localNextNonce
- }), fromAddress).Return(clienttypes.Retryable, errors.New(nonceGapError)).Once()
+ }), fromAddress).Return(commonclient.Retryable, errors.New(nonceGapError)).Once()
retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
require.Error(t, err)
@@ -1545,7 +1526,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
localNextNonce = getLocalNextNonce(t, eb, fromAddress)
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == localNextNonce && tx.GasTipCap().Cmp(big.NewInt(1)) == 0
- }), fromAddress).Return(clienttypes.Underpriced, errors.New(underpricedError)).Once()
+ }), fromAddress).Return(commonclient.Underpriced, errors.New(underpricedError)).Once()
// Check gas tip cap verification
retryable, err := eb2.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
@@ -1589,15 +1570,15 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
// Second was underpriced but above minimum
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == localNextNonce && tx.GasTipCap().Cmp(gasTipCapDefault.ToInt()) == 0
- }), fromAddress).Return(clienttypes.Underpriced, errors.New(underpricedError)).Once()
+ }), fromAddress).Return(commonclient.Underpriced, errors.New(underpricedError)).Once()
// Resend at the bumped price
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == localNextNonce && tx.GasTipCap().Cmp(big.NewInt(0).Add(gasTipCapDefault.ToInt(), evmcfg2.EVM().GasEstimator().BumpMin().ToInt())) == 0
- }), fromAddress).Return(clienttypes.Underpriced, errors.New(underpricedError)).Once()
+ }), fromAddress).Return(commonclient.Underpriced, errors.New(underpricedError)).Once()
// Final bump succeeds
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == localNextNonce && tx.GasTipCap().Cmp(big.NewInt(0).Add(gasTipCapDefault.ToInt(), big.NewInt(0).Mul(evmcfg2.EVM().GasEstimator().BumpMin().ToInt(), big.NewInt(2)))) == 0
- }), fromAddress).Return(clienttypes.Successful, nil).Once()
+ }), fromAddress).Return(commonclient.Successful, nil).Once()
retryable, err = eb2.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
require.NoError(t, err)
@@ -1724,29 +1705,6 @@ func TestEthBroadcaster_Trigger(t *testing.T) {
eb.Trigger(testutils.NewAddress())
}
-func TestEthBroadcaster_EthTxInsertEventCausesTriggerToFire(t *testing.T) {
- // NOTE: Testing triggers requires committing transactions and does not work with transactional tests
- cfg, db := heavyweight.FullTestDBV2(t, "eth_tx_triggers", nil)
- txStore := cltest.NewTestTxStore(t, db, cfg.Database())
-
- evmcfg := evmtest.NewChainScopedConfig(t, cfg)
-
- ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
- _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore)
- eventBroadcaster := cltest.NewEventBroadcaster(t, evmcfg.Database().URL())
- require.NoError(t, eventBroadcaster.Start(testutils.Context(t)))
- t.Cleanup(func() { require.NoError(t, eventBroadcaster.Close()) })
-
- ethTxInsertListener, err := eventBroadcaster.Subscribe(pg.ChannelInsertOnTx, "")
- require.NoError(t, err)
-
- // Give it some time to start listening
- time.Sleep(100 * time.Millisecond)
-
- cltest.MustCreateUnstartedGeneratedTx(t, txStore, fromAddress, &cltest.FixtureChainID)
- gomega.NewWithT(t).Eventually(ethTxInsertListener.Events()).Should(gomega.Receive())
-}
-
func TestEthBroadcaster_SyncNonce(t *testing.T) {
db := pgtest.NewSqlxDB(t)
ctx := testutils.Context(t)
@@ -1765,11 +1723,6 @@ func TestEthBroadcaster_SyncNonce(t *testing.T) {
ethNodeNonce := uint64(22)
- eventBroadcaster := pgmocks.NewEventBroadcaster(t)
- sub := pgmocks.NewSubscription(t)
- sub.On("Events").Return(make(<-chan pg.Event))
- sub.On("Close")
- eventBroadcaster.On("Subscribe", "evm.insert_on_txes", "").Return(sub, nil)
estimator := gas.NewWrappedEvmEstimator(gas.NewFixedPriceEstimator(evmcfg.EVM().GasEstimator(), evmcfg.EVM().GasEstimator().BlockHistory(), lggr), evmcfg.EVM().GasEstimator().EIP1559DynamicFees(), nil)
checkerFactory := &testCheckerFactory{}
@@ -1783,7 +1736,7 @@ func TestEthBroadcaster_SyncNonce(t *testing.T) {
addresses := []gethCommon.Address{fromAddress}
kst.On("EnabledAddressesForChain", &cltest.FixtureChainID).Return(addresses, nil).Once()
ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once()
- eb := txmgr.NewEvmBroadcaster(txStore, txmgr.NewEvmTxmClient(ethClient), evmTxmCfg, txmgr.NewEvmTxmFeeConfig(ge), evmcfg.EVM().Transactions(), cfg.Database().Listener(), kst, eventBroadcaster, txBuilder, nil, lggr, checkerFactory, false)
+ eb := txmgr.NewEvmBroadcaster(txStore, txmgr.NewEvmTxmClient(ethClient), evmTxmCfg, txmgr.NewEvmTxmFeeConfig(ge), evmcfg.EVM().Transactions(), cfg.Database().Listener(), kst, txBuilder, nil, lggr, checkerFactory, false)
err := eb.Start(testutils.Context(t))
assert.NoError(t, err)
@@ -1801,7 +1754,7 @@ func TestEthBroadcaster_SyncNonce(t *testing.T) {
addresses := []gethCommon.Address{fromAddress}
kst.On("EnabledAddressesForChain", &cltest.FixtureChainID).Return(addresses, nil).Once()
ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once()
- eb := txmgr.NewEvmBroadcaster(txStore, txmgr.NewEvmTxmClient(ethClient), evmTxmCfg, txmgr.NewEvmTxmFeeConfig(ge), evmcfg.EVM().Transactions(), cfg.Database().Listener(), kst, eventBroadcaster, txBuilder, txNonceSyncer, lggr, checkerFactory, true)
+ eb := txmgr.NewEvmBroadcaster(txStore, txmgr.NewEvmTxmClient(ethClient), evmTxmCfg, txmgr.NewEvmTxmFeeConfig(ge), evmcfg.EVM().Transactions(), cfg.Database().Listener(), kst, txBuilder, txNonceSyncer, lggr, checkerFactory, true)
ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(ethNodeNonce), nil).Once()
require.NoError(t, eb.Start(ctx))
@@ -1832,7 +1785,7 @@ func TestEthBroadcaster_SyncNonce(t *testing.T) {
kst.On("EnabledAddressesForChain", &cltest.FixtureChainID).Return(addresses, nil).Once()
ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once()
- eb := txmgr.NewEvmBroadcaster(txStore, txmgr.NewEvmTxmClient(ethClient), evmTxmCfg, txmgr.NewEvmTxmFeeConfig(evmcfg.EVM().GasEstimator()), evmcfg.EVM().Transactions(), cfg.Database().Listener(), kst, eventBroadcaster, txBuilder, txNonceSyncer, lggr, checkerFactory, true)
+ eb := txmgr.NewEvmBroadcaster(txStore, txmgr.NewEvmTxmClient(ethClient), evmTxmCfg, txmgr.NewEvmTxmFeeConfig(evmcfg.EVM().GasEstimator()), evmcfg.EVM().Transactions(), cfg.Database().Listener(), kst, txBuilder, txNonceSyncer, lggr, checkerFactory, true)
eb.XXXTestDisableUnstartedTxAutoProcessing()
ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), errors.New("something exploded")).Once()
diff --git a/core/chains/evm/txmgr/builder.go b/core/chains/evm/txmgr/builder.go
index 39781e83f4c..5e3d61301ca 100644
--- a/core/chains/evm/txmgr/builder.go
+++ b/core/chains/evm/txmgr/builder.go
@@ -4,7 +4,7 @@ import (
"math/big"
"time"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/common/txmgr"
txmgrtypes "github.com/smartcontractkit/chainlink/v2/common/txmgr/types"
@@ -16,7 +16,6 @@ import (
evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore"
- "github.com/smartcontractkit/chainlink/v2/core/services/pg"
)
// NewTxm constructs the necessary dependencies for the EvmTxm (broadcaster, confirmer, etc) and returns a new EvmTxManager
@@ -31,7 +30,6 @@ func NewTxm(
lggr logger.Logger,
logPoller logpoller.LogPoller,
keyStore keystore.Eth,
- eventBroadcaster pg.EventBroadcaster,
estimator gas.EvmFeeEstimator,
) (txm TxManager,
err error,
@@ -52,7 +50,7 @@ func NewTxm(
txmCfg := NewEvmTxmConfig(chainConfig) // wrap Evm specific config
feeCfg := NewEvmTxmFeeConfig(fCfg) // wrap Evm specific config
txmClient := NewEvmTxmClient(client) // wrap Evm specific client
- ethBroadcaster := NewEvmBroadcaster(txStore, txmClient, txmCfg, feeCfg, txConfig, listenerConfig, keyStore, eventBroadcaster, txAttemptBuilder, txNonceSyncer, lggr, checker, chainConfig.NonceAutoSync())
+ ethBroadcaster := NewEvmBroadcaster(txStore, txmClient, txmCfg, feeCfg, txConfig, listenerConfig, keyStore, txAttemptBuilder, txNonceSyncer, lggr, checker, chainConfig.NonceAutoSync())
ethConfirmer := NewEvmConfirmer(txStore, txmClient, txmCfg, feeCfg, txConfig, dbConfig, keyStore, txAttemptBuilder, lggr)
var ethResender *Resender
if txConfig.ResendAfterThreshold() > 0 {
@@ -123,12 +121,11 @@ func NewEvmBroadcaster(
txConfig txmgrtypes.BroadcasterTransactionsConfig,
listenerConfig txmgrtypes.BroadcasterListenerConfig,
keystore KeyStore,
- eventBroadcaster pg.EventBroadcaster,
txAttemptBuilder TxAttemptBuilder,
nonceSyncer NonceSyncer,
logger logger.Logger,
checkerFactory TransmitCheckerFactory,
autoSyncNonce bool,
) *Broadcaster {
- return txmgr.NewBroadcaster(txStore, client, chainConfig, feeConfig, txConfig, listenerConfig, keystore, eventBroadcaster, txAttemptBuilder, nonceSyncer, logger, checkerFactory, autoSyncNonce, stringToGethAddress, evmtypes.GenerateNextNonce)
+ return txmgr.NewBroadcaster(txStore, client, chainConfig, feeConfig, txConfig, listenerConfig, keystore, txAttemptBuilder, nonceSyncer, logger, checkerFactory, autoSyncNonce, evmtypes.GenerateNextNonce)
}
diff --git a/core/chains/evm/txmgr/client.go b/core/chains/evm/txmgr/client.go
index 150ee277577..8789f5f173e 100644
--- a/core/chains/evm/txmgr/client.go
+++ b/core/chains/evm/txmgr/client.go
@@ -13,8 +13,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rpc"
- clienttypes "github.com/smartcontractkit/chainlink/v2/common/chains/client"
- evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ commonclient "github.com/smartcontractkit/chainlink/v2/common/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
"github.com/smartcontractkit/chainlink/v2/core/logger"
@@ -24,10 +24,10 @@ import (
var _ TxmClient = (*evmTxmClient)(nil)
type evmTxmClient struct {
- client evmclient.Client
+ client client.Client
}
-func NewEvmTxmClient(c evmclient.Client) *evmTxmClient {
+func NewEvmTxmClient(c client.Client) *evmTxmClient {
return &evmTxmClient{client: c}
}
@@ -45,14 +45,14 @@ func (c *evmTxmClient) BatchSendTransactions(
batchSize int,
lggr logger.Logger,
) (
- codes []clienttypes.SendTxReturnCode,
+ codes []commonclient.SendTxReturnCode,
txErrs []error,
broadcastTime time.Time,
successfulTxIDs []int64,
err error,
) {
// preallocate
- codes = make([]clienttypes.SendTxReturnCode, len(attempts))
+ codes = make([]commonclient.SendTxReturnCode, len(attempts))
txErrs = make([]error, len(attempts))
reqs, broadcastTime, successfulTxIDs, batchErr := batchSendTransactions(ctx, attempts, batchSize, lggr, c.client)
@@ -80,7 +80,7 @@ func (c *evmTxmClient) BatchSendTransactions(
processingErr[i] = fmt.Errorf("failed to process tx (index %d): %w", i, signedErr)
return
}
- codes[i], txErrs[i] = evmclient.NewSendErrorReturnCode(reqs[i].Error, lggr, tx, attempts[i].Tx.FromAddress, c.client.IsL2())
+ codes[i], txErrs[i] = client.ClassifySendError(reqs[i].Error, lggr, tx, attempts[i].Tx.FromAddress, c.client.IsL2())
}(index)
}
wg.Wait()
@@ -88,11 +88,11 @@ func (c *evmTxmClient) BatchSendTransactions(
return
}
-func (c *evmTxmClient) SendTransactionReturnCode(ctx context.Context, etx Tx, attempt TxAttempt, lggr logger.Logger) (clienttypes.SendTxReturnCode, error) {
+func (c *evmTxmClient) SendTransactionReturnCode(ctx context.Context, etx Tx, attempt TxAttempt, lggr logger.Logger) (commonclient.SendTxReturnCode, error) {
signedTx, err := GetGethSignedTx(attempt.SignedRawTx)
if err != nil {
lggr.Criticalw("Fatal error signing transaction", "err", err, "etx", etx)
- return clienttypes.Fatal, err
+ return commonclient.Fatal, err
}
return c.client.SendTransactionReturnCode(ctx, signedTx, etx.FromAddress)
}
@@ -174,5 +174,5 @@ func (c *evmTxmClient) CallContract(ctx context.Context, a TxAttempt, blockNumbe
Data: a.Tx.EncodedPayload,
AccessList: nil,
}, blockNumber)
- return evmclient.ExtractRPCError(errCall)
+ return client.ExtractRPCError(errCall)
}
diff --git a/core/chains/evm/txmgr/common.go b/core/chains/evm/txmgr/common.go
index 5dbb2ef9611..37cc89dd7ac 100644
--- a/core/chains/evm/txmgr/common.go
+++ b/core/chains/evm/txmgr/common.go
@@ -69,10 +69,3 @@ func batchSendTransactions(
}
return reqs, now, successfulBroadcast, nil
}
-
-func stringToGethAddress(s string) (common.Address, error) {
- if !common.IsHexAddress(s) {
- return common.Address{}, fmt.Errorf("invalid hex address: %s", s)
- }
- return common.HexToAddress(s), nil
-}
diff --git a/core/chains/evm/txmgr/confirmer_test.go b/core/chains/evm/txmgr/confirmer_test.go
index 8fbdb7696d9..3a0d33f7ba0 100644
--- a/core/chains/evm/txmgr/confirmer_test.go
+++ b/core/chains/evm/txmgr/confirmer_test.go
@@ -20,12 +20,12 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
- clienttypes "github.com/smartcontractkit/chainlink/v2/common/chains/client"
+ commonclient "github.com/smartcontractkit/chainlink/v2/common/client"
commonfee "github.com/smartcontractkit/chainlink/v2/common/fee"
txmgrcommon "github.com/smartcontractkit/chainlink/v2/common/txmgr"
txmgrtypes "github.com/smartcontractkit/chainlink/v2/common/txmgr/types"
"github.com/smartcontractkit/chainlink/v2/core/assets"
- evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
evmconfig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
gasmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas/mocks"
@@ -564,7 +564,7 @@ func TestEthConfirmer_CheckForReceipts(t *testing.T) {
data, err := utils.ABIEncode(`[{"type":"uint256"}]`, big.NewInt(10))
require.NoError(t, err)
sig := utils.Keccak256Fixed([]byte(`MyError(uint256)`))
- ethClient.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(nil, &evmclient.JsonError{
+ ethClient.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(nil, &client.JsonError{
Code: 1,
Message: "reverted",
Data: utils.ConcatBytes(sig[:4], data),
@@ -1658,7 +1658,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary_WithConnectivityCheck(t *testing
require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attempt1.ID))
// Send transaction and assume success.
- ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, fromAddress).Return(clienttypes.Successful, nil).Once()
+ ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, fromAddress).Return(commonclient.Successful, nil).Once()
err := ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)
require.NoError(t, err)
@@ -1703,7 +1703,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary_WithConnectivityCheck(t *testing
require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attempt1.ID))
// Send transaction and assume success.
- ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, fromAddress).Return(clienttypes.Successful, nil).Once()
+ ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, fromAddress).Return(commonclient.Successful, nil).Once()
err := ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)
require.NoError(t, err)
@@ -1787,7 +1787,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) {
})).Return(ðTx, nil).Once()
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return tx.Nonce() == uint64(*etx.Sequence)
- }), fromAddress).Return(clienttypes.Fatal, errors.New("exceeds block gas limit")).Once()
+ }), fromAddress).Return(commonclient.Fatal, errors.New("exceeds block gas limit")).Once()
// Do the thing
require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead))
@@ -1819,7 +1819,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) {
// Once for the bumped attempt which exceeds limit
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return tx.Nonce() == uint64(*etx.Sequence) && tx.GasPrice().Int64() == int64(20000000000)
- }), fromAddress).Return(clienttypes.ExceedsMaxFee, errors.New("tx fee (1.10 ether) exceeds the configured cap (1.00 ether)")).Once()
+ }), fromAddress).Return(commonclient.ExceedsMaxFee, errors.New("tx fee (1.10 ether) exceeds the configured cap (1.00 ether)")).Once()
// Do the thing
require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead))
@@ -1858,7 +1858,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) {
})).Return(ðTx, nil).Once()
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0
- }), fromAddress).Return(clienttypes.Successful, nil).Once()
+ }), fromAddress).Return(commonclient.Successful, nil).Once()
// Do the thing
require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead))
@@ -1904,7 +1904,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) {
mock.Anything).Return(ðTx, nil).Once()
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0
- }), fromAddress).Return(clienttypes.Successful, fmt.Errorf("known transaction: %s", ethTx.Hash().Hex())).Once()
+ }), fromAddress).Return(commonclient.Successful, fmt.Errorf("known transaction: %s", ethTx.Hash().Hex())).Once()
// Do the thing
require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead))
@@ -1944,7 +1944,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) {
mock.Anything).Return(ðTx, nil).Once()
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0
- }), fromAddress).Return(clienttypes.TransactionAlreadyKnown, errors.New("nonce too low")).Once()
+ }), fromAddress).Return(commonclient.TransactionAlreadyKnown, errors.New("nonce too low")).Once()
// Do the thing
require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead))
@@ -1996,7 +1996,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) {
mock.Anything).Return(ðTx, nil).Once()
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return evmtypes.Nonce(tx.Nonce()) == n && expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0
- }), fromAddress).Return(clienttypes.Unknown, errors.New("some network error")).Once()
+ }), fromAddress).Return(commonclient.Unknown, errors.New("some network error")).Once()
// Do the thing
err := ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)
@@ -2024,7 +2024,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) {
n = *etx2.Sequence
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return evmtypes.Nonce(tx.Nonce()) == n && expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0
- }), fromAddress).Return(clienttypes.Successful, nil).Once()
+ }), fromAddress).Return(commonclient.Successful, nil).Once()
require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead))
@@ -2063,7 +2063,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) {
mock.Anything).Return(ðTx, nil).Once()
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return evmtypes.Nonce(tx.Nonce()) == n && expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0
- }), fromAddress).Return(clienttypes.TransactionAlreadyKnown, errors.New("nonce too low")).Once()
+ }), fromAddress).Return(commonclient.TransactionAlreadyKnown, errors.New("nonce too low")).Once()
// Creates new attempt as normal if currentHead is not high enough
require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead))
@@ -2104,7 +2104,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) {
mock.Anything).Return(ðTx, nil).Once()
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return evmtypes.Nonce(tx.Nonce()) == *etx3.Sequence && expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0
- }), fromAddress).Return(clienttypes.Successful, errors.New("replacement transaction underpriced")).Once()
+ }), fromAddress).Return(commonclient.Successful, errors.New("replacement transaction underpriced")).Once()
// Do the thing
require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead))
@@ -2141,7 +2141,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) {
mock.Anything).Return(ðTx, nil).Once()
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return evmtypes.Nonce(tx.Nonce()) == *etx3.Sequence && expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0
- }), fromAddress).Return(clienttypes.Successful, fmt.Errorf("known transaction: %s", ethTx.Hash().Hex())).Once()
+ }), fromAddress).Return(commonclient.Successful, fmt.Errorf("known transaction: %s", ethTx.Hash().Hex())).Once()
// Do the thing
require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead))
@@ -2180,7 +2180,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) {
mock.Anything).Return(ðTx, nil).Once()
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return evmtypes.Nonce(tx.Nonce()) == *etx3.Sequence && expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0
- }), fromAddress).Return(clienttypes.Successful, errors.New(temporarilyUnderpricedError)).Once()
+ }), fromAddress).Return(commonclient.Successful, errors.New(temporarilyUnderpricedError)).Once()
// Do the thing
require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead))
@@ -2209,7 +2209,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) {
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return evmtypes.Nonce(tx.Nonce()) == *etx3.Sequence && gasPrice.Cmp(tx.GasPrice()) == 0
- }), fromAddress).Return(clienttypes.Successful, errors.New("already known")).Once() // we already submitted at this price, now it's time to bump and submit again but since we simply resubmitted rather than increasing gas price, geth already knows about this tx
+ }), fromAddress).Return(commonclient.Successful, errors.New("already known")).Once() // we already submitted at this price, now it's time to bump and submit again but since we simply resubmitted rather than increasing gas price, geth already knows about this tx
// Do the thing
require.NoError(t, ec2.RebroadcastWhereNecessary(testutils.Context(t), currentHead))
@@ -2239,7 +2239,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) {
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return evmtypes.Nonce(tx.Nonce()) == *etx3.Sequence && gasPrice.Cmp(tx.GasPrice()) == 0
- }), fromAddress).Return(clienttypes.Successful, errors.New("already known")).Once() // we already submitted at this price, now it's time to bump and submit again but since we simply resubmitted rather than increasing gas price, geth already knows about this tx
+ }), fromAddress).Return(commonclient.Successful, errors.New("already known")).Once() // we already submitted at this price, now it's time to bump and submit again but since we simply resubmitted rather than increasing gas price, geth already knows about this tx
// Do the thing
require.NoError(t, ec2.RebroadcastWhereNecessary(testutils.Context(t), currentHead))
@@ -2278,7 +2278,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) {
gasTipCap := assets.GWei(42)
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return evmtypes.Nonce(tx.Nonce()) == *etx4.Sequence && gasTipCap.ToInt().Cmp(tx.GasTipCap()) == 0
- }), fromAddress).Return(clienttypes.Successful, nil).Once()
+ }), fromAddress).Return(commonclient.Successful, nil).Once()
require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead))
var err error
etx4, err = txStore.FindTxWithAttempts(etx4.ID)
@@ -2308,7 +2308,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) {
// Third attempt failed to bump, resubmits old one instead
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return evmtypes.Nonce(tx.Nonce()) == *etx4.Sequence && attempt4_2.Hash.String() == tx.Hash().String()
- }), fromAddress).Return(clienttypes.Successful, nil).Once()
+ }), fromAddress).Return(commonclient.Successful, nil).Once()
require.NoError(t, ec2.RebroadcastWhereNecessary(testutils.Context(t), currentHead))
var err error
@@ -2344,7 +2344,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) {
mock.Anything).Return(&ethTx, nil).Once()
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return evmtypes.Nonce(tx.Nonce()) == *etx4.Sequence && expectedBumpedTipCap.ToInt().Cmp(tx.GasTipCap()) == 0
- }), fromAddress).Return(clienttypes.Successful, errors.New("replacement transaction underpriced")).Once()
+ }), fromAddress).Return(commonclient.Successful, errors.New("replacement transaction underpriced")).Once()
// Do it
require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead))
@@ -2401,10 +2401,10 @@ func TestEthConfirmer_RebroadcastWhereNecessary_TerminallyUnderpriced_ThenGoesTh
// Fail the first time with terminally underpriced.
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, fromAddress).Return(
- clienttypes.Underpriced, errors.New("Transaction gas price is too low. It does not satisfy your node's minimal gas price")).Once()
+ commonclient.Underpriced, errors.New("Transaction gas price is too low. It does not satisfy your node's minimal gas price")).Once()
// Succeed the second time after bumping gas.
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, fromAddress).Return(
- clienttypes.Successful, nil).Once()
+ commonclient.Successful, nil).Once()
kst.On("SignTx", mock.Anything, mock.Anything, mock.Anything).Return(
signedTx, nil,
).Once()
@@ -2424,10 +2424,10 @@ func TestEthConfirmer_RebroadcastWhereNecessary_TerminallyUnderpriced_ThenGoesTh
// Fail a few times with terminally underpriced
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, fromAddress).Return(
- clienttypes.Underpriced, errors.New("Transaction gas price is too low. It does not satisfy your node's minimal gas price")).Times(3)
+ commonclient.Underpriced, errors.New("Transaction gas price is too low. It does not satisfy your node's minimal gas price")).Times(3)
// Succeed the second time after bumping gas.
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, fromAddress).Return(
- clienttypes.Successful, nil).Once()
+ commonclient.Successful, nil).Once()
signedLegacyTx := new(types.Transaction)
kst.On("SignTx", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return tx.Type() == 0x0 && tx.Nonce() == uint64(*etx.Sequence)
@@ -2456,10 +2456,10 @@ func TestEthConfirmer_RebroadcastWhereNecessary_TerminallyUnderpriced_ThenGoesTh
// Fail a few times with terminally underpriced
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, fromAddress).Return(
- clienttypes.Underpriced, errors.New("transaction underpriced")).Times(3)
+ commonclient.Underpriced, errors.New("transaction underpriced")).Times(3)
// Succeed the second time after bumping gas.
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, fromAddress).Return(
- clienttypes.Successful, nil).Once()
+ commonclient.Successful, nil).Once()
signedDxFeeTx := new(types.Transaction)
kst.On("SignTx", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return tx.Type() == 0x2 && tx.Nonce() == uint64(*etx.Sequence)
@@ -2517,7 +2517,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary_WhenOutOfEth(t *testing.T) {
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0
- }), fromAddress).Return(clienttypes.InsufficientFunds, insufficientEthError).Once()
+ }), fromAddress).Return(commonclient.InsufficientFunds, insufficientEthError).Once()
// Do the thing
require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead))
@@ -2543,7 +2543,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary_WhenOutOfEth(t *testing.T) {
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0
- }), fromAddress).Return(clienttypes.InsufficientFunds, insufficientEthError).Once()
+ }), fromAddress).Return(commonclient.InsufficientFunds, insufficientEthError).Once()
// Do the thing
require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead))
@@ -2568,7 +2568,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary_WhenOutOfEth(t *testing.T) {
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0
- }), fromAddress).Return(clienttypes.Successful, nil).Once()
+ }), fromAddress).Return(commonclient.Successful, nil).Once()
// Do the thing
require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead))
@@ -2600,7 +2600,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary_WhenOutOfEth(t *testing.T) {
cltest.MustInsertUnconfirmedEthTxWithInsufficientEthAttempt(t, txStore, nonce, fromAddress)
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return tx.Nonce() == uint64(n)
- }), fromAddress).Return(clienttypes.Successful, nil).Once()
+ }), fromAddress).Return(commonclient.Successful, nil).Once()
nonce++
}
@@ -2695,7 +2695,7 @@ func TestEthConfirmer_EnsureConfirmedTransactionsInLongestChain(t *testing.T) {
require.NoError(t, err)
// Keeps gas price and nonce the same
return atx.GasPrice().Cmp(tx.GasPrice()) == 0 && atx.Nonce() == tx.Nonce()
- }), fromAddress).Return(clienttypes.Successful, nil).Once()
+ }), fromAddress).Return(commonclient.Successful, nil).Once()
// Do the thing
require.NoError(t, ec.EnsureConfirmedTransactionsInLongestChain(testutils.Context(t), &head))
@@ -2718,7 +2718,7 @@ func TestEthConfirmer_EnsureConfirmedTransactionsInLongestChain(t *testing.T) {
cltest.MustInsertEthReceipt(t, txStore, head.Parent.Number, utils.NewHash(), attemptHash)
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, fromAddress).Return(
- clienttypes.Successful, nil).Once()
+ commonclient.Successful, nil).Once()
// Do the thing
require.NoError(t, ec.EnsureConfirmedTransactionsInLongestChain(testutils.Context(t), &head))
@@ -2753,7 +2753,7 @@ func TestEthConfirmer_EnsureConfirmedTransactionsInLongestChain(t *testing.T) {
s, err := txmgr.GetGethSignedTx(attempt3.SignedRawTx)
require.NoError(t, err)
return tx.Hash() == s.Hash()
- }), fromAddress).Return(clienttypes.Successful, nil).Once()
+ }), fromAddress).Return(commonclient.Successful, nil).Once()
// Do the thing
require.NoError(t, ec.EnsureConfirmedTransactionsInLongestChain(testutils.Context(t), &head))
@@ -2817,7 +2817,7 @@ func TestEthConfirmer_ForceRebroadcast(t *testing.T) {
tx.Gas() == uint64(overrideGasLimit) &&
reflect.DeepEqual(tx.Data(), etx1.EncodedPayload) &&
tx.To().String() == etx1.ToAddress.String()
- }), mock.Anything).Return(clienttypes.Successful, nil).Once()
+ }), mock.Anything).Return(commonclient.Successful, nil).Once()
require.NoError(t, ec.ForceRebroadcast([]evmtypes.Nonce{1}, gasPriceWei, fromAddress, overrideGasLimit))
})
@@ -2832,7 +2832,7 @@ func TestEthConfirmer_ForceRebroadcast(t *testing.T) {
tx.Gas() == uint64(etx1.FeeLimit) &&
reflect.DeepEqual(tx.Data(), etx1.EncodedPayload) &&
tx.To().String() == etx1.ToAddress.String()
- }), mock.Anything).Return(clienttypes.Successful, nil).Once()
+ }), mock.Anything).Return(commonclient.Successful, nil).Once()
require.NoError(t, ec.ForceRebroadcast([]evmtypes.Nonce{(1)}, gasPriceWei, fromAddress, 0))
})
@@ -2843,10 +2843,10 @@ func TestEthConfirmer_ForceRebroadcast(t *testing.T) {
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return tx.Nonce() == uint64(*etx1.Sequence) && tx.GasPrice().Int64() == gasPriceWei.Legacy.Int64() && tx.Gas() == uint64(overrideGasLimit)
- }), mock.Anything).Return(clienttypes.Successful, nil).Once()
+ }), mock.Anything).Return(commonclient.Successful, nil).Once()
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return tx.Nonce() == uint64(*etx2.Sequence) && tx.GasPrice().Int64() == gasPriceWei.Legacy.Int64() && tx.Gas() == uint64(overrideGasLimit)
- }), mock.Anything).Return(clienttypes.Successful, nil).Once()
+ }), mock.Anything).Return(commonclient.Successful, nil).Once()
require.NoError(t, ec.ForceRebroadcast([]evmtypes.Nonce{(1), (2)}, gasPriceWei, fromAddress, overrideGasLimit))
})
@@ -2857,10 +2857,10 @@ func TestEthConfirmer_ForceRebroadcast(t *testing.T) {
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return tx.Nonce() == uint64(1)
- }), mock.Anything).Return(clienttypes.Successful, nil).Once()
+ }), mock.Anything).Return(commonclient.Successful, nil).Once()
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return tx.Nonce() == uint64(2)
- }), mock.Anything).Return(clienttypes.Successful, nil).Once()
+ }), mock.Anything).Return(commonclient.Successful, nil).Once()
for i := 3; i <= 5; i++ {
nonce := i
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
@@ -2870,7 +2870,7 @@ func TestEthConfirmer_ForceRebroadcast(t *testing.T) {
*tx.To() == fromAddress &&
tx.Value().Cmp(big.NewInt(0)) == 0 &&
len(tx.Data()) == 0
- }), mock.Anything).Return(clienttypes.Successful, nil).Once()
+ }), mock.Anything).Return(commonclient.Successful, nil).Once()
}
nonces := []evmtypes.Nonce{(1), (2), (3), (4), (5)}
@@ -2883,7 +2883,7 @@ func TestEthConfirmer_ForceRebroadcast(t *testing.T) {
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool {
return tx.Nonce() == uint64(0) && tx.GasPrice().Int64() == gasPriceWei.Legacy.Int64() && uint32(tx.Gas()) == config.EVM().GasEstimator().LimitDefault()
- }), mock.Anything).Return(clienttypes.Successful, nil).Once()
+ }), mock.Anything).Return(commonclient.Successful, nil).Once()
require.NoError(t, ec.ForceRebroadcast([]evmtypes.Nonce{(0)}, gasPriceWei, fromAddress, 0))
})
@@ -2933,11 +2933,12 @@ func TestEthConfirmer_ResumePendingRuns(t *testing.T) {
etx := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 1, 1, fromAddress)
cltest.MustInsertEthReceipt(t, txStore, head.Number-minConfirmations, head.Hash, etx.TxAttempts[0].Hash)
- pgtest.MustExec(t, db, `UPDATE evm.txes SET pipeline_task_run_id = $1, min_confirmations = $2 WHERE id = $3`, &tr.ID, minConfirmations, etx.ID)
+ // Setting both signal_callback and callback_completed to TRUE to simulate a completed pipeline task
+ // It would only be in a state past suspended if the resume callback was called and callback_completed was set to TRUE
+ pgtest.MustExec(t, db, `UPDATE evm.txes SET pipeline_task_run_id = $1, min_confirmations = $2, signal_callback = TRUE, callback_completed = TRUE WHERE id = $3`, &tr.ID, minConfirmations, etx.ID)
err := ec.ResumePendingTaskRuns(testutils.Context(t), &head)
require.NoError(t, err)
-
})
t.Run("doesn't process task runs where the receipt is younger than minConfirmations", func(t *testing.T) {
@@ -2952,15 +2953,15 @@ func TestEthConfirmer_ResumePendingRuns(t *testing.T) {
etx := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 2, 1, fromAddress)
cltest.MustInsertEthReceipt(t, txStore, head.Number, head.Hash, etx.TxAttempts[0].Hash)
- pgtest.MustExec(t, db, `UPDATE evm.txes SET pipeline_task_run_id = $1, min_confirmations = $2 WHERE id = $3`, &tr.ID, minConfirmations, etx.ID)
+ pgtest.MustExec(t, db, `UPDATE evm.txes SET pipeline_task_run_id = $1, min_confirmations = $2, signal_callback = TRUE WHERE id = $3`, &tr.ID, minConfirmations, etx.ID)
err := ec.ResumePendingTaskRuns(testutils.Context(t), &head)
require.NoError(t, err)
-
})
t.Run("processes eth_txes with receipts older than minConfirmations", func(t *testing.T) {
ch := make(chan interface{})
+ nonce := evmtypes.Nonce(3)
var err error
ec := cltest.NewEthConfirmer(t, txStore, ethClient, evmcfg, ethKeyStore, func(id uuid.UUID, value interface{}, thisErr error) error {
err = thisErr
@@ -2972,15 +2973,19 @@ func TestEthConfirmer_ResumePendingRuns(t *testing.T) {
tr := cltest.MustInsertUnfinishedPipelineTaskRun(t, db, run.ID)
pgtest.MustExec(t, db, `UPDATE pipeline_runs SET state = 'suspended' WHERE id = $1`, run.ID)
- etx := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 3, 1, fromAddress)
+ etx := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, int64(nonce), 1, fromAddress)
pgtest.MustExec(t, db, `UPDATE evm.txes SET meta='{"FailOnRevert": true}'`)
receipt := cltest.MustInsertEthReceipt(t, txStore, head.Number-minConfirmations, head.Hash, etx.TxAttempts[0].Hash)
- pgtest.MustExec(t, db, `UPDATE evm.txes SET pipeline_task_run_id = $1, min_confirmations = $2 WHERE id = $3`, &tr.ID, minConfirmations, etx.ID)
+ pgtest.MustExec(t, db, `UPDATE evm.txes SET pipeline_task_run_id = $1, min_confirmations = $2, signal_callback = TRUE WHERE id = $3`, &tr.ID, minConfirmations, etx.ID)
go func() {
err2 := ec.ResumePendingTaskRuns(testutils.Context(t), &head)
require.NoError(t, err2)
+ // Retrieve Tx to check if callback completed flag was set to true
+ updateTx, err3 := txStore.FindTxWithSequence(testutils.Context(t), fromAddress, nonce)
+ require.NoError(t, err3)
+ require.Equal(t, true, updateTx.CallbackCompleted)
}()
select {
@@ -3000,6 +3005,7 @@ func TestEthConfirmer_ResumePendingRuns(t *testing.T) {
t.Run("processes eth_txes with receipt older than minConfirmations that reverted", func(t *testing.T) {
ch := make(chan interface{})
+ nonce := evmtypes.Nonce(4)
var err error
ec := cltest.NewEthConfirmer(t, txStore, ethClient, evmcfg, ethKeyStore, func(id uuid.UUID, value interface{}, thisErr error) error {
err = thisErr
@@ -3011,17 +3017,21 @@ func TestEthConfirmer_ResumePendingRuns(t *testing.T) {
tr := cltest.MustInsertUnfinishedPipelineTaskRun(t, db, run.ID)
pgtest.MustExec(t, db, `UPDATE pipeline_runs SET state = 'suspended' WHERE id = $1`, run.ID)
- etx := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 4, 1, fromAddress)
+ etx := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, int64(nonce), 1, fromAddress)
pgtest.MustExec(t, db, `UPDATE evm.txes SET meta='{"FailOnRevert": true}'`)
// receipt is not passed through as a value since it reverted and caused an error
cltest.MustInsertRevertedEthReceipt(t, txStore, head.Number-minConfirmations, head.Hash, etx.TxAttempts[0].Hash)
- pgtest.MustExec(t, db, `UPDATE evm.txes SET pipeline_task_run_id = $1, min_confirmations = $2 WHERE id = $3`, &tr.ID, minConfirmations, etx.ID)
+ pgtest.MustExec(t, db, `UPDATE evm.txes SET pipeline_task_run_id = $1, min_confirmations = $2, signal_callback = TRUE WHERE id = $3`, &tr.ID, minConfirmations, etx.ID)
go func() {
err2 := ec.ResumePendingTaskRuns(testutils.Context(t), &head)
require.NoError(t, err2)
+ // Retrieve Tx to check if callback completed flag was set to true
+ updateTx, err3 := txStore.FindTxWithSequence(testutils.Context(t), fromAddress, nonce)
+ require.NoError(t, err3)
+ require.Equal(t, true, updateTx.CallbackCompleted)
}()
select {
@@ -3036,6 +3046,28 @@ func TestEthConfirmer_ResumePendingRuns(t *testing.T) {
t.Fatal("no value received")
}
})
+
+ t.Run("does not mark callback complete if callback fails", func(t *testing.T) {
+ nonce := evmtypes.Nonce(5)
+ ec := cltest.NewEthConfirmer(t, txStore, ethClient, evmcfg, ethKeyStore, func(uuid.UUID, interface{}, error) error {
+ return errors.New("error")
+ })
+
+ run := cltest.MustInsertPipelineRun(t, db)
+ tr := cltest.MustInsertUnfinishedPipelineTaskRun(t, db, run.ID)
+
+ etx := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, int64(nonce), 1, fromAddress)
+ cltest.MustInsertEthReceipt(t, txStore, head.Number-minConfirmations, head.Hash, etx.TxAttempts[0].Hash)
+ pgtest.MustExec(t, db, `UPDATE evm.txes SET pipeline_task_run_id = $1, min_confirmations = $2, signal_callback = TRUE WHERE id = $3`, &tr.ID, minConfirmations, etx.ID)
+
+ err := ec.ResumePendingTaskRuns(testutils.Context(t), &head)
+ require.Error(t, err)
+
+ // Retrieve Tx to check if callback completed flag was left unchanged
+ updateTx, err := txStore.FindTxWithSequence(testutils.Context(t), fromAddress, nonce)
+ require.NoError(t, err)
+ require.Equal(t, false, updateTx.CallbackCompleted)
+ })
}
func ptr[T any](t T) *T { return &t }
diff --git a/core/chains/evm/txmgr/evm_tx_store.go b/core/chains/evm/txmgr/evm_tx_store.go
index 86a06b60250..c3371fee80b 100644
--- a/core/chains/evm/txmgr/evm_tx_store.go
+++ b/core/chains/evm/txmgr/evm_tx_store.go
@@ -13,9 +13,9 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/google/uuid"
"github.com/jackc/pgconn"
+ "github.com/jmoiron/sqlx"
"github.com/lib/pq"
pkgerrors "github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
nullv4 "gopkg.in/guregu/null.v4"
"github.com/smartcontractkit/chainlink/v2/common/txmgr"
@@ -115,7 +115,7 @@ type rawOnchainReceipt = evmtypes.Receipt
// Does not map to a single database table.
// It's comprised of fields from different tables.
type dbReceiptPlus struct {
- ID uuid.UUID `db:"id"`
+ ID uuid.UUID `db:"pipeline_task_run_id"`
Receipt evmtypes.Receipt `db:"receipt"`
FailOnRevert bool `db:"FailOnRevert"`
}
@@ -180,10 +180,15 @@ type DbEthTx struct {
// chain.
TransmitChecker *datatypes.JSON
InitialBroadcastAt *time.Time
+ // Marks tx requiring callback
+ SignalCallback bool
+ // Marks tx callback as signaled
+ CallbackCompleted bool
}
func (db *DbEthTx) FromTx(tx *Tx) {
db.ID = tx.ID
+ db.IdempotencyKey = tx.IdempotencyKey
db.FromAddress = tx.FromAddress
db.ToAddress = tx.ToAddress
db.EncodedPayload = tx.EncodedPayload
@@ -199,6 +204,8 @@ func (db *DbEthTx) FromTx(tx *Tx) {
db.MinConfirmations = tx.MinConfirmations
db.TransmitChecker = tx.TransmitChecker
db.InitialBroadcastAt = tx.InitialBroadcastAt
+ db.SignalCallback = tx.SignalCallback
+ db.CallbackCompleted = tx.CallbackCompleted
if tx.ChainID != nil {
db.EVMChainID = *utils.NewBig(tx.ChainID)
@@ -232,6 +239,8 @@ func (db DbEthTx) ToTx(tx *Tx) {
tx.ChainID = db.EVMChainID.ToInt()
tx.TransmitChecker = db.TransmitChecker
tx.InitialBroadcastAt = db.InitialBroadcastAt
+ tx.SignalCallback = db.SignalCallback
+ tx.CallbackCompleted = db.CallbackCompleted
}
func dbEthTxsToEvmEthTxs(dbEthTxs []DbEthTx) []Tx {
@@ -511,8 +520,8 @@ func (o *evmTxStore) InsertTx(etx *Tx) error {
if etx.CreatedAt == (time.Time{}) {
etx.CreatedAt = time.Now()
}
- const insertEthTxSQL = `INSERT INTO evm.txes (nonce, from_address, to_address, encoded_payload, value, gas_limit, error, broadcast_at, initial_broadcast_at, created_at, state, meta, subject, pipeline_task_run_id, min_confirmations, evm_chain_id, transmit_checker) VALUES (
-:nonce, :from_address, :to_address, :encoded_payload, :value, :gas_limit, :error, :broadcast_at, :initial_broadcast_at, :created_at, :state, :meta, :subject, :pipeline_task_run_id, :min_confirmations, :evm_chain_id, :transmit_checker
+ const insertEthTxSQL = `INSERT INTO evm.txes (nonce, from_address, to_address, encoded_payload, value, gas_limit, error, broadcast_at, initial_broadcast_at, created_at, state, meta, subject, pipeline_task_run_id, min_confirmations, evm_chain_id, transmit_checker, idempotency_key, signal_callback, callback_completed) VALUES (
+:nonce, :from_address, :to_address, :encoded_payload, :value, :gas_limit, :error, :broadcast_at, :initial_broadcast_at, :created_at, :state, :meta, :subject, :pipeline_task_run_id, :min_confirmations, :evm_chain_id, :transmit_checker, :idempotency_key, :signal_callback, :callback_completed
) RETURNING *`
var dbTx DbEthTx
dbTx.FromTx(etx)
@@ -548,14 +557,14 @@ func (o *evmTxStore) FindTxWithAttempts(etxID int64) (etx Tx, err error) {
err = o.q.Transaction(func(tx pg.Queryer) error {
var dbEtx DbEthTx
if err = tx.Get(&dbEtx, `SELECT * FROM evm.txes WHERE id = $1 ORDER BY created_at ASC, id ASC`, etxID); err != nil {
- return pkgerrors.Wrapf(err, "failed to find eth_tx with id %d", etxID)
+ return pkgerrors.Wrapf(err, "failed to find evm.tx with id %d", etxID)
}
dbEtx.ToTx(&etx)
if err = o.loadTxAttemptsAtomic(&etx, pg.WithQueryer(tx)); err != nil {
- return pkgerrors.Wrapf(err, "failed to load evm.tx_attempts for eth_tx with id %d", etxID)
+ return pkgerrors.Wrapf(err, "failed to load evm.tx_attempts for evm.tx with id %d", etxID)
}
if err = loadEthTxAttemptsReceipts(tx, &etx); err != nil {
- return pkgerrors.Wrapf(err, "failed to load evm.receipts for eth_tx with id %d", etxID)
+ return pkgerrors.Wrapf(err, "failed to load evm.receipts for evm.tx with id %d", etxID)
}
return nil
}, pg.OptReadOnlyTx())
@@ -637,6 +646,8 @@ func loadEthTxesAttemptsReceipts(q pg.Queryer, etxs []*Tx) (err error) {
for _, receipt := range receipts {
attempt := attemptHashM[receipt.TxHash]
+ // Although the attempts struct supports multiple receipts, the expectation for EVM is that there is only one receipt
+ // per tx and therefore attempt too.
attempt.Receipts = append(attempt.Receipts, receipt)
}
return nil
@@ -938,25 +949,40 @@ WHERE evm.tx_attempts.state = 'in_progress' AND evm.txes.from_address = $1 AND e
return attempts, pkgerrors.Wrap(err, "getInProgressEthTxAttempts failed")
}
-func (o *evmTxStore) FindReceiptsPendingConfirmation(ctx context.Context, blockNum int64, chainID *big.Int) (receiptsPlus []ReceiptPlus, err error) {
+// Find confirmed txes requiring callback but have not yet been signaled
+func (o *evmTxStore) FindTxesPendingCallback(ctx context.Context, blockNum int64, chainID *big.Int) (receiptsPlus []ReceiptPlus, err error) {
var rs []dbReceiptPlus
var cancel context.CancelFunc
ctx, cancel = o.mergeContexts(ctx)
defer cancel()
err = o.q.SelectContext(ctx, &rs, `
- SELECT pipeline_task_runs.id, evm.receipts.receipt, COALESCE((evm.txes.meta->>'FailOnRevert')::boolean, false) "FailOnRevert" FROM pipeline_task_runs
- INNER JOIN pipeline_runs ON pipeline_runs.id = pipeline_task_runs.pipeline_run_id
- INNER JOIN evm.txes ON evm.txes.pipeline_task_run_id = pipeline_task_runs.id
+ SELECT evm.txes.pipeline_task_run_id, evm.receipts.receipt, COALESCE((evm.txes.meta->>'FailOnRevert')::boolean, false) "FailOnRevert" FROM evm.txes
INNER JOIN evm.tx_attempts ON evm.txes.id = evm.tx_attempts.eth_tx_id
INNER JOIN evm.receipts ON evm.tx_attempts.hash = evm.receipts.tx_hash
- WHERE pipeline_runs.state = 'suspended' AND evm.receipts.block_number <= ($1 - evm.txes.min_confirmations) AND evm.txes.evm_chain_id = $2
+ WHERE evm.txes.pipeline_task_run_id IS NOT NULL AND evm.txes.signal_callback = TRUE AND evm.txes.callback_completed = FALSE
+ AND evm.receipts.block_number <= ($1 - evm.txes.min_confirmations) AND evm.txes.evm_chain_id = $2
`, blockNum, chainID.String())
-
+ if err != nil {
+ return nil, fmt.Errorf("failed to retrieve transactions pending pipeline resume callback: %w", err)
+ }
receiptsPlus = fromDBReceiptsPlus(rs)
return
}
+// Update tx to mark that its callback has been signaled
+func (o *evmTxStore) UpdateTxCallbackCompleted(ctx context.Context, pipelineTaskRunId uuid.UUID, chainId *big.Int) error {
+ var cancel context.CancelFunc
+ ctx, cancel = o.mergeContexts(ctx)
+ defer cancel()
+ qq := o.q.WithOpts(pg.WithParentCtx(ctx))
+ _, err := qq.Exec(`UPDATE evm.txes SET callback_completed = TRUE WHERE pipeline_task_run_id = $1 AND evm_chain_id = $2`, pipelineTaskRunId, chainId.String())
+ if err != nil {
+ return fmt.Errorf("failed to mark callback completed for transaction: %w", err)
+ }
+ return nil
+}
+
func (o *evmTxStore) FindLatestSequence(ctx context.Context, fromAddress common.Address, chainId *big.Int) (nonce evmtypes.Nonce, err error) {
var cancel context.CancelFunc
ctx, cancel = o.mergeContexts(ctx)
@@ -1658,12 +1684,12 @@ func (o *evmTxStore) CreateTransaction(ctx context.Context, txRequest TxRequest,
}
}
err = tx.Get(&dbEtx, `
-INSERT INTO evm.txes (from_address, to_address, encoded_payload, value, gas_limit, state, created_at, meta, subject, evm_chain_id, min_confirmations, pipeline_task_run_id, transmit_checker, idempotency_key)
+INSERT INTO evm.txes (from_address, to_address, encoded_payload, value, gas_limit, state, created_at, meta, subject, evm_chain_id, min_confirmations, pipeline_task_run_id, transmit_checker, idempotency_key, signal_callback)
VALUES (
-$1,$2,$3,$4,$5,'unstarted',NOW(),$6,$7,$8,$9,$10,$11,$12
+$1,$2,$3,$4,$5,'unstarted',NOW(),$6,$7,$8,$9,$10,$11,$12,$13
)
RETURNING "txes".*
-`, txRequest.FromAddress, txRequest.ToAddress, txRequest.EncodedPayload, assets.Eth(txRequest.Value), txRequest.FeeLimit, txRequest.Meta, txRequest.Strategy.Subject(), chainID.String(), txRequest.MinConfirmations, txRequest.PipelineTaskRunID, txRequest.Checker, txRequest.IdempotencyKey)
+`, txRequest.FromAddress, txRequest.ToAddress, txRequest.EncodedPayload, assets.Eth(txRequest.Value), txRequest.FeeLimit, txRequest.Meta, txRequest.Strategy.Subject(), chainID.String(), txRequest.MinConfirmations, txRequest.PipelineTaskRunID, txRequest.Checker, txRequest.IdempotencyKey, txRequest.SignalCallback)
if err != nil {
return pkgerrors.Wrap(err, "CreateEthTransaction failed to insert evm tx")
}
@@ -1776,6 +1802,72 @@ func (o *evmTxStore) Abandon(ctx context.Context, chainID *big.Int, addr common.
return err
}
+// Find transactions by a field in the TxMeta blob and transaction states
+func (o *evmTxStore) FindTxesByMetaFieldAndStates(ctx context.Context, metaField string, metaValue string, states []txmgrtypes.TxState, chainID *big.Int) ([]*Tx, error) {
+ var cancel context.CancelFunc
+ ctx, cancel = o.mergeContexts(ctx)
+ defer cancel()
+ qq := o.q.WithOpts(pg.WithParentCtx(ctx))
+ var dbEtxs []DbEthTx
+ sql := fmt.Sprintf("SELECT * FROM evm.txes WHERE evm_chain_id = $1 AND meta->>'%s' = $2 AND state = ANY($3)", metaField)
+ err := qq.Select(&dbEtxs, sql, chainID.String(), metaValue, pq.Array(states))
+ txes := make([]*Tx, len(dbEtxs))
+ dbEthTxsToEvmEthTxPtrs(dbEtxs, txes)
+ return txes, pkgerrors.Wrap(err, "failed to FindTxesByMetaFieldAndStates")
+}
+
+// Find transactions with a non-null TxMeta field that was provided by transaction states
+func (o *evmTxStore) FindTxesWithMetaFieldByStates(ctx context.Context, metaField string, states []txmgrtypes.TxState, chainID *big.Int) (txes []*Tx, err error) {
+ var cancel context.CancelFunc
+ ctx, cancel = o.mergeContexts(ctx)
+ defer cancel()
+ qq := o.q.WithOpts(pg.WithParentCtx(ctx))
+ var dbEtxs []DbEthTx
+ sql := fmt.Sprintf("SELECT * FROM evm.txes WHERE meta->'%s' IS NOT NULL AND state = ANY($1) AND evm_chain_id = $2", metaField)
+ err = qq.Select(&dbEtxs, sql, pq.Array(states), chainID.String())
+ txes = make([]*Tx, len(dbEtxs))
+ dbEthTxsToEvmEthTxPtrs(dbEtxs, txes)
+ return txes, pkgerrors.Wrap(err, "failed to FindTxesWithMetaFieldByStates")
+}
+
+// Find transactions with a non-null TxMeta field that was provided and a receipt block number greater than or equal to the one provided
+func (o *evmTxStore) FindTxesWithMetaFieldByReceiptBlockNum(ctx context.Context, metaField string, blockNum int64, chainID *big.Int) (txes []*Tx, err error) {
+ var cancel context.CancelFunc
+ ctx, cancel = o.mergeContexts(ctx)
+ defer cancel()
+ qq := o.q.WithOpts(pg.WithParentCtx(ctx))
+ var dbEtxs []DbEthTx
+ sql := fmt.Sprintf("SELECT et.* FROM evm.txes et JOIN evm.tx_attempts eta on et.id = eta.eth_tx_id JOIN evm.receipts er on eta.hash = er.tx_hash WHERE et.meta->'%s' IS NOT NULL AND er.block_number >= $1 AND et.evm_chain_id = $2", metaField)
+ err = qq.Select(&dbEtxs, sql, blockNum, chainID.String())
+ txes = make([]*Tx, len(dbEtxs))
+ dbEthTxsToEvmEthTxPtrs(dbEtxs, txes)
+ return txes, pkgerrors.Wrap(err, "failed to FindTxesWithMetaFieldByReceiptBlockNum")
+}
+
+// Find transactions loaded with transaction attempts and receipts by transaction IDs and states
+func (o *evmTxStore) FindTxesWithAttemptsAndReceiptsByIdsAndState(ctx context.Context, ids []big.Int, states []txmgrtypes.TxState, chainID *big.Int) (txes []*Tx, err error) {
+ var cancel context.CancelFunc
+ ctx, cancel = o.mergeContexts(ctx)
+ defer cancel()
+ qq := o.q.WithOpts(pg.WithParentCtx(ctx))
+ err = qq.Transaction(func(tx pg.Queryer) error {
+ var dbEtxs []DbEthTx
+ if err = tx.Select(&dbEtxs, `SELECT * FROM evm.txes WHERE id = ANY($1) AND state = ANY($2) AND evm_chain_id = $3`, pq.Array(ids), pq.Array(states), chainID.String()); err != nil {
+ return pkgerrors.Wrapf(err, "failed to find evm.txes")
+ }
+ txes = make([]*Tx, len(dbEtxs))
+ dbEthTxsToEvmEthTxPtrs(dbEtxs, txes)
+ if err = o.LoadTxesAttempts(txes, pg.WithQueryer(tx)); err != nil {
+ return pkgerrors.Wrapf(err, "failed to load evm.tx_attempts for evm.tx")
+ }
+ if err = loadEthTxesAttemptsReceipts(tx, txes); err != nil {
+ return pkgerrors.Wrapf(err, "failed to load evm.receipts for evm.tx")
+ }
+ return nil
+ })
+ return txes, pkgerrors.Wrap(err, "FindTxesWithAttemptsAndReceiptsByIdsAndState failed")
+}
+
// Returns a context that contains the values of the provided context,
// and which is canceled when either the provided contextg or TxStore parent context is canceled.
func (o *evmTxStore) mergeContexts(ctx context.Context) (context.Context, context.CancelFunc) {
diff --git a/core/chains/evm/txmgr/evm_tx_store_test.go b/core/chains/evm/txmgr/evm_tx_store_test.go
index ba02f118cf5..f8798f9f836 100644
--- a/core/chains/evm/txmgr/evm_tx_store_test.go
+++ b/core/chains/evm/txmgr/evm_tx_store_test.go
@@ -21,6 +21,7 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
+ "github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
"github.com/smartcontractkit/chainlink/v2/core/store/models"
"github.com/smartcontractkit/chainlink/v2/core/utils"
@@ -617,7 +618,7 @@ func TestORM_GetInProgressTxAttempts(t *testing.T) {
assert.Equal(t, etx.TxAttempts[0].ID, attempts[0].ID)
}
-func TestORM_FindReceiptsPendingConfirmation(t *testing.T) {
+func TestORM_FindTxesPendingCallback(t *testing.T) {
t.Parallel()
db := pgtest.NewSqlxDB(t)
@@ -645,21 +646,50 @@ func TestORM_FindReceiptsPendingConfirmation(t *testing.T) {
minConfirmations := int64(2)
- run := cltest.MustInsertPipelineRun(t, db)
- tr := cltest.MustInsertUnfinishedPipelineTaskRun(t, db, run.ID)
- pgtest.MustExec(t, db, `UPDATE pipeline_runs SET state = 'suspended' WHERE id = $1`, run.ID)
-
- etx := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 3, 1, fromAddress)
+ // Suspended run waiting for callback
+ run1 := cltest.MustInsertPipelineRun(t, db)
+ tr1 := cltest.MustInsertUnfinishedPipelineTaskRun(t, db, run1.ID)
+ pgtest.MustExec(t, db, `UPDATE pipeline_runs SET state = 'suspended' WHERE id = $1`, run1.ID)
+ etx1 := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 3, 1, fromAddress)
pgtest.MustExec(t, db, `UPDATE evm.txes SET meta='{"FailOnRevert": true}'`)
- attempt := etx.TxAttempts[0]
- cltest.MustInsertEthReceipt(t, txStore, head.Number-minConfirmations, head.Hash, attempt.Hash)
-
- pgtest.MustExec(t, db, `UPDATE evm.txes SET pipeline_task_run_id = $1, min_confirmations = $2 WHERE id = $3`, &tr.ID, minConfirmations, etx.ID)
-
- receiptsPlus, err := txStore.FindReceiptsPendingConfirmation(testutils.Context(t), head.Number, ethClient.ConfiguredChainID())
+ attempt1 := etx1.TxAttempts[0]
+ cltest.MustInsertEthReceipt(t, txStore, head.Number-minConfirmations, head.Hash, attempt1.Hash)
+ pgtest.MustExec(t, db, `UPDATE evm.txes SET pipeline_task_run_id = $1, min_confirmations = $2, signal_callback = TRUE WHERE id = $3`, &tr1.ID, minConfirmations, etx1.ID)
+
+ // Callback to pipeline service completed. Should be ignored
+ run2 := cltest.MustInsertPipelineRunWithStatus(t, db, 0, pipeline.RunStatusCompleted)
+ tr2 := cltest.MustInsertUnfinishedPipelineTaskRun(t, db, run2.ID)
+ etx2 := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 4, 1, fromAddress)
+ pgtest.MustExec(t, db, `UPDATE evm.txes SET meta='{"FailOnRevert": false}'`)
+ attempt2 := etx2.TxAttempts[0]
+ cltest.MustInsertEthReceipt(t, txStore, head.Number-minConfirmations, head.Hash, attempt2.Hash)
+ pgtest.MustExec(t, db, `UPDATE evm.txes SET pipeline_task_run_id = $1, min_confirmations = $2, signal_callback = TRUE, callback_completed = TRUE WHERE id = $3`, &tr2.ID, minConfirmations, etx2.ID)
+
+ // Suspended run younger than minConfirmations. Should be ignored
+ run3 := cltest.MustInsertPipelineRun(t, db)
+ tr3 := cltest.MustInsertUnfinishedPipelineTaskRun(t, db, run3.ID)
+ pgtest.MustExec(t, db, `UPDATE pipeline_runs SET state = 'suspended' WHERE id = $1`, run3.ID)
+ etx3 := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 5, 1, fromAddress)
+ pgtest.MustExec(t, db, `UPDATE evm.txes SET meta='{"FailOnRevert": false}'`)
+ attempt3 := etx3.TxAttempts[0]
+ cltest.MustInsertEthReceipt(t, txStore, head.Number, head.Hash, attempt3.Hash)
+ pgtest.MustExec(t, db, `UPDATE evm.txes SET pipeline_task_run_id = $1, min_confirmations = $2, signal_callback = TRUE WHERE id = $3`, &tr3.ID, minConfirmations, etx3.ID)
+
+ // Tx not marked for callback. Should be ignored
+ etx4 := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 6, 1, fromAddress)
+ attempt4 := etx4.TxAttempts[0]
+ cltest.MustInsertEthReceipt(t, txStore, head.Number, head.Hash, attempt4.Hash)
+ pgtest.MustExec(t, db, `UPDATE evm.txes SET min_confirmations = $1 WHERE id = $2`, minConfirmations, etx4.ID)
+
+ // Confirmed Tx without receipts. Should be ignored
+ etx5 := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 7, 1, fromAddress)
+ pgtest.MustExec(t, db, `UPDATE evm.txes SET min_confirmations = $1 WHERE id = $2`, minConfirmations, etx5.ID)
+
+ // Search evm.txes table for tx requiring callback
+ receiptsPlus, err := txStore.FindTxesPendingCallback(testutils.Context(t), head.Number, ethClient.ConfiguredChainID())
require.NoError(t, err)
assert.Len(t, receiptsPlus, 1)
- assert.Equal(t, tr.ID, receiptsPlus[0].ID)
+ assert.Equal(t, tr1.ID, receiptsPlus[0].ID)
}
func Test_FindTxWithIdempotencyKey(t *testing.T) {
@@ -1569,6 +1599,35 @@ func TestORM_CreateTransaction(t *testing.T) {
assert.Equal(t, tx1.GetID(), tx2.GetID())
})
+
+ t.Run("sets signal callback flag", func(t *testing.T) {
+ subject := uuid.New()
+ strategy := newMockTxStrategy(t)
+ strategy.On("Subject").Return(uuid.NullUUID{UUID: subject, Valid: true})
+ strategy.On("PruneQueue", mock.Anything, mock.AnythingOfType("*txmgr.evmTxStore")).Return(int64(0), nil)
+ etx, err := txStore.CreateTransaction(testutils.Context(t), txmgr.TxRequest{
+ FromAddress: fromAddress,
+ ToAddress: toAddress,
+ EncodedPayload: payload,
+ FeeLimit: gasLimit,
+ Meta: nil,
+ Strategy: strategy,
+ SignalCallback: true,
+ }, ethClient.ConfiguredChainID())
+ assert.NoError(t, err)
+
+ assert.Greater(t, etx.ID, int64(0))
+ assert.Equal(t, fromAddress, etx.FromAddress)
+ assert.Equal(t, true, etx.SignalCallback)
+
+ cltest.AssertCount(t, db, "evm.txes", 3)
+
+ var dbEthTx txmgr.DbEthTx
+ require.NoError(t, db.Get(&dbEthTx, `SELECT * FROM evm.txes ORDER BY id DESC LIMIT 1`))
+
+ assert.Equal(t, fromAddress, dbEthTx.FromAddress)
+ assert.Equal(t, true, dbEthTx.SignalCallback)
+ })
}
func TestORM_PruneUnstartedTxQueue(t *testing.T) {
diff --git a/core/chains/evm/txmgr/mocks/evm_tx_store.go b/core/chains/evm/txmgr/mocks/evm_tx_store.go
index 69a0d257f7a..f491bda40bb 100644
--- a/core/chains/evm/txmgr/mocks/evm_tx_store.go
+++ b/core/chains/evm/txmgr/mocks/evm_tx_store.go
@@ -183,32 +183,6 @@ func (_m *EvmTxStore) FindNextUnstartedTransactionFromAddress(ctx context.Contex
return r0
}
-// FindReceiptsPendingConfirmation provides a mock function with given fields: ctx, blockNum, chainID
-func (_m *EvmTxStore) FindReceiptsPendingConfirmation(ctx context.Context, blockNum int64, chainID *big.Int) ([]types.ReceiptPlus[*evmtypes.Receipt], error) {
- ret := _m.Called(ctx, blockNum, chainID)
-
- var r0 []types.ReceiptPlus[*evmtypes.Receipt]
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, int64, *big.Int) ([]types.ReceiptPlus[*evmtypes.Receipt], error)); ok {
- return rf(ctx, blockNum, chainID)
- }
- if rf, ok := ret.Get(0).(func(context.Context, int64, *big.Int) []types.ReceiptPlus[*evmtypes.Receipt]); ok {
- r0 = rf(ctx, blockNum, chainID)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]types.ReceiptPlus[*evmtypes.Receipt])
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, int64, *big.Int) error); ok {
- r1 = rf(ctx, blockNum, chainID)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
-}
-
// FindTransactionsConfirmedInBlockRange provides a mock function with given fields: ctx, highBlockNumber, lowBlockNumber, chainID
func (_m *EvmTxStore) FindTransactionsConfirmedInBlockRange(ctx context.Context, highBlockNumber int64, lowBlockNumber int64, chainID *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) {
ret := _m.Called(ctx, highBlockNumber, lowBlockNumber, chainID)
@@ -467,6 +441,136 @@ func (_m *EvmTxStore) FindTxWithSequence(ctx context.Context, fromAddress common
return r0, r1
}
+// FindTxesByMetaFieldAndStates provides a mock function with given fields: ctx, metaField, metaValue, states, chainID
+func (_m *EvmTxStore) FindTxesByMetaFieldAndStates(ctx context.Context, metaField string, metaValue string, states []types.TxState, chainID *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) {
+ ret := _m.Called(ctx, metaField, metaValue, states, chainID)
+
+ var r0 []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, []types.TxState, *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok {
+ return rf(ctx, metaField, metaValue, states, chainID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, string, []types.TxState, *big.Int) []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok {
+ r0 = rf(ctx, metaField, metaValue, states, chainID)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee])
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, string, []types.TxState, *big.Int) error); ok {
+ r1 = rf(ctx, metaField, metaValue, states, chainID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// FindTxesPendingCallback provides a mock function with given fields: ctx, blockNum, chainID
+func (_m *EvmTxStore) FindTxesPendingCallback(ctx context.Context, blockNum int64, chainID *big.Int) ([]types.ReceiptPlus[*evmtypes.Receipt], error) {
+ ret := _m.Called(ctx, blockNum, chainID)
+
+ var r0 []types.ReceiptPlus[*evmtypes.Receipt]
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, int64, *big.Int) ([]types.ReceiptPlus[*evmtypes.Receipt], error)); ok {
+ return rf(ctx, blockNum, chainID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, int64, *big.Int) []types.ReceiptPlus[*evmtypes.Receipt]); ok {
+ r0 = rf(ctx, blockNum, chainID)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]types.ReceiptPlus[*evmtypes.Receipt])
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, int64, *big.Int) error); ok {
+ r1 = rf(ctx, blockNum, chainID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// FindTxesWithAttemptsAndReceiptsByIdsAndState provides a mock function with given fields: ctx, ids, states, chainID
+func (_m *EvmTxStore) FindTxesWithAttemptsAndReceiptsByIdsAndState(ctx context.Context, ids []big.Int, states []types.TxState, chainID *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) {
+ ret := _m.Called(ctx, ids, states, chainID)
+
+ var r0 []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, []big.Int, []types.TxState, *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok {
+ return rf(ctx, ids, states, chainID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, []big.Int, []types.TxState, *big.Int) []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok {
+ r0 = rf(ctx, ids, states, chainID)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee])
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, []big.Int, []types.TxState, *big.Int) error); ok {
+ r1 = rf(ctx, ids, states, chainID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// FindTxesWithMetaFieldByReceiptBlockNum provides a mock function with given fields: ctx, metaField, blockNum, chainID
+func (_m *EvmTxStore) FindTxesWithMetaFieldByReceiptBlockNum(ctx context.Context, metaField string, blockNum int64, chainID *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) {
+ ret := _m.Called(ctx, metaField, blockNum, chainID)
+
+ var r0 []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, int64, *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok {
+ return rf(ctx, metaField, blockNum, chainID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, int64, *big.Int) []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok {
+ r0 = rf(ctx, metaField, blockNum, chainID)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee])
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, int64, *big.Int) error); ok {
+ r1 = rf(ctx, metaField, blockNum, chainID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// FindTxesWithMetaFieldByStates provides a mock function with given fields: ctx, metaField, states, chainID
+func (_m *EvmTxStore) FindTxesWithMetaFieldByStates(ctx context.Context, metaField string, states []types.TxState, chainID *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) {
+ ret := _m.Called(ctx, metaField, states, chainID)
+
+ var r0 []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string, []types.TxState, *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok {
+ return rf(ctx, metaField, states, chainID)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string, []types.TxState, *big.Int) []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok {
+ r0 = rf(ctx, metaField, states, chainID)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee])
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string, []types.TxState, *big.Int) error); ok {
+ r1 = rf(ctx, metaField, states, chainID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
// FindTxsRequiringGasBump provides a mock function with given fields: ctx, address, blockNum, gasBumpThreshold, depth, chainID
func (_m *EvmTxStore) FindTxsRequiringGasBump(ctx context.Context, address common.Address, blockNum int64, gasBumpThreshold int64, depth int64, chainID *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) {
ret := _m.Called(ctx, address, blockNum, gasBumpThreshold, depth, chainID)
@@ -914,6 +1018,20 @@ func (_m *EvmTxStore) UpdateTxAttemptInProgressToBroadcast(ctx context.Context,
return r0
}
+// UpdateTxCallbackCompleted provides a mock function with given fields: ctx, pipelineTaskRunRid, chainId
+func (_m *EvmTxStore) UpdateTxCallbackCompleted(ctx context.Context, pipelineTaskRunRid uuid.UUID, chainId *big.Int) error {
+ ret := _m.Called(ctx, pipelineTaskRunRid, chainId)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, uuid.UUID, *big.Int) error); ok {
+ r0 = rf(ctx, pipelineTaskRunRid, chainId)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
// UpdateTxFatalError provides a mock function with given fields: ctx, etx
func (_m *EvmTxStore) UpdateTxFatalError(ctx context.Context, etx *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) error {
ret := _m.Called(ctx, etx)
diff --git a/core/chains/evm/txmgr/test_helpers.go b/core/chains/evm/txmgr/test_helpers.go
new file mode 100644
index 00000000000..f9c0423a620
--- /dev/null
+++ b/core/chains/evm/txmgr/test_helpers.go
@@ -0,0 +1,151 @@
+package txmgr
+
+import (
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+
+ "github.com/smartcontractkit/chainlink/v2/core/assets"
+ "github.com/smartcontractkit/chainlink/v2/core/config"
+ "github.com/smartcontractkit/chainlink/v2/core/services/pg"
+
+ evmconfig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config"
+)
+
+func ptr[T any](t T) *T { return &t }
+
+type TestDatabaseConfig struct {
+ config.Database
+ defaultQueryTimeout time.Duration
+}
+
+func (d *TestDatabaseConfig) DefaultQueryTimeout() time.Duration {
+ return d.defaultQueryTimeout
+}
+
+func (d *TestDatabaseConfig) LogSQL() bool {
+ return false
+}
+
+type TestListenerConfig struct {
+ config.Listener
+}
+
+func (l *TestListenerConfig) FallbackPollInterval() time.Duration {
+ return 1 * time.Minute
+}
+
+func (d *TestDatabaseConfig) Listener() config.Listener {
+ return &TestListenerConfig{}
+}
+
+type TestEvmConfig struct {
+ evmconfig.EVM
+ MaxInFlight uint32
+ ReaperInterval time.Duration
+ ReaperThreshold time.Duration
+ ResendAfterThreshold time.Duration
+ BumpThreshold uint64
+ MaxQueued uint64
+}
+
+func (e *TestEvmConfig) Transactions() evmconfig.Transactions {
+ return &transactionsConfig{e: e}
+}
+
+func (e *TestEvmConfig) NonceAutoSync() bool { return true }
+
+func (e *TestEvmConfig) FinalityDepth() uint32 { return 42 }
+
+type TestGasEstimatorConfig struct {
+ bumpThreshold uint64
+}
+
+func (g *TestGasEstimatorConfig) BlockHistory() evmconfig.BlockHistory {
+ return &TestBlockHistoryConfig{}
+}
+
+func (g *TestGasEstimatorConfig) EIP1559DynamicFees() bool { return false }
+func (g *TestGasEstimatorConfig) LimitDefault() uint32 { return 42 }
+func (g *TestGasEstimatorConfig) BumpPercent() uint16 { return 42 }
+func (g *TestGasEstimatorConfig) BumpThreshold() uint64 { return g.bumpThreshold }
+func (g *TestGasEstimatorConfig) BumpMin() *assets.Wei { return assets.NewWeiI(42) }
+func (g *TestGasEstimatorConfig) FeeCapDefault() *assets.Wei { return assets.NewWeiI(42) }
+func (g *TestGasEstimatorConfig) PriceDefault() *assets.Wei { return assets.NewWeiI(42) }
+func (g *TestGasEstimatorConfig) TipCapDefault() *assets.Wei { return assets.NewWeiI(42) }
+func (g *TestGasEstimatorConfig) TipCapMin() *assets.Wei { return assets.NewWeiI(42) }
+func (g *TestGasEstimatorConfig) LimitMax() uint32 { return 0 }
+func (g *TestGasEstimatorConfig) LimitMultiplier() float32 { return 0 }
+func (g *TestGasEstimatorConfig) BumpTxDepth() uint32 { return 42 }
+func (g *TestGasEstimatorConfig) LimitTransfer() uint32 { return 42 }
+func (g *TestGasEstimatorConfig) PriceMax() *assets.Wei { return assets.NewWeiI(42) }
+func (g *TestGasEstimatorConfig) PriceMin() *assets.Wei { return assets.NewWeiI(42) }
+func (g *TestGasEstimatorConfig) Mode() string { return "FixedPrice" }
+func (g *TestGasEstimatorConfig) LimitJobType() evmconfig.LimitJobType {
+ return &TestLimitJobTypeConfig{}
+}
+func (g *TestGasEstimatorConfig) PriceMaxKey(addr common.Address) *assets.Wei {
+ return assets.NewWeiI(42)
+}
+
+func (e *TestEvmConfig) GasEstimator() evmconfig.GasEstimator {
+ return &TestGasEstimatorConfig{bumpThreshold: e.BumpThreshold}
+}
+
+type TestLimitJobTypeConfig struct {
+}
+
+func (l *TestLimitJobTypeConfig) OCR() *uint32 { return ptr(uint32(0)) }
+func (l *TestLimitJobTypeConfig) OCR2() *uint32 { return ptr(uint32(0)) }
+func (l *TestLimitJobTypeConfig) DR() *uint32 { return ptr(uint32(0)) }
+func (l *TestLimitJobTypeConfig) FM() *uint32 { return ptr(uint32(0)) }
+func (l *TestLimitJobTypeConfig) Keeper() *uint32 { return ptr(uint32(0)) }
+func (l *TestLimitJobTypeConfig) VRF() *uint32 { return ptr(uint32(0)) }
+
+type TestBlockHistoryConfig struct {
+ evmconfig.BlockHistory
+}
+
+func (b *TestBlockHistoryConfig) BatchSize() uint32 { return 42 }
+func (b *TestBlockHistoryConfig) BlockDelay() uint16 { return 42 }
+func (b *TestBlockHistoryConfig) BlockHistorySize() uint16 { return 42 }
+func (b *TestBlockHistoryConfig) EIP1559FeeCapBufferBlocks() uint16 { return 42 }
+func (b *TestBlockHistoryConfig) TransactionPercentile() uint16 { return 42 }
+
+type transactionsConfig struct {
+ evmconfig.Transactions
+ e *TestEvmConfig
+}
+
+func (*transactionsConfig) ForwardersEnabled() bool { return true }
+func (t *transactionsConfig) MaxInFlight() uint32 { return t.e.MaxInFlight }
+func (t *transactionsConfig) MaxQueued() uint64 { return t.e.MaxQueued }
+func (t *transactionsConfig) ReaperInterval() time.Duration { return t.e.ReaperInterval }
+func (t *transactionsConfig) ReaperThreshold() time.Duration { return t.e.ReaperThreshold }
+func (t *transactionsConfig) ResendAfterThreshold() time.Duration { return t.e.ResendAfterThreshold }
+
+type MockConfig struct {
+ EvmConfig *TestEvmConfig
+ RpcDefaultBatchSize uint32
+ finalityDepth uint32
+ finalityTagEnabled bool
+}
+
+func (c *MockConfig) EVM() evmconfig.EVM {
+ return c.EvmConfig
+}
+
+func (c *MockConfig) NonceAutoSync() bool { return true }
+func (c *MockConfig) ChainType() config.ChainType { return "" }
+func (c *MockConfig) FinalityDepth() uint32 { return c.finalityDepth }
+func (c *MockConfig) SetFinalityDepth(fd uint32) { c.finalityDepth = fd }
+func (c *MockConfig) FinalityTagEnabled() bool { return c.finalityTagEnabled }
+func (c *MockConfig) RPCDefaultBatchSize() uint32 { return c.RpcDefaultBatchSize }
+
+func MakeTestConfigs(t *testing.T) (*MockConfig, *TestDatabaseConfig, *TestEvmConfig) {
+ db := &TestDatabaseConfig{defaultQueryTimeout: pg.DefaultQueryTimeout}
+ ec := &TestEvmConfig{BumpThreshold: 42, MaxInFlight: uint32(42), MaxQueued: uint64(0), ReaperInterval: time.Duration(0), ReaperThreshold: time.Duration(0)}
+ config := &MockConfig{EvmConfig: ec}
+ return config, db, ec
+}
diff --git a/core/chains/evm/txmgr/transmitchecker.go b/core/chains/evm/txmgr/transmitchecker.go
index 4636b708489..eb6edd3f587 100644
--- a/core/chains/evm/txmgr/transmitchecker.go
+++ b/core/chains/evm/txmgr/transmitchecker.go
@@ -217,7 +217,7 @@ func (v *VRFV1Checker) Check(
requestTransactionReceipt := &gethtypes.Receipt{}
batch := []rpc.BatchElem{{
Method: "eth_getBlockByNumber",
- Args: []interface{}{nil},
+ Args: []interface{}{"latest", false},
Result: mostRecentHead,
}, {
Method: "eth_getTransactionReceipt",
diff --git a/core/chains/evm/txmgr/txmgr_test.go b/core/chains/evm/txmgr/txmgr_test.go
index 6cb43b27716..4e201b9c6fe 100644
--- a/core/chains/evm/txmgr/txmgr_test.go
+++ b/core/chains/evm/txmgr/txmgr_test.go
@@ -8,7 +8,6 @@ import (
"testing"
"time"
- "github.com/ethereum/go-ethereum/common"
gethcommon "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/google/uuid"
@@ -16,7 +15,7 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
txmgrcommon "github.com/smartcontractkit/chainlink/v2/common/txmgr"
commontxmmocks "github.com/smartcontractkit/chainlink/v2/common/txmgr/types/mocks"
@@ -27,7 +26,6 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
- "github.com/smartcontractkit/chainlink/v2/core/config"
"github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest"
@@ -36,13 +34,11 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore"
ksmocks "github.com/smartcontractkit/chainlink/v2/core/services/keystore/mocks"
- "github.com/smartcontractkit/chainlink/v2/core/services/pg"
- pgmocks "github.com/smartcontractkit/chainlink/v2/core/services/pg/mocks"
"github.com/smartcontractkit/chainlink/v2/core/utils"
)
func makeTestEvmTxm(
- t *testing.T, db *sqlx.DB, ethClient evmclient.Client, estimator gas.EvmFeeEstimator, ccfg txmgr.ChainConfig, fcfg txmgr.FeeConfig, txConfig evmconfig.Transactions, dbConfig txmgr.DatabaseConfig, listenerConfig txmgr.ListenerConfig, keyStore keystore.Eth, eventBroadcaster pg.EventBroadcaster) (txmgr.TxManager, error) {
+ t *testing.T, db *sqlx.DB, ethClient evmclient.Client, estimator gas.EvmFeeEstimator, ccfg txmgr.ChainConfig, fcfg txmgr.FeeConfig, txConfig evmconfig.Transactions, dbConfig txmgr.DatabaseConfig, listenerConfig txmgr.ListenerConfig, keyStore keystore.Eth) (txmgr.TxManager, error) {
lggr := logger.TestLogger(t)
lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), ethClient, lggr, 100*time.Millisecond, false, 2, 3, 2, 1000)
@@ -66,7 +62,6 @@ func makeTestEvmTxm(
lggr,
lp,
keyStore,
- eventBroadcaster,
estimator)
}
@@ -78,12 +73,12 @@ func TestTxm_SendNativeToken_DoesNotSendToZero(t *testing.T) {
to := utils.ZeroAddress
value := assets.NewEth(1).ToInt()
- config, dbConfig, evmConfig := makeConfigs(t)
+ config, dbConfig, evmConfig := txmgr.MakeTestConfigs(t)
keyStore := cltest.NewKeyStore(t, db, dbConfig).Eth()
ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
estimator := gas.NewEstimator(logger.TestLogger(t), ethClient, config, evmConfig.GasEstimator())
- txm, err := makeTestEvmTxm(t, db, ethClient, estimator, evmConfig, evmConfig.GasEstimator(), evmConfig.Transactions(), dbConfig, dbConfig.Listener(), keyStore, nil)
+ txm, err := makeTestEvmTxm(t, db, ethClient, estimator, evmConfig, evmConfig.GasEstimator(), evmConfig.Transactions(), dbConfig, dbConfig.Listener(), keyStore)
require.NoError(t, err)
_, err = txm.SendNativeToken(testutils.Context(t), big.NewInt(0), from, to, *value, 21000)
@@ -104,12 +99,12 @@ func TestTxm_CreateTransaction(t *testing.T) {
gasLimit := uint32(1000)
payload := []byte{1, 2, 3}
- config, dbConfig, evmConfig := makeConfigs(t)
+ config, dbConfig, evmConfig := txmgr.MakeTestConfigs(t)
ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
estimator := gas.NewEstimator(logger.TestLogger(t), ethClient, config, evmConfig.GasEstimator())
- txm, err := makeTestEvmTxm(t, db, ethClient, estimator, evmConfig, evmConfig.GasEstimator(), evmConfig.Transactions(), dbConfig, dbConfig.Listener(), kst.Eth(), nil)
+ txm, err := makeTestEvmTxm(t, db, ethClient, estimator, evmConfig, evmConfig.GasEstimator(), evmConfig.Transactions(), dbConfig, dbConfig.Listener(), kst.Eth())
require.NoError(t, err)
t.Run("with queue under capacity inserts eth_tx", func(t *testing.T) {
@@ -117,7 +112,7 @@ func TestTxm_CreateTransaction(t *testing.T) {
strategy := newMockTxStrategy(t)
strategy.On("Subject").Return(uuid.NullUUID{UUID: subject, Valid: true})
strategy.On("PruneQueue", mock.Anything, mock.Anything).Return(int64(0), nil)
- evmConfig.maxQueued = uint64(1)
+ evmConfig.MaxQueued = uint64(1)
etx, err := txm.CreateTransaction(testutils.Context(t), txmgr.TxRequest{
FromAddress: fromAddress,
ToAddress: toAddress,
@@ -153,7 +148,7 @@ func TestTxm_CreateTransaction(t *testing.T) {
cltest.MustInsertUnconfirmedEthTxWithInsufficientEthAttempt(t, txStore, 0, fromAddress)
t.Run("with queue at capacity does not insert eth_tx", func(t *testing.T) {
- evmConfig.maxQueued = uint64(1)
+ evmConfig.MaxQueued = uint64(1)
_, err := txm.CreateTransaction(testutils.Context(t), txmgr.TxRequest{
FromAddress: fromAddress,
ToAddress: testutils.NewAddress(),
@@ -167,7 +162,7 @@ func TestTxm_CreateTransaction(t *testing.T) {
})
t.Run("doesn't insert eth_tx if a matching tx already exists for that pipeline_task_run_id", func(t *testing.T) {
- evmConfig.maxQueued = uint64(3)
+ evmConfig.MaxQueued = uint64(3)
id := uuid.New()
tx1, err := txm.CreateTransaction(testutils.Context(t), txmgr.TxRequest{
FromAddress: fromAddress,
@@ -223,7 +218,7 @@ func TestTxm_CreateTransaction(t *testing.T) {
checker := txmgr.TransmitCheckerSpec{
CheckerType: txmgr.TransmitCheckerTypeSimulate,
}
- evmConfig.maxQueued = uint64(1)
+ evmConfig.MaxQueued = uint64(1)
etx, err := txm.CreateTransaction(testutils.Context(t), txmgr.TxRequest{
FromAddress: fromAddress,
ToAddress: toAddress,
@@ -262,7 +257,7 @@ func TestTxm_CreateTransaction(t *testing.T) {
SubID: &testDefaultSubID,
GlobalSubID: &testDefaultGlobalSubID,
}
- evmConfig.maxQueued = uint64(1)
+ evmConfig.MaxQueued = uint64(1)
checker := txmgr.TransmitCheckerSpec{
CheckerType: txmgr.TransmitCheckerTypeVRFV2,
VRFCoordinatorAddress: testutils.NewAddressPtr(),
@@ -294,7 +289,7 @@ func TestTxm_CreateTransaction(t *testing.T) {
t.Run("forwards tx when a proper forwarder is set up", func(t *testing.T) {
pgtest.MustExec(t, db, `DELETE FROM evm.txes`)
pgtest.MustExec(t, db, `DELETE FROM evm.forwarders`)
- evmConfig.maxQueued = uint64(1)
+ evmConfig.MaxQueued = uint64(1)
// Create mock forwarder, mock authorizedsenders call.
form := forwarders.NewORM(db, logger.TestLogger(t), cfg.Database())
@@ -324,7 +319,7 @@ func TestTxm_CreateTransaction(t *testing.T) {
})
t.Run("insert Tx successfully with a IdempotencyKey", func(t *testing.T) {
- evmConfig.maxQueued = uint64(3)
+ evmConfig.MaxQueued = uint64(3)
id := uuid.New()
idempotencyKey := "1"
_, err := txm.CreateTransaction(testutils.Context(t), txmgr.TxRequest{
@@ -340,7 +335,7 @@ func TestTxm_CreateTransaction(t *testing.T) {
})
t.Run("doesn't insert eth_tx if a matching tx already exists for that IdempotencyKey", func(t *testing.T) {
- evmConfig.maxQueued = uint64(3)
+ evmConfig.MaxQueued = uint64(3)
id := uuid.New()
idempotencyKey := "2"
tx1, err := txm.CreateTransaction(testutils.Context(t), txmgr.TxRequest{
@@ -373,138 +368,6 @@ func newMockTxStrategy(t *testing.T) *commontxmmocks.TxStrategy {
return commontxmmocks.NewTxStrategy(t)
}
-type databaseConfig struct {
- config.Database
- defaultQueryTimeout time.Duration
-}
-
-func (d *databaseConfig) DefaultQueryTimeout() time.Duration {
- return d.defaultQueryTimeout
-}
-
-func (d *databaseConfig) LogSQL() bool {
- return false
-}
-
-type listenerConfig struct {
- config.Listener
-}
-
-func (l *listenerConfig) FallbackPollInterval() time.Duration {
- return 1 * time.Minute
-}
-
-func (d *databaseConfig) Listener() config.Listener {
- return &listenerConfig{}
-}
-
-type evmConfig struct {
- evmconfig.EVM
- maxInFlight uint32
- reaperInterval time.Duration
- reaperThreshold time.Duration
- resendAfterThreshold time.Duration
- bumpThreshold uint64
- maxQueued uint64
-}
-
-func (e *evmConfig) Transactions() evmconfig.Transactions {
- return &transactionsConfig{e: e}
-}
-
-func (e *evmConfig) GasEstimator() evmconfig.GasEstimator {
- return &gasEstimatorConfig{bumpThreshold: e.bumpThreshold}
-}
-
-func (e *evmConfig) NonceAutoSync() bool { return true }
-
-func (e *evmConfig) FinalityDepth() uint32 { return 42 }
-
-type gasEstimatorConfig struct {
- bumpThreshold uint64
-}
-
-func (g *gasEstimatorConfig) BlockHistory() evmconfig.BlockHistory {
- return &blockHistoryConfig{}
-}
-
-func (g *gasEstimatorConfig) EIP1559DynamicFees() bool { return false }
-func (g *gasEstimatorConfig) LimitDefault() uint32 { return 42 }
-func (g *gasEstimatorConfig) BumpPercent() uint16 { return 42 }
-func (g *gasEstimatorConfig) BumpThreshold() uint64 { return g.bumpThreshold }
-func (g *gasEstimatorConfig) BumpMin() *assets.Wei { return assets.NewWeiI(42) }
-func (g *gasEstimatorConfig) FeeCapDefault() *assets.Wei { return assets.NewWeiI(42) }
-func (g *gasEstimatorConfig) PriceDefault() *assets.Wei { return assets.NewWeiI(42) }
-func (g *gasEstimatorConfig) TipCapDefault() *assets.Wei { return assets.NewWeiI(42) }
-func (g *gasEstimatorConfig) TipCapMin() *assets.Wei { return assets.NewWeiI(42) }
-func (g *gasEstimatorConfig) LimitMax() uint32 { return 0 }
-func (g *gasEstimatorConfig) LimitMultiplier() float32 { return 0 }
-func (g *gasEstimatorConfig) BumpTxDepth() uint32 { return 42 }
-func (g *gasEstimatorConfig) LimitTransfer() uint32 { return 42 }
-func (g *gasEstimatorConfig) PriceMax() *assets.Wei { return assets.NewWeiI(42) }
-func (g *gasEstimatorConfig) PriceMin() *assets.Wei { return assets.NewWeiI(42) }
-func (g *gasEstimatorConfig) Mode() string { return "FixedPrice" }
-func (g *gasEstimatorConfig) LimitJobType() evmconfig.LimitJobType { return &limitJobTypeConfig{} }
-func (g *gasEstimatorConfig) PriceMaxKey(addr common.Address) *assets.Wei {
- return assets.NewWeiI(42)
-}
-
-type limitJobTypeConfig struct {
-}
-
-func (l *limitJobTypeConfig) OCR() *uint32 { return ptr(uint32(0)) }
-func (l *limitJobTypeConfig) OCR2() *uint32 { return ptr(uint32(0)) }
-func (l *limitJobTypeConfig) DR() *uint32 { return ptr(uint32(0)) }
-func (l *limitJobTypeConfig) FM() *uint32 { return ptr(uint32(0)) }
-func (l *limitJobTypeConfig) Keeper() *uint32 { return ptr(uint32(0)) }
-func (l *limitJobTypeConfig) VRF() *uint32 { return ptr(uint32(0)) }
-
-type blockHistoryConfig struct {
- evmconfig.BlockHistory
-}
-
-func (b *blockHistoryConfig) BatchSize() uint32 { return 42 }
-func (b *blockHistoryConfig) BlockDelay() uint16 { return 42 }
-func (b *blockHistoryConfig) BlockHistorySize() uint16 { return 42 }
-func (b *blockHistoryConfig) EIP1559FeeCapBufferBlocks() uint16 { return 42 }
-func (b *blockHistoryConfig) TransactionPercentile() uint16 { return 42 }
-
-type transactionsConfig struct {
- evmconfig.Transactions
- e *evmConfig
-}
-
-func (*transactionsConfig) ForwardersEnabled() bool { return true }
-func (t *transactionsConfig) MaxInFlight() uint32 { return t.e.maxInFlight }
-func (t *transactionsConfig) MaxQueued() uint64 { return t.e.maxQueued }
-func (t *transactionsConfig) ReaperInterval() time.Duration { return t.e.reaperInterval }
-func (t *transactionsConfig) ReaperThreshold() time.Duration { return t.e.reaperThreshold }
-func (t *transactionsConfig) ResendAfterThreshold() time.Duration { return t.e.resendAfterThreshold }
-
-type mockConfig struct {
- evmConfig *evmConfig
- rpcDefaultBatchSize uint32
- finalityDepth uint32
- finalityTagEnabled bool
-}
-
-func (c *mockConfig) EVM() evmconfig.EVM {
- return c.evmConfig
-}
-
-func (c *mockConfig) NonceAutoSync() bool { return true }
-func (c *mockConfig) ChainType() config.ChainType { return "" }
-func (c *mockConfig) FinalityDepth() uint32 { return c.finalityDepth }
-func (c *mockConfig) FinalityTagEnabled() bool { return c.finalityTagEnabled }
-func (c *mockConfig) RPCDefaultBatchSize() uint32 { return c.rpcDefaultBatchSize }
-
-func makeConfigs(t *testing.T) (*mockConfig, *databaseConfig, *evmConfig) {
- db := &databaseConfig{defaultQueryTimeout: pg.DefaultQueryTimeout}
- ec := &evmConfig{bumpThreshold: 42, maxInFlight: uint32(42), maxQueued: uint64(0), reaperInterval: time.Duration(0), reaperThreshold: time.Duration(0)}
- config := &mockConfig{evmConfig: ec}
- return config, db, ec
-}
-
func TestTxm_CreateTransaction_OutOfEth(t *testing.T) {
db := pgtest.NewSqlxDB(t)
cfg := configtest.NewGeneralConfig(t, nil)
@@ -519,17 +382,17 @@ func TestTxm_CreateTransaction_OutOfEth(t *testing.T) {
gasLimit := uint32(1000)
toAddress := testutils.NewAddress()
- config, dbConfig, evmConfig := makeConfigs(t)
+ config, dbConfig, evmConfig := txmgr.MakeTestConfigs(t)
ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
estimator := gas.NewEstimator(logger.TestLogger(t), ethClient, config, evmConfig.GasEstimator())
- txm, err := makeTestEvmTxm(t, db, ethClient, estimator, evmConfig, evmConfig.GasEstimator(), evmConfig.Transactions(), dbConfig, dbConfig.Listener(), etKeyStore, nil)
+ txm, err := makeTestEvmTxm(t, db, ethClient, estimator, evmConfig, evmConfig.GasEstimator(), evmConfig.Transactions(), dbConfig, dbConfig.Listener(), etKeyStore)
require.NoError(t, err)
t.Run("if another key has any transactions with insufficient eth errors, transmits as normal", func(t *testing.T) {
payload := cltest.MustRandomBytes(t, 100)
- evmConfig.maxQueued = uint64(1)
+ evmConfig.MaxQueued = uint64(1)
cltest.MustInsertUnconfirmedEthTxWithInsufficientEthAttempt(t, txStore, 0, otherKey.Address)
strategy := newMockTxStrategy(t)
strategy.On("Subject").Return(uuid.NullUUID{})
@@ -552,7 +415,7 @@ func TestTxm_CreateTransaction_OutOfEth(t *testing.T) {
t.Run("if this key has any transactions with insufficient eth errors, inserts it anyway", func(t *testing.T) {
payload := cltest.MustRandomBytes(t, 100)
- evmConfig.maxQueued = uint64(1)
+ evmConfig.MaxQueued = uint64(1)
cltest.MustInsertUnconfirmedEthTxWithInsufficientEthAttempt(t, txStore, 0, thisKey.Address)
strategy := newMockTxStrategy(t)
@@ -567,7 +430,7 @@ func TestTxm_CreateTransaction_OutOfEth(t *testing.T) {
Meta: nil,
Strategy: strategy,
})
- assert.NoError(t, err)
+ require.NoError(t, err)
require.Equal(t, payload, etx.EncodedPayload)
})
@@ -580,7 +443,7 @@ func TestTxm_CreateTransaction_OutOfEth(t *testing.T) {
strategy.On("Subject").Return(uuid.NullUUID{})
strategy.On("PruneQueue", mock.Anything, mock.Anything).Return(int64(0), nil)
- evmConfig.maxQueued = uint64(1)
+ evmConfig.MaxQueued = uint64(1)
etx, err := txm.CreateTransaction(testutils.Context(t), txmgr.TxRequest{
FromAddress: evmFromAddress,
ToAddress: toAddress,
@@ -589,7 +452,7 @@ func TestTxm_CreateTransaction_OutOfEth(t *testing.T) {
Meta: nil,
Strategy: strategy,
})
- assert.NoError(t, err)
+ require.NoError(t, err)
require.Equal(t, payload, etx.EncodedPayload)
})
}
@@ -599,15 +462,14 @@ func TestTxm_Lifecycle(t *testing.T) {
ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
kst := ksmocks.NewEth(t)
- eventBroadcaster := pgmocks.NewEventBroadcaster(t)
- config, dbConfig, evmConfig := makeConfigs(t)
- config.finalityDepth = uint32(42)
- config.rpcDefaultBatchSize = uint32(4)
+ config, dbConfig, evmConfig := txmgr.MakeTestConfigs(t)
+ config.SetFinalityDepth(uint32(42))
+ config.RpcDefaultBatchSize = uint32(4)
- evmConfig.resendAfterThreshold = 1 * time.Hour
- evmConfig.reaperThreshold = 1 * time.Hour
- evmConfig.reaperInterval = 1 * time.Hour
+ evmConfig.ResendAfterThreshold = 1 * time.Hour
+ evmConfig.ReaperThreshold = 1 * time.Hour
+ evmConfig.ReaperInterval = 1 * time.Hour
kst.On("EnabledAddressesForChain", &cltest.FixtureChainID).Return([]gethcommon.Address{}, nil)
@@ -615,17 +477,14 @@ func TestTxm_Lifecycle(t *testing.T) {
unsub := cltest.NewAwaiter()
kst.On("SubscribeToKeyChanges").Return(keyChangeCh, unsub.ItHappened)
estimator := gas.NewEstimator(logger.TestLogger(t), ethClient, config, evmConfig.GasEstimator())
- txm, err := makeTestEvmTxm(t, db, ethClient, estimator, evmConfig, evmConfig.GasEstimator(), evmConfig.Transactions(), dbConfig, dbConfig.Listener(), kst, eventBroadcaster)
+ txm, err := makeTestEvmTxm(t, db, ethClient, estimator, evmConfig, evmConfig.GasEstimator(), evmConfig.Transactions(), dbConfig, dbConfig.Listener(), kst)
require.NoError(t, err)
head := cltest.Head(42)
// It should not hang or panic
txm.OnNewLongestChain(testutils.Context(t), head)
- sub := pgmocks.NewSubscription(t)
- sub.On("Events").Return(make(<-chan pg.Event))
- eventBroadcaster.On("Subscribe", "evm.insert_on_txes", "").Return(sub, nil)
- evmConfig.bumpThreshold = uint64(1)
+ evmConfig.BumpThreshold = uint64(1)
require.NoError(t, txm.Start(testutils.Context(t)))
@@ -638,7 +497,6 @@ func TestTxm_Lifecycle(t *testing.T) {
addr := []gethcommon.Address{keyState.Address.Address()}
kst.On("EnabledAddressesForChain", &cltest.FixtureChainID).Return(addr, nil)
- sub.On("Close").Return()
ethClient.On("PendingNonceAt", mock.AnythingOfType("*context.cancelCtx"), gethcommon.Address{}).Return(uint64(0), nil).Maybe()
keyChangeCh <- struct{}{}
@@ -670,14 +528,9 @@ func TestTxm_Reset(t *testing.T) {
ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(nil, nil)
ethClient.On("BatchCallContextAll", mock.Anything, mock.Anything).Return(nil).Maybe()
- eventBroadcaster := pgmocks.NewEventBroadcaster(t)
- sub := pgmocks.NewSubscription(t)
- sub.On("Events").Return(make(<-chan pg.Event))
- sub.On("Close")
- eventBroadcaster.On("Subscribe", "evm.insert_on_txes", "").Return(sub, nil)
estimator := gas.NewEstimator(logger.TestLogger(t), ethClient, cfg.EVM(), cfg.EVM().GasEstimator())
- txm, err := makeTestEvmTxm(t, db, ethClient, estimator, cfg.EVM(), cfg.EVM().GasEstimator(), cfg.EVM().Transactions(), cfg.Database(), cfg.Database().Listener(), kst.Eth(), eventBroadcaster)
+ txm, err := makeTestEvmTxm(t, db, ethClient, estimator, cfg.EVM(), cfg.EVM().GasEstimator(), cfg.EVM().Transactions(), cfg.Database(), cfg.Database().Listener(), kst.Eth())
require.NoError(t, err)
cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 2, addr2)
diff --git a/core/chains/evm/types/models.go b/core/chains/evm/types/models.go
index 6210226120f..c2d61e00703 100644
--- a/core/chains/evm/types/models.go
+++ b/core/chains/evm/types/models.go
@@ -76,6 +76,14 @@ func (h *Head) GetParent() commontypes.Head[common.Hash] {
return h.Parent
}
+func (h *Head) GetTimestamp() time.Time {
+ return h.Timestamp
+}
+
+func (h *Head) BlockDifficulty() *utils.Big {
+ return h.Difficulty
+}
+
// EarliestInChain recurses through parents until it finds the earliest one
func (h *Head) EarliestInChain() *Head {
for h.Parent != nil {
@@ -223,6 +231,21 @@ func (h *Head) NextInt() *big.Int {
return new(big.Int).Add(h.ToInt(), big.NewInt(1))
}
+// AsSlice returns a slice of heads up to length k
+// len(heads) may be less than k if the available chain is not long enough
+func (h *Head) AsSlice(k int) (heads []*Head) {
+ if k < 1 || h == nil {
+ return
+ }
+ heads = make([]*Head, 1)
+ heads[0] = h
+ for len(heads) < k && h.Parent != nil {
+ h = h.Parent
+ heads = append(heads, h)
+ }
+ return
+}
+
func (h *Head) UnmarshalJSON(bs []byte) error {
type head struct {
Hash common.Hash `json:"hash"`
diff --git a/core/chains/evm/types/models_test.go b/core/chains/evm/types/models_test.go
index 2f9dc7dd7c3..2911e426e86 100644
--- a/core/chains/evm/types/models_test.go
+++ b/core/chains/evm/types/models_test.go
@@ -129,6 +129,29 @@ func TestHead_ChainLength(t *testing.T) {
assert.Equal(t, uint32(0), head2.ChainLength())
}
+func TestHead_AsSlice(t *testing.T) {
+ h1 := &evmtypes.Head{
+ Number: 1,
+ }
+ h2 := &evmtypes.Head{
+ Number: 2,
+ Parent: h1,
+ }
+ h3 := &evmtypes.Head{
+ Number: 3,
+ Parent: h2,
+ }
+
+ assert.Len(t, (*evmtypes.Head)(nil).AsSlice(0), 0)
+ assert.Len(t, (*evmtypes.Head)(nil).AsSlice(1), 0)
+
+ assert.Len(t, h3.AsSlice(0), 0)
+ assert.Equal(t, []*evmtypes.Head{h3}, h3.AsSlice(1))
+ assert.Equal(t, []*evmtypes.Head{h3, h2}, h3.AsSlice(2))
+ assert.Equal(t, []*evmtypes.Head{h3, h2, h1}, h3.AsSlice(3))
+ assert.Equal(t, []*evmtypes.Head{h3, h2, h1}, h3.AsSlice(4))
+}
+
func TestModels_HexToFunctionSelector(t *testing.T) {
t.Parallel()
fid := evmtypes.HexToFunctionSelector("0xb3f98adc")
diff --git a/core/cmd/admin_commands_test.go b/core/cmd/admin_commands_test.go
index a5512fdddaa..954e3577d3d 100644
--- a/core/cmd/admin_commands_test.go
+++ b/core/cmd/admin_commands_test.go
@@ -62,7 +62,7 @@ func TestShell_ChangeRole(t *testing.T) {
app := startNewApplicationV2(t, nil)
client, _ := app.NewShellAndRenderer()
user := cltest.MustRandomUser(t)
- require.NoError(t, app.SessionORM().CreateUser(&user))
+ require.NoError(t, app.AuthenticationProvider().CreateUser(&user))
tests := []struct {
name string
@@ -101,7 +101,7 @@ func TestShell_DeleteUser(t *testing.T) {
app := startNewApplicationV2(t, nil)
client, _ := app.NewShellAndRenderer()
user := cltest.MustRandomUser(t)
- require.NoError(t, app.SessionORM().CreateUser(&user))
+ require.NoError(t, app.BasicAdminUsersORM().CreateUser(&user))
tests := []struct {
name string
@@ -135,7 +135,7 @@ func TestShell_ListUsers(t *testing.T) {
app := startNewApplicationV2(t, nil)
client, _ := app.NewShellAndRenderer()
user := cltest.MustRandomUser(t)
- require.NoError(t, app.SessionORM().CreateUser(&user))
+ require.NoError(t, app.AuthenticationProvider().CreateUser(&user))
set := flag.NewFlagSet("test", 0)
cltest.FlagSetApplyFromAction(client.ListUsers, set, "")
diff --git a/core/cmd/app_test.go b/core/cmd/app_test.go
index bbb00bff3ec..e5e29406426 100644
--- a/core/cmd/app_test.go
+++ b/core/cmd/app_test.go
@@ -151,6 +151,7 @@ func Test_initServerConfig(t *testing.T) {
"../services/chainlink/testdata/mergingsecretsdata/secrets-mercury-split-one.toml",
"../services/chainlink/testdata/mergingsecretsdata/secrets-mercury-split-two.toml",
"../services/chainlink/testdata/mergingsecretsdata/secrets-threshold.toml",
+ "../services/chainlink/testdata/mergingsecretsdata/secrets-webserver-ldap.toml",
},
},
wantErr: false,
diff --git a/core/cmd/ocr2vrf_configure_commands.go b/core/cmd/ocr2vrf_configure_commands.go
index a3feddd611d..bb4cef4708b 100644
--- a/core/cmd/ocr2vrf_configure_commands.go
+++ b/core/cmd/ocr2vrf_configure_commands.go
@@ -14,7 +14,7 @@ import (
"github.com/pkg/errors"
"github.com/urfave/cli"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/forwarders"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/authorized_forwarder"
diff --git a/core/cmd/shell.go b/core/cmd/shell.go
index 1ef99992a66..e1ac0b99caa 100644
--- a/core/cmd/shell.go
+++ b/core/cmd/shell.go
@@ -29,7 +29,7 @@ import (
"go.uber.org/zap/zapcore"
"golang.org/x/sync/errgroup"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink-relay/pkg/loop"
@@ -60,7 +60,7 @@ var (
grpcOpts loop.GRPCOpts
)
-func initGlobals(cfgProm config.Prometheus, cfgTracing config.Tracing) error {
+func initGlobals(cfgProm config.Prometheus, cfgTracing config.Tracing, logger logger.Logger) error {
// Avoid double initializations, but does not prevent relay methods from being called multiple times.
var err error
initGlobalsOnce.Do(func() {
@@ -71,6 +71,7 @@ func initGlobals(cfgProm config.Prometheus, cfgTracing config.Tracing) error {
CollectorTarget: cfgTracing.CollectorTarget(),
NodeAttributes: cfgTracing.Attributes(),
SamplingRatio: cfgTracing.SamplingRatio(),
+ OnDialError: func(error) { logger.Errorw("Failed to dial", "err", err) },
})
})
return err
@@ -134,7 +135,7 @@ type ChainlinkAppFactory struct{}
// NewApplication returns a new instance of the node with the given config.
func (n ChainlinkAppFactory) NewApplication(ctx context.Context, cfg chainlink.GeneralConfig, appLggr logger.Logger, db *sqlx.DB) (app chainlink.Application, err error) {
- err = initGlobals(cfg.Prometheus(), cfg.Tracing())
+ err = initGlobals(cfg.Prometheus(), cfg.Tracing(), appLggr)
if err != nil {
appLggr.Errorf("Failed to initialize globals: %v", err)
}
@@ -173,9 +174,8 @@ func (n ChainlinkAppFactory) NewApplication(ctx context.Context, cfg chainlink.G
if cfg.CosmosEnabled() {
cosmosCfg := chainlink.CosmosFactoryConfig{
- Keystore: keyStore.Cosmos(),
- TOMLConfigs: cfg.CosmosConfigs(),
- EventBroadcaster: eventBroadcaster,
+ Keystore: keyStore.Cosmos(),
+ TOMLConfigs: cfg.CosmosConfigs(),
}
initOps = append(initOps, chainlink.InitCosmos(ctx, relayerFactory, cosmosCfg))
}
@@ -777,8 +777,8 @@ func (f *fileSessionRequestBuilder) Build(file string) (sessions.SessionRequest,
// APIInitializer is the interface used to create the API User credentials
// needed to access the API. Does nothing if API user already exists.
type APIInitializer interface {
- // Initialize creates a new user for API access, or does nothing if one exists.
- Initialize(orm sessions.ORM, lggr logger.Logger) (sessions.User, error)
+ // Initialize creates a new local Admin user for API access, or does nothing if one exists.
+ Initialize(orm sessions.BasicAdminUsersORM, lggr logger.Logger) (sessions.User, error)
}
type promptingAPIInitializer struct {
@@ -792,11 +792,11 @@ func NewPromptingAPIInitializer(prompter Prompter) APIInitializer {
}
// Initialize uses the terminal to get credentials that it then saves in the store.
-func (t *promptingAPIInitializer) Initialize(orm sessions.ORM, lggr logger.Logger) (sessions.User, error) {
+func (t *promptingAPIInitializer) Initialize(orm sessions.BasicAdminUsersORM, lggr logger.Logger) (sessions.User, error) {
// Load list of users to determine which to assume, or if a user needs to be created
dbUsers, err := orm.ListUsers()
if err != nil {
- return sessions.User{}, err
+ return sessions.User{}, errors.Wrap(err, "Unable to List users for initialization")
}
// If there are no users in the database, prompt for initial admin user creation
@@ -846,7 +846,7 @@ func NewFileAPIInitializer(file string) APIInitializer {
return fileAPIInitializer{file: file}
}
-func (f fileAPIInitializer) Initialize(orm sessions.ORM, lggr logger.Logger) (sessions.User, error) {
+func (f fileAPIInitializer) Initialize(orm sessions.BasicAdminUsersORM, lggr logger.Logger) (sessions.User, error) {
request, err := credentialsFromFile(f.file, lggr)
if err != nil {
return sessions.User{}, err
@@ -855,7 +855,7 @@ func (f fileAPIInitializer) Initialize(orm sessions.ORM, lggr logger.Logger) (se
// Load list of users to determine which to assume, or if a user needs to be created
dbUsers, err := orm.ListUsers()
if err != nil {
- return sessions.User{}, err
+ return sessions.User{}, errors.Wrap(err, "Unable to List users for initialization")
}
// If there are no users in the database, create initial admin user from session request from file creds
diff --git a/core/cmd/shell_local.go b/core/cmd/shell_local.go
index 401375238d8..954cead5c37 100644
--- a/core/cmd/shell_local.go
+++ b/core/cmd/shell_local.go
@@ -29,7 +29,7 @@ import (
"golang.org/x/sync/errgroup"
"gopkg.in/guregu/null.v4"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/assets"
"github.com/smartcontractkit/chainlink/v2/core/build"
@@ -362,7 +362,8 @@ func (s *Shell) runNode(c *cli.Context) error {
return s.errorOut(errors.Wrap(err, "fatal error instantiating application"))
}
- sessionORM := app.SessionORM()
+ // Local shell initialization always uses local auth users table for admin auth
+ authProviderORM := app.BasicAdminUsersORM()
keyStore := app.GetKeyStore()
err = s.KeyStoreAuthenticator.authenticate(keyStore, s.Config.Password())
if err != nil {
@@ -449,11 +450,11 @@ func (s *Shell) runNode(c *cli.Context) error {
}
var user sessions.User
- if user, err = NewFileAPIInitializer(c.String("api")).Initialize(sessionORM, lggr); err != nil {
+ if user, err = NewFileAPIInitializer(c.String("api")).Initialize(authProviderORM, lggr); err != nil {
if !errors.Is(err, ErrNoCredentialFile) {
return errors.Wrap(err, "error creating api initializer")
}
- if user, err = s.FallbackAPIInitializer.Initialize(sessionORM, lggr); err != nil {
+ if user, err = s.FallbackAPIInitializer.Initialize(authProviderORM, lggr); err != nil {
if errors.Is(err, ErrorNoAPICredentialsAvailable) {
return errors.WithStack(err)
}
diff --git a/core/cmd/shell_local_test.go b/core/cmd/shell_local_test.go
index d70b06f5a98..4d906214ef4 100644
--- a/core/cmd/shell_local_test.go
+++ b/core/cmd/shell_local_test.go
@@ -8,7 +8,7 @@ import (
"testing"
"time"
- clienttypes "github.com/smartcontractkit/chainlink/v2/common/chains/client"
+ "github.com/smartcontractkit/chainlink/v2/common/client"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm"
"github.com/smartcontractkit/chainlink/v2/core/cmd"
cmdMocks "github.com/smartcontractkit/chainlink/v2/core/cmd/mocks"
@@ -25,7 +25,7 @@ import (
chainlinkmocks "github.com/smartcontractkit/chainlink/v2/core/services/chainlink/mocks"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
evmrelayer "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
- "github.com/smartcontractkit/chainlink/v2/core/sessions"
+ "github.com/smartcontractkit/chainlink/v2/core/sessions/localauth"
"github.com/smartcontractkit/chainlink/v2/core/store/dialects"
"github.com/smartcontractkit/chainlink/v2/core/store/models"
"github.com/smartcontractkit/chainlink/v2/core/utils"
@@ -79,7 +79,7 @@ func TestShell_RunNodeWithPasswords(t *testing.T) {
})
db := pgtest.NewSqlxDB(t)
keyStore := cltest.NewKeyStore(t, db, cfg.Database())
- sessionORM := sessions.NewORM(db, time.Minute, logger.TestLogger(t), cfg.Database(), audit.NoopLogger)
+ authProviderORM := localauth.NewORM(db, time.Minute, logger.TestLogger(t), cfg.Database(), audit.NoopLogger)
lggr := logger.TestLogger(t)
@@ -100,7 +100,8 @@ func TestShell_RunNodeWithPasswords(t *testing.T) {
pgtest.MustExec(t, db, "DELETE FROM users;")
app := mocks.NewApplication(t)
- app.On("SessionORM").Return(sessionORM).Maybe()
+ app.On("AuthenticationProvider").Return(authProviderORM).Maybe()
+ app.On("BasicAdminUsersORM").Return(authProviderORM).Maybe()
app.On("GetKeyStore").Return(keyStore).Maybe()
app.On("GetRelayers").Return(testRelayers).Maybe()
app.On("Start", mock.Anything).Maybe().Return(nil)
@@ -171,7 +172,7 @@ func TestShell_RunNodeWithAPICredentialsFile(t *testing.T) {
c.Insecure.OCRDevelopmentMode = nil
})
db := pgtest.NewSqlxDB(t)
- sessionORM := sessions.NewORM(db, time.Minute, logger.TestLogger(t), cfg.Database(), audit.NoopLogger)
+ authProviderORM := localauth.NewORM(db, time.Minute, logger.TestLogger(t), cfg.Database(), audit.NoopLogger)
// Clear out fixture users/users created from the other test cases
// This asserts that on initial run with an empty users table that the credentials file will instantiate and
@@ -199,7 +200,7 @@ func TestShell_RunNodeWithAPICredentialsFile(t *testing.T) {
}
testRelayers := genTestEVMRelayers(t, opts, keyStore)
app := mocks.NewApplication(t)
- app.On("SessionORM").Return(sessionORM)
+ app.On("BasicAdminUsersORM").Return(authProviderORM)
app.On("GetKeyStore").Return(keyStore)
app.On("GetRelayers").Return(testRelayers).Maybe()
app.On("Start", mock.Anything).Maybe().Return(nil)
@@ -279,7 +280,7 @@ func TestShell_RebroadcastTransactions_Txm(t *testing.T) {
// Use a non-transactional db for this test because we need to
// test multiple connections to the database, and changes made within
// the transaction cannot be seen from another connection.
- config, sqlxDB := heavyweight.FullTestDBV2(t, "rebroadcasttransactions", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, sqlxDB := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
c.Database.Dialect = dialects.Postgres
// evm config is used in this test. but if set, it must be pass config validation.
// simplest to make it nil
@@ -302,12 +303,11 @@ func TestShell_RebroadcastTransactions_Txm(t *testing.T) {
ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
legacy := cltest.NewLegacyChainsWithMockChain(t, ethClient, config)
- mockRelayerChainInteroperators := chainlinkmocks.NewRelayerChainInteroperators(t)
- mockRelayerChainInteroperators.On("LegacyEVMChains").Return(legacy, nil)
+ mockRelayerChainInteroperators := &chainlinkmocks.FakeRelayerChainInteroperators{EVMChains: legacy}
app.On("GetRelayers").Return(mockRelayerChainInteroperators).Maybe()
ethClient.On("Dial", mock.Anything).Return(nil)
- client := cmd.Shell{
+ c := cmd.Shell{
Config: config,
AppFactory: cltest.InstanceAppFactory{App: app},
FallbackAPIInitializer: cltest.NewMockAPIInitializer(t),
@@ -318,7 +318,7 @@ func TestShell_RebroadcastTransactions_Txm(t *testing.T) {
beginningNonce := uint64(7)
endingNonce := uint64(10)
set := flag.NewFlagSet("test", 0)
- cltest.FlagSetApplyFromAction(client.RebroadcastTransactions, set, "")
+ cltest.FlagSetApplyFromAction(c.RebroadcastTransactions, set, "")
require.NoError(t, set.Set("evmChainID", testutils.FixtureChainID.String()))
require.NoError(t, set.Set("beginningNonce", strconv.FormatUint(beginningNonce, 10)))
@@ -328,16 +328,16 @@ func TestShell_RebroadcastTransactions_Txm(t *testing.T) {
require.NoError(t, set.Set("address", fromAddress.Hex()))
require.NoError(t, set.Set("password", "../internal/fixtures/correct_password.txt"))
- c := cli.NewContext(nil, set, nil)
+ ctx := cli.NewContext(nil, set, nil)
for i := beginningNonce; i <= endingNonce; i++ {
n := i
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return tx.Nonce() == n
- }), mock.Anything).Once().Return(clienttypes.Successful, nil)
+ }), mock.Anything).Once().Return(client.Successful, nil)
}
- assert.NoError(t, client.RebroadcastTransactions(c))
+ assert.NoError(t, c.RebroadcastTransactions(ctx))
}
func TestShell_RebroadcastTransactions_OutsideRange_Txm(t *testing.T) {
@@ -359,7 +359,7 @@ func TestShell_RebroadcastTransactions_OutsideRange_Txm(t *testing.T) {
// Use the non-transactional db for this test because we need to
// test multiple connections to the database, and changes made within
// the transaction cannot be seen from another connection.
- config, sqlxDB := heavyweight.FullTestDBV2(t, "rebroadcasttransactions_outsiderange", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, sqlxDB := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
c.Database.Dialect = dialects.Postgres
// evm config is used in this test. but if set, it must be pass config validation.
// simplest to make it nil
@@ -385,11 +385,10 @@ func TestShell_RebroadcastTransactions_OutsideRange_Txm(t *testing.T) {
ethClient.On("Dial", mock.Anything).Return(nil)
legacy := cltest.NewLegacyChainsWithMockChain(t, ethClient, config)
- mockRelayerChainInteroperators := chainlinkmocks.NewRelayerChainInteroperators(t)
- mockRelayerChainInteroperators.On("LegacyEVMChains").Return(legacy, nil)
+ mockRelayerChainInteroperators := &chainlinkmocks.FakeRelayerChainInteroperators{EVMChains: legacy}
app.On("GetRelayers").Return(mockRelayerChainInteroperators).Maybe()
- client := cmd.Shell{
+ c := cmd.Shell{
Config: config,
AppFactory: cltest.InstanceAppFactory{App: app},
FallbackAPIInitializer: cltest.NewMockAPIInitializer(t),
@@ -398,7 +397,7 @@ func TestShell_RebroadcastTransactions_OutsideRange_Txm(t *testing.T) {
}
set := flag.NewFlagSet("test", 0)
- cltest.FlagSetApplyFromAction(client.RebroadcastTransactions, set, "")
+ cltest.FlagSetApplyFromAction(c.RebroadcastTransactions, set, "")
require.NoError(t, set.Set("evmChainID", testutils.FixtureChainID.String()))
require.NoError(t, set.Set("beginningNonce", strconv.FormatUint(uint64(beginningNonce), 10)))
@@ -408,16 +407,16 @@ func TestShell_RebroadcastTransactions_OutsideRange_Txm(t *testing.T) {
require.NoError(t, set.Set("address", fromAddress.Hex()))
require.NoError(t, set.Set("password", "../internal/fixtures/correct_password.txt"))
- c := cli.NewContext(nil, set, nil)
+ ctx := cli.NewContext(nil, set, nil)
for i := beginningNonce; i <= endingNonce; i++ {
n := i
ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
return uint(tx.Nonce()) == n
- }), mock.Anything).Once().Return(clienttypes.Successful, nil)
+ }), mock.Anything).Once().Return(client.Successful, nil)
}
- assert.NoError(t, client.RebroadcastTransactions(c))
+ assert.NoError(t, c.RebroadcastTransactions(ctx))
cltest.AssertEthTxAttemptCountStays(t, app.GetSqlxDB(), 1)
})
@@ -438,7 +437,7 @@ func TestShell_RebroadcastTransactions_AddressCheck(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
- config, sqlxDB := heavyweight.FullTestDBV2(t, "rebroadcasttransactions_outsiderange", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, sqlxDB := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
c.Database.Dialect = dialects.Postgres
c.EVM = nil
@@ -465,10 +464,9 @@ func TestShell_RebroadcastTransactions_AddressCheck(t *testing.T) {
ethClient.On("Dial", mock.Anything).Return(nil)
legacy := cltest.NewLegacyChainsWithMockChain(t, ethClient, config)
- mockRelayerChainInteroperators := chainlinkmocks.NewRelayerChainInteroperators(t)
- mockRelayerChainInteroperators.On("LegacyEVMChains").Return(legacy, nil)
+ mockRelayerChainInteroperators := &chainlinkmocks.FakeRelayerChainInteroperators{EVMChains: legacy}
app.On("GetRelayers").Return(mockRelayerChainInteroperators).Maybe()
- ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(clienttypes.Successful, nil)
+ ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(client.Successful, nil)
client := cmd.Shell{
Config: config,
diff --git a/core/cmd/shell_remote_test.go b/core/cmd/shell_remote_test.go
index 7f998225f63..91b56ee53a4 100644
--- a/core/cmd/shell_remote_test.go
+++ b/core/cmd/shell_remote_test.go
@@ -258,7 +258,7 @@ func TestShell_DestroyExternalInitiator_NotFound(t *testing.T) {
func TestShell_RemoteLogin(t *testing.T) {
app := startNewApplicationV2(t, nil)
- orm := app.SessionORM()
+ orm := app.AuthenticationProvider()
u := cltest.NewUserWithSession(t, orm)
@@ -301,7 +301,7 @@ func TestShell_RemoteBuildCompatibility(t *testing.T) {
t.Parallel()
app := startNewApplicationV2(t, nil)
- u := cltest.NewUserWithSession(t, app.SessionORM())
+ u := cltest.NewUserWithSession(t, app.AuthenticationProvider())
enteredStrings := []string{u.Email, cltest.Password}
prompter := &cltest.MockCountingPrompter{T: t, EnteredStrings: append(enteredStrings, enteredStrings...)}
client := app.NewAuthenticatingShell(prompter)
@@ -340,7 +340,7 @@ func TestShell_CheckRemoteBuildCompatibility(t *testing.T) {
t.Parallel()
app := startNewApplicationV2(t, nil)
- u := cltest.NewUserWithSession(t, app.SessionORM())
+ u := cltest.NewUserWithSession(t, app.AuthenticationProvider())
tests := []struct {
name string
remoteVersion, remoteSha string
@@ -416,7 +416,7 @@ func TestShell_ChangePassword(t *testing.T) {
t.Parallel()
app := startNewApplicationV2(t, nil)
- u := cltest.NewUserWithSession(t, app.SessionORM())
+ u := cltest.NewUserWithSession(t, app.AuthenticationProvider())
enteredStrings := []string{u.Email, cltest.Password}
prompter := &cltest.MockCountingPrompter{T: t, EnteredStrings: enteredStrings}
@@ -466,7 +466,7 @@ func TestShell_Profile_InvalidSecondsParam(t *testing.T) {
t.Parallel()
app := startNewApplicationV2(t, nil)
- u := cltest.NewUserWithSession(t, app.SessionORM())
+ u := cltest.NewUserWithSession(t, app.AuthenticationProvider())
enteredStrings := []string{u.Email, cltest.Password}
prompter := &cltest.MockCountingPrompter{T: t, EnteredStrings: enteredStrings}
@@ -497,7 +497,7 @@ func TestShell_Profile(t *testing.T) {
t.Parallel()
app := startNewApplicationV2(t, nil)
- u := cltest.NewUserWithSession(t, app.SessionORM())
+ u := cltest.NewUserWithSession(t, app.AuthenticationProvider())
enteredStrings := []string{u.Email, cltest.Password}
prompter := &cltest.MockCountingPrompter{T: t, EnteredStrings: enteredStrings}
@@ -648,7 +648,7 @@ func TestShell_AutoLogin(t *testing.T) {
app := startNewApplicationV2(t, nil)
user := cltest.MustRandomUser(t)
- require.NoError(t, app.SessionORM().CreateUser(&user))
+ require.NoError(t, app.BasicAdminUsersORM().CreateUser(&user))
sr := sessions.SessionRequest{
Email: user.Email,
@@ -676,7 +676,7 @@ func TestShell_AutoLogin_AuthFails(t *testing.T) {
app := startNewApplicationV2(t, nil)
user := cltest.MustRandomUser(t)
- require.NoError(t, app.SessionORM().CreateUser(&user))
+ require.NoError(t, app.BasicAdminUsersORM().CreateUser(&user))
sr := sessions.SessionRequest{
Email: user.Email,
diff --git a/core/cmd/shell_test.go b/core/cmd/shell_test.go
index 9b87e8fb1da..2a8c2c55861 100644
--- a/core/cmd/shell_test.go
+++ b/core/cmd/shell_test.go
@@ -26,6 +26,7 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/mocks"
"github.com/smartcontractkit/chainlink/v2/core/sessions"
+ "github.com/smartcontractkit/chainlink/v2/core/sessions/localauth"
"github.com/smartcontractkit/chainlink/v2/plugins"
)
@@ -33,7 +34,7 @@ func TestTerminalCookieAuthenticator_AuthenticateWithoutSession(t *testing.T) {
t.Parallel()
app := cltest.NewApplicationEVMDisabled(t)
- u := cltest.NewUserWithSession(t, app.SessionORM())
+ u := cltest.NewUserWithSession(t, app.AuthenticationProvider())
tests := []struct {
name, email, pwd string
@@ -65,7 +66,7 @@ func TestTerminalCookieAuthenticator_AuthenticateWithSession(t *testing.T) {
app := cltest.NewApplicationEVMDisabled(t)
require.NoError(t, app.Start(testutils.Context(t)))
- u := cltest.NewUserWithSession(t, app.SessionORM())
+ u := cltest.NewUserWithSession(t, app.AuthenticationProvider())
tests := []struct {
name, email, pwd string
@@ -155,7 +156,7 @@ func TestTerminalAPIInitializer_InitializeWithoutAPIUser(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
db := pgtest.NewSqlxDB(t)
lggr := logger.TestLogger(t)
- orm := sessions.NewORM(db, time.Minute, lggr, pgtest.NewQConfig(true), audit.NoopLogger)
+ orm := localauth.NewORM(db, time.Minute, lggr, pgtest.NewQConfig(true), audit.NoopLogger)
mock := &cltest.MockCountingPrompter{T: t, EnteredStrings: test.enteredStrings, NotTerminal: !test.isTerminal}
tai := cmd.NewPromptingAPIInitializer(mock)
@@ -186,7 +187,7 @@ func TestTerminalAPIInitializer_InitializeWithExistingAPIUser(t *testing.T) {
db := pgtest.NewSqlxDB(t)
cfg := configtest.NewGeneralConfig(t, nil)
lggr := logger.TestLogger(t)
- orm := sessions.NewORM(db, time.Minute, lggr, cfg.Database(), audit.NoopLogger)
+ orm := localauth.NewORM(db, time.Minute, lggr, cfg.Database(), audit.NoopLogger)
// Clear out fixture users/users created from the other test cases
// This asserts that on initial run with an empty users table that the credentials file will instantiate and
@@ -223,7 +224,7 @@ func TestFileAPIInitializer_InitializeWithoutAPIUser(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
db := pgtest.NewSqlxDB(t)
lggr := logger.TestLogger(t)
- orm := sessions.NewORM(db, time.Minute, lggr, pgtest.NewQConfig(true), audit.NoopLogger)
+ orm := localauth.NewORM(db, time.Minute, lggr, pgtest.NewQConfig(true), audit.NoopLogger)
// Clear out fixture users/users created from the other test cases
// This asserts that on initial run with an empty users table that the credentials file will instantiate and
@@ -248,7 +249,7 @@ func TestFileAPIInitializer_InitializeWithoutAPIUser(t *testing.T) {
func TestFileAPIInitializer_InitializeWithExistingAPIUser(t *testing.T) {
db := pgtest.NewSqlxDB(t)
cfg := configtest.NewGeneralConfig(t, nil)
- orm := sessions.NewORM(db, time.Minute, logger.TestLogger(t), cfg.Database(), audit.NoopLogger)
+ orm := localauth.NewORM(db, time.Minute, logger.TestLogger(t), cfg.Database(), audit.NoopLogger)
tests := []struct {
name string
diff --git a/core/config/chaintype.go b/core/config/chaintype.go
index fe67b0925aa..21fb8cd297d 100644
--- a/core/config/chaintype.go
+++ b/core/config/chaintype.go
@@ -15,16 +15,19 @@ const (
ChainOptimismBedrock ChainType = "optimismBedrock"
ChainXDai ChainType = "xdai"
ChainCelo ChainType = "celo"
+ ChainWeMix ChainType = "wemix"
+ ChainKroma ChainType = "kroma"
+ ChainZkSync ChainType = "zksync"
)
var ErrInvalidChainType = fmt.Errorf("must be one of %s or omitted", strings.Join([]string{
string(ChainArbitrum), string(ChainMetis), string(ChainXDai), string(ChainOptimismBedrock), string(ChainCelo),
-}, ", "))
+ string(ChainKroma), string(ChainWeMix), string(ChainZkSync)}, ", "))
// IsValid returns true if the ChainType value is known or empty.
func (c ChainType) IsValid() bool {
switch c {
- case "", ChainArbitrum, ChainMetis, ChainOptimismBedrock, ChainXDai, ChainCelo:
+ case "", ChainArbitrum, ChainMetis, ChainOptimismBedrock, ChainXDai, ChainCelo, ChainKroma, ChainWeMix, ChainZkSync:
return true
}
return false
diff --git a/core/config/docs/chains-evm.toml b/core/config/docs/chains-evm.toml
index c8b5395d6d7..381ab794d60 100644
--- a/core/config/docs/chains-evm.toml
+++ b/core/config/docs/chains-evm.toml
@@ -14,7 +14,7 @@ BlockBackfillDepth = 10 # Default
# BlockBackfillSkip enables skipping of very long backfills.
BlockBackfillSkip = false # Default
# ChainType is automatically detected from chain ID. Set this to force a certain chain type regardless of chain ID.
-# Available types: arbitrum, metis, optimismBedrock, xdai
+# Available types: arbitrum, metis, optimismBedrock, xdai, celo, kroma, wemix, zksync
ChainType = 'arbitrum' # Example
# FinalityDepth is the number of blocks after which an ethereum transaction is considered "final". Note that the default is automatically set based on chain ID so it should not be necessary to change this under normal operation.
# BlocksConsideredFinal determines how deeply we look back to ensure that transactions are confirmed onto the longest chain
@@ -112,7 +112,8 @@ Enabled = true # Default
#
# - `FixedPrice` uses static configured values for gas price (can be set via API call).
# - `BlockHistory` dynamically adjusts default gas price based on heuristics from mined blocks.
-# - `L2Suggested` is a special mode only for use with L2 blockchains. This mode will use the gas price suggested by the rpc endpoint via `eth_gasPrice`.
+# - `L2Suggested` mode is deprecated and replaced with `SuggestedPrice`.
+# - `SuggestedPrice` is a mode which uses the gas price suggested by the rpc endpoint via `eth_gasPrice`.
# - `Arbitrum` is a special mode only for use with Arbitrum blockchains. It uses the suggested gas price (up to `ETH_MAX_GAS_PRICE_WEI`, with `1000 gwei` default) as well as an estimated gas limit (up to `ETH_GAS_LIMIT_MAX`, with `1,000,000,000` default).
#
# Chainlink nodes decide what gas price to use using an `Estimator`. It ships with several simple and battle-hardened built-in estimators that should work well for almost all use-cases. Note that estimators will change their behaviour slightly depending on if you are in EIP-1559 mode or not.
diff --git a/core/config/docs/core.toml b/core/config/docs/core.toml
index 1ca4c656a7f..0a8e6aba3be 100644
--- a/core/config/docs/core.toml
+++ b/core/config/docs/core.toml
@@ -161,6 +161,8 @@ MaxAgeDays = 0 # Default
MaxBackups = 1 # Default
[WebServer]
+# AuthenticationMethod defines which pluggable auth interface to use for user login and role assumption. Options include 'local' and 'ldap'. See docs for more details
+AuthenticationMethod = 'local' # Default
# AllowOrigins controls the URLs Chainlink nodes emit in the `Allow-Origins` header of its API responses. The setting can be a comma-separated list with no spaces. You might experience CORS issues if this is not set correctly.
#
# You should set this to the external URL that you use to access the Chainlink UI.
@@ -191,6 +193,44 @@ StartTimeout = '15s' # Default
# ListenIP specifies the IP to bind the HTTP server to
ListenIP = '0.0.0.0' # Default
+# Optional LDAP config if WebServer.AuthenticationMethod is set to 'ldap'
+# LDAP queries are all parameterized to support custom LDAP 'dn', 'cn', and attributes
+[WebServer.LDAP]
+# ServerTLS defines the option to require the secure ldaps
+ServerTLS = true # Default
+# SessionTimeout determines the amount of idle time to elapse before session cookies expire. This signs out GUI users from their sessions.
+SessionTimeout = '15m0s' # Default
+# QueryTimeout defines how long queries should wait before timing out, defined as a duration string
+QueryTimeout = '2m0s' # Default
+# BaseUserAttr defines the base attribute used to populate LDAP queries such as "uid=$", default is 'uid'
+BaseUserAttr = 'uid' # Default
+# BaseDN defines the base LDAP 'dn' search filter to apply to every LDAP query, replace example.com with the appropriate LDAP server's structure
+BaseDN = 'dc=custom,dc=example,dc=com' # Example
+# UsersDN defines the 'dn' query to use when querying for the 'users' 'ou' group
+UsersDN = 'ou=users' # Default
+# GroupsDN defines the 'dn' query to use when querying for the 'groups' 'ou' group
+GroupsDN = 'ou=groups' # Default
+# ActiveAttribute is an optional user field to check truthiness for if a user is valid/active. This is only required if the LDAP provider lists inactive users as members of groups
+ActiveAttribute = '' # Default
+# ActiveAttributeAllowedValue is the value to check against for the above optional user attribute
+ActiveAttributeAllowedValue = '' # Default
+# AdminUserGroupCN is the LDAP 'cn' of the LDAP group that maps the core node's 'Admin' role
+AdminUserGroupCN = 'NodeAdmins' # Default
+# EditUserGroupCN is the LDAP 'cn' of the LDAP group that maps the core node's 'Edit' role
+EditUserGroupCN = 'NodeEditors' # Default
+# RunUserGroupCN is the LDAP 'cn' of the LDAP group that maps the core node's 'Run' role
+RunUserGroupCN = 'NodeRunners' # Default
+# ReadUserGroupCN is the LDAP 'cn' of the LDAP group that maps the core node's 'Read' role
+ReadUserGroupCN = 'NodeReadOnly' # Default
+# UserApiTokenEnabled enables the users to issue API tokens with the same access as their role
+UserApiTokenEnabled = false # Default
+# UserAPITokenDuration is the duration of time an API token is active for before expiring
+UserAPITokenDuration = '240h0m0s' # Default
+# UpstreamSyncInterval is the interval at which the background LDAP sync task will be called. A '0s' value disables the background sync being run on an interval. This check is already performed during login/logout actions, all sessions and API tokens stored in the local ldap tables are updated to match the remote server
+UpstreamSyncInterval = '0s' # Default
+# UpstreamSyncRateLimit defines a duration to limit the number of query/API calls to the upstream LDAP provider. It prevents the sync functionality from being called multiple times within the defined duration
+UpstreamSyncRateLimit = '2m0s' # Default
+
[WebServer.RateLimit]
# Authenticated defines the threshold to which authenticated requests get limited. More than this many authenticated requests per `AuthenticatedRateLimitPeriod` will be rejected.
Authenticated = 1000 # Default
diff --git a/core/config/docs/secrets.toml b/core/config/docs/secrets.toml
index 2b491a77497..4ed2325dfb2 100644
--- a/core/config/docs/secrets.toml
+++ b/core/config/docs/secrets.toml
@@ -14,6 +14,15 @@ BackupURL = "postgresql://user:pass@read-replica.example.com:5432/dbname?sslmode
# Environment variable: `CL_DATABASE_ALLOW_SIMPLE_PASSWORDS`
AllowSimplePasswords = false # Default
+# Optional LDAP config
+[WebServer.LDAP]
+# ServerAddress is the full ldaps:// address of the ldap server to authenticate with and query
+ServerAddress = 'ldaps://127.0.0.1' # Example
+# ReadOnlyUserLogin is the username of the read only root user used to authenticate the requested LDAP queries
+ReadOnlyUserLogin = 'viewer@example.com' # Example
+# ReadOnlyUserPass is the password for the above account
+ReadOnlyUserPass = 'password' # Example
+
[Password]
# Keystore is the password for the node's account.
#
diff --git a/core/config/toml/types.go b/core/config/toml/types.go
index b7c8cfbc473..61962d43e5f 100644
--- a/core/config/toml/types.go
+++ b/core/config/toml/types.go
@@ -20,6 +20,7 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/config/parse"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ethkey"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey"
+ "github.com/smartcontractkit/chainlink/v2/core/sessions"
"github.com/smartcontractkit/chainlink/v2/core/store/dialects"
"github.com/smartcontractkit/chainlink/v2/core/store/models"
"github.com/smartcontractkit/chainlink/v2/core/utils"
@@ -101,6 +102,7 @@ func (c *Core) ValidateConfig() (err error) {
type Secrets struct {
Database DatabaseSecrets `toml:",omitempty"`
Password Passwords `toml:",omitempty"`
+ WebServer WebServerSecrets `toml:",omitempty"`
Pyroscope PyroscopeSecrets `toml:",omitempty"`
Prometheus PrometheusSecrets `toml:",omitempty"`
Mercury MercurySecrets `toml:",omitempty"`
@@ -592,6 +594,7 @@ func (l *LogFile) setFrom(f *LogFile) {
}
type WebServer struct {
+ AuthenticationMethod *string
AllowOrigins *string
BridgeResponseURL *models.URL
BridgeCacheTTL *models.Duration
@@ -604,12 +607,16 @@ type WebServer struct {
StartTimeout *models.Duration
ListenIP *net.IP
+ LDAP WebServerLDAP `toml:",omitempty"`
MFA WebServerMFA `toml:",omitempty"`
RateLimit WebServerRateLimit `toml:",omitempty"`
TLS WebServerTLS `toml:",omitempty"`
}
func (w *WebServer) setFrom(f *WebServer) {
+ if v := f.AuthenticationMethod; v != nil {
+ w.AuthenticationMethod = v
+ }
if v := f.AllowOrigins; v != nil {
w.AllowOrigins = v
}
@@ -644,11 +651,46 @@ func (w *WebServer) setFrom(f *WebServer) {
w.HTTPMaxSize = v
}
+ w.LDAP.setFrom(&f.LDAP)
w.MFA.setFrom(&f.MFA)
w.RateLimit.setFrom(&f.RateLimit)
w.TLS.setFrom(&f.TLS)
}
+func (w *WebServer) ValidateConfig() (err error) {
+ // Validate LDAP fields when authentication method is LDAPAuth
+ if *w.AuthenticationMethod != string(sessions.LDAPAuth) {
+ return
+ }
+
+ // Assert LDAP fields when AuthMethod set to LDAP
+ if *w.LDAP.BaseDN == "" {
+ err = multierr.Append(err, configutils.ErrInvalid{Name: "LDAP.BaseDN", Msg: "LDAP BaseDN can not be empty"})
+ }
+ if *w.LDAP.BaseUserAttr == "" {
+ err = multierr.Append(err, configutils.ErrInvalid{Name: "LDAP.BaseUserAttr", Msg: "LDAP BaseUserAttr can not be empty"})
+ }
+ if *w.LDAP.UsersDN == "" {
+ err = multierr.Append(err, configutils.ErrInvalid{Name: "LDAP.UsersDN", Msg: "LDAP UsersDN can not be empty"})
+ }
+ if *w.LDAP.GroupsDN == "" {
+ err = multierr.Append(err, configutils.ErrInvalid{Name: "LDAP.GroupsDN", Msg: "LDAP GroupsDN can not be empty"})
+ }
+ if *w.LDAP.AdminUserGroupCN == "" {
+ err = multierr.Append(err, configutils.ErrInvalid{Name: "LDAP.AdminUserGroupCN", Msg: "LDAP AdminUserGroupCN can not be empty"})
+ }
+ if *w.LDAP.EditUserGroupCN == "" {
+ err = multierr.Append(err, configutils.ErrInvalid{Name: "LDAP.EditUserGroupCN", Msg: "LDAP EditUserGroupCN can not be empty"})
+ }
+ if *w.LDAP.RunUserGroupCN == "" {
+ err = multierr.Append(err, configutils.ErrInvalid{Name: "LDAP.RunUserGroupCN", Msg: "LDAP RunUserGroupCN can not be empty"})
+ }
+ if *w.LDAP.ReadUserGroupCN == "" {
+ err = multierr.Append(err, configutils.ErrInvalid{Name: "LDAP.ReadUserGroupCN", Msg: "LDAP ReadUserGroupCN can not be empty"})
+ }
+ return err
+}
+
type WebServerMFA struct {
RPID *string
RPOrigin *string
@@ -715,6 +757,110 @@ func (w *WebServerTLS) setFrom(f *WebServerTLS) {
}
}
+type WebServerLDAP struct {
+ ServerTLS *bool
+ SessionTimeout *models.Duration
+ QueryTimeout *models.Duration
+ BaseUserAttr *string
+ BaseDN *string
+ UsersDN *string
+ GroupsDN *string
+ ActiveAttribute *string
+ ActiveAttributeAllowedValue *string
+ AdminUserGroupCN *string
+ EditUserGroupCN *string
+ RunUserGroupCN *string
+ ReadUserGroupCN *string
+ UserApiTokenEnabled *bool
+ UserAPITokenDuration *models.Duration
+ UpstreamSyncInterval *models.Duration
+ UpstreamSyncRateLimit *models.Duration
+}
+
+func (w *WebServerLDAP) setFrom(f *WebServerLDAP) {
+ if v := f.ServerTLS; v != nil {
+ w.ServerTLS = v
+ }
+ if v := f.SessionTimeout; v != nil {
+ w.SessionTimeout = v
+ }
+ // NOTE(review): a duplicated SessionTimeout copy was removed here — it was
+ // an exact repeat of the assignment above. Each field below is copied only
+ // when the override value is non-nil, preserving existing settings otherwise.
+ if v := f.QueryTimeout; v != nil {
+ w.QueryTimeout = v
+ }
+ if v := f.BaseUserAttr; v != nil {
+ w.BaseUserAttr = v
+ }
+ if v := f.BaseDN; v != nil {
+ w.BaseDN = v
+ }
+ if v := f.UsersDN; v != nil {
+ w.UsersDN = v
+ }
+ if v := f.GroupsDN; v != nil {
+ w.GroupsDN = v
+ }
+ if v := f.ActiveAttribute; v != nil {
+ w.ActiveAttribute = v
+ }
+ if v := f.ActiveAttributeAllowedValue; v != nil {
+ w.ActiveAttributeAllowedValue = v
+ }
+ if v := f.AdminUserGroupCN; v != nil {
+ w.AdminUserGroupCN = v
+ }
+ if v := f.EditUserGroupCN; v != nil {
+ w.EditUserGroupCN = v
+ }
+ if v := f.RunUserGroupCN; v != nil {
+ w.RunUserGroupCN = v
+ }
+ if v := f.ReadUserGroupCN; v != nil {
+ w.ReadUserGroupCN = v
+ }
+ if v := f.UserApiTokenEnabled; v != nil {
+ w.UserApiTokenEnabled = v
+ }
+ if v := f.UserAPITokenDuration; v != nil {
+ w.UserAPITokenDuration = v
+ }
+ if v := f.UpstreamSyncInterval; v != nil {
+ w.UpstreamSyncInterval = v
+ }
+ if v := f.UpstreamSyncRateLimit; v != nil {
+ w.UpstreamSyncRateLimit = v
+ }
+}
+
+type WebServerLDAPSecrets struct {
+ ServerAddress *models.SecretURL
+ ReadOnlyUserLogin *models.Secret
+ ReadOnlyUserPass *models.Secret
+}
+
+func (w *WebServerLDAPSecrets) setFrom(f *WebServerLDAPSecrets) {
+ if v := f.ServerAddress; v != nil {
+ w.ServerAddress = v
+ }
+ if v := f.ReadOnlyUserLogin; v != nil {
+ w.ReadOnlyUserLogin = v
+ }
+ if v := f.ReadOnlyUserPass; v != nil {
+ w.ReadOnlyUserPass = v
+ }
+}
+
+type WebServerSecrets struct {
+ LDAP WebServerLDAPSecrets `toml:",omitempty"`
+}
+
+func (w *WebServerSecrets) SetFrom(f *WebServerSecrets) error {
+ w.LDAP.setFrom(&f.LDAP)
+ return nil
+}
+
type JobPipeline struct {
ExternalInitiatorsEnabled *bool
MaxRunDuration *models.Duration
diff --git a/core/config/web_config.go b/core/config/web_config.go
index 12209a02670..429a31e7e82 100644
--- a/core/config/web_config.go
+++ b/core/config/web_config.go
@@ -32,7 +32,31 @@ type MFA interface {
RPOrigin() string
}
+type LDAP interface {
+ ServerAddress() string
+ ReadOnlyUserLogin() string
+ ReadOnlyUserPass() string
+ ServerTLS() bool
+ SessionTimeout() models.Duration
+ QueryTimeout() time.Duration
+ BaseUserAttr() string
+ BaseDN() string
+ UsersDN() string
+ GroupsDN() string
+ ActiveAttribute() string
+ ActiveAttributeAllowedValue() string
+ AdminUserGroupCN() string
+ EditUserGroupCN() string
+ RunUserGroupCN() string
+ ReadUserGroupCN() string
+ UserApiTokenEnabled() bool
+ UserAPITokenDuration() models.Duration
+ UpstreamSyncInterval() models.Duration
+ UpstreamSyncRateLimit() models.Duration
+}
+
type WebServer interface {
+ AuthenticationMethod() string
AllowOrigins() string
BridgeCacheTTL() time.Duration
BridgeResponseURL() *url.URL
@@ -49,4 +73,5 @@ type WebServer interface {
TLS() TLS
RateLimit() RateLimit
MFA() MFA
+ LDAP() LDAP
}
diff --git a/core/gethwrappers/functions/generated/functions_coordinator/functions_coordinator.go b/core/gethwrappers/functions/generated/functions_coordinator/functions_coordinator.go
index ffe072fc657..5f0d2d45f2d 100644
--- a/core/gethwrappers/functions/generated/functions_coordinator/functions_coordinator.go
+++ b/core/gethwrappers/functions/generated/functions_coordinator/functions_coordinator.go
@@ -71,8 +71,8 @@ type FunctionsResponseRequestMeta struct {
}
var FunctionsCoordinatorMetaData = &bind.MetaData{
- ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"router\",\"type\":\"address\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"fulfillmentGasPriceOverEstimationBP\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"feedStalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadBeforeCallback\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadAfterCallback\",\"type\":\"uint32\"},{\"internalType\":\"uint72\",\"name\":\"donFee\",\"type\":\"uint72\"},{\"internalType\":\"uint40\",\"name\":\"minimumEstimateGasPriceWei\",\"type\":\"uint40\"},{\"internalType\":\"uint16\",\"name\":\"maxSupportedRequestDataVersion\",\"type\":\"uint16\"},{\"internalType\":\"uint224\",\"name\":\"fallbackNativePerUnitLink\",\"type\":\"uint224\"},{\"internalType\":\"uint32\",\"name\":\"requestTimeoutSeconds\",\"type\":\"uint32\"}],\"internalType\":\"structFunctionsBilling.Config\",\"name\":\"config\",\"type\":\"tuple\"},{\"internalType\":\"address\",\"name\":\"linkToNativeFeed\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"EmptyPublicKey\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InconsistentReportData\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InsufficientBalance\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidCalldata\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"message\",\"type\":\"string\"}],\"name\":\"InvalidConfig\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"int256\",\"name\":\"linkWei\",\"type\":\"int256\"}],\"name\":\"InvalidLinkWeiPrice\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidSubscription\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"MustBeSubOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NoTransmittersSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByRouter\",\"type\":\"
error\"},{\"inputs\":[],\"name\":\"OnlyCallableByRouterOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PaymentTooLarge\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ReportInvalid\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RouterMustBeSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UnauthorizedPublicKeyChange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UnauthorizedSender\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UnsupportedRequestDataVersion\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"CommitmentDeleted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"previousConfigBlockNumber\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"configCount\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"fulfillmentGasPriceOverEstimationBP\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"feedStalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadBeforeCallback\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadAfterCallback\",
\"type\":\"uint32\"},{\"internalType\":\"uint72\",\"name\":\"donFee\",\"type\":\"uint72\"},{\"internalType\":\"uint40\",\"name\":\"minimumEstimateGasPriceWei\",\"type\":\"uint40\"},{\"internalType\":\"uint16\",\"name\":\"maxSupportedRequestDataVersion\",\"type\":\"uint16\"},{\"internalType\":\"uint224\",\"name\":\"fallbackNativePerUnitLink\",\"type\":\"uint224\"},{\"internalType\":\"uint32\",\"name\":\"requestTimeoutSeconds\",\"type\":\"uint32\"}],\"indexed\":false,\"internalType\":\"structFunctionsBilling.Config\",\"name\":\"config\",\"type\":\"tuple\"}],\"name\":\"ConfigUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"requestingContract\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"requestInitiator\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"subscriptionOwner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"dataVersion\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"flags\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"callbackGasLimit\",\"type\":\"uint64\"},{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"estimatedTotalCostJuels\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"client\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":
\"uint72\",\"name\":\"adminFee\",\"type\":\"uint72\"},{\"internalType\":\"uint72\",\"name\":\"donFee\",\"type\":\"uint72\"},{\"internalType\":\"uint40\",\"name\":\"gasOverheadBeforeCallback\",\"type\":\"uint40\"},{\"internalType\":\"uint40\",\"name\":\"gasOverheadAfterCallback\",\"type\":\"uint40\"},{\"internalType\":\"uint32\",\"name\":\"timeoutTimestamp\",\"type\":\"uint32\"}],\"indexed\":false,\"internalType\":\"structFunctionsResponse.Commitment\",\"name\":\"commitment\",\"type\":\"tuple\"}],\"name\":\"OracleRequest\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"}],\"name\":\"OracleResponse\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"name\":\"Transmitted\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"deleteCommitment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint6
4\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasPriceWei\",\"type\":\"uint256\"}],\"name\":\"estimateCost\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAdminFee\",\"outputs\":[{\"internalType\":\"uint72\",\"name\":\"\",\"type\":\"uint72\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConfig\",\"outputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"fulfillmentGasPriceOverEstimationBP\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"feedStalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadBeforeCallback\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadAfterCallback\",\"type\":\"uint32\"},{\"internalType\":\"uint72\",\"name\":\"donFee\",\"type\":\"uint72\"},{\"internalType\":\"uint40\",\"name\":\"minimumEstimateGasPriceWei\",\"type\":\"uint40\"},{\"internalType\":\"uint16\",\"name\":\"maxSupportedRequestDataVersion\",\"type\":\"uint16\"},{\"internalType\":\"uint224\",\"name\":\"fallbackNativePerUnitLink\",\"type\":\"uint224\"},{\"internalType\":\"uint32\",\"name\":\"requestTimeoutSeconds\",\"type\":\"uint32\"}],\"internalType\":\"structFunctionsBilling.Config\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"getDONFee\",\"outputs\":[{\"internalType\":\"uint72\",\"name\":\"\",\"type\":\"uint72\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getDONPublicKey\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getThresholdPublicKey
\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getWeiPerUnitLink\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDetails\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"configCount\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"blockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDigestAndEpoch\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"scanLogs\",\"type\":\"bool\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"oracleWithdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"oracleWithdrawAll\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"_signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"_transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"_f\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"_onchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"_offchainConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"_offchainConfig\",\"type\":\"
bytes\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"donPublicKey\",\"type\":\"bytes\"}],\"name\":\"setDONPublicKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"thresholdPublicKey\",\"type\":\"bytes\"}],\"name\":\"setThresholdPublicKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"flags\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"requestingContract\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"availableBalance\",\"type\":\"uint96\"},{\"internalType\":\"uint72\",\"name\":\"adminFee\",\"type\":\"uint72\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"initiatedRequests\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"dataVersion\",\"type\":\"uint16\"},{\"internalType\":\"uint64\",\"name\":\"completedRequests\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"subscriptionOwner\",\"type\":\"address\"}],\"internalType\":\"structFunctionsResponse.RequestMeta\",\"name\":\"request\",\"type\":\"tuple\"}],\"name\":\"startRequest\",\"outputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"estimatedTotalCostJuels\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"client\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32
\"},{\"internalType\":\"uint72\",\"name\":\"adminFee\",\"type\":\"uint72\"},{\"internalType\":\"uint72\",\"name\":\"donFee\",\"type\":\"uint72\"},{\"internalType\":\"uint40\",\"name\":\"gasOverheadBeforeCallback\",\"type\":\"uint40\"},{\"internalType\":\"uint40\",\"name\":\"gasOverheadAfterCallback\",\"type\":\"uint40\"},{\"internalType\":\"uint32\",\"name\":\"timeoutTimestamp\",\"type\":\"uint32\"}],\"internalType\":\"structFunctionsResponse.Commitment\",\"name\":\"commitment\",\"type\":\"tuple\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[3]\",\"name\":\"reportContext\",\"type\":\"bytes32[3]\"},{\"internalType\":\"bytes\",\"name\":\"report\",\"type\":\"bytes\"},{\"internalType\":\"bytes32[]\",\"name\":\"rs\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"ss\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32\",\"name\":\"rawVs\",\"type\":\"bytes32\"}],\"name\":\"transmit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"transmitters\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"fulfillmentGasPriceOverEstimationBP\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"feedStalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadBeforeCallback\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadAfterCallback\",\"type\":\"uint32\"},{\"internalType\":\"uint72\",\"nam
e\":\"donFee\",\"type\":\"uint72\"},{\"internalType\":\"uint40\",\"name\":\"minimumEstimateGasPriceWei\",\"type\":\"uint40\"},{\"internalType\":\"uint16\",\"name\":\"maxSupportedRequestDataVersion\",\"type\":\"uint16\"},{\"internalType\":\"uint224\",\"name\":\"fallbackNativePerUnitLink\",\"type\":\"uint224\"},{\"internalType\":\"uint32\",\"name\":\"requestTimeoutSeconds\",\"type\":\"uint32\"}],\"internalType\":\"structFunctionsBilling.Config\",\"name\":\"config\",\"type\":\"tuple\"}],\"name\":\"updateConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
- Bin: "0x60c06040523480156200001157600080fd5b506040516200529938038062005299833981016040819052620000349162000474565b8282828260013380600081620000915760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000c457620000c48162000140565b50505015156080526001600160a01b038116620000f457604051632530e88560e11b815260040160405180910390fd5b6001600160a01b0390811660a052600b80549183166c01000000000000000000000000026001600160601b039092169190911790556200013482620001eb565b50505050505062000633565b336001600160a01b038216036200019a5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000088565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b620001f562000349565b80516008805460208401516040808601516060870151608088015160a089015160c08a015161ffff16600160f01b026001600160f01b0364ffffffffff909216600160c81b0264ffffffffff60c81b196001600160481b03909416600160801b0293909316600160801b600160f01b031963ffffffff9586166c010000000000000000000000000263ffffffff60601b19978716680100000000000000000297909716600160401b600160801b0319998716640100000000026001600160401b0319909b169c87169c909c1799909917979097169990991793909317959095169390931793909317929092169390931790915560e0830151610100840151909216600160e01b026001600160e01b0390921691909117600955517f5f32d06f5e83eda3a68e0e964ef2e6af5cb613e8117aa103c2d6bca5f5184862906200033e9083906200057d565b60405180910390a150565b6200035362000355565b565b6000546001600160a01b03163314620003535760405162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640162000088565b80516001600160a01b0381168114620003c957600080fd5b919050565b60405161012081016001600160401b0381118282
1017156200040057634e487b7160e01b600052604160045260246000fd5b60405290565b805163ffffffff81168114620003c957600080fd5b80516001600160481b0381168114620003c957600080fd5b805164ffffffffff81168114620003c957600080fd5b805161ffff81168114620003c957600080fd5b80516001600160e01b0381168114620003c957600080fd5b60008060008385036101608112156200048c57600080fd5b6200049785620003b1565b935061012080601f1983011215620004ae57600080fd5b620004b8620003ce565b9150620004c86020870162000406565b8252620004d86040870162000406565b6020830152620004eb6060870162000406565b6040830152620004fe6080870162000406565b60608301526200051160a087016200041b565b60808301526200052460c0870162000433565b60a08301526200053760e0870162000449565b60c08301526101006200054c8188016200045c565b60e08401526200055e82880162000406565b90830152509150620005746101408501620003b1565b90509250925092565b815163ffffffff908116825260208084015182169083015260408084015182169083015260608084015191821690830152610120820190506080830151620005d060808401826001600160481b03169052565b5060a0830151620005ea60a084018264ffffffffff169052565b5060c08301516200060160c084018261ffff169052565b5060e08301516200061d60e08401826001600160e01b03169052565b506101009283015163ffffffff16919092015290565b60805160a051614c166200068360003960008181610845015281816109d301528181610ca601528181610f3a0152818161104501528181611830015261332c0152600061126e0152614c166000f3fe608060405234801561001057600080fd5b506004361061018d5760003560e01c806381ff7048116100e3578063c3f909d41161008c578063e3d0e71211610066578063e3d0e71214610560578063e4ddcea614610573578063f2fde38b1461058957600080fd5b8063c3f909d4146103b0578063d227d24514610528578063d328a91e1461055857600080fd5b8063a631571e116100bd578063a631571e1461035d578063afcb95d71461037d578063b1dc65a41461039d57600080fd5b806381ff7048146102b557806385b214cf146103225780638da5cb5b1461033557600080fd5b806366316d8d116101455780637f15e1661161011f5780637f15e16614610285578063814118341461029857806381f1b938146102ad57600080fd5b806366316d8d1461026257806379ba5097146102755780637d4807871461027d57600080fd5b8063
181f5a7711610176578063181f5a77146101ba5780632a905ccc1461020c57806359b5b7ac1461022e57600080fd5b8063083a5466146101925780631112dadc146101a7575b600080fd5b6101a56101a036600461361f565b61059c565b005b6101a56101b53660046137c8565b6105f1565b6101f66040518060400160405280601c81526020017f46756e6374696f6e7320436f6f7264696e61746f722076312e312e300000000081525081565b60405161020391906138e2565b60405180910390f35b610214610841565b60405168ffffffffffffffffff9091168152602001610203565b61021461023c36600461398a565b50600854700100000000000000000000000000000000900468ffffffffffffffffff1690565b6101a5610270366004613a19565b6108d7565b6101a5610a90565b6101a5610b92565b6101a561029336600461361f565b610d92565b6102a0610de2565b6040516102039190613aa3565b6101f6610e51565b6102ff60015460025463ffffffff74010000000000000000000000000000000000000000830481169378010000000000000000000000000000000000000000000000009093041691565b6040805163ffffffff948516815293909216602084015290820152606001610203565b6101a5610330366004613ab6565b610f22565b60005460405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610203565b61037061036b366004613acf565b610fd4565b6040516102039190613c24565b604080516001815260006020820181905291810191909152606001610203565b6101a56103ab366004613c78565b611175565b61051b6040805161012081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c0810182905260e0810182905261010081019190915250604080516101208101825260085463ffffffff80821683526401000000008204811660208401526801000000000000000082048116938301939093526c01000000000000000000000000810483166060830152700100000000000000000000000000000000810468ffffffffffffffffff166080830152790100000000000000000000000000000000000000000000000000810464ffffffffff1660a08301527e01000000000000000000000000000000000000000000000000000000000000900461ffff1660c08201526009547bffffffffffffffffffffffffffffffffffffffffffffffffffffffff811660e08301527c0100000000000000000000000000000000000000000000000000000000900490911661010082015290565b6040516102039190613d2f565b61053b61053636
6004613e1f565b61182c565b6040516bffffffffffffffffffffffff9091168152602001610203565b6101f661198c565b6101a561056e366004613f38565b6119e3565b61057b61240f565b604051908152602001610203565b6101a5610597366004614005565b612668565b6105a461267c565b60008190036105df576040517f4f42be3d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600d6105ec8284836140bb565b505050565b6105f96126ff565b80516008805460208401516040808601516060870151608088015160a089015160c08a015161ffff167e01000000000000000000000000000000000000000000000000000000000000027dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff64ffffffffff909216790100000000000000000000000000000000000000000000000000027fffff0000000000ffffffffffffffffffffffffffffffffffffffffffffffffff68ffffffffffffffffff90941670010000000000000000000000000000000002939093167fffff0000000000000000000000000000ffffffffffffffffffffffffffffffff63ffffffff9586166c01000000000000000000000000027fffffffffffffffffffffffffffffffff00000000ffffffffffffffffffffffff9787166801000000000000000002979097167fffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff998716640100000000027fffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000909b169c87169c909c1799909917979097169990991793909317959095169390931793909317929092169390931790915560e08301516101008401519092167c0100000000000000000000000000000000000000000000000000000000027bffffffffffffffffffffffffffffffffffffffffffffffffffffffff90921691909117600955517f5f32d06f5e83eda3a68e0e964ef2e6af5cb613e8117aa103c2d6bca5f518486290610836908390613d2f565b60405180910390a150565b60007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16632a905ccc6040518163ffffffff1660e01b8152600401602060405180830381865afa1580156108ae573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906108d291906141e1565b905090565b6108df612707565b806bffffffffffffffffffffffff166000036109195750336000908152600a60205260409020546bffffffffffffffffffffffff16610973
565b336000908152600a60205260409020546bffffffffffffffffffffffff80831691161015610973576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b336000908152600a6020526040812080548392906109a09084906bffffffffffffffffffffffff1661422d565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055506109f57f000000000000000000000000000000000000000000000000000000000000000090565b6040517f66316d8d00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff84811660048301526bffffffffffffffffffffffff8416602483015291909116906366316d8d90604401600060405180830381600087803b158015610a7457600080fd5b505af1158015610a88573d6000803e3d6000fd5b505050505050565b60015473ffffffffffffffffffffffffffffffffffffffff163314610b16576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b610b9a6126ff565b610ba2612707565b6000610bac610de2565b905060005b8151811015610d8e576000600a6000848481518110610bd257610bd2614252565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff168252810191909152604001600020546bffffffffffffffffffffffff1690508015610d7d576000600a6000858581518110610c3157610c31614252565b602002602001015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550610cc87f000000000000000000000000000000000000000000000000000000000000000090565b73ffffffffffffffffffffffffffffffffffffffff166366316d8d848481518110610cf557610cf5614252565b60200260200101518360
40518363ffffffff1660e01b8152600401610d4a92919073ffffffffffffffffffffffffffffffffffffffff9290921682526bffffffffffffffffffffffff16602082015260400190565b600060405180830381600087803b158015610d6457600080fd5b505af1158015610d78573d6000803e3d6000fd5b505050505b50610d8781614281565b9050610bb1565b5050565b610d9a61267c565b6000819003610dd5576040517f4f42be3d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600c6105ec8284836140bb565b60606006805480602002602001604051908101604052809291908181526020018280548015610e4757602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610e1c575b5050505050905090565b6060600d8054610e6090614022565b9050600003610e9b576040517f4f42be3d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600d8054610ea890614022565b80601f0160208091040260200160405190810160405280929190818152602001828054610ed490614022565b8015610e475780601f10610ef657610100808354040283529160200191610e47565b820191906000526020600020905b815481529060010190602001808311610f0457509395945050505050565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610f91576040517fc41a5b0900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008181526007602052604080822091909155517f8a4b97add3359bd6bcf5e82874363670eb5ad0f7615abddbd0ed0a3a98f0f416906108369083815260200190565b6040805161016081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c0810182905260e08101829052610100810182905261012081018290526101408101919091523373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161461109c576040517fc41a5b0900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6110ad6110a8836142b9565b6128b2565b90506110bf6060830160408401614005565b815173ffffffffffffffffffffffffffffffffffffffff91909116907fbf50768ccf13bd0110ca6d
53a9c4f1f3271abdd4c24a56878863ed25b20598ff3261110d60c0870160a088016143a6565b61111f61016088016101408901614005565b61112988806143c3565b61113b6101208b016101008c01614428565b60208b01356111516101008d0160e08e01614443565b8b60405161116799989796959493929190614460565b60405180910390a35b919050565b60005a604080518b3580825262ffffff6020808f0135600881901c929092169084015293945092917fb04e63db38c49950639fa09d29872f21f5d49d614f3a969d8adf3d4b52e41a62910160405180910390a16040805160608101825260025480825260035460ff8082166020850152610100909104169282019290925290831461125c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f636f6e666967446967657374206d69736d6174636800000000000000000000006044820152606401610b0d565b61126a8b8b8b8b8b8b612d50565b60007f0000000000000000000000000000000000000000000000000000000000000000156112c7576002826020015183604001516112a89190614508565b6112b29190614550565b6112bd906001614508565b60ff1690506112dd565b60208201516112d7906001614508565b60ff1690505b888114611346576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f77726f6e67206e756d626572206f66207369676e6174757265730000000000006044820152606401610b0d565b8887146113af576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f7369676e617475726573206f7574206f6620726567697374726174696f6e00006044820152606401610b0d565b3360009081526004602090815260408083208151808301909252805460ff808216845292939192918401916101009091041660028111156113f2576113f2614572565b600281111561140357611403614572565b905250905060028160200151600281111561142057611420614572565b14801561146757506006816000015160ff168154811061144257611442614252565b60009182526020909120015473ffffffffffffffffffffffffffffffffffffffff1633145b6114cd576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f756e617574686f72697a6564207472616e736d697474657200000000000000006044820152606401610b0d565b5050
5050506114da6135b7565b6000808a8a6040516114ed9291906145a1565b604051908190038120611504918e906020016145b1565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181528282528051602091820120838301909252600080845290830152915060005b8981101561180e57600060018489846020811061156d5761156d614252565b61157a91901a601b614508565b8e8e8681811061158c5761158c614252565b905060200201358d8d878181106115a5576115a5614252565b90506020020135604051600081526020016040526040516115e2949392919093845260ff9290921660208401526040830152606082015260800190565b6020604051602081039080840390855afa158015611604573d6000803e3d6000fd5b5050604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081015173ffffffffffffffffffffffffffffffffffffffff811660009081526004602090815290849020838501909452835460ff8082168552929650929450840191610100900416600281111561168457611684614572565b600281111561169557611695614572565b90525092506001836020015160028111156116b2576116b2614572565b14611719576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f61646472657373206e6f7420617574686f72697a656420746f207369676e00006044820152606401610b0d565b8251600090879060ff16601f811061173357611733614252565b602002015173ffffffffffffffffffffffffffffffffffffffff16146117b5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f6e6f6e2d756e69717565207369676e61747572650000000000000000000000006044820152606401610b0d565b8086846000015160ff16601f81106117cf576117cf614252565b73ffffffffffffffffffffffffffffffffffffffff90921660209290920201526117fa600186614508565b9450508061180790614281565b905061154e565b50505061181f833383858e8e612e07565b5050505050505050505050565b60007f00000000000000000000000000000000000000000000000000000000000000006040517f10fc49c100000000000000000000000000000000000000000000000000000000815267ffffffffffffffff8816600482015263ffffffff8516602482015273ffffffffffffffffffffffffffffffffffffffff91909116906310fc49c190604401600060
40518083038186803b1580156118cc57600080fd5b505afa1580156118e0573d6000803e3d6000fd5b5050505066038d7ea4c68000821115611925576040517f8129bbcd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600061192f610841565b9050600061197287878080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061023c92505050565b905061198085858385612fd5565b98975050505050505050565b6060600c805461199b90614022565b90506000036119d6576040517f4f42be3d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600c8054610ea890614022565b855185518560ff16601f831115611a56576040517f89a6198900000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f746f6f206d616e79207369676e657273000000000000000000000000000000006044820152606401610b0d565b80600003611ac0576040517f89a6198900000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f66206d75737420626520706f73697469766500000000000000000000000000006044820152606401610b0d565b818314611b4e576040517f89a61989000000000000000000000000000000000000000000000000000000008152602060048201526024808201527f6f7261636c6520616464726573736573206f7574206f6620726567697374726160448201527f74696f6e000000000000000000000000000000000000000000000000000000006064820152608401610b0d565b611b598160036145c5565b8311611bc1576040517f89a6198900000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f6661756c74792d6f7261636c65206620746f6f206869676800000000000000006044820152606401610b0d565b611bc961267c565b6040805160c0810182528a8152602081018a905260ff89169181018290526060810188905267ffffffffffffffff8716608082015260a0810186905290611c10908861311d565b60055415611dc557600554600090611c2a906001906145dc565b9050600060058281548110611c4157611c41614252565b60009182526020822001546006805473ffffffffffffffffffffffffffffffffffffffff90921693509084908110611c7b57611c7b614252565b600091825260208083209091015473ffffffffffffffffffffffffffffffffffffffff85
811684526004909252604080842080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000090811690915592909116808452922080549091169055600580549192509080611cfb57611cfb6145ef565b60008281526020902081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90810180547fffffffffffffffffffffffff00000000000000000000000000000000000000001690550190556006805480611d6457611d646145ef565b60008281526020902081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90810180547fffffffffffffffffffffffff000000000000000000000000000000000000000016905501905550611c10915050565b60005b81515181101561222c5760006004600084600001518481518110611dee57611dee614252565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff16825281019190915260400160002054610100900460ff166002811115611e3857611e38614572565b14611e9f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f7265706561746564207369676e657220616464726573730000000000000000006044820152606401610b0d565b6040805180820190915260ff82168152600160208201528251805160049160009185908110611ed057611ed0614252565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff168252818101929092526040016000208251815460ff9091167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0082168117835592840151919283917fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00001617610100836002811115611f7157611f71614572565b021790555060009150611f819050565b6004600084602001518481518110611f9b57611f9b614252565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff16825281019190915260400160002054610100900460ff166002811115611fe557611fe5614572565b1461204c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f7265706561746564207472616e736d69747465722061646472657373000000006044820152606401610b0d565b6040805180820190915260ff82168152602081016002815250600460008460200151848151811061207f5761207f614252565b6020908102919091
0181015173ffffffffffffffffffffffffffffffffffffffff168252818101929092526040016000208251815460ff9091167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0082168117835592840151919283917fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000161761010083600281111561212057612120614572565b02179055505082518051600592508390811061213e5761213e614252565b602090810291909101810151825460018101845560009384529282902090920180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff90931692909217909155820151805160069190839081106121ba576121ba614252565b60209081029190910181015182546001810184556000938452919092200180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff9092169190911790558061222481614281565b915050611dc8565b506040810151600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660ff909216919091179055600180547fffffffff00000000ffffffffffffffffffffffffffffffffffffffffffffffff8116780100000000000000000000000000000000000000000000000063ffffffff43811682029290921780855592048116929182916014916122e49184917401000000000000000000000000000000000000000090041661461e565b92506101000a81548163ffffffff021916908363ffffffff1602179055506123434630600160149054906101000a900463ffffffff1663ffffffff16856000015186602001518760400151886060015189608001518a60a00151613136565b600281905582518051600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff1661010060ff9093169290920291909117905560015460208501516040808701516060880151608089015160a08a015193517f1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e05986123fa988b9891977401000000000000000000000000000000000000000090920463ffffffff1696909591949193919261463b565b60405180910390a15050505050505050505050565b604080516101208101825260085463ffffffff80821683526401000000008204811660208401526801000000000000000082048116838501526c0100000000000000000000000080830482166060850152700100000000000000000000000000
000000830468ffffffffffffffffff166080850152790100000000000000000000000000000000000000000000000000830464ffffffffff1660a0808601919091527e0100000000000000000000000000000000000000000000000000000000000090930461ffff1660c08501526009547bffffffffffffffffffffffffffffffffffffffffffffffffffffffff811660e08601527c01000000000000000000000000000000000000000000000000000000009004909116610100840152600b5484517ffeaf968c00000000000000000000000000000000000000000000000000000000815294516000958694859490930473ffffffffffffffffffffffffffffffffffffffff169263feaf968c926004808401938290030181865afa15801561259d573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906125c191906146eb565b5093505092505080426125d491906145dc565b836020015163ffffffff161080156125f657506000836020015163ffffffff16115b1561262457505060e001517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff16919050565b60008213612661576040517f43d4cf6600000000000000000000000000000000000000000000000000000000815260048101839052602401610b0d565b5092915050565b61267061267c565b612679816131e1565b50565b60005473ffffffffffffffffffffffffffffffffffffffff1633146126fd576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610b0d565b565b6126fd61267c565b600b546bffffffffffffffffffffffff1660000361272157565b600061272b610de2565b90508051600003612768576040517f30274b3a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8051600b54600091612787916bffffffffffffffffffffffff1661473b565b905060005b82518110156128535781600a60008584815181106127ac576127ac614252565b602002602001015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008282829054906101000a90046bffffffffffffffffffffffff166128149190614766565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055508061284c90614281565b905061278c565b50815161
2860908261478b565b600b80546000906128809084906bffffffffffffffffffffffff1661422d565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055505050565b6040805161016081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c0810182905260e0810182905261010081018290526101208101829052610140810191909152604080516101208101825260085463ffffffff80821683526401000000008204811660208401526801000000000000000082048116938301939093526c0100000000000000000000000081048316606083015268ffffffffffffffffff700100000000000000000000000000000000820416608083015264ffffffffff79010000000000000000000000000000000000000000000000000082041660a083015261ffff7e01000000000000000000000000000000000000000000000000000000000000909104811660c083018190526009547bffffffffffffffffffffffffffffffffffffffffffffffffffffffff811660e08501527c0100000000000000000000000000000000000000000000000000000000900490931661010080840191909152850151919291161115612a6d576040517fdada758700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600854600090700100000000000000000000000000000000900468ffffffffffffffffff1690506000612aaa8560e001513a848860800151612fd5565b9050806bffffffffffffffffffffffff1685606001516bffffffffffffffffffffffff161015612b06576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600083610100015163ffffffff1642612b1f91906147b3565b905060003087604001518860a001518960c001516001612b3f91906147c6565b8a5180516020918201206101008d015160e08e0151604051612bf398979695948c918c9132910173ffffffffffffffffffffffffffffffffffffffff9a8b168152988a1660208a015267ffffffffffffffff97881660408a0152959096166060880152608087019390935261ffff9190911660a086015263ffffffff90811660c08601526bffffffffffffffffffffffff9190911660e0850152919091166101008301529091166101208201526101400190565b6040516020818303038152906040528051906020012090506040518061016001604052808281526020013073ffffffffffffffffffffffffffffffffffffffff168152602001846bffffffff
ffffffffffffffff168152602001886040015173ffffffffffffffffffffffffffffffffffffffff1681526020018860a0015167ffffffffffffffff1681526020018860e0015163ffffffff168152602001886080015168ffffffffffffffffff1681526020018568ffffffffffffffffff168152602001866040015163ffffffff1664ffffffffff168152602001866060015163ffffffff1664ffffffffff1681526020018363ffffffff16815250955085604051602001612d029190613c24565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152918152815160209283012060009384526007909252909120555092949350505050565b6000612d5d8260206145c5565b612d688560206145c5565b612d74886101446147b3565b612d7e91906147b3565b612d8891906147b3565b612d939060006147b3565b9050368114612dfe576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f63616c6c64617461206c656e677468206d69736d6174636800000000000000006044820152606401610b0d565b50505050505050565b606080808080612e19868801886148c2565b8451949950929750909550935091501580612e3657508351855114155b80612e4357508251855114155b80612e5057508151855114155b80612e5d57508051855114155b15612e94576040517f0be3632800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005b8551811015612fc7576000612f2c878381518110612eb757612eb7614252565b6020026020010151878481518110612ed157612ed1614252565b6020026020010151878581518110612eeb57612eeb614252565b6020026020010151878681518110612f0557612f05614252565b6020026020010151878781518110612f1f57612f1f614252565b60200260200101516132d6565b90506000816006811115612f4257612f42614572565b1480612f5f57506001816006811115612f5d57612f5d614572565b145b15612fb657868281518110612f7657612f76614252565b60209081029190910181015160405133815290917fc708e0440951fd63499c0f7a73819b469ee5dd3ecc356c0ab4eb7f18389009d9910160405180910390a25b50612fc081614281565b9050612e97565b505050505050505050505050565b6008546000908190869061300d9063ffffffff6c0100000000000000000000000082048116916801000000000000000090041661461e565b613017919061461e565b60085463ffffffff9190911691507901
00000000000000000000000000000000000000000000000000900464ffffffffff1685101561307a57600854790100000000000000000000000000000000000000000000000000900464ffffffffff1694505b600854600090612710906130949063ffffffff16886145c5565b61309e9190614994565b6130a890876147b3565b905060006130b5826134e6565b905060006130d1846bffffffffffffffffffffffff84166145c5565b905060006130ed68ffffffffffffffffff808916908a16614766565b905061310f61310a6bffffffffffffffffffffffff8316846147b3565b613515565b9a9950505050505050505050565b6000613127610de2565b511115610d8e57610d8e612707565b6000808a8a8a8a8a8a8a8a8a60405160200161315a999897969594939291906149a8565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101207dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e01000000000000000000000000000000000000000000000000000000000000179150509998505050505050505050565b3373ffffffffffffffffffffffffffffffffffffffff821603613260576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610b0d565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600080838060200190518101906132ed9190614a74565b905060006132fa3a6134e6565b905060008261012001518361010001516133149190614b3c565b6133259064ffffffffff168361478b565b90506000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663330605298b8b878960e0015168ffffffffffffffffff16886133849190614766565b338b6040518763ffffffff1660e01b81526004016133a796959493929190614b5a565b60408051808303816000875af11580156133c5573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906133e99190614bd6565b9092509050600082600681111561340257613402614572565b148061341f5750600182600681
111561341d5761341d614572565b145b156134d85760008b81526007602052604081205561343d8184614766565b336000908152600a6020526040812080547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff93841617905560e0870151600b805468ffffffffffffffffff909216939092916134a991859116614766565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055505b509998505050505050505050565b600061350f6134f361240f565b61350584670de0b6b3a76400006145c5565b61310a9190614994565b92915050565b60006bffffffffffffffffffffffff8211156135b3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f53616665436173743a2076616c756520646f65736e27742066697420696e203960448201527f36206269747300000000000000000000000000000000000000000000000000006064820152608401610b0d565b5090565b604051806103e00160405280601f906020820280368337509192915050565b60008083601f8401126135e857600080fd5b50813567ffffffffffffffff81111561360057600080fd5b60208301915083602082850101111561361857600080fd5b9250929050565b6000806020838503121561363257600080fd5b823567ffffffffffffffff81111561364957600080fd5b613655858286016135d6565b90969095509350505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051610120810167ffffffffffffffff811182821017156136b4576136b4613661565b60405290565b604051610160810167ffffffffffffffff811182821017156136b4576136b4613661565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff8111828210171561372557613725613661565b604052919050565b63ffffffff8116811461267957600080fd5b80356111708161372d565b68ffffffffffffffffff8116811461267957600080fd5b80356111708161374a565b64ffffffffff8116811461267957600080fd5b80356111708161376c565b803561ffff8116811461117057600080fd5b80357bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8116811461117057600080fd5b600061012082840312156137db57600080fd5b6137e3613690565b6137ec8361373f565b81526137fa602084016137
3f565b602082015261380b6040840161373f565b604082015261381c6060840161373f565b606082015261382d60808401613761565b608082015261383e60a0840161377f565b60a082015261384f60c0840161378a565b60c082015261386060e0840161379c565b60e082015261010061387381850161373f565b908201529392505050565b6000815180845260005b818110156138a457602081850181015186830182015201613888565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b6020815260006138f5602083018461387e565b9392505050565b600082601f83011261390d57600080fd5b813567ffffffffffffffff81111561392757613927613661565b61395860207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116016136de565b81815284602083860101111561396d57600080fd5b816020850160208301376000918101602001919091529392505050565b60006020828403121561399c57600080fd5b813567ffffffffffffffff8111156139b357600080fd5b6139bf848285016138fc565b949350505050565b73ffffffffffffffffffffffffffffffffffffffff8116811461267957600080fd5b8035611170816139c7565b6bffffffffffffffffffffffff8116811461267957600080fd5b8035611170816139f4565b60008060408385031215613a2c57600080fd5b8235613a37816139c7565b91506020830135613a47816139f4565b809150509250929050565b600081518084526020808501945080840160005b83811015613a9857815173ffffffffffffffffffffffffffffffffffffffff1687529582019590820190600101613a66565b509495945050505050565b6020815260006138f56020830184613a52565b600060208284031215613ac857600080fd5b5035919050565b600060208284031215613ae157600080fd5b813567ffffffffffffffff811115613af857600080fd5b820161016081850312156138f557600080fd5b805182526020810151613b36602084018273ffffffffffffffffffffffffffffffffffffffff169052565b506040810151613b5660408401826bffffffffffffffffffffffff169052565b506060810151613b7e606084018273ffffffffffffffffffffffffffffffffffffffff169052565b506080810151613b9a608084018267ffffffffffffffff169052565b5060a0810151613bb260a084018263ffffffff169052565b5060c0810151613bcf60c084018268ffffffffffffffffff169052565b5060e0810151613bec60e084018268ff
ffffffffffffffff169052565b506101008181015164ffffffffff9081169184019190915261012080830151909116908301526101409081015163ffffffff16910152565b610160810161350f8284613b0b565b60008083601f840112613c4557600080fd5b50813567ffffffffffffffff811115613c5d57600080fd5b6020830191508360208260051b850101111561361857600080fd5b60008060008060008060008060e0898b031215613c9457600080fd5b606089018a811115613ca557600080fd5b8998503567ffffffffffffffff80821115613cbf57600080fd5b613ccb8c838d016135d6565b909950975060808b0135915080821115613ce457600080fd5b613cf08c838d01613c33565b909750955060a08b0135915080821115613d0957600080fd5b50613d168b828c01613c33565b999c989b50969995989497949560c00135949350505050565b815163ffffffff908116825260208084015182169083015260408084015182169083015260608084015191821690830152610120820190506080830151613d83608084018268ffffffffffffffffff169052565b5060a0830151613d9c60a084018264ffffffffff169052565b5060c0830151613db260c084018261ffff169052565b5060e0830151613de260e08401827bffffffffffffffffffffffffffffffffffffffffffffffffffffffff169052565b506101008381015163ffffffff8116848301525b505092915050565b67ffffffffffffffff8116811461267957600080fd5b803561117081613dfe565b600080600080600060808688031215613e3757600080fd5b8535613e4281613dfe565b9450602086013567ffffffffffffffff811115613e5e57600080fd5b613e6a888289016135d6565b9095509350506040860135613e7e8161372d565b949793965091946060013592915050565b600067ffffffffffffffff821115613ea957613ea9613661565b5060051b60200190565b600082601f830112613ec457600080fd5b81356020613ed9613ed483613e8f565b6136de565b82815260059290921b84018101918181019086841115613ef857600080fd5b8286015b84811015613f1c578035613f0f816139c7565b8352918301918301613efc565b509695505050505050565b803560ff8116811461117057600080fd5b60008060008060008060c08789031215613f5157600080fd5b863567ffffffffffffffff80821115613f6957600080fd5b613f758a838b01613eb3565b97506020890135915080821115613f8b57600080fd5b613f978a838b01613eb3565b9650613fa560408a01613f27565b95506060890135915080821115613fbb57600080fd5b613fc78a838b016138fc565b
9450613fd560808a01613e14565b935060a0890135915080821115613feb57600080fd5b50613ff889828a016138fc565b9150509295509295509295565b60006020828403121561401757600080fd5b81356138f5816139c7565b600181811c9082168061403657607f821691505b60208210810361406f577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b601f8211156105ec57600081815260208120601f850160051c8101602086101561409c5750805b601f850160051c820191505b81811015610a88578281556001016140a8565b67ffffffffffffffff8311156140d3576140d3613661565b6140e7836140e18354614022565b83614075565b6000601f84116001811461413957600085156141035750838201355b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600387901b1c1916600186901b1783556141cf565b6000838152602090207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0861690835b828110156141885786850135825560209485019460019092019101614168565b50868210156141c3577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60f88860031b161c19848701351681555b505060018560011b0183555b5050505050565b80516111708161374a565b6000602082840312156141f357600080fd5b81516138f58161374a565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6bffffffffffffffffffffffff828116828216039080821115612661576126616141fe565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036142b2576142b26141fe565b5060010190565b600061016082360312156142cc57600080fd5b6142d46136ba565b823567ffffffffffffffff8111156142eb57600080fd5b6142f7368286016138fc565b82525060208301356020820152614310604084016139e9565b604082015261432160608401613a0e565b606082015261433260808401613761565b608082015261434360a08401613e14565b60a082015261435460c08401613e14565b60c082015261436560e0840161373f565b60e082015261010061437881850161378a565b9082015261012061438a848201613e14565b9082015261014061439c8482016139e9565b9082015292915050565b600060208284031215
6143b857600080fd5b81356138f581613dfe565b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe18436030181126143f857600080fd5b83018035915067ffffffffffffffff82111561441357600080fd5b60200191503681900382131561361857600080fd5b60006020828403121561443a57600080fd5b6138f58261378a565b60006020828403121561445557600080fd5b81356138f58161372d565b73ffffffffffffffffffffffffffffffffffffffff8a8116825267ffffffffffffffff8a166020830152881660408201526102406060820181905281018690526000610260878982850137600083890182015261ffff8716608084015260a0830186905263ffffffff851660c0840152601f88017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016830101905061310f60e0830184613b0b565b60ff818116838216019081111561350f5761350f6141fe565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b600060ff83168061456357614563614521565b8060ff84160491505092915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b8183823760009101908152919050565b828152606082602083013760800192915050565b808202811582820484141761350f5761350f6141fe565b8181038181111561350f5761350f6141fe565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fd5b63ffffffff818116838216019080821115612661576126616141fe565b600061012063ffffffff808d1684528b6020850152808b1660408501525080606084015261466b8184018a613a52565b9050828103608084015261467f8189613a52565b905060ff871660a084015282810360c084015261469c818761387e565b905067ffffffffffffffff851660e08401528281036101008401526146c1818561387e565b9c9b505050505050505050505050565b805169ffffffffffffffffffff8116811461117057600080fd5b600080600080600060a0868803121561470357600080fd5b61470c866146d1565b945060208601519350604086015192506060860151915061472f608087016146d1565b90509295509295909350565b60006bffffffffffffffffffffffff8084168061475a5761475a614521565b92169190910492915050565b6bffffffffffffffffffffffff818116838216019080821115612661576126616141fe565b6bffffffffffffffffffffff
ff818116838216028082169190828114613df657613df66141fe565b8082018082111561350f5761350f6141fe565b67ffffffffffffffff818116838216019080821115612661576126616141fe565b600082601f8301126147f857600080fd5b81356020614808613ed483613e8f565b82815260059290921b8401810191818101908684111561482757600080fd5b8286015b84811015613f1c578035835291830191830161482b565b600082601f83011261485357600080fd5b81356020614863613ed483613e8f565b82815260059290921b8401810191818101908684111561488257600080fd5b8286015b84811015613f1c57803567ffffffffffffffff8111156148a65760008081fd5b6148b48986838b01016138fc565b845250918301918301614886565b600080600080600060a086880312156148da57600080fd5b853567ffffffffffffffff808211156148f257600080fd5b6148fe89838a016147e7565b9650602088013591508082111561491457600080fd5b61492089838a01614842565b9550604088013591508082111561493657600080fd5b61494289838a01614842565b9450606088013591508082111561495857600080fd5b61496489838a01614842565b9350608088013591508082111561497a57600080fd5b5061498788828901614842565b9150509295509295909350565b6000826149a3576149a3614521565b500490565b60006101208b835273ffffffffffffffffffffffffffffffffffffffff8b16602084015267ffffffffffffffff808b1660408501528160608501526149ef8285018b613a52565b91508382036080850152614a03828a613a52565b915060ff881660a085015283820360c0850152614a20828861387e565b90861660e085015283810361010085015290506146c1818561387e565b8051611170816139c7565b8051611170816139f4565b805161117081613dfe565b80516111708161372d565b80516111708161376c565b60006101608284031215614a8757600080fd5b614a8f6136ba565b82518152614a9f60208401614a3d565b6020820152614ab060408401614a48565b6040820152614ac160608401614a3d565b6060820152614ad260808401614a53565b6080820152614ae360a08401614a5e565b60a0820152614af460c084016141d6565b60c0820152614b0560e084016141d6565b60e0820152610100614b18818501614a69565b90820152610120614b2a848201614a69565b90820152610140613873848201614a5e565b64ffffffffff818116838216019080821115612661576126616141fe565b6000610200808352614b6e8184018a61387e565b90508281036020840152614b8281896138
7e565b6bffffffffffffffffffffffff88811660408601528716606085015273ffffffffffffffffffffffffffffffffffffffff861660808501529150614bcb905060a0830184613b0b565b979650505050505050565b60008060408385031215614be957600080fd5b825160078110614bf857600080fd5b6020840151909250613a47816139f456fea164736f6c6343000813000a",
+ ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"router\",\"type\":\"address\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"fulfillmentGasPriceOverEstimationBP\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"feedStalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadBeforeCallback\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadAfterCallback\",\"type\":\"uint32\"},{\"internalType\":\"uint72\",\"name\":\"donFee\",\"type\":\"uint72\"},{\"internalType\":\"uint40\",\"name\":\"minimumEstimateGasPriceWei\",\"type\":\"uint40\"},{\"internalType\":\"uint16\",\"name\":\"maxSupportedRequestDataVersion\",\"type\":\"uint16\"},{\"internalType\":\"uint224\",\"name\":\"fallbackNativePerUnitLink\",\"type\":\"uint224\"},{\"internalType\":\"uint32\",\"name\":\"requestTimeoutSeconds\",\"type\":\"uint32\"}],\"internalType\":\"structFunctionsBilling.Config\",\"name\":\"config\",\"type\":\"tuple\"},{\"internalType\":\"address\",\"name\":\"linkToNativeFeed\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"EmptyPublicKey\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InconsistentReportData\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InsufficientBalance\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidCalldata\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"message\",\"type\":\"string\"}],\"name\":\"InvalidConfig\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"int256\",\"name\":\"linkWei\",\"type\":\"int256\"}],\"name\":\"InvalidLinkWeiPrice\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidSubscription\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"MustBeSubOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NoTransmittersSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByRouter\",\"type\":\"
error\"},{\"inputs\":[],\"name\":\"OnlyCallableByRouterOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PaymentTooLarge\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"message\",\"type\":\"string\"}],\"name\":\"ReportInvalid\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RouterMustBeSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UnauthorizedPublicKeyChange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UnauthorizedSender\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UnsupportedRequestDataVersion\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"CommitmentDeleted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"previousConfigBlockNumber\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"configCount\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"fulfillmentGasPriceOverEstimationBP\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"feedStalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadBeforeCallback\",\"type\":\"uint32\
"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadAfterCallback\",\"type\":\"uint32\"},{\"internalType\":\"uint72\",\"name\":\"donFee\",\"type\":\"uint72\"},{\"internalType\":\"uint40\",\"name\":\"minimumEstimateGasPriceWei\",\"type\":\"uint40\"},{\"internalType\":\"uint16\",\"name\":\"maxSupportedRequestDataVersion\",\"type\":\"uint16\"},{\"internalType\":\"uint224\",\"name\":\"fallbackNativePerUnitLink\",\"type\":\"uint224\"},{\"internalType\":\"uint32\",\"name\":\"requestTimeoutSeconds\",\"type\":\"uint32\"}],\"indexed\":false,\"internalType\":\"structFunctionsBilling.Config\",\"name\":\"config\",\"type\":\"tuple\"}],\"name\":\"ConfigUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"requestingContract\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"requestInitiator\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"subscriptionOwner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"dataVersion\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"flags\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"callbackGasLimit\",\"type\":\"uint64\"},{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"estimatedTotalCostJuels\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"client\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"uint32\"
,\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint72\",\"name\":\"adminFee\",\"type\":\"uint72\"},{\"internalType\":\"uint72\",\"name\":\"donFee\",\"type\":\"uint72\"},{\"internalType\":\"uint40\",\"name\":\"gasOverheadBeforeCallback\",\"type\":\"uint40\"},{\"internalType\":\"uint40\",\"name\":\"gasOverheadAfterCallback\",\"type\":\"uint40\"},{\"internalType\":\"uint32\",\"name\":\"timeoutTimestamp\",\"type\":\"uint32\"}],\"indexed\":false,\"internalType\":\"structFunctionsResponse.Commitment\",\"name\":\"commitment\",\"type\":\"tuple\"}],\"name\":\"OracleRequest\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"}],\"name\":\"OracleResponse\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"juelsPerGas\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l1FeeShareWei\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"callbackCostJuels\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"totalCostJuels\",\"type\":\"uint96\"}],\"name\":\"RequestBilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs
\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"name\":\"Transmitted\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"deleteCommitment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasPriceWei\",\"type\":\"uint256\"}],\"name\":\"estimateCost\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAdminFee\",\"outputs\":[{\"internalType\":\"uint72\",\"name\":\"\",\"type\":\"uint72\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConfig\",\"outputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"fulfillmentGasPriceOverEstimationBP\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"feedStalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadBeforeCallback\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadAfterCallback\",\"type\":\"uint32\"},{\"internalType\":\"uint72\",\"name\":\"donFee\",\"type\":\"uint72\"},{\"internalType\":\"uint40\",\"name\":\"minimumEstimateGasPriceWei\",\"type\":\"uint40\"},{\"internalType\":\"uint16\",\"name\":\"maxSupportedRequestDataVersion\",\"type\":\"uint16\"},{\"internalType\":\"uint224\",\"name\":\"fallbackNativePerUnitLink\",\"type\":\"uint224\"},{\"internalType\":\"uint32\",\"name\":\"requ
estTimeoutSeconds\",\"type\":\"uint32\"}],\"internalType\":\"structFunctionsBilling.Config\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"getDONFee\",\"outputs\":[{\"internalType\":\"uint72\",\"name\":\"\",\"type\":\"uint72\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getDONPublicKey\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getThresholdPublicKey\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getWeiPerUnitLink\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDetails\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"configCount\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"blockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDigestAndEpoch\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"scanLogs\",\"type\":\"bool\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"oracleWithdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"oracleWithdrawAll\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inp
uts\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"_signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"_transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"_f\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"_onchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"_offchainConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"_offchainConfig\",\"type\":\"bytes\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"donPublicKey\",\"type\":\"bytes\"}],\"name\":\"setDONPublicKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"thresholdPublicKey\",\"type\":\"bytes\"}],\"name\":\"setThresholdPublicKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"flags\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"requestingContract\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"availableBalance\",\"type\":\"uint96\"},{\"internalType\":\"uint72\",\"name\":\"adminFee\",\"type\":\"uint72\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"initiatedRequests\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"dataVersion\",\"type\":\"uint16\"},{\"internalType\":\"uint64\",\"name\":\"completedRequests\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"subscriptionOwner\",\"type\":\"addr
ess\"}],\"internalType\":\"structFunctionsResponse.RequestMeta\",\"name\":\"request\",\"type\":\"tuple\"}],\"name\":\"startRequest\",\"outputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"estimatedTotalCostJuels\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"client\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint72\",\"name\":\"adminFee\",\"type\":\"uint72\"},{\"internalType\":\"uint72\",\"name\":\"donFee\",\"type\":\"uint72\"},{\"internalType\":\"uint40\",\"name\":\"gasOverheadBeforeCallback\",\"type\":\"uint40\"},{\"internalType\":\"uint40\",\"name\":\"gasOverheadAfterCallback\",\"type\":\"uint40\"},{\"internalType\":\"uint32\",\"name\":\"timeoutTimestamp\",\"type\":\"uint32\"}],\"internalType\":\"structFunctionsResponse.Commitment\",\"name\":\"commitment\",\"type\":\"tuple\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[3]\",\"name\":\"reportContext\",\"type\":\"bytes32[3]\"},{\"internalType\":\"bytes\",\"name\":\"report\",\"type\":\"bytes\"},{\"internalType\":\"bytes32[]\",\"name\":\"rs\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"ss\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32\",\"name\":\"rawVs\",\"type\":\"bytes32\"}],\"name\":\"transmit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"transmitters\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\
"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"fulfillmentGasPriceOverEstimationBP\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"feedStalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadBeforeCallback\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadAfterCallback\",\"type\":\"uint32\"},{\"internalType\":\"uint72\",\"name\":\"donFee\",\"type\":\"uint72\"},{\"internalType\":\"uint40\",\"name\":\"minimumEstimateGasPriceWei\",\"type\":\"uint40\"},{\"internalType\":\"uint16\",\"name\":\"maxSupportedRequestDataVersion\",\"type\":\"uint16\"},{\"internalType\":\"uint224\",\"name\":\"fallbackNativePerUnitLink\",\"type\":\"uint224\"},{\"internalType\":\"uint32\",\"name\":\"requestTimeoutSeconds\",\"type\":\"uint32\"}],\"internalType\":\"structFunctionsBilling.Config\",\"name\":\"config\",\"type\":\"tuple\"}],\"name\":\"updateConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
+ Bin: "0x60a06040523480156200001157600080fd5b50604051620056d0380380620056d083398101604081905262000034916200046d565b8282828233806000816200008f5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000c257620000c28162000139565b5050506001600160a01b038116620000ed57604051632530e88560e11b815260040160405180910390fd5b6001600160a01b03908116608052600b80549183166c01000000000000000000000000026001600160601b039092169190911790556200012d82620001e4565b5050505050506200062c565b336001600160a01b03821603620001935760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000086565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b620001ee62000342565b80516008805460208401516040808601516060870151608088015160a089015160c08a015161ffff16600160f01b026001600160f01b0364ffffffffff909216600160c81b0264ffffffffff60c81b196001600160481b03909416600160801b0293909316600160801b600160f01b031963ffffffff9586166c010000000000000000000000000263ffffffff60601b19978716680100000000000000000297909716600160401b600160801b0319998716640100000000026001600160401b0319909b169c87169c909c1799909917979097169990991793909317959095169390931793909317929092169390931790915560e0830151610100840151909216600160e01b026001600160e01b0390921691909117600955517f5f32d06f5e83eda3a68e0e964ef2e6af5cb613e8117aa103c2d6bca5f5184862906200033790839062000576565b60405180910390a150565b6200034c6200034e565b565b6000546001600160a01b031633146200034c5760405162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640162000086565b80516001600160a01b0381168114620003c257600080fd5b919050565b60405161012081016001600160401b0381118282101715620003f9
57634e487b7160e01b600052604160045260246000fd5b60405290565b805163ffffffff81168114620003c257600080fd5b80516001600160481b0381168114620003c257600080fd5b805164ffffffffff81168114620003c257600080fd5b805161ffff81168114620003c257600080fd5b80516001600160e01b0381168114620003c257600080fd5b60008060008385036101608112156200048557600080fd5b6200049085620003aa565b935061012080601f1983011215620004a757600080fd5b620004b1620003c7565b9150620004c160208701620003ff565b8252620004d160408701620003ff565b6020830152620004e460608701620003ff565b6040830152620004f760808701620003ff565b60608301526200050a60a0870162000414565b60808301526200051d60c087016200042c565b60a08301526200053060e0870162000442565b60c08301526101006200054581880162000455565b60e084015262000557828801620003ff565b908301525091506200056d6101408501620003aa565b90509250925092565b815163ffffffff908116825260208084015182169083015260408084015182169083015260608084015191821690830152610120820190506080830151620005c960808401826001600160481b03169052565b5060a0830151620005e360a084018264ffffffffff169052565b5060c0830151620005fa60c084018261ffff169052565b5060e08301516200061660e08401826001600160e01b03169052565b506101009283015163ffffffff16919092015290565b60805161505e6200067260003960008181610845015281816109d301528181610ca601528181610f3a01528181611045015281816117890152613490015261505e6000f3fe608060405234801561001057600080fd5b506004361061018d5760003560e01c806381ff7048116100e3578063c3f909d41161008c578063e3d0e71211610066578063e3d0e71214610560578063e4ddcea614610573578063f2fde38b1461058957600080fd5b8063c3f909d4146103b0578063d227d24514610528578063d328a91e1461055857600080fd5b8063a631571e116100bd578063a631571e1461035d578063afcb95d71461037d578063b1dc65a41461039d57600080fd5b806381ff7048146102b557806385b214cf146103225780638da5cb5b1461033557600080fd5b806366316d8d116101455780637f15e1661161011f5780637f15e16614610285578063814118341461029857806381f1b938146102ad57600080fd5b806366316d8d1461026257806379ba5097146102755780637d4807871461027d57600080fd5b8063181f5a7711610176578063181f5a771461
01ba5780632a905ccc1461020c57806359b5b7ac1461022e57600080fd5b8063083a5466146101925780631112dadc146101a7575b600080fd5b6101a56101a03660046139d4565b61059c565b005b6101a56101b5366004613b7d565b6105f1565b6101f66040518060400160405280601c81526020017f46756e6374696f6e7320436f6f7264696e61746f722076312e312e300000000081525081565b6040516102039190613ca1565b60405180910390f35b610214610841565b60405168ffffffffffffffffff9091168152602001610203565b61021461023c366004613d42565b50600854700100000000000000000000000000000000900468ffffffffffffffffff1690565b6101a5610270366004613dd1565b6108d7565b6101a5610a90565b6101a5610b92565b6101a56102933660046139d4565b610d92565b6102a0610de2565b6040516102039190613e5b565b6101f6610e51565b6102ff60015460025463ffffffff74010000000000000000000000000000000000000000830481169378010000000000000000000000000000000000000000000000009093041691565b6040805163ffffffff948516815293909216602084015290820152606001610203565b6101a5610330366004613e6e565b610f22565b60005460405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610203565b61037061036b366004613e87565b610fd4565b6040516102039190613fdc565b604080516001815260006020820181905291810191909152606001610203565b6101a56103ab366004614030565b611175565b61051b6040805161012081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c0810182905260e0810182905261010081019190915250604080516101208101825260085463ffffffff80821683526401000000008204811660208401526801000000000000000082048116938301939093526c01000000000000000000000000810483166060830152700100000000000000000000000000000000810468ffffffffffffffffff166080830152790100000000000000000000000000000000000000000000000000810464ffffffffff1660a08301527e01000000000000000000000000000000000000000000000000000000000000900461ffff1660c08201526009547bffffffffffffffffffffffffffffffffffffffffffffffffffffffff811660e08301527c0100000000000000000000000000000000000000000000000000000000900490911661010082015290565b60405161020391906140e7565b61053b6105363660046141d7565b611785565b6040516bff
ffffffffffffffffffffff9091168152602001610203565b6101f66118e5565b6101a561056e3660046142f0565b61193c565b61057b6124b8565b604051908152602001610203565b6101a56105973660046143bd565b612711565b6105a4612725565b60008190036105df576040517f4f42be3d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600d6105ec828483614473565b505050565b6105f96127a8565b80516008805460208401516040808601516060870151608088015160a089015160c08a015161ffff167e01000000000000000000000000000000000000000000000000000000000000027dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff64ffffffffff909216790100000000000000000000000000000000000000000000000000027fffff0000000000ffffffffffffffffffffffffffffffffffffffffffffffffff68ffffffffffffffffff90941670010000000000000000000000000000000002939093167fffff0000000000000000000000000000ffffffffffffffffffffffffffffffff63ffffffff9586166c01000000000000000000000000027fffffffffffffffffffffffffffffffff00000000ffffffffffffffffffffffff9787166801000000000000000002979097167fffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff998716640100000000027fffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000909b169c87169c909c1799909917979097169990991793909317959095169390931793909317929092169390931790915560e08301516101008401519092167c0100000000000000000000000000000000000000000000000000000000027bffffffffffffffffffffffffffffffffffffffffffffffffffffffff90921691909117600955517f5f32d06f5e83eda3a68e0e964ef2e6af5cb613e8117aa103c2d6bca5f5184862906108369083906140e7565b60405180910390a150565b60007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16632a905ccc6040518163ffffffff1660e01b8152600401602060405180830381865afa1580156108ae573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906108d29190614599565b905090565b6108df6127b0565b806bffffffffffffffffffffffff166000036109195750336000908152600a60205260409020546bffffffffffffffffffffffff16610973565b336000908152600a60205260409020
546bffffffffffffffffffffffff80831691161015610973576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b336000908152600a6020526040812080548392906109a09084906bffffffffffffffffffffffff166145e5565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055506109f57f000000000000000000000000000000000000000000000000000000000000000090565b6040517f66316d8d00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff84811660048301526bffffffffffffffffffffffff8416602483015291909116906366316d8d90604401600060405180830381600087803b158015610a7457600080fd5b505af1158015610a88573d6000803e3d6000fd5b505050505050565b60015473ffffffffffffffffffffffffffffffffffffffff163314610b16576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b610b9a6127a8565b610ba26127b0565b6000610bac610de2565b905060005b8151811015610d8e576000600a6000848481518110610bd257610bd261460a565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff168252810191909152604001600020546bffffffffffffffffffffffff1690508015610d7d576000600a6000858581518110610c3157610c3161460a565b602002602001015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550610cc87f000000000000000000000000000000000000000000000000000000000000000090565b73ffffffffffffffffffffffffffffffffffffffff166366316d8d848481518110610cf557610cf561460a565b6020026020010151836040518363ffffffff1660e01b8152600401
610d4a92919073ffffffffffffffffffffffffffffffffffffffff9290921682526bffffffffffffffffffffffff16602082015260400190565b600060405180830381600087803b158015610d6457600080fd5b505af1158015610d78573d6000803e3d6000fd5b505050505b50610d8781614639565b9050610bb1565b5050565b610d9a612725565b6000819003610dd5576040517f4f42be3d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600c6105ec828483614473565b60606006805480602002602001604051908101604052809291908181526020018280548015610e4757602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610e1c575b5050505050905090565b6060600d8054610e60906143da565b9050600003610e9b576040517f4f42be3d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600d8054610ea8906143da565b80601f0160208091040260200160405190810160405280929190818152602001828054610ed4906143da565b8015610e475780601f10610ef657610100808354040283529160200191610e47565b820191906000526020600020905b815481529060010190602001808311610f0457509395945050505050565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610f91576040517fc41a5b0900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008181526007602052604080822091909155517f8a4b97add3359bd6bcf5e82874363670eb5ad0f7615abddbd0ed0a3a98f0f416906108369083815260200190565b6040805161016081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c0810182905260e08101829052610100810182905261012081018290526101408101919091523373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161461109c576040517fc41a5b0900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6110ad6110a883614671565b61295c565b90506110bf60608301604084016143bd565b815173ffffffffffffffffffffffffffffffffffffffff91909116907fbf50768ccf13bd0110ca6d53a9c4f1f3271abdd4c24a56878863ed25
b20598ff3261110d60c0870160a0880161475e565b61111f610160880161014089016143bd565b611129888061477b565b61113b6101208b016101008c016147e0565b60208b01356111516101008d0160e08e016147fb565b8b60405161116799989796959493929190614818565b60405180910390a35b919050565b60005a604080518b3580825262ffffff6020808f0135600881901c929092169084015293945092917fb04e63db38c49950639fa09d29872f21f5d49d614f3a969d8adf3d4b52e41a62910160405180910390a16111d68a8a8a8a8a8a612dfa565b6003546000906002906111f49060ff808216916101009004166148c0565b6111fe9190614908565b6112099060016148c0565b60ff169050878114611277576040517f660bd4ba00000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f77726f6e67206e756d626572206f66207369676e6174757265730000000000006044820152606401610b0d565b878614611306576040517f660bd4ba00000000000000000000000000000000000000000000000000000000815260206004820152602860248201527f7265706f727420727320616e64207373206d757374206265206f66206571756160448201527f6c206c656e6774680000000000000000000000000000000000000000000000006064820152608401610b0d565b3360009081526004602090815260408083208151808301909252805460ff808216845292939192918401916101009091041660028111156113495761134961492a565b600281111561135a5761135a61492a565b90525090506002816020015160028111156113775761137761492a565b141580156113c057506006816000015160ff168154811061139a5761139a61460a565b60009182526020909120015473ffffffffffffffffffffffffffffffffffffffff163314155b15611427576040517f660bd4ba00000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f756e617574686f72697a6564207472616e736d697474657200000000000000006044820152606401610b0d565b5050505061143361396c565b6000808a8a604051611446929190614959565b60405190819003812061145d918e90602001614969565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181528282528051602091820120838301909252600080845290830152915060005b898110156117675760006001848984602081106114c6576114c661460a565b6114d391901a601b6148c0565b8e8e868181106114e5576114e5
61460a565b905060200201358d8d878181106114fe576114fe61460a565b905060200201356040516000815260200160405260405161153b949392919093845260ff9290921660208401526040830152606082015260800190565b6020604051602081039080840390855afa15801561155d573d6000803e3d6000fd5b5050604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081015173ffffffffffffffffffffffffffffffffffffffff811660009081526004602090815290849020838501909452835460ff808216855292965092945084019161010090041660028111156115dd576115dd61492a565b60028111156115ee576115ee61492a565b905250925060018360200151600281111561160b5761160b61492a565b14611672576040517f660bd4ba00000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f61646472657373206e6f7420617574686f72697a656420746f207369676e00006044820152606401610b0d565b8251600090879060ff16601f811061168c5761168c61460a565b602002015173ffffffffffffffffffffffffffffffffffffffff161461170e576040517f660bd4ba00000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f6e6f6e2d756e69717565207369676e61747572650000000000000000000000006044820152606401610b0d565b8086846000015160ff16601f81106117285761172861460a565b73ffffffffffffffffffffffffffffffffffffffff90921660209290920201526117536001866148c0565b9450508061176090614639565b90506114a7565b505050611778833383858e8e612eb1565b5050505050505050505050565b60007f00000000000000000000000000000000000000000000000000000000000000006040517f10fc49c100000000000000000000000000000000000000000000000000000000815267ffffffffffffffff8816600482015263ffffffff8516602482015273ffffffffffffffffffffffffffffffffffffffff91909116906310fc49c19060440160006040518083038186803b15801561182557600080fd5b505afa158015611839573d6000803e3d6000fd5b5050505066038d7ea4c6800082111561187e576040517f8129bbcd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000611888610841565b905060006118cb87878080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061023c925050
50565b90506118d9858583856130b0565b98975050505050505050565b6060600c80546118f4906143da565b905060000361192f576040517f4f42be3d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600c8054610ea8906143da565b855185518560ff16601f8311156119af576040517f89a6198900000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f746f6f206d616e79207369676e657273000000000000000000000000000000006044820152606401610b0d565b80600003611a19576040517f89a6198900000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f66206d75737420626520706f73697469766500000000000000000000000000006044820152606401610b0d565b818314611aa7576040517f89a61989000000000000000000000000000000000000000000000000000000008152602060048201526024808201527f6f7261636c6520616464726573736573206f7574206f6620726567697374726160448201527f74696f6e000000000000000000000000000000000000000000000000000000006064820152608401610b0d565b611ab281600361497d565b8311611b1a576040517f89a6198900000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f6661756c74792d6f7261636c65206620746f6f206869676800000000000000006044820152606401610b0d565b611b22612725565b6040805160c0810182528a8152602081018a905260ff89169181018290526060810188905267ffffffffffffffff8716608082015260a0810186905290611b69908861321d565b60055415611d1e57600554600090611b8390600190614994565b9050600060058281548110611b9a57611b9a61460a565b60009182526020822001546006805473ffffffffffffffffffffffffffffffffffffffff90921693509084908110611bd457611bd461460a565b600091825260208083209091015473ffffffffffffffffffffffffffffffffffffffff85811684526004909252604080842080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000090811690915592909116808452922080549091169055600580549192509080611c5457611c546149a7565b60008281526020902081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90810180547fffffffffffffffffffffffff000000000000000000000000000000000000000016905501905560068054
80611cbd57611cbd6149a7565b60008281526020902081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90810180547fffffffffffffffffffffffff000000000000000000000000000000000000000016905501905550611b69915050565b60005b8151518110156122d557815180516000919083908110611d4357611d4361460a565b602002602001015173ffffffffffffffffffffffffffffffffffffffff1603611dc8576040517f89a6198900000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f7369676e6572206d757374206e6f7420626520656d70747900000000000000006044820152606401610b0d565b600073ffffffffffffffffffffffffffffffffffffffff1682602001518281518110611df657611df661460a565b602002602001015173ffffffffffffffffffffffffffffffffffffffff1603611e7b576040517f89a6198900000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f7472616e736d6974746572206d757374206e6f7420626520656d7074790000006044820152606401610b0d565b60006004600084600001518481518110611e9757611e9761460a565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff16825281019190915260400160002054610100900460ff166002811115611ee157611ee161492a565b14611f48576040517f89a6198900000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f7265706561746564207369676e657220616464726573730000000000000000006044820152606401610b0d565b6040805180820190915260ff82168152600160208201528251805160049160009185908110611f7957611f7961460a565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff168252818101929092526040016000208251815460ff9091167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0082168117835592840151919283917fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000161761010083600281111561201a5761201a61492a565b02179055506000915061202a9050565b60046000846020015184815181106120445761204461460a565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff16825281019190915260400160002054610100900460ff16600281111561208e5761208e61492a565b146120f5576040517f89
a6198900000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f7265706561746564207472616e736d69747465722061646472657373000000006044820152606401610b0d565b6040805180820190915260ff8216815260208101600281525060046000846020015184815181106121285761212861460a565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff168252818101929092526040016000208251815460ff9091167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0082168117835592840151919283917fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000016176101008360028111156121c9576121c961492a565b0217905550508251805160059250839081106121e7576121e761460a565b602090810291909101810151825460018101845560009384529282902090920180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff90931692909217909155820151805160069190839081106122635761226361460a565b60209081029190910181015182546001810184556000938452919092200180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909216919091179055806122cd81614639565b915050611d21565b506040810151600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660ff909216919091179055600180547fffffffff00000000ffffffffffffffffffffffffffffffffffffffffffffffff8116780100000000000000000000000000000000000000000000000063ffffffff438116820292909217808555920481169291829160149161238d918491740100000000000000000000000000000000000000009004166149d6565b92506101000a81548163ffffffff021916908363ffffffff1602179055506123ec4630600160149054906101000a900463ffffffff1663ffffffff16856000015186602001518760400151886060015189608001518a60a00151613236565b600281905582518051600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff1661010060ff9093169290920291909117905560015460208501516040808701516060880151608089015160a08a015193517f1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e05986124a3988b98919774010000000000000000
00000000000000000000000090920463ffffffff169690959194919391926149f3565b60405180910390a15050505050505050505050565b604080516101208101825260085463ffffffff80821683526401000000008204811660208401526801000000000000000082048116838501526c0100000000000000000000000080830482166060850152700100000000000000000000000000000000830468ffffffffffffffffff166080850152790100000000000000000000000000000000000000000000000000830464ffffffffff1660a0808601919091527e0100000000000000000000000000000000000000000000000000000000000090930461ffff1660c08501526009547bffffffffffffffffffffffffffffffffffffffffffffffffffffffff811660e08601527c01000000000000000000000000000000000000000000000000000000009004909116610100840152600b5484517ffeaf968c00000000000000000000000000000000000000000000000000000000815294516000958694859490930473ffffffffffffffffffffffffffffffffffffffff169263feaf968c926004808401938290030181865afa158015612646573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061266a9190614aa3565b50935050925050804261267d9190614994565b836020015163ffffffff1610801561269f57506000836020015163ffffffff16115b156126cd57505060e001517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff16919050565b6000821361270a576040517f43d4cf6600000000000000000000000000000000000000000000000000000000815260048101839052602401610b0d565b5092915050565b612719612725565b612722816132e1565b50565b60005473ffffffffffffffffffffffffffffffffffffffff1633146127a6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610b0d565b565b6127a6612725565b600b546bffffffffffffffffffffffff166000036127ca57565b60006127d4610de2565b80519091506000819003612814576040517f30274b3a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600b546000906128339083906bffffffffffffffffffffffff16614af3565b905060005b828110156128fe5781600a60008684815181106128575761285761460a565b602002602001015173ffffffffffffffff
ffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008282829054906101000a90046bffffffffffffffffffffffff166128bf9190614b1e565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550806128f790614639565b9050612838565b506129098282614b43565b600b80546000906129299084906bffffffffffffffffffffffff166145e5565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550505050565b6040805161016081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c0810182905260e0810182905261010081018290526101208101829052610140810191909152604080516101208101825260085463ffffffff80821683526401000000008204811660208401526801000000000000000082048116938301939093526c0100000000000000000000000081048316606083015268ffffffffffffffffff700100000000000000000000000000000000820416608083015264ffffffffff79010000000000000000000000000000000000000000000000000082041660a083015261ffff7e01000000000000000000000000000000000000000000000000000000000000909104811660c083018190526009547bffffffffffffffffffffffffffffffffffffffffffffffffffffffff811660e08501527c0100000000000000000000000000000000000000000000000000000000900490931661010080840191909152850151919291161115612b17576040517fdada758700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600854600090700100000000000000000000000000000000900468ffffffffffffffffff1690506000612b548560e001513a8488608001516130b0565b9050806bffffffffffffffffffffffff1685606001516bffffffffffffffffffffffff161015612bb0576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600083610100015163ffffffff1642612bc99190614b6b565b905060003087604001518860a001518960c001516001612be99190614b7e565b8a5180516020918201206101008d015160e08e0151604051612c9d98979695948c918c9132910173ffffffffffffffffffffffffffffffffffffffff9a8b168152988a1660208a015267ffffffffffffffff97881660408a015295909616606088015260808701
9390935261ffff9190911660a086015263ffffffff90811660c08601526bffffffffffffffffffffffff9190911660e0850152919091166101008301529091166101208201526101400190565b6040516020818303038152906040528051906020012090506040518061016001604052808281526020013073ffffffffffffffffffffffffffffffffffffffff168152602001846bffffffffffffffffffffffff168152602001886040015173ffffffffffffffffffffffffffffffffffffffff1681526020018860a0015167ffffffffffffffff1681526020018860e0015163ffffffff168152602001886080015168ffffffffffffffffff1681526020018568ffffffffffffffffff168152602001866040015163ffffffff1664ffffffffff168152602001866060015163ffffffff1664ffffffffff1681526020018363ffffffff16815250955085604051602001612dac9190613fdc565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152918152815160209283012060009384526007909252909120555092949350505050565b6000612e0782602061497d565b612e1285602061497d565b612e1e88610144614b6b565b612e289190614b6b565b612e329190614b6b565b612e3d906000614b6b565b9050368114612ea8576040517f660bd4ba00000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f63616c6c64617461206c656e677468206d69736d6174636800000000000000006044820152606401610b0d565b50505050505050565b600080808080612ec386880188614c7a565b84519499509297509095509350915060ff16801580612ee3575084518114155b80612eef575083518114155b80612efb575082518114155b80612f07575081518114155b15612f6e576040517f660bd4ba00000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f4669656c6473206d75737420626520657175616c206c656e67746800000000006044820152606401610b0d565b60005b818110156130a1576000613006888381518110612f9057612f9061460a565b6020026020010151888481518110612faa57612faa61460a565b6020026020010151888581518110612fc457612fc461460a565b6020026020010151888681518110612fde57612fde61460a565b6020026020010151888781518110612ff857612ff861460a565b6020026020010151886133d6565b9050600081600681111561301c5761301c61492a565b1480613039575060018160068111156130375761303761492a565b14
5b15613090578782815181106130505761305061460a565b60209081029190910181015160405133815290917fc708e0440951fd63499c0f7a73819b469ee5dd3ecc356c0ab4eb7f18389009d9910160405180910390a25b5061309a81614639565b9050612f71565b50505050505050505050505050565b600854600090790100000000000000000000000000000000000000000000000000900464ffffffffff1684101561310b57600854790100000000000000000000000000000000000000000000000000900464ffffffffff1693505b600854600090612710906131259063ffffffff168761497d565b61312f9190614d4c565b6131399086614b6b565b60085490915060009087906131729063ffffffff6c010000000000000000000000008204811691680100000000000000009004166149d6565b61317c91906149d6565b63ffffffff16905060006131c66000368080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506136ea92505050565b905060006131e7826131d8858761497d565b6131e29190614b6b565b61382c565b9050600061320368ffffffffffffffffff808916908a16614b1e565b905061320f8183614b1e565b9a9950505050505050505050565b6000613227610de2565b511115610d8e57610d8e6127b0565b6000808a8a8a8a8a8a8a8a8a60405160200161325a99989796959493929190614d60565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101207dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e01000000000000000000000000000000000000000000000000000000000000179150509998505050505050505050565b3373ffffffffffffffffffffffffffffffffffffffff821603613360576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610b0d565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600080848060200190518101906133ed9190614e2c565b905060003a8261012001518361010001516134089190614ef4565b64ffffffffff16613419919061497d565b905060008460ff
166134616000368080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506136ea92505050565b61346b9190614d4c565b9050600061347c6131e28385614b6b565b905060006134893a61382c565b90506000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663330605298e8e868b60e0015168ffffffffffffffffff16896134e89190614b1e565b338d6040518763ffffffff1660e01b815260040161350b96959493929190614f12565b60408051808303816000875af1158015613529573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061354d9190614f8e565b909250905060008260068111156135665761356661492a565b1480613583575060018260068111156135815761358161492a565b145b156136d95760008e8152600760205260408120556135a18185614b1e565b336000908152600a6020526040812080547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff93841617905560e0890151600b805468ffffffffffffffffff9092169390929161360d91859116614b1e565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055508d7f90815c2e624694e8010bffad2bcefaf96af282ef1bc2ebc0042d1b89a585e0468487848b60c0015168ffffffffffffffffff168c60e0015168ffffffffffffffffff16878b61368c9190614b1e565b6136969190614b1e565b6136a09190614b1e565b604080516bffffffffffffffffffffffff9586168152602081019490945291841683830152909216606082015290519081900360800190a25b509c9b505050505050505050505050565b6000466136f681613860565b1561377257606c73ffffffffffffffffffffffffffffffffffffffff1663c6f7de0e6040518163ffffffff1660e01b8152600401602060405180830381865afa158015613747573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061376b9190614fc1565b9392505050565b61377b81613883565b156138235773420000000000000000000000000000000000000f73ffffffffffffffffffffffffffffffffffffffff166349948e0e8460405180608001604052806048815260200161500a604891396040516020016137db929190614fda565b6040516020818303038152906040526040518263ffffffff1660e01b81526004016138069190613ca1565b6020
60405180830381865afa158015613747573d6000803e3d6000fd5b50600092915050565b600061385a6138396124b8565b61384b84670de0b6b3a764000061497d565b6138559190614d4c565b6138ca565b92915050565b600061a4b1821480613874575062066eed82145b8061385a57505062066eee1490565b6000600a82148061389557506101a482145b806138a2575062aa37dc82145b806138ae575061210582145b806138bb575062014a3382145b8061385a57505062014a341490565b60006bffffffffffffffffffffffff821115613968576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f53616665436173743a2076616c756520646f65736e27742066697420696e203960448201527f36206269747300000000000000000000000000000000000000000000000000006064820152608401610b0d565b5090565b604051806103e00160405280601f906020820280368337509192915050565b60008083601f84011261399d57600080fd5b50813567ffffffffffffffff8111156139b557600080fd5b6020830191508360208285010111156139cd57600080fd5b9250929050565b600080602083850312156139e757600080fd5b823567ffffffffffffffff8111156139fe57600080fd5b613a0a8582860161398b565b90969095509350505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051610120810167ffffffffffffffff81118282101715613a6957613a69613a16565b60405290565b604051610160810167ffffffffffffffff81118282101715613a6957613a69613a16565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715613ada57613ada613a16565b604052919050565b63ffffffff8116811461272257600080fd5b803561117081613ae2565b68ffffffffffffffffff8116811461272257600080fd5b803561117081613aff565b64ffffffffff8116811461272257600080fd5b803561117081613b21565b803561ffff8116811461117057600080fd5b80357bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8116811461117057600080fd5b60006101208284031215613b9057600080fd5b613b98613a45565b613ba183613af4565b8152613baf60208401613af4565b6020820152613bc060408401613af4565b6040820152613bd160608401613af4565b6060820152613be260808401613b16565b6080820152613bf360a08401613b
34565b60a0820152613c0460c08401613b3f565b60c0820152613c1560e08401613b51565b60e0820152610100613c28818501613af4565b908201529392505050565b60005b83811015613c4e578181015183820152602001613c36565b50506000910152565b60008151808452613c6f816020860160208601613c33565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b60208152600061376b6020830184613c57565b600082601f830112613cc557600080fd5b813567ffffffffffffffff811115613cdf57613cdf613a16565b613d1060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f84011601613a93565b818152846020838601011115613d2557600080fd5b816020850160208301376000918101602001919091529392505050565b600060208284031215613d5457600080fd5b813567ffffffffffffffff811115613d6b57600080fd5b613d7784828501613cb4565b949350505050565b73ffffffffffffffffffffffffffffffffffffffff8116811461272257600080fd5b803561117081613d7f565b6bffffffffffffffffffffffff8116811461272257600080fd5b803561117081613dac565b60008060408385031215613de457600080fd5b8235613def81613d7f565b91506020830135613dff81613dac565b809150509250929050565b600081518084526020808501945080840160005b83811015613e5057815173ffffffffffffffffffffffffffffffffffffffff1687529582019590820190600101613e1e565b509495945050505050565b60208152600061376b6020830184613e0a565b600060208284031215613e8057600080fd5b5035919050565b600060208284031215613e9957600080fd5b813567ffffffffffffffff811115613eb057600080fd5b8201610160818503121561376b57600080fd5b805182526020810151613eee602084018273ffffffffffffffffffffffffffffffffffffffff169052565b506040810151613f0e60408401826bffffffffffffffffffffffff169052565b506060810151613f36606084018273ffffffffffffffffffffffffffffffffffffffff169052565b506080810151613f52608084018267ffffffffffffffff169052565b5060a0810151613f6a60a084018263ffffffff169052565b5060c0810151613f8760c084018268ffffffffffffffffff169052565b5060e0810151613fa460e084018268ffffffffffffffffff169052565b506101008181015164ffffffffff9081169184019190915261012080830151909116908301526101409081015163ffffffff1691
0152565b610160810161385a8284613ec3565b60008083601f840112613ffd57600080fd5b50813567ffffffffffffffff81111561401557600080fd5b6020830191508360208260051b85010111156139cd57600080fd5b60008060008060008060008060e0898b03121561404c57600080fd5b606089018a81111561405d57600080fd5b8998503567ffffffffffffffff8082111561407757600080fd5b6140838c838d0161398b565b909950975060808b013591508082111561409c57600080fd5b6140a88c838d01613feb565b909750955060a08b01359150808211156140c157600080fd5b506140ce8b828c01613feb565b999c989b50969995989497949560c00135949350505050565b815163ffffffff90811682526020808401518216908301526040808401518216908301526060808401519182169083015261012082019050608083015161413b608084018268ffffffffffffffffff169052565b5060a083015161415460a084018264ffffffffff169052565b5060c083015161416a60c084018261ffff169052565b5060e083015161419a60e08401827bffffffffffffffffffffffffffffffffffffffffffffffffffffffff169052565b506101008381015163ffffffff8116848301525b505092915050565b67ffffffffffffffff8116811461272257600080fd5b8035611170816141b6565b6000806000806000608086880312156141ef57600080fd5b85356141fa816141b6565b9450602086013567ffffffffffffffff81111561421657600080fd5b6142228882890161398b565b909550935050604086013561423681613ae2565b949793965091946060013592915050565b600067ffffffffffffffff82111561426157614261613a16565b5060051b60200190565b600082601f83011261427c57600080fd5b8135602061429161428c83614247565b613a93565b82815260059290921b840181019181810190868411156142b057600080fd5b8286015b848110156142d45780356142c781613d7f565b83529183019183016142b4565b509695505050505050565b803560ff8116811461117057600080fd5b60008060008060008060c0878903121561430957600080fd5b863567ffffffffffffffff8082111561432157600080fd5b61432d8a838b0161426b565b9750602089013591508082111561434357600080fd5b61434f8a838b0161426b565b965061435d60408a016142df565b9550606089013591508082111561437357600080fd5b61437f8a838b01613cb4565b945061438d60808a016141cc565b935060a08901359150808211156143a357600080fd5b506143b089828a01613cb4565b9150509295509295509295565b600060
2082840312156143cf57600080fd5b813561376b81613d7f565b600181811c908216806143ee57607f821691505b602082108103614427577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b601f8211156105ec57600081815260208120601f850160051c810160208610156144545750805b601f850160051c820191505b81811015610a8857828155600101614460565b67ffffffffffffffff83111561448b5761448b613a16565b61449f8361449983546143da565b8361442d565b6000601f8411600181146144f157600085156144bb5750838201355b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600387901b1c1916600186901b178355614587565b6000838152602090207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0861690835b828110156145405786850135825560209485019460019092019101614520565b508682101561457b577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60f88860031b161c19848701351681555b505060018560011b0183555b5050505050565b805161117081613aff565b6000602082840312156145ab57600080fd5b815161376b81613aff565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6bffffffffffffffffffffffff82811682821603908082111561270a5761270a6145b6565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820361466a5761466a6145b6565b5060010190565b6000610160823603121561468457600080fd5b61468c613a6f565b823567ffffffffffffffff8111156146a357600080fd5b6146af36828601613cb4565b825250602083013560208201526146c860408401613da1565b60408201526146d960608401613dc6565b60608201526146ea60808401613b16565b60808201526146fb60a084016141cc565b60a082015261470c60c084016141cc565b60c082015261471d60e08401613af4565b60e0820152610100614730818501613b3f565b908201526101206147428482016141cc565b90820152610140614754848201613da1565b9082015292915050565b60006020828403121561477057600080fd5b813561376b816141b6565b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe184360301811261
47b057600080fd5b83018035915067ffffffffffffffff8211156147cb57600080fd5b6020019150368190038213156139cd57600080fd5b6000602082840312156147f257600080fd5b61376b82613b3f565b60006020828403121561480d57600080fd5b813561376b81613ae2565b73ffffffffffffffffffffffffffffffffffffffff8a8116825267ffffffffffffffff8a166020830152881660408201526102406060820181905281018690526000610260878982850137600083890182015261ffff8716608084015260a0830186905263ffffffff851660c0840152601f88017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016830101905061320f60e0830184613ec3565b60ff818116838216019081111561385a5761385a6145b6565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b600060ff83168061491b5761491b6148d9565b8060ff84160491505092915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b8183823760009101908152919050565b828152606082602083013760800192915050565b808202811582820484141761385a5761385a6145b6565b8181038181111561385a5761385a6145b6565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fd5b63ffffffff81811683821601908082111561270a5761270a6145b6565b600061012063ffffffff808d1684528b6020850152808b16604085015250806060840152614a238184018a613e0a565b90508281036080840152614a378189613e0a565b905060ff871660a084015282810360c0840152614a548187613c57565b905067ffffffffffffffff851660e0840152828103610100840152614a798185613c57565b9c9b505050505050505050505050565b805169ffffffffffffffffffff8116811461117057600080fd5b600080600080600060a08688031215614abb57600080fd5b614ac486614a89565b9450602086015193506040860151925060608601519150614ae760808701614a89565b90509295509295909350565b60006bffffffffffffffffffffffff80841680614b1257614b126148d9565b92169190910492915050565b6bffffffffffffffffffffffff81811683821601908082111561270a5761270a6145b6565b6bffffffffffffffffffffffff8181168382160280821691908281146141ae576141ae6145b6565b8082018082111561385a5761385a6145b6565b67ffffffffffffffff818116838216019080
82111561270a5761270a6145b6565b600082601f830112614bb057600080fd5b81356020614bc061428c83614247565b82815260059290921b84018101918181019086841115614bdf57600080fd5b8286015b848110156142d45780358352918301918301614be3565b600082601f830112614c0b57600080fd5b81356020614c1b61428c83614247565b82815260059290921b84018101918181019086841115614c3a57600080fd5b8286015b848110156142d457803567ffffffffffffffff811115614c5e5760008081fd5b614c6c8986838b0101613cb4565b845250918301918301614c3e565b600080600080600060a08688031215614c9257600080fd5b853567ffffffffffffffff80821115614caa57600080fd5b614cb689838a01614b9f565b96506020880135915080821115614ccc57600080fd5b614cd889838a01614bfa565b95506040880135915080821115614cee57600080fd5b614cfa89838a01614bfa565b94506060880135915080821115614d1057600080fd5b614d1c89838a01614bfa565b93506080880135915080821115614d3257600080fd5b50614d3f88828901614bfa565b9150509295509295909350565b600082614d5b57614d5b6148d9565b500490565b60006101208b835273ffffffffffffffffffffffffffffffffffffffff8b16602084015267ffffffffffffffff808b166040850152816060850152614da78285018b613e0a565b91508382036080850152614dbb828a613e0a565b915060ff881660a085015283820360c0850152614dd88288613c57565b90861660e08501528381036101008501529050614a798185613c57565b805161117081613d7f565b805161117081613dac565b8051611170816141b6565b805161117081613ae2565b805161117081613b21565b60006101608284031215614e3f57600080fd5b614e47613a6f565b82518152614e5760208401614df5565b6020820152614e6860408401614e00565b6040820152614e7960608401614df5565b6060820152614e8a60808401614e0b565b6080820152614e9b60a08401614e16565b60a0820152614eac60c0840161458e565b60c0820152614ebd60e0840161458e565b60e0820152610100614ed0818501614e21565b90820152610120614ee2848201614e21565b90820152610140613c28848201614e16565b64ffffffffff81811683821601908082111561270a5761270a6145b6565b6000610200808352614f268184018a613c57565b90508281036020840152614f3a8189613c57565b6bffffffffffffffffffffffff88811660408601528716606085015273ffffffffffffffffffffffffffffffffffffffff861660808501529150614f8390
5060a0830184613ec3565b979650505050505050565b60008060408385031215614fa157600080fd5b825160078110614fb057600080fd5b6020840151909250613dff81613dac565b600060208284031215614fd357600080fd5b5051919050565b60008351614fec818460208801613c33565b835190830190615000818360208801613c33565b0194935050505056fe307866666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666a164736f6c6343000813000a",
}
var FunctionsCoordinatorABI = FunctionsCoordinatorMetaData.ABI
@@ -1528,6 +1528,137 @@ func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) ParseOwnershipTransfe
return event, nil
}
+type FunctionsCoordinatorRequestBilledIterator struct {
+ Event *FunctionsCoordinatorRequestBilled
+
+ contract *bind.BoundContract
+ event string
+
+ logs chan types.Log
+ sub ethereum.Subscription
+ done bool
+ fail error
+}
+
+func (it *FunctionsCoordinatorRequestBilledIterator) Next() bool {
+
+ if it.fail != nil {
+ return false
+ }
+
+ if it.done {
+ select {
+ case log := <-it.logs:
+ it.Event = new(FunctionsCoordinatorRequestBilled)
+ if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+ it.fail = err
+ return false
+ }
+ it.Event.Raw = log
+ return true
+
+ default:
+ return false
+ }
+ }
+
+ select {
+ case log := <-it.logs:
+ it.Event = new(FunctionsCoordinatorRequestBilled)
+ if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+ it.fail = err
+ return false
+ }
+ it.Event.Raw = log
+ return true
+
+ case err := <-it.sub.Err():
+ it.done = true
+ it.fail = err
+ return it.Next()
+ }
+}
+
+func (it *FunctionsCoordinatorRequestBilledIterator) Error() error {
+ return it.fail
+}
+
+func (it *FunctionsCoordinatorRequestBilledIterator) Close() error {
+ it.sub.Unsubscribe()
+ return nil
+}
+
+type FunctionsCoordinatorRequestBilled struct {
+ RequestId [32]byte
+ JuelsPerGas *big.Int
+ L1FeeShareWei *big.Int
+ CallbackCostJuels *big.Int
+ TotalCostJuels *big.Int
+ Raw types.Log
+}
+
+func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) FilterRequestBilled(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsCoordinatorRequestBilledIterator, error) {
+
+ var requestIdRule []interface{}
+ for _, requestIdItem := range requestId {
+ requestIdRule = append(requestIdRule, requestIdItem)
+ }
+
+ logs, sub, err := _FunctionsCoordinator.contract.FilterLogs(opts, "RequestBilled", requestIdRule)
+ if err != nil {
+ return nil, err
+ }
+ return &FunctionsCoordinatorRequestBilledIterator{contract: _FunctionsCoordinator.contract, event: "RequestBilled", logs: logs, sub: sub}, nil
+}
+
+func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) WatchRequestBilled(opts *bind.WatchOpts, sink chan<- *FunctionsCoordinatorRequestBilled, requestId [][32]byte) (event.Subscription, error) {
+
+ var requestIdRule []interface{}
+ for _, requestIdItem := range requestId {
+ requestIdRule = append(requestIdRule, requestIdItem)
+ }
+
+ logs, sub, err := _FunctionsCoordinator.contract.WatchLogs(opts, "RequestBilled", requestIdRule)
+ if err != nil {
+ return nil, err
+ }
+ return event.NewSubscription(func(quit <-chan struct{}) error {
+ defer sub.Unsubscribe()
+ for {
+ select {
+ case log := <-logs:
+
+ event := new(FunctionsCoordinatorRequestBilled)
+ if err := _FunctionsCoordinator.contract.UnpackLog(event, "RequestBilled", log); err != nil {
+ return err
+ }
+ event.Raw = log
+
+ select {
+ case sink <- event:
+ case err := <-sub.Err():
+ return err
+ case <-quit:
+ return nil
+ }
+ case err := <-sub.Err():
+ return err
+ case <-quit:
+ return nil
+ }
+ }
+ }), nil
+}
+
+func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) ParseRequestBilled(log types.Log) (*FunctionsCoordinatorRequestBilled, error) {
+ event := new(FunctionsCoordinatorRequestBilled)
+ if err := _FunctionsCoordinator.contract.UnpackLog(event, "RequestBilled", log); err != nil {
+ return nil, err
+ }
+ event.Raw = log
+ return event, nil
+}
+
type FunctionsCoordinatorTransmittedIterator struct {
Event *FunctionsCoordinatorTransmitted
@@ -1673,6 +1804,8 @@ func (_FunctionsCoordinator *FunctionsCoordinator) ParseLog(log types.Log) (gene
return _FunctionsCoordinator.ParseOwnershipTransferRequested(log)
case _FunctionsCoordinator.abi.Events["OwnershipTransferred"].ID:
return _FunctionsCoordinator.ParseOwnershipTransferred(log)
+ case _FunctionsCoordinator.abi.Events["RequestBilled"].ID:
+ return _FunctionsCoordinator.ParseRequestBilled(log)
case _FunctionsCoordinator.abi.Events["Transmitted"].ID:
return _FunctionsCoordinator.ParseTransmitted(log)
@@ -1709,6 +1842,10 @@ func (FunctionsCoordinatorOwnershipTransferred) Topic() common.Hash {
return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0")
}
+func (FunctionsCoordinatorRequestBilled) Topic() common.Hash {
+ return common.HexToHash("0x90815c2e624694e8010bffad2bcefaf96af282ef1bc2ebc0042d1b89a585e046")
+}
+
func (FunctionsCoordinatorTransmitted) Topic() common.Hash {
return common.HexToHash("0xb04e63db38c49950639fa09d29872f21f5d49d614f3a969d8adf3d4b52e41a62")
}
@@ -1810,6 +1947,12 @@ type FunctionsCoordinatorInterface interface {
ParseOwnershipTransferred(log types.Log) (*FunctionsCoordinatorOwnershipTransferred, error)
+ FilterRequestBilled(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsCoordinatorRequestBilledIterator, error)
+
+ WatchRequestBilled(opts *bind.WatchOpts, sink chan<- *FunctionsCoordinatorRequestBilled, requestId [][32]byte) (event.Subscription, error)
+
+ ParseRequestBilled(log types.Log) (*FunctionsCoordinatorRequestBilled, error)
+
FilterTransmitted(opts *bind.FilterOpts) (*FunctionsCoordinatorTransmittedIterator, error)
WatchTransmitted(opts *bind.WatchOpts, sink chan<- *FunctionsCoordinatorTransmitted) (event.Subscription, error)
diff --git a/core/gethwrappers/functions/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/gethwrappers/functions/generation/generated-wrapper-dependency-versions-do-not-edit.txt
index cff49cd07c2..ac1fc4e83d6 100644
--- a/core/gethwrappers/functions/generation/generated-wrapper-dependency-versions-do-not-edit.txt
+++ b/core/gethwrappers/functions/generation/generated-wrapper-dependency-versions-do-not-edit.txt
@@ -4,7 +4,7 @@ functions_allow_list: ../../../contracts/solc/v0.8.19/functions/v1_X/TermsOfServ
functions_billing_registry_events_mock: ../../../contracts/solc/v0.8.6/functions/v0_0_0/FunctionsBillingRegistryEventsMock.abi ../../../contracts/solc/v0.8.6/functions/v0_0_0/FunctionsBillingRegistryEventsMock.bin 50deeb883bd9c3729702be335c0388f9d8553bab4be5e26ecacac496a89e2b77
functions_client: ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsClient.abi ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsClient.bin 2368f537a04489c720a46733f8596c4fc88a31062ecfa966d05f25dd98608aca
functions_client_example: ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsClientExample.abi ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsClientExample.bin abf32e69f268f40e8530eb8d8e96bf310b798a4c0049a58022d9d2fb527b601b
-functions_coordinator: ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsCoordinator.abi ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsCoordinator.bin 9e11effc1922d258d3fc38564b87f4466c56162f33d553ec6d66edcfa55923af
+functions_coordinator: ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsCoordinator.abi ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsCoordinator.bin 97aa7c56d78c703056990eff102279af86b97b11b5855b059e8dd658dc15da8a
functions_load_test_client: ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsLoadTestClient.abi ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsLoadTestClient.bin c8dbbd5ebb34435800d6674700068837c3a252db60046a14b0e61e829db517de
functions_oracle_events_mock: ../../../contracts/solc/v0.8.6/functions/v0_0_0/FunctionsOracleEventsMock.abi ../../../contracts/solc/v0.8.6/functions/v0_0_0/FunctionsOracleEventsMock.bin 3ca70f966f8fe751987f0ccb50bebb6aa5be77e4a9f835d1ae99e0e9bfb7d52c
functions_router: ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsRouter.abi ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsRouter.bin 9dedd3a36043605fd9bedf821e7ec5b4281a5c7ae2e4a1955f37aff8ba13519f
diff --git a/core/gethwrappers/generated/keeper_consumer_wrapper/keeper_consumer_wrapper.go b/core/gethwrappers/generated/keeper_consumer_wrapper/keeper_consumer_wrapper.go
index 8a4ee2c4de8..feb614aa83b 100644
--- a/core/gethwrappers/generated/keeper_consumer_wrapper/keeper_consumer_wrapper.go
+++ b/core/gethwrappers/generated/keeper_consumer_wrapper/keeper_consumer_wrapper.go
@@ -29,8 +29,8 @@ var (
)
var KeeperConsumerMetaData = &bind.MetaData{
- ABI: "[{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"updateInterval\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"counter\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"interval\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastTimeStamp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
- Bin: "0x60a060405234801561001057600080fd5b5060405161036c38038061036c83398101604081905261002f9161003f565b6080524260015560008055610058565b60006020828403121561005157600080fd5b5051919050565b6080516102fa610072600039600060cc01526102fa6000f3fe608060405234801561001057600080fd5b50600436106100675760003560e01c806361bc221a1161005057806361bc221a1461009d5780636e04ff0d146100a6578063947a36fb146100c757600080fd5b80633f3b3b271461006c5780634585e33b14610088575b600080fd5b61007560015481565b6040519081526020015b60405180910390f35b61009b6100963660046101c5565b6100ee565b005b61007560005481565b6100b96100b43660046101c5565b610103565b60405161007f929190610237565b6100757f000000000000000000000000000000000000000000000000000000000000000081565b6000546100fc9060016102ad565b6000555050565b6000606061010f610157565b6001848481818080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250959a92995091975050505050505050565b32156101c3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f6f6e6c7920666f722073696d756c61746564206261636b656e64000000000000604482015260640160405180910390fd5b565b600080602083850312156101d857600080fd5b823567ffffffffffffffff808211156101f057600080fd5b818501915085601f83011261020457600080fd5b81358181111561021357600080fd5b86602082850101111561022557600080fd5b60209290920196919550909350505050565b821515815260006020604081840152835180604085015260005b8181101561026d57858101830151858201606001528201610251565b5060006060828601015260607fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f830116850101925050509392505050565b808201808211156102e7577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b9291505056fea164736f6c6343000810000a",
+ ABI: "[{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"updateInterval\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"OnlySimulatedBackend\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"counter\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"interval\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastTimeStamp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
+ Bin: "0x60a060405234801561001057600080fd5b5060405161033838038061033883398101604081905261002f9161003f565b6080524260015560008055610058565b60006020828403121561005157600080fd5b5051919050565b6080516102c6610072600039600060cc01526102c66000f3fe608060405234801561001057600080fd5b50600436106100675760003560e01c806361bc221a1161005057806361bc221a1461009d5780636e04ff0d146100a6578063947a36fb146100c757600080fd5b80633f3b3b271461006c5780634585e33b14610088575b600080fd5b61007560015481565b6040519081526020015b60405180910390f35b61009b610096366004610191565b6100ee565b005b61007560005481565b6100b96100b4366004610191565b610103565b60405161007f929190610203565b6100757f000000000000000000000000000000000000000000000000000000000000000081565b6000546100fc906001610279565b6000555050565b6000606061010f610157565b6001848481818080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250959a92995091975050505050505050565b321561018f576040517fb60ac5db00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b565b600080602083850312156101a457600080fd5b823567ffffffffffffffff808211156101bc57600080fd5b818501915085601f8301126101d057600080fd5b8135818111156101df57600080fd5b8660208285010111156101f157600080fd5b60209290920196919550909350505050565b821515815260006020604081840152835180604085015260005b818110156102395785810183015185820160600152820161021d565b5060006060828601015260607fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f830116850101925050509392505050565b808201808211156102b3577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b9291505056fea164736f6c6343000810000a",
}
var KeeperConsumerABI = KeeperConsumerMetaData.ABI
diff --git a/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt
index 0d0bb388f2f..6482c01cf88 100644
--- a/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt
+++ b/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt
@@ -1,116 +1,107 @@
GETH_VERSION: 1.12.0
-KeeperConsumer: ../../contracts/solc/v0.8.16/KeeperConsumer.abi ../../contracts/solc/v0.8.16/KeeperConsumer.bin 53d7902867ce421641ffa9de63204b89ab9dc157b93f0beb9ac08c6450365a70
-KeeperConsumerPerformance: ../../contracts/solc/v0.8.16/KeeperConsumerPerformance.abi ../../contracts/solc/v0.8.16/KeeperConsumerPerformance.bin eeda39f5d3e1c8ffa0fb6cd1803731b98a4bc262d41833458e3fe8b40933ae90
-PerformDataChecker: ../../contracts/solc/v0.8.16/PerformDataChecker.abi ../../contracts/solc/v0.8.16/PerformDataChecker.bin 48d8309c2117c29a24e1155917ab0b780956b2cd6a8a39ef06ae66a7f6d94f73
-UpkeepCounter: ../../contracts/solc/v0.8.16/UpkeepCounter.abi ../../contracts/solc/v0.8.16/UpkeepCounter.bin 77f000229a501f638dd2dc439859257f632894c728b31e68aea4f6d6c52f1b71
-UpkeepPerformCounterRestrictive: ../../contracts/solc/v0.8.16/UpkeepPerformCounterRestrictive.abi ../../contracts/solc/v0.8.16/UpkeepPerformCounterRestrictive.bin 20955b21acceb58355fa287b29194a73edf5937067ba7140667301017cb2b24c
-VRFv2Consumer: ../../contracts/solc/v0.8.6/VRFv2Consumer.abi ../../contracts/solc/v0.8.6/VRFv2Consumer.bin 12368b3b5e06392440143a13b94c0ea2f79c4c897becc3b060982559e10ace40
-aggregator_v2v3_interface: ../../contracts/solc/v0.8.6/AggregatorV2V3Interface.abi ../../contracts/solc/v0.8.6/AggregatorV2V3Interface.bin 95e8814b408bb05bf21742ef580d98698b7db6a9bac6a35c3de12b23aec4ee28
-aggregator_v3_interface: ../../contracts/solc/v0.8.6/AggregatorV3Interface.abi ../../contracts/solc/v0.8.6/AggregatorV3Interface.bin 351b55d3b0f04af67db6dfb5c92f1c64479400ca1fec77afc20bc0ce65cb49ab
-authorized_forwarder: ../../contracts/solc/v0.8.19/AuthorizedForwarder.abi ../../contracts/solc/v0.8.19/AuthorizedForwarder.bin 8ea76c883d460f8353a45a493f2aebeb5a2d9a7b4619d1bc4fff5fb590bb3e10
-authorized_receiver: ../../contracts/solc/v0.8.19/AuthorizedReceiver.abi ../../contracts/solc/v0.8.19/AuthorizedReceiver.bin 18e8969ba3234b027e1b16c11a783aca58d0ea5c2361010ec597f134b7bf1c4f
-automation_consumer_benchmark: ../../contracts/solc/v0.8.16/AutomationConsumerBenchmark.abi ../../contracts/solc/v0.8.16/AutomationConsumerBenchmark.bin f52c76f1aaed4be541d82d97189d70f5aa027fc9838037dd7a7d21910c8c488e
-automation_forwarder_logic: ../../contracts/solc/v0.8.16/AutomationForwarderLogic.abi ../../contracts/solc/v0.8.16/AutomationForwarderLogic.bin 15ae0c367297955fdab4b552dbb10e1f2be80a8fde0efec4a4d398693e9d72b5
-automation_registrar_wrapper2_1: ../../contracts/solc/v0.8.16/AutomationRegistrar2_1.abi ../../contracts/solc/v0.8.16/AutomationRegistrar2_1.bin eb06d853aab39d3196c593b03e555851cbe8386e0fe54a74c2479f62d14b3c42
-automation_utils_2_1: ../../contracts/solc/v0.8.16/AutomationUtils2_1.abi ../../contracts/solc/v0.8.16/AutomationUtils2_1.bin 331bfa79685aee6ddf63b64c0747abee556c454cae3fb8175edff425b615d8aa
-batch_blockhash_store: ../../contracts/solc/v0.8.6/BatchBlockhashStore.abi ../../contracts/solc/v0.8.6/BatchBlockhashStore.bin 14356c48ef70f66ef74f22f644450dbf3b2a147c1b68deaa7e7d1eb8ffab15db
-batch_vrf_coordinator_v2: ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2.abi ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2.bin d0a54963260d8c1f1bbd984b758285e6027cfb5a7e42701bcb562ab123219332
-batch_vrf_coordinator_v2plus: ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2Plus.abi ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2Plus.bin 7bb76ae241cf1b37b41920830b836cb99f1ad33efd7435ca2398ff6cd2fe5d48
-blockhash_store: ../../contracts/solc/v0.8.6/BlockhashStore.abi ../../contracts/solc/v0.8.6/BlockhashStore.bin 12b0662f1636a341c8863bdec7a20f2ddd97c3a4fd1a7ae353fe316609face4e
-chain_specific_util_helper: ../../contracts/solc/v0.8.6/ChainSpecificUtilHelper.abi ../../contracts/solc/v0.8.6/ChainSpecificUtilHelper.bin 5f10664e31abc768f4a37901cae7a3bef90146180f97303e5a1bde5a08d84595
-consumer_wrapper: ../../contracts/solc/v0.7/Consumer.abi ../../contracts/solc/v0.7/Consumer.bin 894d1cbd920dccbd36d92918c1037c6ded34f66f417ccb18ec3f33c64ef83ec5
-cron_upkeep_factory_wrapper: ../../contracts/solc/v0.8.6/CronUpkeepFactory.abi - dacb0f8cdf54ae9d2781c5e720fc314b32ed5e58eddccff512c75d6067292cd7
-cron_upkeep_wrapper: ../../contracts/solc/v0.8.6/CronUpkeep.abi - 362fcfcf30a6ab3acff83095ea4b2b9056dd5e9dcb94bc5411aae58995d22709
-dummy_protocol_wrapper: ../../contracts/solc/v0.8.16/DummyProtocol.abi ../../contracts/solc/v0.8.16/DummyProtocol.bin 583a448170b13abf7ed64e406e8177d78c9e55ab44efd141eee60de23a71ee3b
-flags_wrapper: ../../contracts/solc/v0.6/Flags.abi ../../contracts/solc/v0.6/Flags.bin 2034d1b562ca37a63068851915e3703980276e8d5f7db6db8a3351a49d69fc4a
-flux_aggregator_wrapper: ../../contracts/solc/v0.6/FluxAggregator.abi ../../contracts/solc/v0.6/FluxAggregator.bin a3b0a6396c4aa3b5ee39b3c4bd45efc89789d4859379a8a92caca3a0496c5794
-functions_billing_registry_events_mock: ../../contracts/solc/v0.8.6/FunctionsBillingRegistryEventsMock.abi ../../contracts/solc/v0.8.6/FunctionsBillingRegistryEventsMock.bin 50deeb883bd9c3729702be335c0388f9d8553bab4be5e26ecacac496a89e2b77
-functions_oracle_events_mock: ../../contracts/solc/v0.8.6/FunctionsOracleEventsMock.abi ../../contracts/solc/v0.8.6/FunctionsOracleEventsMock.bin 3ca70f966f8fe751987f0ccb50bebb6aa5be77e4a9f835d1ae99e0e9bfb7d52c
-gas_wrapper: ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2.abi ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2.bin 4a5dcdac486d18fcd58e3488c15c1710ae76b977556a3f3191bd269a4bc75723
-gas_wrapper_mock: ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock.abi ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock.bin a9b08f18da59125c6fc305855710241f3d35161b8b9f3e3f635a7b1d5c6da9c8
-i_keeper_registry_master_wrapper_2_1: ../../contracts/solc/v0.8.16/IKeeperRegistryMaster.abi ../../contracts/solc/v0.8.16/IKeeperRegistryMaster.bin 6501bb9bcf5048bab2737b00685c6984a24867e234ddf5b60a65904eee9a4ebc
-i_log_automation: ../../contracts/solc/v0.8.16/ILogAutomation.abi ../../contracts/solc/v0.8.16/ILogAutomation.bin 296beccb6af655d6fc3a6e676b244831cce2da6688d3afc4f21f8738ae59e03e
-keeper_consumer_performance_wrapper: ../../contracts/solc/v0.8.16/KeeperConsumerPerformance.abi ../../contracts/solc/v0.8.16/KeeperConsumerPerformance.bin eeda39f5d3e1c8ffa0fb6cd1803731b98a4bc262d41833458e3fe8b40933ae90
-keeper_consumer_wrapper: ../../contracts/solc/v0.8.16/KeeperConsumer.abi ../../contracts/solc/v0.8.16/KeeperConsumer.bin 53d7902867ce421641ffa9de63204b89ab9dc157b93f0beb9ac08c6450365a70
-keeper_registrar_wrapper1_2: ../../contracts/solc/v0.8.6/KeeperRegistrar.abi ../../contracts/solc/v0.8.6/KeeperRegistrar.bin e49b2f8b23da17af1ed2209b8ae0968cc04350554d636711e6c24a3ad3118692
-keeper_registrar_wrapper1_2_mock: ../../contracts/solc/v0.8.6/KeeperRegistrar1_2Mock.abi ../../contracts/solc/v0.8.6/KeeperRegistrar1_2Mock.bin 5b155a7cb3def309fd7525de1d7cd364ebf8491bdc3060eac08ea0ff55ab29bc
-keeper_registrar_wrapper2_0: ../../contracts/solc/v0.8.6/KeeperRegistrar2_0.abi ../../contracts/solc/v0.8.6/KeeperRegistrar2_0.bin 647f125c2f0dafabcdc545cb77b15dc2ec3ea9429357806813179b1fd555c2d2
-keeper_registry_logic1_3: ../../contracts/solc/v0.8.6/KeeperRegistryLogic1_3.abi ../../contracts/solc/v0.8.6/KeeperRegistryLogic1_3.bin 903f8b9c8e25425ca6d0b81b89e339d695a83630bfbfa24a6f3b38869676bc5a
-keeper_registry_logic2_0: ../../contracts/solc/v0.8.6/KeeperRegistryLogic2_0.abi ../../contracts/solc/v0.8.6/KeeperRegistryLogic2_0.bin d69d2bc8e4844293dbc2d45abcddc50b84c88554ecccfa4fa77c0ca45ec80871
-keeper_registry_logic_a_wrapper_2_1: ../../contracts/solc/v0.8.16/KeeperRegistryLogicA2_1.abi ../../contracts/solc/v0.8.16/KeeperRegistryLogicA2_1.bin 77481ab75c9aa86a62a7b2a708599b5ea1a6346ed1c0def6d4826e7ae523f1ee
-keeper_registry_logic_b_wrapper_2_1: ../../contracts/solc/v0.8.16/KeeperRegistryLogicB2_1.abi ../../contracts/solc/v0.8.16/KeeperRegistryLogicB2_1.bin 467d10741a04601b136553a2b1c6ab37f2a65d809366faf03180a22ff26be215
-keeper_registry_wrapper1_1: ../../contracts/solc/v0.7/KeeperRegistry1_1.abi ../../contracts/solc/v0.7/KeeperRegistry1_1.bin 6ce079f2738f015f7374673a2816e8e9787143d00b780ea7652c8aa9ad9e1e20
-keeper_registry_wrapper1_1_mock: ../../contracts/solc/v0.7/KeeperRegistry1_1Mock.abi ../../contracts/solc/v0.7/KeeperRegistry1_1Mock.bin 98ddb3680e86359de3b5d17e648253ba29a84703f087a1b52237824003a8c6df
-keeper_registry_wrapper1_2: ../../contracts/solc/v0.8.6/KeeperRegistry1_2.abi ../../contracts/solc/v0.8.6/KeeperRegistry1_2.bin a40ff877dd7c280f984cbbb2b428e160662b0c295e881d5f778f941c0088ca22
-keeper_registry_wrapper1_3: ../../contracts/solc/v0.8.6/KeeperRegistry1_3.abi ../../contracts/solc/v0.8.6/KeeperRegistry1_3.bin d4dc760b767ae274ee25c4a604ea371e1fa603a7b6421b69efb2088ad9e8abb3
-keeper_registry_wrapper2_0: ../../contracts/solc/v0.8.6/KeeperRegistry2_0.abi ../../contracts/solc/v0.8.6/KeeperRegistry2_0.bin c32dea7d5ef66b7c58ddc84ddf69aa44df1b3ae8601fbc271c95be4ff5853056
-keeper_registry_wrapper_2_1: ../../contracts/solc/v0.8.16/KeeperRegistry2_1.abi ../../contracts/solc/v0.8.16/KeeperRegistry2_1.bin 604e4a0cd980c713929b523b999462a3aa0ed06f96ff563a4c8566cf59c8445b
-keepers_vrf_consumer: ../../contracts/solc/v0.8.6/KeepersVRFConsumer.abi ../../contracts/solc/v0.8.6/KeepersVRFConsumer.bin fa75572e689c9e84705c63e8dbe1b7b8aa1a8fe82d66356c4873d024bb9166e8
-log_emitter: ../../contracts/solc/v0.8.19/LogEmitter.abi ../../contracts/solc/v0.8.19/LogEmitter.bin 244ba13730c036de0b02beef4e3d9c9a96946ce353c27f366baecc7f5be5a6fd
-log_triggered_streams_lookup_wrapper: ../../contracts/solc/v0.8.16/LogTriggeredStreamsLookup.abi ../../contracts/solc/v0.8.16/LogTriggeredStreamsLookup.bin f8da43a927c1a66238a9f4fd5d5dd7e280e361daa0444da1f7f79498ace901e1
-log_upkeep_counter_wrapper: ../../contracts/solc/v0.8.6/LogUpkeepCounter.abi ../../contracts/solc/v0.8.6/LogUpkeepCounter.bin 42426bbb83f96dfbe55fc576d6c65020eaeed690e2289cf99b0c4aa810a5f4ec
-mock_aggregator_proxy: ../../contracts/solc/v0.8.6/MockAggregatorProxy.abi ../../contracts/solc/v0.8.6/MockAggregatorProxy.bin b16c108f3dd384c342ddff5e94da7c0a8d39d1be5e3d8f2cf61ecc7f0e50ff42
-mock_ethlink_aggregator_wrapper: ../../contracts/solc/v0.6/MockETHLINKAggregator.abi ../../contracts/solc/v0.6/MockETHLINKAggregator.bin 1c52c24f797b8482aa12b8251dcea1c072827bd5b3426b822621261944b99ca0
-mock_gas_aggregator_wrapper: ../../contracts/solc/v0.6/MockGASAggregator.abi ../../contracts/solc/v0.6/MockGASAggregator.bin bacbb1ea4dc6beac0db8a13ca5c75e2fd61b903d70feea9b3b1c8b10fe8df4f3
-multiwordconsumer_wrapper: ../../contracts/solc/v0.7/MultiWordConsumer.abi ../../contracts/solc/v0.7/MultiWordConsumer.bin 6e68abdf614e3ed0f5066c1b5f9d7c1199f1e7c5c5251fe8a471344a59afc6ba
+aggregator_v2v3_interface: ../../contracts/solc/v0.8.6/AggregatorV2V3Interface/AggregatorV2V3Interface.abi ../../contracts/solc/v0.8.6/AggregatorV2V3Interface/AggregatorV2V3Interface.bin 95e8814b408bb05bf21742ef580d98698b7db6a9bac6a35c3de12b23aec4ee28
+aggregator_v3_interface: ../../contracts/solc/v0.8.6/AggregatorV2V3Interface/AggregatorV3Interface.abi ../../contracts/solc/v0.8.6/AggregatorV2V3Interface/AggregatorV3Interface.bin 351b55d3b0f04af67db6dfb5c92f1c64479400ca1fec77afc20bc0ce65cb49ab
+authorized_forwarder: ../../contracts/solc/v0.8.19/AuthorizedForwarder/AuthorizedForwarder.abi ../../contracts/solc/v0.8.19/AuthorizedForwarder/AuthorizedForwarder.bin 8ea76c883d460f8353a45a493f2aebeb5a2d9a7b4619d1bc4fff5fb590bb3e10
+authorized_receiver: ../../contracts/solc/v0.8.19/AuthorizedReceiver/AuthorizedReceiver.abi ../../contracts/solc/v0.8.19/AuthorizedReceiver/AuthorizedReceiver.bin 18e8969ba3234b027e1b16c11a783aca58d0ea5c2361010ec597f134b7bf1c4f
+automation_consumer_benchmark: ../../contracts/solc/v0.8.16/AutomationConsumerBenchmark/AutomationConsumerBenchmark.abi ../../contracts/solc/v0.8.16/AutomationConsumerBenchmark/AutomationConsumerBenchmark.bin f52c76f1aaed4be541d82d97189d70f5aa027fc9838037dd7a7d21910c8c488e
+automation_forwarder_logic: ../../contracts/solc/v0.8.16/AutomationForwarderLogic/AutomationForwarderLogic.abi ../../contracts/solc/v0.8.16/AutomationForwarderLogic/AutomationForwarderLogic.bin 15ae0c367297955fdab4b552dbb10e1f2be80a8fde0efec4a4d398693e9d72b5
+automation_registrar_wrapper2_1: ../../contracts/solc/v0.8.16/AutomationRegistrar2_1/AutomationRegistrar2_1.abi ../../contracts/solc/v0.8.16/AutomationRegistrar2_1/AutomationRegistrar2_1.bin eb06d853aab39d3196c593b03e555851cbe8386e0fe54a74c2479f62d14b3c42
+automation_utils_2_1: ../../contracts/solc/v0.8.16/AutomationUtils2_1/AutomationUtils2_1.abi ../../contracts/solc/v0.8.16/AutomationUtils2_1/AutomationUtils2_1.bin 331bfa79685aee6ddf63b64c0747abee556c454cae3fb8175edff425b615d8aa
+batch_blockhash_store: ../../contracts/solc/v0.8.6/BatchBlockhashStore/BatchBlockhashStore.abi ../../contracts/solc/v0.8.6/BatchBlockhashStore/BatchBlockhashStore.bin 14356c48ef70f66ef74f22f644450dbf3b2a147c1b68deaa7e7d1eb8ffab15db
+batch_vrf_coordinator_v2: ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2/BatchVRFCoordinatorV2.abi ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2/BatchVRFCoordinatorV2.bin d0a54963260d8c1f1bbd984b758285e6027cfb5a7e42701bcb562ab123219332
+batch_vrf_coordinator_v2plus: ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2Plus/BatchVRFCoordinatorV2Plus.abi ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2Plus/BatchVRFCoordinatorV2Plus.bin 7bb76ae241cf1b37b41920830b836cb99f1ad33efd7435ca2398ff6cd2fe5d48
+blockhash_store: ../../contracts/solc/v0.8.6/BlockhashStore/BlockhashStore.abi ../../contracts/solc/v0.8.6/BlockhashStore/BlockhashStore.bin 12b0662f1636a341c8863bdec7a20f2ddd97c3a4fd1a7ae353fe316609face4e
+chain_specific_util_helper: ../../contracts/solc/v0.8.6/ChainSpecificUtilHelper/ChainSpecificUtilHelper.abi ../../contracts/solc/v0.8.6/ChainSpecificUtilHelper/ChainSpecificUtilHelper.bin 5f10664e31abc768f4a37901cae7a3bef90146180f97303e5a1bde5a08d84595
+consumer_wrapper: ../../contracts/solc/v0.7/Consumer/Consumer.abi ../../contracts/solc/v0.7/Consumer/Consumer.bin 894d1cbd920dccbd36d92918c1037c6ded34f66f417ccb18ec3f33c64ef83ec5
+cron_upkeep_factory_wrapper: ../../contracts/solc/v0.8.6/CronUpkeepFactory/CronUpkeepFactory.abi - dacb0f8cdf54ae9d2781c5e720fc314b32ed5e58eddccff512c75d6067292cd7
+cron_upkeep_wrapper: ../../contracts/solc/v0.8.6/CronUpkeepFactory/CronUpkeep.abi - 362fcfcf30a6ab3acff83095ea4b2b9056dd5e9dcb94bc5411aae58995d22709
+dummy_protocol_wrapper: ../../contracts/solc/v0.8.16/DummyProtocol/DummyProtocol.abi ../../contracts/solc/v0.8.16/DummyProtocol/DummyProtocol.bin 583a448170b13abf7ed64e406e8177d78c9e55ab44efd141eee60de23a71ee3b
+flags_wrapper: ../../contracts/solc/v0.6/Flags/Flags.abi ../../contracts/solc/v0.6/Flags/Flags.bin 2034d1b562ca37a63068851915e3703980276e8d5f7db6db8a3351a49d69fc4a
+flux_aggregator_wrapper: ../../contracts/solc/v0.6/FluxAggregator/FluxAggregator.abi ../../contracts/solc/v0.6/FluxAggregator/FluxAggregator.bin a3b0a6396c4aa3b5ee39b3c4bd45efc89789d4859379a8a92caca3a0496c5794
+gas_wrapper: ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2/KeeperRegistryCheckUpkeepGasUsageWrapper1_2.abi ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2/KeeperRegistryCheckUpkeepGasUsageWrapper1_2.bin 4a5dcdac486d18fcd58e3488c15c1710ae76b977556a3f3191bd269a4bc75723
+gas_wrapper_mock: ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock.abi ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock.bin a9b08f18da59125c6fc305855710241f3d35161b8b9f3e3f635a7b1d5c6da9c8
+i_keeper_registry_master_wrapper_2_1: ../../contracts/solc/v0.8.16/IKeeperRegistryMaster/IKeeperRegistryMaster.abi ../../contracts/solc/v0.8.16/IKeeperRegistryMaster/IKeeperRegistryMaster.bin 6501bb9bcf5048bab2737b00685c6984a24867e234ddf5b60a65904eee9a4ebc
+i_log_automation: ../../contracts/solc/v0.8.16/ILogAutomation/ILogAutomation.abi ../../contracts/solc/v0.8.16/ILogAutomation/ILogAutomation.bin 296beccb6af655d6fc3a6e676b244831cce2da6688d3afc4f21f8738ae59e03e
+keeper_consumer_performance_wrapper: ../../contracts/solc/v0.8.16/KeeperConsumerPerformance/KeeperConsumerPerformance.abi ../../contracts/solc/v0.8.16/KeeperConsumerPerformance/KeeperConsumerPerformance.bin eeda39f5d3e1c8ffa0fb6cd1803731b98a4bc262d41833458e3fe8b40933ae90
+keeper_consumer_wrapper: ../../contracts/solc/v0.8.16/KeeperConsumer/KeeperConsumer.abi ../../contracts/solc/v0.8.16/KeeperConsumer/KeeperConsumer.bin 2c6163b145082fbab74b7343577a9cec8fda8b0da9daccf2a82581b1f5a84b83
+keeper_registrar_wrapper1_2: ../../contracts/solc/v0.8.6/KeeperRegistrar1_2/KeeperRegistrar.abi ../../contracts/solc/v0.8.6/KeeperRegistrar1_2/KeeperRegistrar.bin e49b2f8b23da17af1ed2209b8ae0968cc04350554d636711e6c24a3ad3118692
+keeper_registrar_wrapper1_2_mock: ../../contracts/solc/v0.8.6/KeeperRegistrar1_2Mock/KeeperRegistrar1_2Mock.abi ../../contracts/solc/v0.8.6/KeeperRegistrar1_2Mock/KeeperRegistrar1_2Mock.bin 5b155a7cb3def309fd7525de1d7cd364ebf8491bdc3060eac08ea0ff55ab29bc
+keeper_registrar_wrapper2_0: ../../contracts/solc/v0.8.6/KeeperRegistrar2_0/KeeperRegistrar2_0.abi ../../contracts/solc/v0.8.6/KeeperRegistrar2_0/KeeperRegistrar2_0.bin 647f125c2f0dafabcdc545cb77b15dc2ec3ea9429357806813179b1fd555c2d2
+keeper_registry_logic1_3: ../../contracts/solc/v0.8.6/KeeperRegistryLogic1_3/KeeperRegistryLogic1_3.abi ../../contracts/solc/v0.8.6/KeeperRegistryLogic1_3/KeeperRegistryLogic1_3.bin 903f8b9c8e25425ca6d0b81b89e339d695a83630bfbfa24a6f3b38869676bc5a
+keeper_registry_logic2_0: ../../contracts/solc/v0.8.6/KeeperRegistryLogic2_0/KeeperRegistryLogic2_0.abi ../../contracts/solc/v0.8.6/KeeperRegistryLogic2_0/KeeperRegistryLogic2_0.bin d69d2bc8e4844293dbc2d45abcddc50b84c88554ecccfa4fa77c0ca45ec80871
+keeper_registry_logic_a_wrapper_2_1: ../../contracts/solc/v0.8.16/KeeperRegistryLogicA2_1/KeeperRegistryLogicA2_1.abi ../../contracts/solc/v0.8.16/KeeperRegistryLogicA2_1/KeeperRegistryLogicA2_1.bin 77481ab75c9aa86a62a7b2a708599b5ea1a6346ed1c0def6d4826e7ae523f1ee
+keeper_registry_logic_b_wrapper_2_1: ../../contracts/solc/v0.8.16/KeeperRegistryLogicB2_1/KeeperRegistryLogicB2_1.abi ../../contracts/solc/v0.8.16/KeeperRegistryLogicB2_1/KeeperRegistryLogicB2_1.bin 467d10741a04601b136553a2b1c6ab37f2a65d809366faf03180a22ff26be215
+keeper_registry_wrapper1_1: ../../contracts/solc/v0.7/KeeperRegistry1_1/KeeperRegistry1_1.abi ../../contracts/solc/v0.7/KeeperRegistry1_1/KeeperRegistry1_1.bin 6ce079f2738f015f7374673a2816e8e9787143d00b780ea7652c8aa9ad9e1e20
+keeper_registry_wrapper1_1_mock: ../../contracts/solc/v0.7/KeeperRegistry1_1Mock/KeeperRegistry1_1Mock.abi ../../contracts/solc/v0.7/KeeperRegistry1_1Mock/KeeperRegistry1_1Mock.bin 98ddb3680e86359de3b5d17e648253ba29a84703f087a1b52237824003a8c6df
+keeper_registry_wrapper1_2: ../../contracts/solc/v0.8.6/KeeperRegistry1_2/KeeperRegistry1_2.abi ../../contracts/solc/v0.8.6/KeeperRegistry1_2/KeeperRegistry1_2.bin a40ff877dd7c280f984cbbb2b428e160662b0c295e881d5f778f941c0088ca22
+keeper_registry_wrapper1_3: ../../contracts/solc/v0.8.6/KeeperRegistry1_3/KeeperRegistry1_3.abi ../../contracts/solc/v0.8.6/KeeperRegistry1_3/KeeperRegistry1_3.bin d4dc760b767ae274ee25c4a604ea371e1fa603a7b6421b69efb2088ad9e8abb3
+keeper_registry_wrapper2_0: ../../contracts/solc/v0.8.6/KeeperRegistry2_0/KeeperRegistry2_0.abi ../../contracts/solc/v0.8.6/KeeperRegistry2_0/KeeperRegistry2_0.bin c32dea7d5ef66b7c58ddc84ddf69aa44df1b3ae8601fbc271c95be4ff5853056
+keeper_registry_wrapper_2_1: ../../contracts/solc/v0.8.16/KeeperRegistry2_1/KeeperRegistry2_1.abi ../../contracts/solc/v0.8.16/KeeperRegistry2_1/KeeperRegistry2_1.bin 604e4a0cd980c713929b523b999462a3aa0ed06f96ff563a4c8566cf59c8445b
+keepers_vrf_consumer: ../../contracts/solc/v0.8.6/KeepersVRFConsumer/KeepersVRFConsumer.abi ../../contracts/solc/v0.8.6/KeepersVRFConsumer/KeepersVRFConsumer.bin fa75572e689c9e84705c63e8dbe1b7b8aa1a8fe82d66356c4873d024bb9166e8
+log_emitter: ../../contracts/solc/v0.8.19/LogEmitter/LogEmitter.abi ../../contracts/solc/v0.8.19/LogEmitter/LogEmitter.bin 244ba13730c036de0b02beef4e3d9c9a96946ce353c27f366baecc7f5be5a6fd
+log_triggered_streams_lookup_wrapper: ../../contracts/solc/v0.8.16/LogTriggeredStreamsLookup/LogTriggeredStreamsLookup.abi ../../contracts/solc/v0.8.16/LogTriggeredStreamsLookup/LogTriggeredStreamsLookup.bin f8da43a927c1a66238a9f4fd5d5dd7e280e361daa0444da1f7f79498ace901e1
+log_upkeep_counter_wrapper: ../../contracts/solc/v0.8.6/LogUpkeepCounter/LogUpkeepCounter.abi ../../contracts/solc/v0.8.6/LogUpkeepCounter/LogUpkeepCounter.bin 42426bbb83f96dfbe55fc576d6c65020eaeed690e2289cf99b0c4aa810a5f4ec
+mock_aggregator_proxy: ../../contracts/solc/v0.8.6/MockAggregatorProxy/MockAggregatorProxy.abi ../../contracts/solc/v0.8.6/MockAggregatorProxy/MockAggregatorProxy.bin b16c108f3dd384c342ddff5e94da7c0a8d39d1be5e3d8f2cf61ecc7f0e50ff42
+mock_ethlink_aggregator_wrapper: ../../contracts/solc/v0.6/MockETHLINKAggregator/MockETHLINKAggregator.abi ../../contracts/solc/v0.6/MockETHLINKAggregator/MockETHLINKAggregator.bin 1c52c24f797b8482aa12b8251dcea1c072827bd5b3426b822621261944b99ca0
+mock_gas_aggregator_wrapper: ../../contracts/solc/v0.6/MockGASAggregator/MockGASAggregator.abi ../../contracts/solc/v0.6/MockGASAggregator/MockGASAggregator.bin bacbb1ea4dc6beac0db8a13ca5c75e2fd61b903d70feea9b3b1c8b10fe8df4f3
+multiwordconsumer_wrapper: ../../contracts/solc/v0.7/MultiWordConsumer/MultiWordConsumer.abi ../../contracts/solc/v0.7/MultiWordConsumer/MultiWordConsumer.bin 6e68abdf614e3ed0f5066c1b5f9d7c1199f1e7c5c5251fe8a471344a59afc6ba
offchain_aggregator_wrapper: OffchainAggregator/OffchainAggregator.abi - 5c8d6562e94166d4790f1ee6e4321d359d9f7262e6c5452a712b1f1c896f45cf
-operator_factory: ../../contracts/solc/v0.8.19/OperatorFactory.abi ../../contracts/solc/v0.8.19/OperatorFactory.bin 0fdfacf8879537b854875608dfca41c6221c342174417112acaa67dfcadafddc
-operator_wrapper: ../../contracts/solc/v0.8.19/Operator.abi ../../contracts/solc/v0.8.19/Operator.bin d7abd0e67f30a3a4c9c04c896124391306fa364fcf579fa6df04dbf912b48568
-oracle_wrapper: ../../contracts/solc/v0.6/Oracle.abi ../../contracts/solc/v0.6/Oracle.bin 7af2fbac22a6e8c2847e8e685a5400cac5101d72ddf5365213beb79e4dede43a
-perform_data_checker_wrapper: ../../contracts/solc/v0.8.16/PerformDataChecker.abi ../../contracts/solc/v0.8.16/PerformDataChecker.bin 48d8309c2117c29a24e1155917ab0b780956b2cd6a8a39ef06ae66a7f6d94f73
-solidity_vrf_consumer_interface: ../../contracts/solc/v0.6/VRFConsumer.abi ../../contracts/solc/v0.6/VRFConsumer.bin ecc99378aa798014de9db42b2eb81320778b0663dbe208008dad75ccdc1d4366
-solidity_vrf_consumer_interface_v08: ../../contracts/solc/v0.8.6/VRFConsumer.abi ../../contracts/solc/v0.8.6/VRFConsumer.bin b14f9136b15e3dc9d6154d5700f3ed4cf88ddc4f70f20c3bb57fc46050904c8f
-solidity_vrf_coordinator_interface: ../../contracts/solc/v0.6/VRFCoordinator.abi ../../contracts/solc/v0.6/VRFCoordinator.bin a23d3c395156804788c7f6fbda2994e8f7184304c0f0c9f2c4ddeaf073d346d2
-solidity_vrf_request_id: ../../contracts/solc/v0.6/VRFRequestIDBaseTestHelper.abi ../../contracts/solc/v0.6/VRFRequestIDBaseTestHelper.bin 383b59e861732c1911ddb7b002c6158608496ce889979296527215fd0366b318
-solidity_vrf_request_id_v08: ../../contracts/solc/v0.8.6/VRFRequestIDBaseTestHelper.abi ../../contracts/solc/v0.8.6/VRFRequestIDBaseTestHelper.bin f2559015d6f3e5d285c57b011be9b2300632e93dd6c4524e58202d6200f09edc
-solidity_vrf_v08_verifier_wrapper: ../../contracts/solc/v0.8.6/VRFTestHelper.abi ../../contracts/solc/v0.8.6/VRFTestHelper.bin f37f8b21a81c113085c6137835a2246db6ebda07da455c4f2b5c7ec60c725c3b
-solidity_vrf_verifier_wrapper: ../../contracts/solc/v0.6/VRFTestHelper.abi ../../contracts/solc/v0.6/VRFTestHelper.bin 44c2b67d8d2990ab580453deb29d63508c6147a3dc49908a1db563bef06e6474
-solidity_vrf_wrapper: ../../contracts/solc/v0.6/VRF.abi ../../contracts/solc/v0.6/VRF.bin 04ede5b83c06ba5b76ef99c081c72928007d8a7aaefcf21449a46a07cbd4bfc2
-streams_lookup_compatible_interface: ../../contracts/solc/v0.8.16/StreamsLookupCompatibleInterface.abi ../../contracts/solc/v0.8.16/StreamsLookupCompatibleInterface.bin feb92cc666df21ea04ab9d7a588a513847b01b2f66fc167d06ab28ef2b17e015
-streams_lookup_upkeep_wrapper: ../../contracts/solc/v0.8.16/StreamsLookupUpkeep.abi ../../contracts/solc/v0.8.16/StreamsLookupUpkeep.bin b1a598963cacac51ed4706538d0f142bdc0d94b9a4b13e2d402131cdf05c9bcf
-test_api_consumer_wrapper: ../../contracts/solc/v0.6/TestAPIConsumer.abi ../../contracts/solc/v0.6/TestAPIConsumer.bin ed10893cb18894c18e275302329c955f14ea2de37ee044f84aa1e067ac5ea71e
-trusted_blockhash_store: ../../contracts/solc/v0.8.6/TrustedBlockhashStore.abi ../../contracts/solc/v0.8.6/TrustedBlockhashStore.bin 98cb0dc06c15af5dcd3b53bdfc98e7ed2489edc96a42203294ac2fc0efdda02b
-type_and_version_interface_wrapper: ../../contracts/solc/v0.8.6/TypeAndVersionInterface.abi ../../contracts/solc/v0.8.6/TypeAndVersionInterface.bin bc9c3a6e73e3ebd5b58754df0deeb3b33f4bb404d5709bb904aed51d32f4b45e
-upkeep_counter_wrapper: ../../contracts/solc/v0.8.16/UpkeepCounter.abi ../../contracts/solc/v0.8.16/UpkeepCounter.bin 77f000229a501f638dd2dc439859257f632894c728b31e68aea4f6d6c52f1b71
-upkeep_perform_counter_restrictive_wrapper: ../../contracts/solc/v0.8.16/UpkeepPerformCounterRestrictive.abi ../../contracts/solc/v0.8.16/UpkeepPerformCounterRestrictive.bin 20955b21acceb58355fa287b29194a73edf5937067ba7140667301017cb2b24c
-upkeep_transcoder: ../../contracts/solc/v0.8.6/UpkeepTranscoder.abi ../../contracts/solc/v0.8.6/UpkeepTranscoder.bin 336c92a981597be26508455f81a908a0784a817b129a59686c5b2c4afcba730a
-verifiable_load_log_trigger_upkeep_wrapper: ../../contracts/solc/v0.8.16/VerifiableLoadLogTriggerUpkeep.abi ../../contracts/solc/v0.8.16/VerifiableLoadLogTriggerUpkeep.bin fb674ba44c0e8f3b385cd10b2f7dea5cd07b5f38df08066747e8b1542e152557
-verifiable_load_streams_lookup_upkeep_wrapper: ../../contracts/solc/v0.8.16/VerifiableLoadStreamsLookupUpkeep.abi ../../contracts/solc/v0.8.16/VerifiableLoadStreamsLookupUpkeep.bin 785f68c44bfff070505eaa65e38a1af94046e5f9afc1189bcf2c8cfcd1102d66
-verifiable_load_upkeep_wrapper: ../../contracts/solc/v0.8.16/VerifiableLoadUpkeep.abi ../../contracts/solc/v0.8.16/VerifiableLoadUpkeep.bin a3e02c43756ea91e7ce4b81e48c11648f1d12f6663c236780147e41dfa36ebee
-vrf_consumer_v2: ../../contracts/solc/v0.8.6/VRFConsumerV2.abi ../../contracts/solc/v0.8.6/VRFConsumerV2.bin 9ef258bf8e9f8d880fd229ceb145593d91e24fc89366baa0bf19169c5787d15f
-vrf_consumer_v2_plus_upgradeable_example: ../../contracts/solc/v0.8.6/VRFConsumerV2PlusUpgradeableExample.abi ../../contracts/solc/v0.8.6/VRFConsumerV2PlusUpgradeableExample.bin 3155c611e4d6882e9324b6e975033b31356776ea8b031ca63d63da37589d583b
-vrf_consumer_v2_upgradeable_example: ../../contracts/solc/v0.8.6/VRFConsumerV2UpgradeableExample.abi ../../contracts/solc/v0.8.6/VRFConsumerV2UpgradeableExample.bin f1790a9a2f2a04c730593e483459709cb89e897f8a19d7a3ac0cfe6a97265e6e
-vrf_coordinator_mock: ../../contracts/solc/v0.8.6/VRFCoordinatorMock.abi ../../contracts/solc/v0.8.6/VRFCoordinatorMock.bin 5c495cf8df1f46d8736b9150cdf174cce358cb8352f60f0d5bb9581e23920501
-vrf_coordinator_v2: ../../contracts/solc/v0.8.6/VRFCoordinatorV2.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2.bin 295f35ce282060317dfd01f45959f5a2b05ba26913e422fbd4fb6bf90b107006
-vrf_coordinator_v2_5: ../../contracts/solc/v0.8.6/VRFCoordinatorV2_5.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2_5.bin b0e7c42a30b36d9d31fa9a3f26bad7937152e3dddee5bd8dd3d121390c879ab6
-vrf_coordinator_v2_plus_v2_example: ../../contracts/solc/v0.8.6/VRFCoordinatorV2Plus_V2Example.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2Plus_V2Example.bin 4a5b86701983b1b65f0a8dfa116b3f6d75f8f706fa274004b57bdf5992e4cec3
-vrf_coordinator_v2plus: ../../contracts/solc/v0.8.6/VRFCoordinatorV2Plus.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2Plus.bin e4409bbe361258273458a5c99408b3d7f0cc57a2560dee91c0596cc6d6f738be
-vrf_coordinator_v2plus_interface: ../../contracts/solc/v0.8.6/IVRFCoordinatorV2PlusInternal.abi ../../contracts/solc/v0.8.6/IVRFCoordinatorV2PlusInternal.bin 834a2ce0e83276372a0e1446593fd89798f4cf6dc95d4be0113e99fadf61558b
-vrf_external_sub_owner_example: ../../contracts/solc/v0.8.6/VRFExternalSubOwnerExample.abi ../../contracts/solc/v0.8.6/VRFExternalSubOwnerExample.bin 14f888eb313930b50233a6f01ea31eba0206b7f41a41f6311670da8bb8a26963
-vrf_load_test_external_sub_owner: ../../contracts/solc/v0.8.6/VRFLoadTestExternalSubOwner.abi ../../contracts/solc/v0.8.6/VRFLoadTestExternalSubOwner.bin 2097faa70265e420036cc8a3efb1f1e0836ad2d7323b295b9a26a125dbbe6c7d
-vrf_load_test_ownerless_consumer: ../../contracts/solc/v0.8.6/VRFLoadTestOwnerlessConsumer.abi ../../contracts/solc/v0.8.6/VRFLoadTestOwnerlessConsumer.bin 74f914843cbc70b9c3079c3e1c709382ce415225e8bb40113e7ac018bfcb0f5c
-vrf_load_test_with_metrics: ../../contracts/solc/v0.8.6/VRFV2LoadTestWithMetrics.abi ../../contracts/solc/v0.8.6/VRFV2LoadTestWithMetrics.bin 8ab9de5816fbdf93a2865e2711b85a39a6fc9c413a4b336578c485be1158d430
-vrf_malicious_consumer_v2: ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2.abi ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2.bin 9755fa8ffc7f5f0b337d5d413d77b0c9f6cd6f68c31727d49acdf9d4a51bc522
-vrf_malicious_consumer_v2_plus: ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2Plus.abi ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2Plus.bin e2a72638e11da807b6533d037e7e5aaeed695efd5035777b8e20d2f8973a574c
-vrf_owner: ../../contracts/solc/v0.8.6/VRFOwner.abi ../../contracts/solc/v0.8.6/VRFOwner.bin eccfae5ee295b5850e22f61240c469f79752b8d9a3bac5d64aec7ac8def2f6cb
-vrf_owner_test_consumer: ../../contracts/solc/v0.8.6/VRFV2OwnerTestConsumer.abi ../../contracts/solc/v0.8.6/VRFV2OwnerTestConsumer.bin 0537bbe96c5a8bbd44d0a65fbb7e51f6a9f9e75f4673225845ac1ba33f4e7974
-vrf_ownerless_consumer_example: ../../contracts/solc/v0.8.6/VRFOwnerlessConsumerExample.abi ../../contracts/solc/v0.8.6/VRFOwnerlessConsumerExample.bin 9893b3805863273917fb282eed32274e32aa3d5c2a67a911510133e1218132be
-vrf_single_consumer_example: ../../contracts/solc/v0.8.6/VRFSingleConsumerExample.abi ../../contracts/solc/v0.8.6/VRFSingleConsumerExample.bin 892a5ed35da2e933f7fd7835cd6f7f70ef3aa63a9c03a22c5b1fd026711b0ece
-vrf_v2_consumer_wrapper: ../../contracts/solc/v0.8.6/VRFv2Consumer.abi ../../contracts/solc/v0.8.6/VRFv2Consumer.bin 12368b3b5e06392440143a13b94c0ea2f79c4c897becc3b060982559e10ace40
-vrf_v2plus_load_test_with_metrics: ../../contracts/solc/v0.8.6/VRFV2PlusLoadTestWithMetrics.abi ../../contracts/solc/v0.8.6/VRFV2PlusLoadTestWithMetrics.bin 0a89cb7ed9dfb42f91e559b03dc351ccdbe14d281a7ab71c63bd3f47eeed7711
-vrf_v2plus_single_consumer: ../../contracts/solc/v0.8.6/VRFV2PlusSingleConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusSingleConsumerExample.bin 6226d05afa1664033b182bfbdde11d5dfb1d4c8e3eb0bd0448c8bfb76f5b96e4
-vrf_v2plus_sub_owner: ../../contracts/solc/v0.8.6/VRFV2PlusExternalSubOwnerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusExternalSubOwnerExample.bin 7541f986571b8a5671a256edc27ae9b8df9bcdff45ac3b96e5609bbfcc320e4e
-vrf_v2plus_upgraded_version: ../../contracts/solc/v0.8.6/VRFCoordinatorV2PlusUpgradedVersion.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2PlusUpgradedVersion.bin c0793d86fb6e45342c4424184fe241c16da960c0b4de76816364b933344d0756
-vrfv2_proxy_admin: ../../contracts/solc/v0.8.6/VRFV2ProxyAdmin.abi ../../contracts/solc/v0.8.6/VRFV2ProxyAdmin.bin 402b1103087ffe1aa598854a8f8b38f8cd3de2e3aaa86369e28017a9157f4980
-vrfv2_reverting_example: ../../contracts/solc/v0.8.6/VRFV2RevertingExample.abi ../../contracts/solc/v0.8.6/VRFV2RevertingExample.bin 1ae46f80351d428bd85ba58b9041b2a608a1845300d79a8fed83edf96606de87
-vrfv2_transparent_upgradeable_proxy: ../../contracts/solc/v0.8.6/VRFV2TransparentUpgradeableProxy.abi ../../contracts/solc/v0.8.6/VRFV2TransparentUpgradeableProxy.bin fe1a8e6852fbd06d91f64315c5cede86d340891f5b5cc981fb5b86563f7eac3f
-vrfv2_wrapper: ../../contracts/solc/v0.8.6/VRFV2Wrapper.abi ../../contracts/solc/v0.8.6/VRFV2Wrapper.bin d5e9a982325d2d4f517c4f2bc818795f61555408ef4b38fb59b923d144970e38
-vrfv2_wrapper_consumer_example: ../../contracts/solc/v0.8.6/VRFV2WrapperConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2WrapperConsumerExample.bin 3c5c9f1c501e697a7e77e959b48767e2a0bb1372393fd7686f7aaef3eb794231
-vrfv2_wrapper_interface: ../../contracts/solc/v0.8.6/VRFV2WrapperInterface.abi ../../contracts/solc/v0.8.6/VRFV2WrapperInterface.bin ff8560169de171a68b360b7438d13863682d07040d984fd0fb096b2379421003
-vrfv2plus_client: ../../contracts/solc/v0.8.6/VRFV2PlusClient.abi ../../contracts/solc/v0.8.6/VRFV2PlusClient.bin 3ffbfa4971a7e5f46051a26b1722613f265d89ea1867547ecec58500953a9501
-vrfv2plus_consumer_example: ../../contracts/solc/v0.8.6/VRFV2PlusConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusConsumerExample.bin 2c480a6d7955d33a00690fdd943486d95802e48a03f3cc243df314448e4ddb2c
-vrfv2plus_malicious_migrator: ../../contracts/solc/v0.8.6/VRFV2PlusMaliciousMigrator.abi ../../contracts/solc/v0.8.6/VRFV2PlusMaliciousMigrator.bin 80dbc98be5e42246960c889d29488f978d3db0127e95e9b295352c481d8c9b07
-vrfv2plus_reverting_example: ../../contracts/solc/v0.8.6/VRFV2PlusRevertingExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusRevertingExample.bin 6c9053a94f90b8151964d3311310478b57744fbbd153e8ee742ed570e1e49798
-vrfv2plus_wrapper: ../../contracts/solc/v0.8.6/VRFV2PlusWrapper.abi ../../contracts/solc/v0.8.6/VRFV2PlusWrapper.bin 934bafba386b934f491827e535306726069f4cafef9125079ea88abf0d808877
-vrfv2plus_wrapper_consumer_example: ../../contracts/solc/v0.8.6/VRFV2PlusWrapperConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusWrapperConsumerExample.bin a14c4c6e2299cd963a8f0ed069e61dd135af5aad4c13a94f6ea7e086eced7191
-vrfv2plus_wrapper_load_test_consumer: ../../contracts/solc/v0.8.6/VRFV2PlusWrapperLoadTestConsumer.abi ../../contracts/solc/v0.8.6/VRFV2PlusWrapperLoadTestConsumer.bin 55e3bd534045125fb6579a201ab766185e9b0fac5737b4f37897bb69c9f599fa
+operator_factory: ../../contracts/solc/v0.8.19/OperatorFactory/OperatorFactory.abi ../../contracts/solc/v0.8.19/OperatorFactory/OperatorFactory.bin 0fdfacf8879537b854875608dfca41c6221c342174417112acaa67dfcadafddc
+operator_wrapper: ../../contracts/solc/v0.8.19/Operator/Operator.abi ../../contracts/solc/v0.8.19/Operator/Operator.bin d7abd0e67f30a3a4c9c04c896124391306fa364fcf579fa6df04dbf912b48568
+oracle_wrapper: ../../contracts/solc/v0.6/Oracle/Oracle.abi ../../contracts/solc/v0.6/Oracle/Oracle.bin 7af2fbac22a6e8c2847e8e685a5400cac5101d72ddf5365213beb79e4dede43a
+perform_data_checker_wrapper: ../../contracts/solc/v0.8.16/PerformDataChecker/PerformDataChecker.abi ../../contracts/solc/v0.8.16/PerformDataChecker/PerformDataChecker.bin 48d8309c2117c29a24e1155917ab0b780956b2cd6a8a39ef06ae66a7f6d94f73
+solidity_vrf_consumer_interface: ../../contracts/solc/v0.6/VRFConsumer/VRFConsumer.abi ../../contracts/solc/v0.6/VRFConsumer/VRFConsumer.bin ecc99378aa798014de9db42b2eb81320778b0663dbe208008dad75ccdc1d4366
+solidity_vrf_consumer_interface_v08: ../../contracts/solc/v0.8.6/VRFConsumer/VRFConsumer.abi ../../contracts/solc/v0.8.6/VRFConsumer/VRFConsumer.bin b14f9136b15e3dc9d6154d5700f3ed4cf88ddc4f70f20c3bb57fc46050904c8f
+solidity_vrf_coordinator_interface: ../../contracts/solc/v0.6/VRFCoordinator/VRFCoordinator.abi ../../contracts/solc/v0.6/VRFCoordinator/VRFCoordinator.bin a23d3c395156804788c7f6fbda2994e8f7184304c0f0c9f2c4ddeaf073d346d2
+solidity_vrf_request_id: ../../contracts/solc/v0.6/VRFRequestIDBaseTestHelper/VRFRequestIDBaseTestHelper.abi ../../contracts/solc/v0.6/VRFRequestIDBaseTestHelper/VRFRequestIDBaseTestHelper.bin 383b59e861732c1911ddb7b002c6158608496ce889979296527215fd0366b318
+solidity_vrf_request_id_v08: ../../contracts/solc/v0.8.6/VRFRequestIDBaseTestHelper/VRFRequestIDBaseTestHelper.abi ../../contracts/solc/v0.8.6/VRFRequestIDBaseTestHelper/VRFRequestIDBaseTestHelper.bin f2559015d6f3e5d285c57b011be9b2300632e93dd6c4524e58202d6200f09edc
+solidity_vrf_v08_verifier_wrapper: ../../contracts/solc/v0.8.6/VRFTestHelper/VRFTestHelper.abi ../../contracts/solc/v0.8.6/VRFTestHelper/VRFTestHelper.bin f37f8b21a81c113085c6137835a2246db6ebda07da455c4f2b5c7ec60c725c3b
+solidity_vrf_verifier_wrapper: ../../contracts/solc/v0.6/VRFTestHelper/VRFTestHelper.abi ../../contracts/solc/v0.6/VRFTestHelper/VRFTestHelper.bin 44c2b67d8d2990ab580453deb29d63508c6147a3dc49908a1db563bef06e6474
+solidity_vrf_wrapper: ../../contracts/solc/v0.6/VRF/VRF.abi ../../contracts/solc/v0.6/VRF/VRF.bin 04ede5b83c06ba5b76ef99c081c72928007d8a7aaefcf21449a46a07cbd4bfc2
+streams_lookup_compatible_interface: ../../contracts/solc/v0.8.16/StreamsLookupCompatibleInterface/StreamsLookupCompatibleInterface.abi ../../contracts/solc/v0.8.16/StreamsLookupCompatibleInterface/StreamsLookupCompatibleInterface.bin feb92cc666df21ea04ab9d7a588a513847b01b2f66fc167d06ab28ef2b17e015
+streams_lookup_upkeep_wrapper: ../../contracts/solc/v0.8.16/StreamsLookupUpkeep/StreamsLookupUpkeep.abi ../../contracts/solc/v0.8.16/StreamsLookupUpkeep/StreamsLookupUpkeep.bin b1a598963cacac51ed4706538d0f142bdc0d94b9a4b13e2d402131cdf05c9bcf
+test_api_consumer_wrapper: ../../contracts/solc/v0.6/TestAPIConsumer/TestAPIConsumer.abi ../../contracts/solc/v0.6/TestAPIConsumer/TestAPIConsumer.bin ed10893cb18894c18e275302329c955f14ea2de37ee044f84aa1e067ac5ea71e
+trusted_blockhash_store: ../../contracts/solc/v0.8.6/TrustedBlockhashStore/TrustedBlockhashStore.abi ../../contracts/solc/v0.8.6/TrustedBlockhashStore/TrustedBlockhashStore.bin 98cb0dc06c15af5dcd3b53bdfc98e7ed2489edc96a42203294ac2fc0efdda02b
+type_and_version_interface_wrapper: ../../contracts/solc/v0.8.6/KeeperRegistry1_2/TypeAndVersionInterface.abi ../../contracts/solc/v0.8.6/KeeperRegistry1_2/TypeAndVersionInterface.bin bc9c3a6e73e3ebd5b58754df0deeb3b33f4bb404d5709bb904aed51d32f4b45e
+upkeep_counter_wrapper: ../../contracts/solc/v0.8.16/UpkeepCounter/UpkeepCounter.abi ../../contracts/solc/v0.8.16/UpkeepCounter/UpkeepCounter.bin 77f000229a501f638dd2dc439859257f632894c728b31e68aea4f6d6c52f1b71
+upkeep_perform_counter_restrictive_wrapper: ../../contracts/solc/v0.8.16/UpkeepPerformCounterRestrictive/UpkeepPerformCounterRestrictive.abi ../../contracts/solc/v0.8.16/UpkeepPerformCounterRestrictive/UpkeepPerformCounterRestrictive.bin 20955b21acceb58355fa287b29194a73edf5937067ba7140667301017cb2b24c
+upkeep_transcoder: ../../contracts/solc/v0.8.6/UpkeepTranscoder/UpkeepTranscoder.abi ../../contracts/solc/v0.8.6/UpkeepTranscoder/UpkeepTranscoder.bin 336c92a981597be26508455f81a908a0784a817b129a59686c5b2c4afcba730a
+verifiable_load_log_trigger_upkeep_wrapper: ../../contracts/solc/v0.8.16/VerifiableLoadLogTriggerUpkeep/VerifiableLoadLogTriggerUpkeep.abi ../../contracts/solc/v0.8.16/VerifiableLoadLogTriggerUpkeep/VerifiableLoadLogTriggerUpkeep.bin fb674ba44c0e8f3b385cd10b2f7dea5cd07b5f38df08066747e8b1542e152557
+verifiable_load_streams_lookup_upkeep_wrapper: ../../contracts/solc/v0.8.16/VerifiableLoadStreamsLookupUpkeep/VerifiableLoadStreamsLookupUpkeep.abi ../../contracts/solc/v0.8.16/VerifiableLoadStreamsLookupUpkeep/VerifiableLoadStreamsLookupUpkeep.bin 785f68c44bfff070505eaa65e38a1af94046e5f9afc1189bcf2c8cfcd1102d66
+verifiable_load_upkeep_wrapper: ../../contracts/solc/v0.8.16/VerifiableLoadUpkeep/VerifiableLoadUpkeep.abi ../../contracts/solc/v0.8.16/VerifiableLoadUpkeep/VerifiableLoadUpkeep.bin a3e02c43756ea91e7ce4b81e48c11648f1d12f6663c236780147e41dfa36ebee
+vrf_consumer_v2: ../../contracts/solc/v0.8.6/VRFConsumerV2/VRFConsumerV2.abi ../../contracts/solc/v0.8.6/VRFConsumerV2/VRFConsumerV2.bin 9ef258bf8e9f8d880fd229ceb145593d91e24fc89366baa0bf19169c5787d15f
+vrf_consumer_v2_plus_upgradeable_example: ../../contracts/solc/v0.8.6/VRFConsumerV2PlusUpgradeableExample/VRFConsumerV2PlusUpgradeableExample.abi ../../contracts/solc/v0.8.6/VRFConsumerV2PlusUpgradeableExample/VRFConsumerV2PlusUpgradeableExample.bin 3155c611e4d6882e9324b6e975033b31356776ea8b031ca63d63da37589d583b
+vrf_consumer_v2_upgradeable_example: ../../contracts/solc/v0.8.6/VRFConsumerV2UpgradeableExample/VRFConsumerV2UpgradeableExample.abi ../../contracts/solc/v0.8.6/VRFConsumerV2UpgradeableExample/VRFConsumerV2UpgradeableExample.bin f1790a9a2f2a04c730593e483459709cb89e897f8a19d7a3ac0cfe6a97265e6e
+vrf_coordinator_mock: ../../contracts/solc/v0.8.6/VRFCoordinatorMock/VRFCoordinatorMock.abi ../../contracts/solc/v0.8.6/VRFCoordinatorMock/VRFCoordinatorMock.bin 5c495cf8df1f46d8736b9150cdf174cce358cb8352f60f0d5bb9581e23920501
+vrf_coordinator_v2: ../../contracts/solc/v0.8.6/VRFCoordinatorV2/VRFCoordinatorV2.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2/VRFCoordinatorV2.bin 295f35ce282060317dfd01f45959f5a2b05ba26913e422fbd4fb6bf90b107006
+vrf_coordinator_v2_5: ../../contracts/solc/v0.8.6/VRFCoordinatorV2_5/VRFCoordinatorV2_5.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2_5/VRFCoordinatorV2_5.bin b0e7c42a30b36d9d31fa9a3f26bad7937152e3dddee5bd8dd3d121390c879ab6
+vrf_coordinator_v2_plus_v2_example: ../../contracts/solc/v0.8.6/VRFCoordinatorV2Plus_V2Example/VRFCoordinatorV2Plus_V2Example.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2Plus_V2Example/VRFCoordinatorV2Plus_V2Example.bin 4a5b86701983b1b65f0a8dfa116b3f6d75f8f706fa274004b57bdf5992e4cec3
+vrf_coordinator_v2plus_interface: ../../contracts/solc/v0.8.6/IVRFCoordinatorV2PlusInternal/IVRFCoordinatorV2PlusInternal.abi ../../contracts/solc/v0.8.6/IVRFCoordinatorV2PlusInternal/IVRFCoordinatorV2PlusInternal.bin 834a2ce0e83276372a0e1446593fd89798f4cf6dc95d4be0113e99fadf61558b
+vrf_external_sub_owner_example: ../../contracts/solc/v0.8.6/VRFExternalSubOwnerExample/VRFExternalSubOwnerExample.abi ../../contracts/solc/v0.8.6/VRFExternalSubOwnerExample/VRFExternalSubOwnerExample.bin 14f888eb313930b50233a6f01ea31eba0206b7f41a41f6311670da8bb8a26963
+vrf_load_test_external_sub_owner: ../../contracts/solc/v0.8.6/VRFLoadTestExternalSubOwner/VRFLoadTestExternalSubOwner.abi ../../contracts/solc/v0.8.6/VRFLoadTestExternalSubOwner/VRFLoadTestExternalSubOwner.bin 2097faa70265e420036cc8a3efb1f1e0836ad2d7323b295b9a26a125dbbe6c7d
+vrf_load_test_ownerless_consumer: ../../contracts/solc/v0.8.6/VRFLoadTestOwnerlessConsumer/VRFLoadTestOwnerlessConsumer.abi ../../contracts/solc/v0.8.6/VRFLoadTestOwnerlessConsumer/VRFLoadTestOwnerlessConsumer.bin 74f914843cbc70b9c3079c3e1c709382ce415225e8bb40113e7ac018bfcb0f5c
+vrf_load_test_with_metrics: ../../contracts/solc/v0.8.6/VRFV2LoadTestWithMetrics/VRFV2LoadTestWithMetrics.abi ../../contracts/solc/v0.8.6/VRFV2LoadTestWithMetrics/VRFV2LoadTestWithMetrics.bin 8ab9de5816fbdf93a2865e2711b85a39a6fc9c413a4b336578c485be1158d430
+vrf_malicious_consumer_v2: ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2/VRFMaliciousConsumerV2.abi ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2/VRFMaliciousConsumerV2.bin 9755fa8ffc7f5f0b337d5d413d77b0c9f6cd6f68c31727d49acdf9d4a51bc522
+vrf_malicious_consumer_v2_plus: ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2Plus/VRFMaliciousConsumerV2Plus.abi ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2Plus/VRFMaliciousConsumerV2Plus.bin e2a72638e11da807b6533d037e7e5aaeed695efd5035777b8e20d2f8973a574c
+vrf_owner: ../../contracts/solc/v0.8.6/VRFOwner/VRFOwner.abi ../../contracts/solc/v0.8.6/VRFOwner/VRFOwner.bin eccfae5ee295b5850e22f61240c469f79752b8d9a3bac5d64aec7ac8def2f6cb
+vrf_owner_test_consumer: ../../contracts/solc/v0.8.6/VRFV2OwnerTestConsumer/VRFV2OwnerTestConsumer.abi ../../contracts/solc/v0.8.6/VRFV2OwnerTestConsumer/VRFV2OwnerTestConsumer.bin 0537bbe96c5a8bbd44d0a65fbb7e51f6a9f9e75f4673225845ac1ba33f4e7974
+vrf_ownerless_consumer_example: ../../contracts/solc/v0.8.6/VRFOwnerlessConsumerExample/VRFOwnerlessConsumerExample.abi ../../contracts/solc/v0.8.6/VRFOwnerlessConsumerExample/VRFOwnerlessConsumerExample.bin 9893b3805863273917fb282eed32274e32aa3d5c2a67a911510133e1218132be
+vrf_single_consumer_example: ../../contracts/solc/v0.8.6/VRFSingleConsumerExample/VRFSingleConsumerExample.abi ../../contracts/solc/v0.8.6/VRFSingleConsumerExample/VRFSingleConsumerExample.bin 892a5ed35da2e933f7fd7835cd6f7f70ef3aa63a9c03a22c5b1fd026711b0ece
+vrf_v2_consumer_wrapper: ../../contracts/solc/v0.8.6/VRFv2Consumer/VRFv2Consumer.abi ../../contracts/solc/v0.8.6/VRFv2Consumer/VRFv2Consumer.bin 12368b3b5e06392440143a13b94c0ea2f79c4c897becc3b060982559e10ace40
+vrf_v2plus_load_test_with_metrics: ../../contracts/solc/v0.8.6/VRFV2PlusLoadTestWithMetrics/VRFV2PlusLoadTestWithMetrics.abi ../../contracts/solc/v0.8.6/VRFV2PlusLoadTestWithMetrics/VRFV2PlusLoadTestWithMetrics.bin 0a89cb7ed9dfb42f91e559b03dc351ccdbe14d281a7ab71c63bd3f47eeed7711
+vrf_v2plus_single_consumer: ../../contracts/solc/v0.8.6/VRFV2PlusSingleConsumerExample/VRFV2PlusSingleConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusSingleConsumerExample/VRFV2PlusSingleConsumerExample.bin 6226d05afa1664033b182bfbdde11d5dfb1d4c8e3eb0bd0448c8bfb76f5b96e4
+vrf_v2plus_sub_owner: ../../contracts/solc/v0.8.6/VRFV2PlusExternalSubOwnerExample/VRFV2PlusExternalSubOwnerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusExternalSubOwnerExample/VRFV2PlusExternalSubOwnerExample.bin 7541f986571b8a5671a256edc27ae9b8df9bcdff45ac3b96e5609bbfcc320e4e
+vrf_v2plus_upgraded_version: ../../contracts/solc/v0.8.6/VRFCoordinatorV2PlusUpgradedVersion/VRFCoordinatorV2PlusUpgradedVersion.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2PlusUpgradedVersion/VRFCoordinatorV2PlusUpgradedVersion.bin c0793d86fb6e45342c4424184fe241c16da960c0b4de76816364b933344d0756
+vrfv2_proxy_admin: ../../contracts/solc/v0.8.6/VRFV2ProxyAdmin/VRFV2ProxyAdmin.abi ../../contracts/solc/v0.8.6/VRFV2ProxyAdmin/VRFV2ProxyAdmin.bin 402b1103087ffe1aa598854a8f8b38f8cd3de2e3aaa86369e28017a9157f4980
+vrfv2_reverting_example: ../../contracts/solc/v0.8.6/VRFV2RevertingExample/VRFV2RevertingExample.abi ../../contracts/solc/v0.8.6/VRFV2RevertingExample/VRFV2RevertingExample.bin 1ae46f80351d428bd85ba58b9041b2a608a1845300d79a8fed83edf96606de87
+vrfv2_transparent_upgradeable_proxy: ../../contracts/solc/v0.8.6/VRFV2TransparentUpgradeableProxy/VRFV2TransparentUpgradeableProxy.abi ../../contracts/solc/v0.8.6/VRFV2TransparentUpgradeableProxy/VRFV2TransparentUpgradeableProxy.bin fe1a8e6852fbd06d91f64315c5cede86d340891f5b5cc981fb5b86563f7eac3f
+vrfv2_wrapper: ../../contracts/solc/v0.8.6/VRFV2Wrapper/VRFV2Wrapper.abi ../../contracts/solc/v0.8.6/VRFV2Wrapper/VRFV2Wrapper.bin d5e9a982325d2d4f517c4f2bc818795f61555408ef4b38fb59b923d144970e38
+vrfv2_wrapper_consumer_example: ../../contracts/solc/v0.8.6/VRFV2WrapperConsumerExample/VRFV2WrapperConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2WrapperConsumerExample/VRFV2WrapperConsumerExample.bin 3c5c9f1c501e697a7e77e959b48767e2a0bb1372393fd7686f7aaef3eb794231
+vrfv2_wrapper_interface: ../../contracts/solc/v0.8.6/VRFV2WrapperInterface/VRFV2WrapperInterface.abi ../../contracts/solc/v0.8.6/VRFV2WrapperInterface/VRFV2WrapperInterface.bin ff8560169de171a68b360b7438d13863682d07040d984fd0fb096b2379421003
+vrfv2plus_client: ../../contracts/solc/v0.8.6/VRFV2PlusClient/VRFV2PlusClient.abi ../../contracts/solc/v0.8.6/VRFV2PlusClient/VRFV2PlusClient.bin 3ffbfa4971a7e5f46051a26b1722613f265d89ea1867547ecec58500953a9501
+vrfv2plus_consumer_example: ../../contracts/solc/v0.8.6/VRFV2PlusConsumerExample/VRFV2PlusConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusConsumerExample/VRFV2PlusConsumerExample.bin 2c480a6d7955d33a00690fdd943486d95802e48a03f3cc243df314448e4ddb2c
+vrfv2plus_malicious_migrator: ../../contracts/solc/v0.8.6/VRFV2PlusMaliciousMigrator/VRFV2PlusMaliciousMigrator.abi ../../contracts/solc/v0.8.6/VRFV2PlusMaliciousMigrator/VRFV2PlusMaliciousMigrator.bin 80dbc98be5e42246960c889d29488f978d3db0127e95e9b295352c481d8c9b07
+vrfv2plus_reverting_example: ../../contracts/solc/v0.8.6/VRFV2PlusRevertingExample/VRFV2PlusRevertingExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusRevertingExample/VRFV2PlusRevertingExample.bin 6c9053a94f90b8151964d3311310478b57744fbbd153e8ee742ed570e1e49798
+vrfv2plus_wrapper: ../../contracts/solc/v0.8.6/VRFV2PlusWrapper/VRFV2PlusWrapper.abi ../../contracts/solc/v0.8.6/VRFV2PlusWrapper/VRFV2PlusWrapper.bin 934bafba386b934f491827e535306726069f4cafef9125079ea88abf0d808877
+vrfv2plus_wrapper_consumer_example: ../../contracts/solc/v0.8.6/VRFV2PlusWrapperConsumerExample/VRFV2PlusWrapperConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusWrapperConsumerExample/VRFV2PlusWrapperConsumerExample.bin a14c4c6e2299cd963a8f0ed069e61dd135af5aad4c13a94f6ea7e086eced7191
+vrfv2plus_wrapper_load_test_consumer: ../../contracts/solc/v0.8.6/VRFV2PlusWrapperLoadTestConsumer/VRFV2PlusWrapperLoadTestConsumer.abi ../../contracts/solc/v0.8.6/VRFV2PlusWrapperLoadTestConsumer/VRFV2PlusWrapperLoadTestConsumer.bin 55e3bd534045125fb6579a201ab766185e9b0fac5737b4f37897bb69c9f599fa
diff --git a/core/gethwrappers/go_generate.go b/core/gethwrappers/go_generate.go
index 67090d16c6d..3965c159080 100644
--- a/core/gethwrappers/go_generate.go
+++ b/core/gethwrappers/go_generate.go
@@ -5,139 +5,139 @@ package gethwrappers
// Make sure solidity compiler artifacts are up-to-date. Only output stdout on failure.
//go:generate ./generation/compile_contracts.sh
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/FluxAggregator.abi ../../contracts/solc/v0.6/FluxAggregator.bin FluxAggregator flux_aggregator_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/VRF.abi ../../contracts/solc/v0.6/VRF.bin VRF solidity_vrf_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/VRFTestHelper.abi ../../contracts/solc/v0.6/VRFTestHelper.bin VRFTestHelper solidity_vrf_verifier_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/VRFCoordinator.abi ../../contracts/solc/v0.6/VRFCoordinator.bin VRFCoordinator solidity_vrf_coordinator_interface
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/VRFConsumer.abi ../../contracts/solc/v0.6/VRFConsumer.bin VRFConsumer solidity_vrf_consumer_interface
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/VRFRequestIDBaseTestHelper.abi ../../contracts/solc/v0.6/VRFRequestIDBaseTestHelper.bin VRFRequestIDBaseTestHelper solidity_vrf_request_id
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/Flags.abi ../../contracts/solc/v0.6/Flags.bin Flags flags_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/Oracle.abi ../../contracts/solc/v0.6/Oracle.bin Oracle oracle_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/TestAPIConsumer.abi ../../contracts/solc/v0.6/TestAPIConsumer.bin TestAPIConsumer test_api_consumer_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/MockETHLINKAggregator.abi ../../contracts/solc/v0.6/MockETHLINKAggregator.bin MockETHLINKAggregator mock_ethlink_aggregator_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/MockGASAggregator.abi ../../contracts/solc/v0.6/MockGASAggregator.bin MockGASAggregator mock_gas_aggregator_wrapper
-
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.7/Consumer.abi ../../contracts/solc/v0.7/Consumer.bin Consumer consumer_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.7/MultiWordConsumer.abi ../../contracts/solc/v0.7/MultiWordConsumer.bin MultiWordConsumer multiwordconsumer_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.19/Operator.abi ../../contracts/solc/v0.8.19/Operator.bin Operator operator_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.19/OperatorFactory.abi ../../contracts/solc/v0.8.19/OperatorFactory.bin OperatorFactory operator_factory
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.19/AuthorizedForwarder.abi ../../contracts/solc/v0.8.19/AuthorizedForwarder.bin AuthorizedForwarder authorized_forwarder
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.19/AuthorizedReceiver.abi ../../contracts/solc/v0.8.19/AuthorizedReceiver.bin AuthorizedReceiver authorized_receiver
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/FluxAggregator/FluxAggregator.abi ../../contracts/solc/v0.6/FluxAggregator/FluxAggregator.bin FluxAggregator flux_aggregator_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/VRF/VRF.abi ../../contracts/solc/v0.6/VRF/VRF.bin VRF solidity_vrf_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/VRFTestHelper/VRFTestHelper.abi ../../contracts/solc/v0.6/VRFTestHelper/VRFTestHelper.bin VRFTestHelper solidity_vrf_verifier_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/VRFCoordinator/VRFCoordinator.abi ../../contracts/solc/v0.6/VRFCoordinator/VRFCoordinator.bin VRFCoordinator solidity_vrf_coordinator_interface
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/VRFConsumer/VRFConsumer.abi ../../contracts/solc/v0.6/VRFConsumer/VRFConsumer.bin VRFConsumer solidity_vrf_consumer_interface
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/VRFRequestIDBaseTestHelper/VRFRequestIDBaseTestHelper.abi ../../contracts/solc/v0.6/VRFRequestIDBaseTestHelper/VRFRequestIDBaseTestHelper.bin VRFRequestIDBaseTestHelper solidity_vrf_request_id
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/Flags/Flags.abi ../../contracts/solc/v0.6/Flags/Flags.bin Flags flags_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/Oracle/Oracle.abi ../../contracts/solc/v0.6/Oracle/Oracle.bin Oracle oracle_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/TestAPIConsumer/TestAPIConsumer.abi ../../contracts/solc/v0.6/TestAPIConsumer/TestAPIConsumer.bin TestAPIConsumer test_api_consumer_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/MockETHLINKAggregator/MockETHLINKAggregator.abi ../../contracts/solc/v0.6/MockETHLINKAggregator/MockETHLINKAggregator.bin MockETHLINKAggregator mock_ethlink_aggregator_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/MockGASAggregator/MockGASAggregator.abi ../../contracts/solc/v0.6/MockGASAggregator/MockGASAggregator.bin MockGASAggregator mock_gas_aggregator_wrapper
+
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.7/Consumer/Consumer.abi ../../contracts/solc/v0.7/Consumer/Consumer.bin Consumer consumer_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.7/MultiWordConsumer/MultiWordConsumer.abi ../../contracts/solc/v0.7/MultiWordConsumer/MultiWordConsumer.bin MultiWordConsumer multiwordconsumer_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.19/Operator/Operator.abi ../../contracts/solc/v0.8.19/Operator/Operator.bin Operator operator_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.19/OperatorFactory/OperatorFactory.abi ../../contracts/solc/v0.8.19/OperatorFactory/OperatorFactory.bin OperatorFactory operator_factory
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.19/AuthorizedForwarder/AuthorizedForwarder.abi ../../contracts/solc/v0.8.19/AuthorizedForwarder/AuthorizedForwarder.bin AuthorizedForwarder authorized_forwarder
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.19/AuthorizedReceiver/AuthorizedReceiver.abi ../../contracts/solc/v0.8.19/AuthorizedReceiver/AuthorizedReceiver.bin AuthorizedReceiver authorized_receiver
//go:generate go run ./generation/generate/wrap.go OffchainAggregator/OffchainAggregator.abi - OffchainAggregator offchain_aggregator_wrapper
// Automation
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.7/KeeperRegistry1_1.abi ../../contracts/solc/v0.7/KeeperRegistry1_1.bin KeeperRegistry keeper_registry_wrapper1_1
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.7/KeeperRegistry1_1Mock.abi ../../contracts/solc/v0.7/KeeperRegistry1_1Mock.bin KeeperRegistryMock keeper_registry_wrapper1_1_mock
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.7/UpkeepPerformCounterRestrictive.abi ../../contracts/solc/v0.7/UpkeepPerformCounterRestrictive.bin UpkeepPerformCounterRestrictive upkeep_perform_counter_restrictive_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.7/UpkeepCounter.abi ../../contracts/solc/v0.7/UpkeepCounter.bin UpkeepCounter upkeep_counter_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/CronUpkeepFactory.abi - CronUpkeepFactory cron_upkeep_factory_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/CronUpkeep.abi - CronUpkeep cron_upkeep_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistrar.abi ../../contracts/solc/v0.8.6/KeeperRegistrar.bin KeeperRegistrar keeper_registrar_wrapper1_2
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistrar1_2Mock.abi ../../contracts/solc/v0.8.6/KeeperRegistrar1_2Mock.bin KeeperRegistrarMock keeper_registrar_wrapper1_2_mock
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistry1_2.abi ../../contracts/solc/v0.8.6/KeeperRegistry1_2.bin KeeperRegistry keeper_registry_wrapper1_2
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/TypeAndVersionInterface.abi ../../contracts/solc/v0.8.6/TypeAndVersionInterface.bin TypeAndVersionInterface type_and_version_interface_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2.abi ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2.bin KeeperRegistryCheckUpkeepGasUsageWrapper gas_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock.abi ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock.bin KeeperRegistryCheckUpkeepGasUsageWrapperMock gas_wrapper_mock
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistry1_3.abi ../../contracts/solc/v0.8.6/KeeperRegistry1_3.bin KeeperRegistry keeper_registry_wrapper1_3
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistryLogic1_3.abi ../../contracts/solc/v0.8.6/KeeperRegistryLogic1_3.bin KeeperRegistryLogic keeper_registry_logic1_3
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistrar2_0.abi ../../contracts/solc/v0.8.6/KeeperRegistrar2_0.bin KeeperRegistrar keeper_registrar_wrapper2_0
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistry2_0.abi ../../contracts/solc/v0.8.6/KeeperRegistry2_0.bin KeeperRegistry keeper_registry_wrapper2_0
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistryLogic2_0.abi ../../contracts/solc/v0.8.6/KeeperRegistryLogic2_0.bin KeeperRegistryLogic keeper_registry_logic2_0
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/UpkeepTranscoder.abi ../../contracts/solc/v0.8.6/UpkeepTranscoder.bin UpkeepTranscoder upkeep_transcoder
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/VerifiableLoadUpkeep.abi ../../contracts/solc/v0.8.16/VerifiableLoadUpkeep.bin VerifiableLoadUpkeep verifiable_load_upkeep_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/VerifiableLoadStreamsLookupUpkeep.abi ../../contracts/solc/v0.8.16/VerifiableLoadStreamsLookupUpkeep.bin VerifiableLoadStreamsLookupUpkeep verifiable_load_streams_lookup_upkeep_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/VerifiableLoadLogTriggerUpkeep.abi ../../contracts/solc/v0.8.16/VerifiableLoadLogTriggerUpkeep.bin VerifiableLoadLogTriggerUpkeep verifiable_load_log_trigger_upkeep_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/StreamsLookupUpkeep.abi ../../contracts/solc/v0.8.16/StreamsLookupUpkeep.bin StreamsLookupUpkeep streams_lookup_upkeep_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/StreamsLookupCompatibleInterface.abi ../../contracts/solc/v0.8.16/StreamsLookupCompatibleInterface.bin StreamsLookupCompatibleInterface streams_lookup_compatible_interface
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/AutomationConsumerBenchmark.abi ../../contracts/solc/v0.8.16/AutomationConsumerBenchmark.bin AutomationConsumerBenchmark automation_consumer_benchmark
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/AutomationRegistrar2_1.abi ../../contracts/solc/v0.8.16/AutomationRegistrar2_1.bin AutomationRegistrar automation_registrar_wrapper2_1
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/KeeperRegistry2_1.abi ../../contracts/solc/v0.8.16/KeeperRegistry2_1.bin KeeperRegistry keeper_registry_wrapper_2_1
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/KeeperRegistryLogicA2_1.abi ../../contracts/solc/v0.8.16/KeeperRegistryLogicA2_1.bin KeeperRegistryLogicA keeper_registry_logic_a_wrapper_2_1
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/KeeperRegistryLogicB2_1.abi ../../contracts/solc/v0.8.16/KeeperRegistryLogicB2_1.bin KeeperRegistryLogicB keeper_registry_logic_b_wrapper_2_1
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/IKeeperRegistryMaster.abi ../../contracts/solc/v0.8.16/IKeeperRegistryMaster.bin IKeeperRegistryMaster i_keeper_registry_master_wrapper_2_1
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/ILogAutomation.abi ../../contracts/solc/v0.8.16/ILogAutomation.bin ILogAutomation i_log_automation
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/AutomationUtils2_1.abi ../../contracts/solc/v0.8.16/AutomationUtils2_1.bin AutomationUtils automation_utils_2_1
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/AutomationForwarderLogic.abi ../../contracts/solc/v0.8.16/AutomationForwarderLogic.bin AutomationForwarderLogic automation_forwarder_logic
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/LogUpkeepCounter.abi ../../contracts/solc/v0.8.6/LogUpkeepCounter.bin LogUpkeepCounter log_upkeep_counter_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/LogTriggeredStreamsLookup.abi ../../contracts/solc/v0.8.16/LogTriggeredStreamsLookup.bin LogTriggeredStreamsLookup log_triggered_streams_lookup_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/DummyProtocol.abi ../../contracts/solc/v0.8.16/DummyProtocol.bin DummyProtocol dummy_protocol_wrapper
-
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/KeeperConsumer.abi ../../contracts/solc/v0.8.16/KeeperConsumer.bin KeeperConsumer keeper_consumer_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/KeeperConsumerPerformance.abi ../../contracts/solc/v0.8.16/KeeperConsumerPerformance.bin KeeperConsumerPerformance keeper_consumer_performance_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/PerformDataChecker.abi ../../contracts/solc/v0.8.16/PerformDataChecker.bin PerformDataChecker perform_data_checker_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/UpkeepCounter.abi ../../contracts/solc/v0.8.16/UpkeepCounter.bin UpkeepCounter upkeep_counter_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/UpkeepPerformCounterRestrictive.abi ../../contracts/solc/v0.8.16/UpkeepPerformCounterRestrictive.bin UpkeepPerformCounterRestrictive upkeep_perform_counter_restrictive_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.7/KeeperRegistry1_1/KeeperRegistry1_1.abi ../../contracts/solc/v0.7/KeeperRegistry1_1/KeeperRegistry1_1.bin KeeperRegistry keeper_registry_wrapper1_1
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.7/KeeperRegistry1_1Mock/KeeperRegistry1_1Mock.abi ../../contracts/solc/v0.7/KeeperRegistry1_1Mock/KeeperRegistry1_1Mock.bin KeeperRegistryMock keeper_registry_wrapper1_1_mock
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.7/UpkeepPerformCounterRestrictive/UpkeepPerformCounterRestrictive.abi ../../contracts/solc/v0.7/UpkeepPerformCounterRestrictive/UpkeepPerformCounterRestrictive.bin UpkeepPerformCounterRestrictive upkeep_perform_counter_restrictive_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.7/UpkeepCounter/UpkeepCounter.abi ../../contracts/solc/v0.7/UpkeepCounter/UpkeepCounter.bin UpkeepCounter upkeep_counter_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/CronUpkeepFactory/CronUpkeepFactory.abi - CronUpkeepFactory cron_upkeep_factory_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/CronUpkeepFactory/CronUpkeep.abi - CronUpkeep cron_upkeep_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistrar1_2/KeeperRegistrar.abi ../../contracts/solc/v0.8.6/KeeperRegistrar1_2/KeeperRegistrar.bin KeeperRegistrar keeper_registrar_wrapper1_2
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistrar1_2Mock/KeeperRegistrar1_2Mock.abi ../../contracts/solc/v0.8.6/KeeperRegistrar1_2Mock/KeeperRegistrar1_2Mock.bin KeeperRegistrarMock keeper_registrar_wrapper1_2_mock
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistry1_2/KeeperRegistry1_2.abi ../../contracts/solc/v0.8.6/KeeperRegistry1_2/KeeperRegistry1_2.bin KeeperRegistry keeper_registry_wrapper1_2
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistry1_2/TypeAndVersionInterface.abi ../../contracts/solc/v0.8.6/KeeperRegistry1_2/TypeAndVersionInterface.bin TypeAndVersionInterface type_and_version_interface_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2/KeeperRegistryCheckUpkeepGasUsageWrapper1_2.abi ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2/KeeperRegistryCheckUpkeepGasUsageWrapper1_2.bin KeeperRegistryCheckUpkeepGasUsageWrapper gas_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock.abi ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock.bin KeeperRegistryCheckUpkeepGasUsageWrapperMock gas_wrapper_mock
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistry1_3/KeeperRegistry1_3.abi ../../contracts/solc/v0.8.6/KeeperRegistry1_3/KeeperRegistry1_3.bin KeeperRegistry keeper_registry_wrapper1_3
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistryLogic1_3/KeeperRegistryLogic1_3.abi ../../contracts/solc/v0.8.6/KeeperRegistryLogic1_3/KeeperRegistryLogic1_3.bin KeeperRegistryLogic keeper_registry_logic1_3
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistrar2_0/KeeperRegistrar2_0.abi ../../contracts/solc/v0.8.6/KeeperRegistrar2_0/KeeperRegistrar2_0.bin KeeperRegistrar keeper_registrar_wrapper2_0
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistry2_0/KeeperRegistry2_0.abi ../../contracts/solc/v0.8.6/KeeperRegistry2_0/KeeperRegistry2_0.bin KeeperRegistry keeper_registry_wrapper2_0
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistryLogic2_0/KeeperRegistryLogic2_0.abi ../../contracts/solc/v0.8.6/KeeperRegistryLogic2_0/KeeperRegistryLogic2_0.bin KeeperRegistryLogic keeper_registry_logic2_0
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/UpkeepTranscoder/UpkeepTranscoder.abi ../../contracts/solc/v0.8.6/UpkeepTranscoder/UpkeepTranscoder.bin UpkeepTranscoder upkeep_transcoder
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/VerifiableLoadUpkeep/VerifiableLoadUpkeep.abi ../../contracts/solc/v0.8.16/VerifiableLoadUpkeep/VerifiableLoadUpkeep.bin VerifiableLoadUpkeep verifiable_load_upkeep_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/VerifiableLoadStreamsLookupUpkeep/VerifiableLoadStreamsLookupUpkeep.abi ../../contracts/solc/v0.8.16/VerifiableLoadStreamsLookupUpkeep/VerifiableLoadStreamsLookupUpkeep.bin VerifiableLoadStreamsLookupUpkeep verifiable_load_streams_lookup_upkeep_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/VerifiableLoadLogTriggerUpkeep/VerifiableLoadLogTriggerUpkeep.abi ../../contracts/solc/v0.8.16/VerifiableLoadLogTriggerUpkeep/VerifiableLoadLogTriggerUpkeep.bin VerifiableLoadLogTriggerUpkeep verifiable_load_log_trigger_upkeep_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/StreamsLookupUpkeep/StreamsLookupUpkeep.abi ../../contracts/solc/v0.8.16/StreamsLookupUpkeep/StreamsLookupUpkeep.bin StreamsLookupUpkeep streams_lookup_upkeep_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/StreamsLookupCompatibleInterface/StreamsLookupCompatibleInterface.abi ../../contracts/solc/v0.8.16/StreamsLookupCompatibleInterface/StreamsLookupCompatibleInterface.bin StreamsLookupCompatibleInterface streams_lookup_compatible_interface
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/AutomationConsumerBenchmark/AutomationConsumerBenchmark.abi ../../contracts/solc/v0.8.16/AutomationConsumerBenchmark/AutomationConsumerBenchmark.bin AutomationConsumerBenchmark automation_consumer_benchmark
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/AutomationRegistrar2_1/AutomationRegistrar2_1.abi ../../contracts/solc/v0.8.16/AutomationRegistrar2_1/AutomationRegistrar2_1.bin AutomationRegistrar automation_registrar_wrapper2_1
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/KeeperRegistry2_1/KeeperRegistry2_1.abi ../../contracts/solc/v0.8.16/KeeperRegistry2_1/KeeperRegistry2_1.bin KeeperRegistry keeper_registry_wrapper_2_1
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/KeeperRegistryLogicA2_1/KeeperRegistryLogicA2_1.abi ../../contracts/solc/v0.8.16/KeeperRegistryLogicA2_1/KeeperRegistryLogicA2_1.bin KeeperRegistryLogicA keeper_registry_logic_a_wrapper_2_1
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/KeeperRegistryLogicB2_1/KeeperRegistryLogicB2_1.abi ../../contracts/solc/v0.8.16/KeeperRegistryLogicB2_1/KeeperRegistryLogicB2_1.bin KeeperRegistryLogicB keeper_registry_logic_b_wrapper_2_1
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/IKeeperRegistryMaster/IKeeperRegistryMaster.abi ../../contracts/solc/v0.8.16/IKeeperRegistryMaster/IKeeperRegistryMaster.bin IKeeperRegistryMaster i_keeper_registry_master_wrapper_2_1
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/ILogAutomation/ILogAutomation.abi ../../contracts/solc/v0.8.16/ILogAutomation/ILogAutomation.bin ILogAutomation i_log_automation
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/AutomationUtils2_1/AutomationUtils2_1.abi ../../contracts/solc/v0.8.16/AutomationUtils2_1/AutomationUtils2_1.bin AutomationUtils automation_utils_2_1
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/AutomationForwarderLogic/AutomationForwarderLogic.abi ../../contracts/solc/v0.8.16/AutomationForwarderLogic/AutomationForwarderLogic.bin AutomationForwarderLogic automation_forwarder_logic
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/LogUpkeepCounter/LogUpkeepCounter.abi ../../contracts/solc/v0.8.6/LogUpkeepCounter/LogUpkeepCounter.bin LogUpkeepCounter log_upkeep_counter_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/LogTriggeredStreamsLookup/LogTriggeredStreamsLookup.abi ../../contracts/solc/v0.8.16/LogTriggeredStreamsLookup/LogTriggeredStreamsLookup.bin LogTriggeredStreamsLookup log_triggered_streams_lookup_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/DummyProtocol/DummyProtocol.abi ../../contracts/solc/v0.8.16/DummyProtocol/DummyProtocol.bin DummyProtocol dummy_protocol_wrapper
+
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/KeeperConsumer/KeeperConsumer.abi ../../contracts/solc/v0.8.16/KeeperConsumer/KeeperConsumer.bin KeeperConsumer keeper_consumer_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/KeeperConsumerPerformance/KeeperConsumerPerformance.abi ../../contracts/solc/v0.8.16/KeeperConsumerPerformance/KeeperConsumerPerformance.bin KeeperConsumerPerformance keeper_consumer_performance_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/PerformDataChecker/PerformDataChecker.abi ../../contracts/solc/v0.8.16/PerformDataChecker/PerformDataChecker.bin PerformDataChecker perform_data_checker_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/UpkeepCounter/UpkeepCounter.abi ../../contracts/solc/v0.8.16/UpkeepCounter/UpkeepCounter.bin UpkeepCounter upkeep_counter_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/UpkeepPerformCounterRestrictive/UpkeepPerformCounterRestrictive.abi ../../contracts/solc/v0.8.16/UpkeepPerformCounterRestrictive/UpkeepPerformCounterRestrictive.bin UpkeepPerformCounterRestrictive upkeep_perform_counter_restrictive_wrapper
// v0.8.6 VRFConsumer
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFCoordinatorMock.abi ../../contracts/solc/v0.8.6/VRFCoordinatorMock.bin VRFCoordinatorMock vrf_coordinator_mock
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFConsumer.abi ../../contracts/solc/v0.8.6/VRFConsumer.bin VRFConsumer solidity_vrf_consumer_interface_v08
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFRequestIDBaseTestHelper.abi ../../contracts/solc/v0.8.6/VRFRequestIDBaseTestHelper.bin VRFRequestIDBaseTestHelper solidity_vrf_request_id_v08
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFOwnerlessConsumerExample.abi ../../contracts/solc/v0.8.6/VRFOwnerlessConsumerExample.bin VRFOwnerlessConsumerExample vrf_ownerless_consumer_example
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFLoadTestOwnerlessConsumer.abi ../../contracts/solc/v0.8.6/VRFLoadTestOwnerlessConsumer.bin VRFLoadTestOwnerlessConsumer vrf_load_test_ownerless_consumer
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFLoadTestExternalSubOwner.abi ../../contracts/solc/v0.8.6/VRFLoadTestExternalSubOwner.bin VRFLoadTestExternalSubOwner vrf_load_test_external_sub_owner
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2LoadTestWithMetrics.abi ../../contracts/solc/v0.8.6/VRFV2LoadTestWithMetrics.bin VRFV2LoadTestWithMetrics vrf_load_test_with_metrics
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2OwnerTestConsumer.abi ../../contracts/solc/v0.8.6/VRFV2OwnerTestConsumer.bin VRFV2OwnerTestConsumer vrf_owner_test_consumer
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFv2Consumer.abi ../../contracts/solc/v0.8.6/VRFv2Consumer.bin VRFv2Consumer vrf_v2_consumer_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFCoordinatorMock/VRFCoordinatorMock.abi ../../contracts/solc/v0.8.6/VRFCoordinatorMock/VRFCoordinatorMock.bin VRFCoordinatorMock vrf_coordinator_mock
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFConsumer/VRFConsumer.abi ../../contracts/solc/v0.8.6/VRFConsumer/VRFConsumer.bin VRFConsumer solidity_vrf_consumer_interface_v08
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFRequestIDBaseTestHelper/VRFRequestIDBaseTestHelper.abi ../../contracts/solc/v0.8.6/VRFRequestIDBaseTestHelper/VRFRequestIDBaseTestHelper.bin VRFRequestIDBaseTestHelper solidity_vrf_request_id_v08
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFOwnerlessConsumerExample/VRFOwnerlessConsumerExample.abi ../../contracts/solc/v0.8.6/VRFOwnerlessConsumerExample/VRFOwnerlessConsumerExample.bin VRFOwnerlessConsumerExample vrf_ownerless_consumer_example
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFLoadTestOwnerlessConsumer/VRFLoadTestOwnerlessConsumer.abi ../../contracts/solc/v0.8.6/VRFLoadTestOwnerlessConsumer/VRFLoadTestOwnerlessConsumer.bin VRFLoadTestOwnerlessConsumer vrf_load_test_ownerless_consumer
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFLoadTestExternalSubOwner/VRFLoadTestExternalSubOwner.abi ../../contracts/solc/v0.8.6/VRFLoadTestExternalSubOwner/VRFLoadTestExternalSubOwner.bin VRFLoadTestExternalSubOwner vrf_load_test_external_sub_owner
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2LoadTestWithMetrics/VRFV2LoadTestWithMetrics.abi ../../contracts/solc/v0.8.6/VRFV2LoadTestWithMetrics/VRFV2LoadTestWithMetrics.bin VRFV2LoadTestWithMetrics vrf_load_test_with_metrics
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2OwnerTestConsumer/VRFV2OwnerTestConsumer.abi ../../contracts/solc/v0.8.6/VRFV2OwnerTestConsumer/VRFV2OwnerTestConsumer.bin VRFV2OwnerTestConsumer vrf_owner_test_consumer
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFv2Consumer/VRFv2Consumer.abi ../../contracts/solc/v0.8.6/VRFv2Consumer/VRFv2Consumer.bin VRFv2Consumer vrf_v2_consumer_wrapper
//go:generate go run ./generation/generate_link/wrap_link.go
// VRF V2
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/BlockhashStore.abi ../../contracts/solc/v0.8.6/BlockhashStore.bin BlockhashStore blockhash_store
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/BatchBlockhashStore.abi ../../contracts/solc/v0.8.6/BatchBlockhashStore.bin BatchBlockhashStore batch_blockhash_store
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2.abi ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2.bin BatchVRFCoordinatorV2 batch_vrf_coordinator_v2
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFOwner.abi ../../contracts/solc/v0.8.6/VRFOwner.bin VRFOwner vrf_owner
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFCoordinatorV2.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2.bin VRFCoordinatorV2 vrf_coordinator_v2
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFConsumerV2.abi ../../contracts/solc/v0.8.6/VRFConsumerV2.bin VRFConsumerV2 vrf_consumer_v2
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2.abi ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2.bin VRFMaliciousConsumerV2 vrf_malicious_consumer_v2
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/BlockhashStore/BlockhashStore.abi ../../contracts/solc/v0.8.6/BlockhashStore/BlockhashStore.bin BlockhashStore blockhash_store
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/BatchBlockhashStore/BatchBlockhashStore.abi ../../contracts/solc/v0.8.6/BatchBlockhashStore/BatchBlockhashStore.bin BatchBlockhashStore batch_blockhash_store
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2/BatchVRFCoordinatorV2.abi ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2/BatchVRFCoordinatorV2.bin BatchVRFCoordinatorV2 batch_vrf_coordinator_v2
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFOwner/VRFOwner.abi ../../contracts/solc/v0.8.6/VRFOwner/VRFOwner.bin VRFOwner vrf_owner
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFCoordinatorV2/VRFCoordinatorV2.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2/VRFCoordinatorV2.bin VRFCoordinatorV2 vrf_coordinator_v2
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFConsumerV2/VRFConsumerV2.abi ../../contracts/solc/v0.8.6/VRFConsumerV2/VRFConsumerV2.bin VRFConsumerV2 vrf_consumer_v2
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2/VRFMaliciousConsumerV2.abi ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2/VRFMaliciousConsumerV2.bin VRFMaliciousConsumerV2 vrf_malicious_consumer_v2
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFTestHelper.abi ../../contracts/solc/v0.8.6/VRFTestHelper.bin VRFV08TestHelper solidity_vrf_v08_verifier_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFSingleConsumerExample.abi ../../contracts/solc/v0.8.6/VRFSingleConsumerExample.bin VRFSingleConsumerExample vrf_single_consumer_example
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFTestHelper/VRFTestHelper.abi ../../contracts/solc/v0.8.6/VRFTestHelper/VRFTestHelper.bin VRFV08TestHelper solidity_vrf_v08_verifier_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFSingleConsumerExample/VRFSingleConsumerExample.abi ../../contracts/solc/v0.8.6/VRFSingleConsumerExample/VRFSingleConsumerExample.bin VRFSingleConsumerExample vrf_single_consumer_example
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFExternalSubOwnerExample.abi ../../contracts/solc/v0.8.6/VRFExternalSubOwnerExample.bin VRFExternalSubOwnerExample vrf_external_sub_owner_example
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFExternalSubOwnerExample/VRFExternalSubOwnerExample.abi ../../contracts/solc/v0.8.6/VRFExternalSubOwnerExample/VRFExternalSubOwnerExample.bin VRFExternalSubOwnerExample vrf_external_sub_owner_example
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2RevertingExample.abi ../../contracts/solc/v0.8.6/VRFV2RevertingExample.bin VRFV2RevertingExample vrfv2_reverting_example
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2RevertingExample/VRFV2RevertingExample.abi ../../contracts/solc/v0.8.6/VRFV2RevertingExample/VRFV2RevertingExample.bin VRFV2RevertingExample vrfv2_reverting_example
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFConsumerV2UpgradeableExample.abi ../../contracts/solc/v0.8.6/VRFConsumerV2UpgradeableExample.bin VRFConsumerV2UpgradeableExample vrf_consumer_v2_upgradeable_example
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFConsumerV2UpgradeableExample/VRFConsumerV2UpgradeableExample.abi ../../contracts/solc/v0.8.6/VRFConsumerV2UpgradeableExample/VRFConsumerV2UpgradeableExample.bin VRFConsumerV2UpgradeableExample vrf_consumer_v2_upgradeable_example
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2TransparentUpgradeableProxy.abi ../../contracts/solc/v0.8.6/VRFV2TransparentUpgradeableProxy.bin VRFV2TransparentUpgradeableProxy vrfv2_transparent_upgradeable_proxy
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2ProxyAdmin.abi ../../contracts/solc/v0.8.6/VRFV2ProxyAdmin.bin VRFV2ProxyAdmin vrfv2_proxy_admin
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/ChainSpecificUtilHelper.abi ../../contracts/solc/v0.8.6/ChainSpecificUtilHelper.bin ChainSpecificUtilHelper chain_specific_util_helper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2TransparentUpgradeableProxy/VRFV2TransparentUpgradeableProxy.abi ../../contracts/solc/v0.8.6/VRFV2TransparentUpgradeableProxy/VRFV2TransparentUpgradeableProxy.bin VRFV2TransparentUpgradeableProxy vrfv2_transparent_upgradeable_proxy
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2ProxyAdmin/VRFV2ProxyAdmin.abi ../../contracts/solc/v0.8.6/VRFV2ProxyAdmin/VRFV2ProxyAdmin.bin VRFV2ProxyAdmin vrfv2_proxy_admin
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/ChainSpecificUtilHelper/ChainSpecificUtilHelper.abi ../../contracts/solc/v0.8.6/ChainSpecificUtilHelper/ChainSpecificUtilHelper.bin ChainSpecificUtilHelper chain_specific_util_helper
// VRF V2 Wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2Wrapper.abi ../../contracts/solc/v0.8.6/VRFV2Wrapper.bin VRFV2Wrapper vrfv2_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2WrapperInterface.abi ../../contracts/solc/v0.8.6/VRFV2WrapperInterface.bin VRFV2WrapperInterface vrfv2_wrapper_interface
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2WrapperConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2WrapperConsumerExample.bin VRFV2WrapperConsumerExample vrfv2_wrapper_consumer_example
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2Wrapper/VRFV2Wrapper.abi ../../contracts/solc/v0.8.6/VRFV2Wrapper/VRFV2Wrapper.bin VRFV2Wrapper vrfv2_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2WrapperInterface/VRFV2WrapperInterface.abi ../../contracts/solc/v0.8.6/VRFV2WrapperInterface/VRFV2WrapperInterface.bin VRFV2WrapperInterface vrfv2_wrapper_interface
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2WrapperConsumerExample/VRFV2WrapperConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2WrapperConsumerExample/VRFV2WrapperConsumerExample.bin VRFV2WrapperConsumerExample vrfv2_wrapper_consumer_example
// Keepers X VRF v2
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeepersVRFConsumer.abi ../../contracts/solc/v0.8.6/KeepersVRFConsumer.bin KeepersVRFConsumer keepers_vrf_consumer
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeepersVRFConsumer/KeepersVRFConsumer.abi ../../contracts/solc/v0.8.6/KeepersVRFConsumer/KeepersVRFConsumer.bin KeepersVRFConsumer keepers_vrf_consumer
// VRF V2Plus
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/IVRFCoordinatorV2PlusInternal.abi ../../contracts/solc/v0.8.6/IVRFCoordinatorV2PlusInternal.bin IVRFCoordinatorV2PlusInternal vrf_coordinator_v2plus_interface
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2Plus.abi ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2Plus.bin BatchVRFCoordinatorV2Plus batch_vrf_coordinator_v2plus
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/TrustedBlockhashStore.abi ../../contracts/solc/v0.8.6/TrustedBlockhashStore.bin TrustedBlockhashStore trusted_blockhash_store
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusConsumerExample.bin VRFV2PlusConsumerExample vrfv2plus_consumer_example
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFCoordinatorV2_5.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2_5.bin VRFCoordinatorV2_5 vrf_coordinator_v2_5
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusWrapper.abi ../../contracts/solc/v0.8.6/VRFV2PlusWrapper.bin VRFV2PlusWrapper vrfv2plus_wrapper
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusWrapperConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusWrapperConsumerExample.bin VRFV2PlusWrapperConsumerExample vrfv2plus_wrapper_consumer_example
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2Plus.abi ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2Plus.bin VRFMaliciousConsumerV2Plus vrf_malicious_consumer_v2_plus
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusSingleConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusSingleConsumerExample.bin VRFV2PlusSingleConsumerExample vrf_v2plus_single_consumer
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusExternalSubOwnerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusExternalSubOwnerExample.bin VRFV2PlusExternalSubOwnerExample vrf_v2plus_sub_owner
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusRevertingExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusRevertingExample.bin VRFV2PlusRevertingExample vrfv2plus_reverting_example
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFConsumerV2PlusUpgradeableExample.abi ../../contracts/solc/v0.8.6/VRFConsumerV2PlusUpgradeableExample.bin VRFConsumerV2PlusUpgradeableExample vrf_consumer_v2_plus_upgradeable_example
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusClient.abi ../../contracts/solc/v0.8.6/VRFV2PlusClient.bin VRFV2PlusClient vrfv2plus_client
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFCoordinatorV2Plus_V2Example.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2Plus_V2Example.bin VRFCoordinatorV2Plus_V2Example vrf_coordinator_v2_plus_v2_example
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusMaliciousMigrator.abi ../../contracts/solc/v0.8.6/VRFV2PlusMaliciousMigrator.bin VRFV2PlusMaliciousMigrator vrfv2plus_malicious_migrator
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusLoadTestWithMetrics.abi ../../contracts/solc/v0.8.6/VRFV2PlusLoadTestWithMetrics.bin VRFV2PlusLoadTestWithMetrics vrf_v2plus_load_test_with_metrics
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFCoordinatorV2PlusUpgradedVersion.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2PlusUpgradedVersion.bin VRFCoordinatorV2PlusUpgradedVersion vrf_v2plus_upgraded_version
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusWrapperLoadTestConsumer.abi ../../contracts/solc/v0.8.6/VRFV2PlusWrapperLoadTestConsumer.bin VRFV2PlusWrapperLoadTestConsumer vrfv2plus_wrapper_load_test_consumer
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/IVRFCoordinatorV2PlusInternal/IVRFCoordinatorV2PlusInternal.abi ../../contracts/solc/v0.8.6/IVRFCoordinatorV2PlusInternal/IVRFCoordinatorV2PlusInternal.bin IVRFCoordinatorV2PlusInternal vrf_coordinator_v2plus_interface
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2Plus/BatchVRFCoordinatorV2Plus.abi ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2Plus/BatchVRFCoordinatorV2Plus.bin BatchVRFCoordinatorV2Plus batch_vrf_coordinator_v2plus
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/TrustedBlockhashStore/TrustedBlockhashStore.abi ../../contracts/solc/v0.8.6/TrustedBlockhashStore/TrustedBlockhashStore.bin TrustedBlockhashStore trusted_blockhash_store
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusConsumerExample/VRFV2PlusConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusConsumerExample/VRFV2PlusConsumerExample.bin VRFV2PlusConsumerExample vrfv2plus_consumer_example
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFCoordinatorV2_5/VRFCoordinatorV2_5.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2_5/VRFCoordinatorV2_5.bin VRFCoordinatorV2_5 vrf_coordinator_v2_5
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusWrapper/VRFV2PlusWrapper.abi ../../contracts/solc/v0.8.6/VRFV2PlusWrapper/VRFV2PlusWrapper.bin VRFV2PlusWrapper vrfv2plus_wrapper
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusWrapperConsumerExample/VRFV2PlusWrapperConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusWrapperConsumerExample/VRFV2PlusWrapperConsumerExample.bin VRFV2PlusWrapperConsumerExample vrfv2plus_wrapper_consumer_example
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2Plus/VRFMaliciousConsumerV2Plus.abi ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2Plus/VRFMaliciousConsumerV2Plus.bin VRFMaliciousConsumerV2Plus vrf_malicious_consumer_v2_plus
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusSingleConsumerExample/VRFV2PlusSingleConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusSingleConsumerExample/VRFV2PlusSingleConsumerExample.bin VRFV2PlusSingleConsumerExample vrf_v2plus_single_consumer
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusExternalSubOwnerExample/VRFV2PlusExternalSubOwnerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusExternalSubOwnerExample/VRFV2PlusExternalSubOwnerExample.bin VRFV2PlusExternalSubOwnerExample vrf_v2plus_sub_owner
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusRevertingExample/VRFV2PlusRevertingExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusRevertingExample/VRFV2PlusRevertingExample.bin VRFV2PlusRevertingExample vrfv2plus_reverting_example
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFConsumerV2PlusUpgradeableExample/VRFConsumerV2PlusUpgradeableExample.abi ../../contracts/solc/v0.8.6/VRFConsumerV2PlusUpgradeableExample/VRFConsumerV2PlusUpgradeableExample.bin VRFConsumerV2PlusUpgradeableExample vrf_consumer_v2_plus_upgradeable_example
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusClient/VRFV2PlusClient.abi ../../contracts/solc/v0.8.6/VRFV2PlusClient/VRFV2PlusClient.bin VRFV2PlusClient vrfv2plus_client
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFCoordinatorV2Plus_V2Example/VRFCoordinatorV2Plus_V2Example.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2Plus_V2Example/VRFCoordinatorV2Plus_V2Example.bin VRFCoordinatorV2Plus_V2Example vrf_coordinator_v2_plus_v2_example
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusMaliciousMigrator/VRFV2PlusMaliciousMigrator.abi ../../contracts/solc/v0.8.6/VRFV2PlusMaliciousMigrator/VRFV2PlusMaliciousMigrator.bin VRFV2PlusMaliciousMigrator vrfv2plus_malicious_migrator
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusLoadTestWithMetrics/VRFV2PlusLoadTestWithMetrics.abi ../../contracts/solc/v0.8.6/VRFV2PlusLoadTestWithMetrics/VRFV2PlusLoadTestWithMetrics.bin VRFV2PlusLoadTestWithMetrics vrf_v2plus_load_test_with_metrics
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFCoordinatorV2PlusUpgradedVersion/VRFCoordinatorV2PlusUpgradedVersion.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2PlusUpgradedVersion/VRFCoordinatorV2PlusUpgradedVersion.bin VRFCoordinatorV2PlusUpgradedVersion vrf_v2plus_upgraded_version
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusWrapperLoadTestConsumer/VRFV2PlusWrapperLoadTestConsumer.abi ../../contracts/solc/v0.8.6/VRFV2PlusWrapperLoadTestConsumer/VRFV2PlusWrapperLoadTestConsumer.bin VRFV2PlusWrapperLoadTestConsumer vrfv2plus_wrapper_load_test_consumer
// Aggregators
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/AggregatorV2V3Interface.abi ../../contracts/solc/v0.8.6/AggregatorV2V3Interface.bin AggregatorV2V3Interface aggregator_v2v3_interface
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/AggregatorV3Interface.abi ../../contracts/solc/v0.8.6/AggregatorV3Interface.bin AggregatorV3Interface aggregator_v3_interface
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/MockAggregatorProxy.abi ../../contracts/solc/v0.8.6/MockAggregatorProxy.bin MockAggregatorProxy mock_aggregator_proxy
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/AggregatorV2V3Interface/AggregatorV2V3Interface.abi ../../contracts/solc/v0.8.6/AggregatorV2V3Interface/AggregatorV2V3Interface.bin AggregatorV2V3Interface aggregator_v2v3_interface
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/AggregatorV2V3Interface/AggregatorV3Interface.abi ../../contracts/solc/v0.8.6/AggregatorV2V3Interface/AggregatorV3Interface.bin AggregatorV3Interface aggregator_v3_interface
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/MockAggregatorProxy/MockAggregatorProxy.abi ../../contracts/solc/v0.8.6/MockAggregatorProxy/MockAggregatorProxy.bin MockAggregatorProxy mock_aggregator_proxy
// Log tester
-//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.19/LogEmitter.abi ../../contracts/solc/v0.8.19/LogEmitter.bin LogEmitter log_emitter
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.19/LogEmitter/LogEmitter.abi ../../contracts/solc/v0.8.19/LogEmitter/LogEmitter.bin LogEmitter log_emitter
// Chainlink Functions
//go:generate go generate ./functions
diff --git a/core/gethwrappers/llo-feeds/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/gethwrappers/llo-feeds/generation/generated-wrapper-dependency-versions-do-not-edit.txt
index abc3b47db2c..293defcfbe0 100644
--- a/core/gethwrappers/llo-feeds/generation/generated-wrapper-dependency-versions-do-not-edit.txt
+++ b/core/gethwrappers/llo-feeds/generation/generated-wrapper-dependency-versions-do-not-edit.txt
@@ -1,10 +1,10 @@
GETH_VERSION: 1.12.0
-errored_verifier: ../../../contracts/solc/v0.8.16/ErroredVerifier.abi ../../../contracts/solc/v0.8.16/ErroredVerifier.bin 510d18a58bfda646be35e46491baf73041eb333a349615465b20e2b5b41c5f73
-exposed_verifier: ../../../contracts/solc/v0.8.16/ExposedVerifier.abi ../../../contracts/solc/v0.8.16/ExposedVerifier.bin 6932cea8f2738e874d3ec9e1a4231d2421704030c071d9e15dd2f7f08482c246
-fee_manager: ../../../contracts/solc/v0.8.16/FeeManager.abi ../../../contracts/solc/v0.8.16/FeeManager.bin 1b852df75bfabcc2b57539e84309cd57f9e693a2bb6b25a50e4a6101ccf32c49
+errored_verifier: ../../../contracts/solc/v0.8.16/ErroredVerifier/ErroredVerifier.abi ../../../contracts/solc/v0.8.16/ErroredVerifier/ErroredVerifier.bin 510d18a58bfda646be35e46491baf73041eb333a349615465b20e2b5b41c5f73
+exposed_verifier: ../../../contracts/solc/v0.8.16/ExposedVerifier/ExposedVerifier.abi ../../../contracts/solc/v0.8.16/ExposedVerifier/ExposedVerifier.bin 6932cea8f2738e874d3ec9e1a4231d2421704030c071d9e15dd2f7f08482c246
+fee_manager: ../../../contracts/solc/v0.8.16/FeeManager/FeeManager.abi ../../../contracts/solc/v0.8.16/FeeManager/FeeManager.bin 1b852df75bfabcc2b57539e84309cd57f9e693a2bb6b25a50e4a6101ccf32c49
llo_feeds: ../../../contracts/solc/v0.8.16/FeeManager.abi ../../../contracts/solc/v0.8.16/FeeManager.bin cb71e018f67e49d7bc0e194c822204dfd59f79ff42e4fc8fd8ab63f3acd71361
llo_feeds_test: ../../../contracts/solc/v0.8.16/ExposedVerifier.abi ../../../contracts/solc/v0.8.16/ExposedVerifier.bin 6932cea8f2738e874d3ec9e1a4231d2421704030c071d9e15dd2f7f08482c246
-reward_manager: ../../../contracts/solc/v0.8.16/RewardManager.abi ../../../contracts/solc/v0.8.16/RewardManager.bin db73e9062b17a1d5aa14c06881fe2be49bd95b00b7f1a8943910c5e4ded5b221
-verifier: ../../../contracts/solc/v0.8.16/Verifier.abi ../../../contracts/solc/v0.8.16/Verifier.bin df12786bbeccf3a8f3389479cf93c055b4efd5904b9f99a4835f81af43fe62bf
-verifier_proxy: ../../../contracts/solc/v0.8.16/VerifierProxy.abi ../../../contracts/solc/v0.8.16/VerifierProxy.bin 6393443d0a323f2dbe9687dc30fd77f8dfa918944b61c651759746ff2d76e4e5
+reward_manager: ../../../contracts/solc/v0.8.16/RewardManager/RewardManager.abi ../../../contracts/solc/v0.8.16/RewardManager/RewardManager.bin db73e9062b17a1d5aa14c06881fe2be49bd95b00b7f1a8943910c5e4ded5b221
+verifier: ../../../contracts/solc/v0.8.16/Verifier/Verifier.abi ../../../contracts/solc/v0.8.16/Verifier/Verifier.bin df12786bbeccf3a8f3389479cf93c055b4efd5904b9f99a4835f81af43fe62bf
+verifier_proxy: ../../../contracts/solc/v0.8.16/VerifierProxy/VerifierProxy.abi ../../../contracts/solc/v0.8.16/VerifierProxy/VerifierProxy.bin 6393443d0a323f2dbe9687dc30fd77f8dfa918944b61c651759746ff2d76e4e5
werc20_mock: ../../../contracts/solc/v0.8.19/WERC20Mock.abi ../../../contracts/solc/v0.8.19/WERC20Mock.bin ff2ca3928b2aa9c412c892cb8226c4d754c73eeb291bb7481c32c48791b2aa94
diff --git a/core/gethwrappers/llo-feeds/go_generate.go b/core/gethwrappers/llo-feeds/go_generate.go
index 8d9e3be0493..5b2088f43a0 100644
--- a/core/gethwrappers/llo-feeds/go_generate.go
+++ b/core/gethwrappers/llo-feeds/go_generate.go
@@ -3,9 +3,9 @@
package gethwrappers
// Chainlink LLO
-//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.16/Verifier.abi ../../../contracts/solc/v0.8.16/Verifier.bin Verifier verifier
-//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.16/VerifierProxy.abi ../../../contracts/solc/v0.8.16/VerifierProxy.bin VerifierProxy verifier_proxy
-//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.16/ErroredVerifier.abi ../../../contracts/solc/v0.8.16/ErroredVerifier.bin ErroredVerifier errored_verifier
-//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.16/ExposedVerifier.abi ../../../contracts/solc/v0.8.16/ExposedVerifier.bin ExposedVerifier exposed_verifier
-//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.16/RewardManager.abi ../../../contracts/solc/v0.8.16/RewardManager.bin RewardManager reward_manager
-//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.16/FeeManager.abi ../../../contracts/solc/v0.8.16/FeeManager.bin FeeManager fee_manager
+//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.16/Verifier/Verifier.abi ../../../contracts/solc/v0.8.16/Verifier/Verifier.bin Verifier verifier
+//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.16/VerifierProxy/VerifierProxy.abi ../../../contracts/solc/v0.8.16/VerifierProxy/VerifierProxy.bin VerifierProxy verifier_proxy
+//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.16/ErroredVerifier/ErroredVerifier.abi ../../../contracts/solc/v0.8.16/ErroredVerifier/ErroredVerifier.bin ErroredVerifier errored_verifier
+//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.16/ExposedVerifier/ExposedVerifier.abi ../../../contracts/solc/v0.8.16/ExposedVerifier/ExposedVerifier.bin ExposedVerifier exposed_verifier
+//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.16/RewardManager/RewardManager.abi ../../../contracts/solc/v0.8.16/RewardManager/RewardManager.bin RewardManager reward_manager
+//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.16/FeeManager/FeeManager.abi ../../../contracts/solc/v0.8.16/FeeManager/FeeManager.bin FeeManager fee_manager
diff --git a/core/gethwrappers/shared/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/gethwrappers/shared/generation/generated-wrapper-dependency-versions-do-not-edit.txt
index 7ac7e77a4b8..af907ce85eb 100644
--- a/core/gethwrappers/shared/generation/generated-wrapper-dependency-versions-do-not-edit.txt
+++ b/core/gethwrappers/shared/generation/generated-wrapper-dependency-versions-do-not-edit.txt
@@ -1,5 +1,5 @@
GETH_VERSION: 1.12.0
-burn_mint_erc677: ../../../contracts/solc/v0.8.19/BurnMintERC677.abi ../../../contracts/solc/v0.8.19/BurnMintERC677.bin 405c9016171e614b17e10588653ef8d33dcea21dd569c3fddc596a46fcff68a3
-erc20: ../../../contracts/solc/v0.8.19/ERC20.abi ../../../contracts/solc/v0.8.19/ERC20.bin 5b1a93d9b24f250e49a730c96335a8113c3f7010365cba578f313b483001d4fc
-link_token: ../../../contracts/solc/v0.8.19/LinkToken.abi ../../../contracts/solc/v0.8.19/LinkToken.bin c0ef9b507103aae541ebc31d87d051c2764ba9d843076b30ec505d37cdfffaba
-werc20_mock: ../../../contracts/solc/v0.8.19/WERC20Mock.abi ../../../contracts/solc/v0.8.19/WERC20Mock.bin ff2ca3928b2aa9c412c892cb8226c4d754c73eeb291bb7481c32c48791b2aa94
+burn_mint_erc677: ../../../contracts/solc/v0.8.19/BurnMintERC677/BurnMintERC677.abi ../../../contracts/solc/v0.8.19/BurnMintERC677/BurnMintERC677.bin 405c9016171e614b17e10588653ef8d33dcea21dd569c3fddc596a46fcff68a3
+erc20: ../../../contracts/solc/v0.8.19/ERC20/ERC20.abi ../../../contracts/solc/v0.8.19/ERC20/ERC20.bin 5b1a93d9b24f250e49a730c96335a8113c3f7010365cba578f313b483001d4fc
+link_token: ../../../contracts/solc/v0.8.19/LinkToken/LinkToken.abi ../../../contracts/solc/v0.8.19/LinkToken/LinkToken.bin c0ef9b507103aae541ebc31d87d051c2764ba9d843076b30ec505d37cdfffaba
+werc20_mock: ../../../contracts/solc/v0.8.19/WERC20Mock/WERC20Mock.abi ../../../contracts/solc/v0.8.19/WERC20Mock/WERC20Mock.bin ff2ca3928b2aa9c412c892cb8226c4d754c73eeb291bb7481c32c48791b2aa94
diff --git a/core/gethwrappers/shared/go_generate.go b/core/gethwrappers/shared/go_generate.go
index 85a01670c9a..6f3bead7d6b 100644
--- a/core/gethwrappers/shared/go_generate.go
+++ b/core/gethwrappers/shared/go_generate.go
@@ -2,7 +2,7 @@
// golang packages, using abigen.
package gethwrappers
-//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/BurnMintERC677.abi ../../../contracts/solc/v0.8.19/BurnMintERC677.bin BurnMintERC677 burn_mint_erc677
-//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/LinkToken.abi ../../../contracts/solc/v0.8.19/LinkToken.bin LinkToken link_token
-//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/ERC20.abi ../../../contracts/solc/v0.8.19/ERC20.bin ERC20 erc20
-//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/WERC20Mock.abi ../../../contracts/solc/v0.8.19/WERC20Mock.bin WERC20Mock werc20_mock
+//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/BurnMintERC677/BurnMintERC677.abi ../../../contracts/solc/v0.8.19/BurnMintERC677/BurnMintERC677.bin BurnMintERC677 burn_mint_erc677
+//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/LinkToken/LinkToken.abi ../../../contracts/solc/v0.8.19/LinkToken/LinkToken.bin LinkToken link_token
+//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/ERC20/ERC20.abi ../../../contracts/solc/v0.8.19/ERC20/ERC20.bin ERC20 erc20
+//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/WERC20Mock/WERC20Mock.abi ../../../contracts/solc/v0.8.19/WERC20Mock/WERC20Mock.bin WERC20Mock werc20_mock
diff --git a/core/gethwrappers/transmission/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/gethwrappers/transmission/generation/generated-wrapper-dependency-versions-do-not-edit.txt
index 8ea0492fa40..a6d32bf0a85 100644
--- a/core/gethwrappers/transmission/generation/generated-wrapper-dependency-versions-do-not-edit.txt
+++ b/core/gethwrappers/transmission/generation/generated-wrapper-dependency-versions-do-not-edit.txt
@@ -1,9 +1,9 @@
GETH_VERSION: 1.12.0
-entry_point: ../../../contracts/solc/v0.8.15/EntryPoint.abi ../../../contracts/solc/v0.8.15/EntryPoint.bin 2cb4bb2ba3efa8df3dfb0a57eb3727d17b68fe202682024fa7cfb4faf026833e
+entry_point: ../../../contracts/solc/v0.8.15/EntryPoint/EntryPoint.abi ../../../contracts/solc/v0.8.15/EntryPoint/EntryPoint.bin 2cb4bb2ba3efa8df3dfb0a57eb3727d17b68fe202682024fa7cfb4faf026833e
greeter: ../../../contracts/solc/v0.8.15/Greeter.abi ../../../contracts/solc/v0.8.15/Greeter.bin 653dcba5c33a46292073939ce1e639372cf521c0ec2814d4c9f20c72f796f18c
-greeter_wrapper: ../../../contracts/solc/v0.8.15/Greeter.abi ../../../contracts/solc/v0.8.15/Greeter.bin 653dcba5c33a46292073939ce1e639372cf521c0ec2814d4c9f20c72f796f18c
-paymaster_wrapper: ../../../contracts/solc/v0.8.15/Paymaster.abi ../../../contracts/solc/v0.8.15/Paymaster.bin 189ef817a5b7a6ff53ddf35b1988465b8aec479c47b77236fe20bf7e67d48100
+greeter_wrapper: ../../../contracts/solc/v0.8.15/Greeter/Greeter.abi ../../../contracts/solc/v0.8.15/Greeter/Greeter.bin 653dcba5c33a46292073939ce1e639372cf521c0ec2814d4c9f20c72f796f18c
+paymaster_wrapper: ../../../contracts/solc/v0.8.15/Paymaster/Paymaster.abi ../../../contracts/solc/v0.8.15/Paymaster/Paymaster.bin 189ef817a5b7a6ff53ddf35b1988465b8aec479c47b77236fe20bf7e67d48100
sca: ../../../contracts/solc/v0.8.15/SCA.abi ../../../contracts/solc/v0.8.15/SCA.bin ae0f860cdac87d4ac505edbd228bd3ea1108550453aba67aebcb61f09cf70d0b
-sca_wrapper: ../../../contracts/solc/v0.8.15/SCA.abi ../../../contracts/solc/v0.8.15/SCA.bin 2a8100fbdb41e6ce917ed333a624eaa4a8984b07e2d8d8ca6bba9bc9f74b05d7
-smart_contract_account_factory: ../../../contracts/solc/v0.8.15/SmartContractAccountFactory.abi ../../../contracts/solc/v0.8.15/SmartContractAccountFactory.bin a44d6fa2dbf9cb3441d6d637d89e1cd656f28b6bf4146f58d508067474bf845b
-smart_contract_account_helper: ../../../contracts/solc/v0.8.15/SmartContractAccountHelper.abi ../../../contracts/solc/v0.8.15/SmartContractAccountHelper.bin 22f960a74bd1581a12aa4f8f438a3f265f32f43682f5c1897ca50707b9982d56
+sca_wrapper: ../../../contracts/solc/v0.8.15/SCA/SCA.abi ../../../contracts/solc/v0.8.15/SCA/SCA.bin 2a8100fbdb41e6ce917ed333a624eaa4a8984b07e2d8d8ca6bba9bc9f74b05d7
+smart_contract_account_factory: ../../../contracts/solc/v0.8.15/SmartContractAccountFactory/SmartContractAccountFactory.abi ../../../contracts/solc/v0.8.15/SmartContractAccountFactory/SmartContractAccountFactory.bin a44d6fa2dbf9cb3441d6d637d89e1cd656f28b6bf4146f58d508067474bf845b
+smart_contract_account_helper: ../../../contracts/solc/v0.8.15/SmartContractAccountHelper/SmartContractAccountHelper.abi ../../../contracts/solc/v0.8.15/SmartContractAccountHelper/SmartContractAccountHelper.bin 22f960a74bd1581a12aa4f8f438a3f265f32f43682f5c1897ca50707b9982d56
diff --git a/core/gethwrappers/transmission/go_generate.go b/core/gethwrappers/transmission/go_generate.go
index 52182a11504..54c6ecf94ed 100644
--- a/core/gethwrappers/transmission/go_generate.go
+++ b/core/gethwrappers/transmission/go_generate.go
@@ -3,9 +3,9 @@
package gethwrappers
// Transmission
-//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.15/Greeter.abi ../../../contracts/solc/v0.8.15/Greeter.bin Greeter greeter_wrapper
-//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.15/SmartContractAccountFactory.abi ../../../contracts/solc/v0.8.15/SmartContractAccountFactory.bin SmartContractAccountFactory smart_contract_account_factory
-//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.15/EntryPoint.abi ../../../contracts/solc/v0.8.15/EntryPoint.bin EntryPoint entry_point
-//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.15/SmartContractAccountHelper.abi ../../../contracts/solc/v0.8.15/SmartContractAccountHelper.bin SmartContractAccountHelper smart_contract_account_helper
-//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.15/SCA.abi ../../../contracts/solc/v0.8.15/SCA.bin SCA sca_wrapper
-//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.15/Paymaster.abi ../../../contracts/solc/v0.8.15/Paymaster.bin Paymaster paymaster_wrapper
+//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.15/Greeter/Greeter.abi ../../../contracts/solc/v0.8.15/Greeter/Greeter.bin Greeter greeter_wrapper
+//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.15/SmartContractAccountFactory/SmartContractAccountFactory.abi ../../../contracts/solc/v0.8.15/SmartContractAccountFactory/SmartContractAccountFactory.bin SmartContractAccountFactory smart_contract_account_factory
+//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.15/EntryPoint/EntryPoint.abi ../../../contracts/solc/v0.8.15/EntryPoint/EntryPoint.bin EntryPoint entry_point
+//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.15/SmartContractAccountHelper/SmartContractAccountHelper.abi ../../../contracts/solc/v0.8.15/SmartContractAccountHelper/SmartContractAccountHelper.bin SmartContractAccountHelper smart_contract_account_helper
+//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.15/SCA/SCA.abi ../../../contracts/solc/v0.8.15/SCA/SCA.bin SCA sca_wrapper
+//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.15/Paymaster/Paymaster.abi ../../../contracts/solc/v0.8.15/Paymaster/Paymaster.bin Paymaster paymaster_wrapper
diff --git a/core/internal/cltest/cltest.go b/core/internal/cltest/cltest.go
index d47e6243b82..778ccbdb154 100644
--- a/core/internal/cltest/cltest.go
+++ b/core/internal/cltest/cltest.go
@@ -39,12 +39,12 @@ import (
"github.com/tidwall/gjson"
"github.com/urfave/cli"
+ "github.com/jmoiron/sqlx"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting/types"
- "github.com/smartcontractkit/sqlx"
"github.com/smartcontractkit/chainlink-relay/pkg/loop"
- clienttypes "github.com/smartcontractkit/chainlink/v2/common/chains/client"
+ "github.com/smartcontractkit/chainlink/v2/common/client"
txmgrcommon "github.com/smartcontractkit/chainlink/v2/common/txmgr"
commonmocks "github.com/smartcontractkit/chainlink/v2/common/types/mocks"
"github.com/smartcontractkit/chainlink/v2/core/assets"
@@ -191,7 +191,7 @@ func NewJobPipelineV2(t testing.TB, cfg pipeline.BridgeConfig, jpcfg JobPipeline
lggr := logger.TestLogger(t)
prm := pipeline.NewORM(db, lggr, dbCfg, jpcfg.MaxSuccessfulRuns())
btORM := bridges.NewORM(db, lggr, dbCfg)
- jrm := job.NewORM(db, legacyChains, prm, btORM, keyStore, lggr, dbCfg)
+ jrm := job.NewORM(db, prm, btORM, keyStore, lggr, dbCfg)
pr := pipeline.NewRunner(prm, btORM, jpcfg, cfg, legacyChains, keyStore.Eth(), keyStore.VRF(), lggr, restrictedHTTPClient, unrestrictedHTTPClient)
return JobPipelineV2TestHelper{
prm,
@@ -414,11 +414,10 @@ func NewApplicationWithConfig(t testing.TB, cfg chainlink.GeneralConfig, flagsAn
if cfg.CosmosEnabled() {
cosmosCfg := chainlink.CosmosFactoryConfig{
- Keystore: keyStore.Cosmos(),
- TOMLConfigs: cfg.CosmosConfigs(),
- EventBroadcaster: eventBroadcaster,
- DB: db,
- QConfig: cfg.Database(),
+ Keystore: keyStore.Cosmos(),
+ TOMLConfigs: cfg.CosmosConfigs(),
+ DB: db,
+ QConfig: cfg.Database(),
}
initOps = append(initOps, chainlink.InitCosmos(testCtx, relayerFactory, cosmosCfg))
}
@@ -519,7 +518,7 @@ func NewEthMocksWithTransactionsOnBlocksAssertions(t testing.TB) *evmclimocks.Cl
c.On("Dial", mock.Anything).Maybe().Return(nil)
c.On("SubscribeNewHead", mock.Anything, mock.Anything).Maybe().Return(EmptyMockSubscription(t), nil)
c.On("SendTransaction", mock.Anything, mock.Anything).Maybe().Return(nil)
- c.On("SendTransactionReturnCode", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(clienttypes.Successful, nil)
+ c.On("SendTransactionReturnCode", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(client.Successful, nil)
// Construct chain
h2 := Head(2)
h1 := HeadWithHash(1, h2.ParentHash)
@@ -629,7 +628,7 @@ func (ta *TestApplication) NewHTTPClient(user *User) HTTPClientCleaner {
u, err := clsessions.NewUser(user.Email, Password, user.Role)
require.NoError(ta.t, err)
- err = ta.SessionORM().CreateUser(&u)
+ err = ta.BasicAdminUsersORM().CreateUser(&u)
require.NoError(ta.t, err)
sessionID := ta.MustSeedNewSession(user.Email)
diff --git a/core/internal/cltest/factories.go b/core/internal/cltest/factories.go
index 85ffc6b02bd..a52b9a5d06b 100644
--- a/core/internal/cltest/factories.go
+++ b/core/internal/cltest/factories.go
@@ -21,7 +21,7 @@ import (
"github.com/urfave/cli"
"gopkg.in/guregu/null.v4"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
txmgrcommon "github.com/smartcontractkit/chainlink/v2/common/txmgr"
txmgrtypes "github.com/smartcontractkit/chainlink/v2/common/txmgr/types"
@@ -563,7 +563,7 @@ func MustInsertV2JobSpec(t *testing.T, db *sqlx.DB, transmitterAddress common.Ad
PipelineSpecID: pipelineSpec.ID,
}
- jorm := job.NewORM(db, nil, nil, nil, nil, logger.TestLogger(t), configtest.NewTestGeneralConfig(t).Database())
+ jorm := job.NewORM(db, nil, nil, nil, logger.TestLogger(t), configtest.NewTestGeneralConfig(t).Database())
err = jorm.InsertJob(&jb)
require.NoError(t, err)
return jb
@@ -619,7 +619,7 @@ func MustInsertKeeperJob(t *testing.T, db *sqlx.DB, korm keeper.ORM, from ethkey
tlg := logger.TestLogger(t)
prm := pipeline.NewORM(db, tlg, cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns())
btORM := bridges.NewORM(db, tlg, cfg.Database())
- jrm := job.NewORM(db, nil, prm, btORM, nil, tlg, cfg.Database())
+ jrm := job.NewORM(db, prm, btORM, nil, tlg, cfg.Database())
err = jrm.InsertJob(&jb)
require.NoError(t, err)
return jb
diff --git a/core/internal/cltest/heavyweight/orm.go b/core/internal/cltest/heavyweight/orm.go
index 2f9370f35a6..5df28a49778 100644
--- a/core/internal/cltest/heavyweight/orm.go
+++ b/core/internal/cltest/heavyweight/orm.go
@@ -7,17 +7,18 @@ import (
"database/sql"
"errors"
"fmt"
- "math/rand"
"net/url"
"os"
"path"
"runtime"
+ "strings"
"testing"
+ "github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/cmd"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
@@ -30,21 +31,25 @@ import (
// FullTestDBV2 creates a pristine DB which runs in a separate database than the normal
// unit tests, so you can do things like use other Postgres connection types with it.
-func FullTestDBV2(t testing.TB, name string, overrideFn func(c *chainlink.Config, s *chainlink.Secrets)) (chainlink.GeneralConfig, *sqlx.DB) {
- return prepareFullTestDBV2(t, name, false, true, overrideFn)
+func FullTestDBV2(t testing.TB, overrideFn func(c *chainlink.Config, s *chainlink.Secrets)) (chainlink.GeneralConfig, *sqlx.DB) {
+ return prepareFullTestDBV2(t, false, true, overrideFn)
}
// FullTestDBNoFixturesV2 is the same as FullTestDB, but it does not load fixtures.
-func FullTestDBNoFixturesV2(t testing.TB, name string, overrideFn func(c *chainlink.Config, s *chainlink.Secrets)) (chainlink.GeneralConfig, *sqlx.DB) {
- return prepareFullTestDBV2(t, name, false, false, overrideFn)
+func FullTestDBNoFixturesV2(t testing.TB, overrideFn func(c *chainlink.Config, s *chainlink.Secrets)) (chainlink.GeneralConfig, *sqlx.DB) {
+ return prepareFullTestDBV2(t, false, false, overrideFn)
}
// FullTestDBEmptyV2 creates an empty DB (without migrations).
-func FullTestDBEmptyV2(t testing.TB, name string, overrideFn func(c *chainlink.Config, s *chainlink.Secrets)) (chainlink.GeneralConfig, *sqlx.DB) {
- return prepareFullTestDBV2(t, name, true, false, overrideFn)
+func FullTestDBEmptyV2(t testing.TB, overrideFn func(c *chainlink.Config, s *chainlink.Secrets)) (chainlink.GeneralConfig, *sqlx.DB) {
+ return prepareFullTestDBV2(t, true, false, overrideFn)
}
-func prepareFullTestDBV2(t testing.TB, name string, empty bool, loadFixtures bool, overrideFn func(c *chainlink.Config, s *chainlink.Secrets)) (chainlink.GeneralConfig, *sqlx.DB) {
+func generateName() string {
+ return strings.ReplaceAll(uuid.New().String(), "-", "")
+}
+
+func prepareFullTestDBV2(t testing.TB, empty bool, loadFixtures bool, overrideFn func(c *chainlink.Config, s *chainlink.Secrets)) (chainlink.GeneralConfig, *sqlx.DB) {
testutils.SkipShort(t, "FullTestDB")
if empty && loadFixtures {
@@ -59,8 +64,7 @@ func prepareFullTestDBV2(t testing.TB, name string, empty bool, loadFixtures boo
})
require.NoError(t, os.MkdirAll(gcfg.RootDir(), 0700))
- name = fmt.Sprintf("%s_%x", name, rand.Intn(0xFFF)) // to avoid name collisions
- migrationTestDBURL, err := dropAndCreateThrowawayTestDB(gcfg.Database().URL(), name, empty)
+ migrationTestDBURL, err := dropAndCreateThrowawayTestDB(gcfg.Database().URL(), generateName(), empty)
require.NoError(t, err)
db, err := pg.NewConnection(migrationTestDBURL, dialects.Postgres, gcfg.Database())
require.NoError(t, err)
diff --git a/core/internal/cltest/job_factories.go b/core/internal/cltest/job_factories.go
index 77fee125e21..a9e403fb608 100644
--- a/core/internal/cltest/job_factories.go
+++ b/core/internal/cltest/job_factories.go
@@ -7,17 +7,15 @@ import (
"github.com/google/uuid"
"github.com/stretchr/testify/require"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/bridges"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/evmtest"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/job"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ethkey"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey"
"github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
- evmrelay "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
)
const (
@@ -66,9 +64,7 @@ func getORMs(t *testing.T, db *sqlx.DB) (jobORM job.ORM, pipelineORM pipeline.OR
lggr := logger.TestLogger(t)
pipelineORM = pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns())
bridgeORM := bridges.NewORM(db, lggr, config.Database())
- cc := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(cc)
- jobORM = job.NewORM(db, legacyChains, pipelineORM, bridgeORM, keyStore, lggr, config.Database())
+ jobORM = job.NewORM(db, pipelineORM, bridgeORM, keyStore, lggr, config.Database())
t.Cleanup(func() { jobORM.Close() })
return
}
diff --git a/core/internal/cltest/mocks.go b/core/internal/cltest/mocks.go
index 9fdbcbb373d..540924d7f02 100644
--- a/core/internal/cltest/mocks.go
+++ b/core/internal/cltest/mocks.go
@@ -11,7 +11,7 @@ import (
"testing"
"time"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm"
evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
@@ -309,7 +309,7 @@ func MustRandomUser(t testing.TB) sessions.User {
return r
}
-func NewUserWithSession(t testing.TB, orm sessions.ORM) sessions.User {
+func NewUserWithSession(t testing.TB, orm sessions.AuthenticationProvider) sessions.User {
u := MustRandomUser(t)
require.NoError(t, orm.CreateUser(&u))
@@ -330,7 +330,7 @@ func NewMockAPIInitializer(t testing.TB) *MockAPIInitializer {
return &MockAPIInitializer{t: t}
}
-func (m *MockAPIInitializer) Initialize(orm sessions.ORM, lggr logger.Logger) (sessions.User, error) {
+func (m *MockAPIInitializer) Initialize(orm sessions.BasicAdminUsersORM, lggr logger.Logger) (sessions.User, error) {
if user, err := orm.FindUser(APIEmailAdmin); err == nil {
return user, err
}
diff --git a/core/internal/features/features_test.go b/core/internal/features/features_test.go
index 058c8325b9a..b5f42d8bf3e 100644
--- a/core/internal/features/features_test.go
+++ b/core/internal/features/features_test.go
@@ -237,8 +237,7 @@ observationSource = """
pipelineORM := pipeline.NewORM(app.GetSqlxDB(), logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns())
bridgeORM := bridges.NewORM(app.GetSqlxDB(), logger.TestLogger(t), cfg.Database())
- legacyChains := app.GetRelayers().LegacyEVMChains()
- jobORM := job.NewORM(app.GetSqlxDB(), legacyChains, pipelineORM, bridgeORM, app.KeyStore, logger.TestLogger(t), cfg.Database())
+ jobORM := job.NewORM(app.GetSqlxDB(), pipelineORM, bridgeORM, app.KeyStore, logger.TestLogger(t), cfg.Database())
runs := cltest.WaitForPipelineComplete(t, 0, jobID, 1, 2, jobORM, 5*time.Second, 300*time.Millisecond)
require.Len(t, runs, 1)
@@ -267,7 +266,7 @@ func TestIntegration_AuthToken(t *testing.T) {
mockUser := cltest.MustRandomUser(t)
key, secret := uuid.New().String(), uuid.New().String()
apiToken := auth.Token{AccessKey: key, Secret: secret}
- orm := app.SessionORM()
+ orm := app.AuthenticationProvider()
require.NoError(t, orm.CreateUser(&mockUser))
require.NoError(t, orm.SetAuthToken(&mockUser, &apiToken))
@@ -676,11 +675,11 @@ func setupOCRContracts(t *testing.T) (*bind.TransactOpts, *backends.SimulatedBac
return owner, b, ocrContractAddress, ocrContract, flagsContract, flagsContractAddress
}
-func setupNode(t *testing.T, owner *bind.TransactOpts, portV1, portV2 int, dbName string,
+func setupNode(t *testing.T, owner *bind.TransactOpts, portV1, portV2 int,
b *backends.SimulatedBackend, ns ocrnetworking.NetworkingStack, overrides func(c *chainlink.Config, s *chainlink.Secrets),
) (*cltest.TestApplication, string, common.Address, ocrkey.KeyV2) {
p2pKey := keystest.NewP2PKeyV2(t)
- config, _ := heavyweight.FullTestDBV2(t, dbName, func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
c.Insecure.OCRDevelopmentMode = ptr(true) // Disables ocr spec validation so we can have fast polling for the test.
c.OCR.Enabled = ptr(true)
@@ -749,7 +748,6 @@ func setupForwarderEnabledNode(
owner *bind.TransactOpts,
portV1,
portV2 int,
- dbName string,
b *backends.SimulatedBackend,
ns ocrnetworking.NetworkingStack,
overrides func(c *chainlink.Config, s *chainlink.Secrets),
@@ -761,7 +759,7 @@ func setupForwarderEnabledNode(
ocrkey.KeyV2,
) {
p2pKey := keystest.NewP2PKeyV2(t)
- config, _ := heavyweight.FullTestDBV2(t, dbName, func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
c.Insecure.OCRDevelopmentMode = ptr(true) // Disables ocr spec validation so we can have fast polling for the test.
c.OCR.Enabled = ptr(true)
@@ -868,7 +866,7 @@ func TestIntegration_OCR(t *testing.T) {
// Note it's plausible these ports could be occupied on a CI machine.
// May need a port randomize + retry approach if we observe collisions.
- appBootstrap, bootstrapPeerID, _, _ := setupNode(t, owner, bootstrapNodePortV1, bootstrapNodePortV2, fmt.Sprintf("b_%d", test.id), b, test.ns, nil)
+ appBootstrap, bootstrapPeerID, _, _ := setupNode(t, owner, bootstrapNodePortV1, bootstrapNodePortV2, b, test.ns, nil)
var (
oracles []confighelper.OracleIdentityExtra
transmitters []common.Address
@@ -879,7 +877,7 @@ func TestIntegration_OCR(t *testing.T) {
for i := 0; i < numOracles; i++ {
portV1 := ports[2*i]
portV2 := ports[2*i+1]
- app, peerID, transmitter, key := setupNode(t, owner, portV1, portV2, fmt.Sprintf("o%d_%d", i, test.id), b, test.ns, func(c *chainlink.Config, s *chainlink.Secrets) {
+ app, peerID, transmitter, key := setupNode(t, owner, portV1, portV2, b, test.ns, func(c *chainlink.Config, s *chainlink.Secrets) {
c.EVM[0].FlagsContractAddress = ptr(ethkey.EIP55AddressFromAddress(flagsContractAddress))
c.EVM[0].GasEstimator.EIP1559DynamicFees = ptr(test.eip1559)
if test.ns != ocrnetworking.NetworkingStackV1 {
@@ -1093,7 +1091,7 @@ func TestIntegration_OCR_ForwarderFlow(t *testing.T) {
// Note it's plausible these ports could be occupied on a CI machine.
// May need a port randomize + retry approach if we observe collisions.
- appBootstrap, bootstrapPeerID, _, _ := setupNode(t, owner, bootstrapNodePortV1, bootstrapNodePortV2, fmt.Sprintf("b_%d", 1), b, ocrnetworking.NetworkingStackV2, nil)
+ appBootstrap, bootstrapPeerID, _, _ := setupNode(t, owner, bootstrapNodePortV1, bootstrapNodePortV2, b, ocrnetworking.NetworkingStackV2, nil)
var (
oracles []confighelper.OracleIdentityExtra
@@ -1106,7 +1104,7 @@ func TestIntegration_OCR_ForwarderFlow(t *testing.T) {
for i := 0; i < numOracles; i++ {
portV1 := ports[2*i]
portV2 := ports[2*i+1]
- app, peerID, transmitter, forwarder, key := setupForwarderEnabledNode(t, owner, portV1, portV2, fmt.Sprintf("o%d_%d", i, 1), b, ocrnetworking.NetworkingStackV2, func(c *chainlink.Config, s *chainlink.Secrets) {
+ app, peerID, transmitter, forwarder, key := setupForwarderEnabledNode(t, owner, portV1, portV2, b, ocrnetworking.NetworkingStackV2, func(c *chainlink.Config, s *chainlink.Secrets) {
c.Feature.LogPoller = ptr(true)
c.EVM[0].FlagsContractAddress = ptr(ethkey.EIP55AddressFromAddress(flagsContractAddress))
c.EVM[0].GasEstimator.EIP1559DynamicFees = ptr(true)
diff --git a/core/internal/features/ocr2/features_ocr2_test.go b/core/internal/features/ocr2/features_ocr2_test.go
index 25b6781c4e9..3e220935685 100644
--- a/core/internal/features/ocr2/features_ocr2_test.go
+++ b/core/internal/features/ocr2/features_ocr2_test.go
@@ -105,13 +105,12 @@ func setupNodeOCR2(
t *testing.T,
owner *bind.TransactOpts,
port int,
- dbName string,
useForwarder bool,
b *backends.SimulatedBackend,
p2pV2Bootstrappers []commontypes.BootstrapperLocator,
) *ocr2Node {
p2pKey := keystest.NewP2PKeyV2(t)
- config, _ := heavyweight.FullTestDBV2(t, fmt.Sprintf("%s%d", dbName, port), func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
c.Insecure.OCRDevelopmentMode = ptr(true) // Disables ocr spec validation so we can have fast polling for the test.
c.Feature.LogPoller = ptr(true)
@@ -193,7 +192,7 @@ func TestIntegration_OCR2(t *testing.T) {
lggr := logger.TestLogger(t)
bootstrapNodePort := freeport.GetOne(t)
- bootstrapNode := setupNodeOCR2(t, owner, bootstrapNodePort, "bootstrap", false /* useForwarders */, b, nil)
+ bootstrapNode := setupNodeOCR2(t, owner, bootstrapNodePort, false /* useForwarders */, b, nil)
var (
oracles []confighelper2.OracleIdentityExtra
@@ -203,7 +202,7 @@ func TestIntegration_OCR2(t *testing.T) {
)
ports := freeport.GetN(t, 4)
for i := 0; i < 4; i++ {
- node := setupNodeOCR2(t, owner, ports[i], fmt.Sprintf("oracle%d", i), false /* useForwarders */, b, []commontypes.BootstrapperLocator{
+ node := setupNodeOCR2(t, owner, ports[i], false /* useForwarders */, b, []commontypes.BootstrapperLocator{
// Supply the bootstrap IP and port as a V2 peer address
{PeerID: bootstrapNode.peerID, Addrs: []string{fmt.Sprintf("127.0.0.1:%d", bootstrapNodePort)}},
})
@@ -477,7 +476,7 @@ func TestIntegration_OCR2_ForwarderFlow(t *testing.T) {
lggr := logger.TestLogger(t)
bootstrapNodePort := freeport.GetOne(t)
- bootstrapNode := setupNodeOCR2(t, owner, bootstrapNodePort, "bootstrap", true /* useForwarders */, b, nil)
+ bootstrapNode := setupNodeOCR2(t, owner, bootstrapNodePort, true /* useForwarders */, b, nil)
var (
oracles []confighelper2.OracleIdentityExtra
@@ -488,7 +487,7 @@ func TestIntegration_OCR2_ForwarderFlow(t *testing.T) {
)
ports := freeport.GetN(t, 4)
for i := uint16(0); i < 4; i++ {
- node := setupNodeOCR2(t, owner, ports[i], fmt.Sprintf("oracle%d", i), true /* useForwarders */, b, []commontypes.BootstrapperLocator{
+ node := setupNodeOCR2(t, owner, ports[i], true /* useForwarders */, b, []commontypes.BootstrapperLocator{
// Supply the bootstrap IP and port as a V2 peer address
{PeerID: bootstrapNode.peerID, Addrs: []string{fmt.Sprintf("127.0.0.1:%d", bootstrapNodePort)}},
})
diff --git a/core/internal/mocks/application.go b/core/internal/mocks/application.go
index ec656509afd..48f8e12dac3 100644
--- a/core/internal/mocks/application.go
+++ b/core/internal/mocks/application.go
@@ -31,7 +31,7 @@ import (
sessions "github.com/smartcontractkit/chainlink/v2/core/sessions"
- sqlx "github.com/smartcontractkit/sqlx"
+ sqlx "github.com/jmoiron/sqlx"
txmgr "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
@@ -63,6 +63,38 @@ func (_m *Application) AddJobV2(ctx context.Context, _a1 *job.Job) error {
return r0
}
+// AuthenticationProvider provides a mock function with given fields:
+func (_m *Application) AuthenticationProvider() sessions.AuthenticationProvider {
+ ret := _m.Called()
+
+ var r0 sessions.AuthenticationProvider
+ if rf, ok := ret.Get(0).(func() sessions.AuthenticationProvider); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(sessions.AuthenticationProvider)
+ }
+ }
+
+ return r0
+}
+
+// BasicAdminUsersORM provides a mock function with given fields:
+func (_m *Application) BasicAdminUsersORM() sessions.BasicAdminUsersORM {
+ ret := _m.Called()
+
+ var r0 sessions.BasicAdminUsersORM
+ if rf, ok := ret.Get(0).(func() sessions.BasicAdminUsersORM); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(sessions.BasicAdminUsersORM)
+ }
+ }
+
+ return r0
+}
+
// BridgeORM provides a mock function with given fields:
func (_m *Application) BridgeORM() bridges.ORM {
ret := _m.Called()
@@ -439,22 +471,6 @@ func (_m *Application) SecretGenerator() chainlink.SecretGenerator {
return r0
}
-// SessionORM provides a mock function with given fields:
-func (_m *Application) SessionORM() sessions.ORM {
- ret := _m.Called()
-
- var r0 sessions.ORM
- if rf, ok := ret.Get(0).(func() sessions.ORM); ok {
- r0 = rf()
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(sessions.ORM)
- }
- }
-
- return r0
-}
-
// SetLogLevel provides a mock function with given fields: lvl
func (_m *Application) SetLogLevel(lvl zapcore.Level) error {
ret := _m.Called(lvl)
diff --git a/core/internal/testutils/evmtest/evmtest.go b/core/internal/testutils/evmtest/evmtest.go
index 3a08c815166..80237d218d7 100644
--- a/core/internal/testutils/evmtest/evmtest.go
+++ b/core/internal/testutils/evmtest/evmtest.go
@@ -9,8 +9,8 @@ import (
"testing"
"github.com/ethereum/go-ethereum"
+ "github.com/jmoiron/sqlx"
"github.com/pelletier/go-toml/v2"
- "github.com/smartcontractkit/sqlx"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"gopkg.in/guregu/null.v4"
diff --git a/core/internal/testutils/pgtest/pgtest.go b/core/internal/testutils/pgtest/pgtest.go
index 283326de85f..1900fcc62b3 100644
--- a/core/internal/testutils/pgtest/pgtest.go
+++ b/core/internal/testutils/pgtest/pgtest.go
@@ -5,8 +5,8 @@ import (
"testing"
"github.com/google/uuid"
+ "github.com/jmoiron/sqlx"
"github.com/scylladb/go-reflectx"
- "github.com/smartcontractkit/sqlx"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
diff --git a/core/internal/testutils/pgtest/txdb.go b/core/internal/testutils/pgtest/txdb.go
index 598a5dddc55..da9fd6cb2d0 100644
--- a/core/internal/testutils/pgtest/txdb.go
+++ b/core/internal/testutils/pgtest/txdb.go
@@ -12,7 +12,7 @@ import (
"sync"
"testing"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"go.uber.org/multierr"
"github.com/smartcontractkit/chainlink/v2/core/config/env"
diff --git a/core/internal/testutils/pgtest/txdb_test.go b/core/internal/testutils/pgtest/txdb_test.go
index 71960c6150a..c1aeef4b8c2 100644
--- a/core/internal/testutils/pgtest/txdb_test.go
+++ b/core/internal/testutils/pgtest/txdb_test.go
@@ -6,7 +6,7 @@ import (
"time"
"github.com/google/uuid"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/stretchr/testify/assert"
)
diff --git a/core/internal/testutils/testutils.go b/core/internal/testutils/testutils.go
index 79c86f0c5f8..6ffd873d092 100644
--- a/core/internal/testutils/testutils.go
+++ b/core/internal/testutils/testutils.go
@@ -27,7 +27,7 @@ import (
"github.com/tidwall/gjson"
"go.uber.org/zap/zaptest/observer"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
diff --git a/core/scripts/chaincli/command/keeper/verifiable_load.go b/core/scripts/chaincli/command/keeper/verifiable_load.go
index 7d77f0d3a37..33acf9bf3b2 100644
--- a/core/scripts/chaincli/command/keeper/verifiable_load.go
+++ b/core/scripts/chaincli/command/keeper/verifiable_load.go
@@ -1,6 +1,8 @@
package keeper
import (
+ "log"
+
"github.com/spf13/cobra"
"github.com/smartcontractkit/chainlink/core/scripts/chaincli/config"
@@ -15,6 +17,14 @@ var verifiableLoad = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
cfg := config.New()
hdlr := handler.NewKeeper(cfg)
- hdlr.GetVerifiableLoadStats(cmd.Context())
+ csv, err := cmd.Flags().GetBool("csv")
+ if err != nil {
+ log.Fatal("failed to get verify flag: ", err)
+ }
+ hdlr.GetVerifiableLoadStats(cmd.Context(), csv)
},
}
+
+func init() {
+ verifiableLoad.Flags().BoolP("csv", "c", false, "Specify if stats should be output as CSV")
+}
diff --git a/core/scripts/chaincli/handler/debug.go b/core/scripts/chaincli/handler/debug.go
index 7cf801d3326..daf012ee16e 100644
--- a/core/scripts/chaincli/handler/debug.go
+++ b/core/scripts/chaincli/handler/debug.go
@@ -275,13 +275,21 @@ func (k *Keeper) Debug(ctx context.Context, args []string) {
if err != nil {
failUnknown("failed to execute mercury callback ", err)
}
+ if callbackResult.UpkeepFailureReason != 0 {
+ message(fmt.Sprintf("checkCallback failed with UpkeepFailureReason %d", checkResult.UpkeepFailureReason))
+ }
upkeepNeeded, performData = callbackResult.UpkeepNeeded, callbackResult.PerformData
- // do tenderly simulation
+ // do tenderly simulations
rawCall, err := core.RegistryABI.Pack("checkCallback", upkeepID, values, streamsLookup.extraData)
if err != nil {
- failUnknown("failed to pack raw checkUpkeep call", err)
+ failUnknown("failed to pack raw checkCallback call", err)
}
addLink("checkCallback simulation", tenderlySimLink(k.cfg, chainID, blockNum, rawCall, registryAddress))
+ rawCall, err = core.StreamsCompatibleABI.Pack("checkCallback", values, streamsLookup.extraData)
+ if err != nil {
+ failUnknown("failed to pack raw checkCallback (direct) call", err)
+ }
+ addLink("checkCallback (direct) simulation", tenderlySimLink(k.cfg, chainID, blockNum, rawCall, upkeepInfo.Target))
} else {
message("did not revert with StreamsLookup error")
}
diff --git a/core/scripts/chaincli/handler/keeper_verifiable_load.go b/core/scripts/chaincli/handler/keeper_verifiable_load.go
index 429a7620079..b71a9af3387 100644
--- a/core/scripts/chaincli/handler/keeper_verifiable_load.go
+++ b/core/scripts/chaincli/handler/keeper_verifiable_load.go
@@ -2,6 +2,7 @@ package handler
import (
"context"
+ "fmt"
"log"
"math/big"
"sort"
@@ -57,7 +58,7 @@ type upkeepStats struct {
SortedAllDelays []float64
}
-func (k *Keeper) GetVerifiableLoadStats(ctx context.Context) {
+func (k *Keeper) GetVerifiableLoadStats(ctx context.Context, csv bool) {
var v verifiableLoad
var err error
addr := common.HexToAddress(k.cfg.VerifiableLoadContractAddress)
@@ -84,6 +85,10 @@ func (k *Keeper) GetVerifiableLoadStats(ctx context.Context) {
log.Fatalf("failed to get active upkeep IDs from %s: %v", k.cfg.VerifiableLoadContractAddress, err)
}
+ if csv {
+ fmt.Println("upkeep ID,total performs,p50,p90,p95,p99,max delay,total delay blocks,average perform delay")
+ }
+
us := &upkeepStats{BlockNumber: blockNum}
resultsChan := make(chan *upkeepInfo, maxUpkeepNum)
@@ -94,7 +99,7 @@ func (k *Keeper) GetVerifiableLoadStats(ctx context.Context) {
// create a number of workers to process the upkeep ids in batch
for i := 0; i < workerNum; i++ {
wg.Add(1)
- go k.getUpkeepInfo(idChan, resultsChan, v, opts, &wg)
+ go k.getUpkeepInfo(idChan, resultsChan, v, opts, &wg, csv)
}
for _, id := range upkeepIds {
@@ -120,12 +125,16 @@ func (k *Keeper) GetVerifiableLoadStats(ctx context.Context) {
p90, _ := stats.Percentile(us.SortedAllDelays, 90)
p95, _ := stats.Percentile(us.SortedAllDelays, 95)
p99, _ := stats.Percentile(us.SortedAllDelays, 99)
- maxDelay := us.SortedAllDelays[len(us.SortedAllDelays)-1]
+
+ maxDelay := float64(0)
+ if len(us.SortedAllDelays) > 0 {
+ maxDelay = us.SortedAllDelays[len(us.SortedAllDelays)-1]
+ }
log.Printf("For total %d upkeeps: total performs: %d, p50: %f, p90: %f, p95: %f, p99: %f, max delay: %f, total delay blocks: %f, average perform delay: %f\n", len(upkeepIds), us.TotalPerforms, p50, p90, p95, p99, maxDelay, us.TotalDelayBlock, us.TotalDelayBlock/float64(us.TotalPerforms))
log.Printf("All STATS ABOVE ARE CALCULATED AT BLOCK %d", blockNum)
}
-func (k *Keeper) getUpkeepInfo(idChan chan *big.Int, resultsChan chan *upkeepInfo, v verifiableLoad, opts *bind.CallOpts, wg *sync.WaitGroup) {
+func (k *Keeper) getUpkeepInfo(idChan chan *big.Int, resultsChan chan *upkeepInfo, v verifiableLoad, opts *bind.CallOpts, wg *sync.WaitGroup, csv bool) {
defer wg.Done()
for id := range idChan {
@@ -171,9 +180,18 @@ func (k *Keeper) getUpkeepInfo(idChan chan *big.Int, resultsChan chan *upkeepInf
p90, _ := stats.Percentile(info.SortedAllDelays, 90)
p95, _ := stats.Percentile(info.SortedAllDelays, 95)
p99, _ := stats.Percentile(info.SortedAllDelays, 99)
- maxDelay := info.SortedAllDelays[len(info.SortedAllDelays)-1]
- log.Printf("upkeep ID %s has %d performs in total. p50: %f, p90: %f, p95: %f, p99: %f, max delay: %f, total delay blocks: %d, average perform delay: %f\n", id, info.TotalPerforms, p50, p90, p95, p99, maxDelay, uint64(info.TotalDelayBlock), info.TotalDelayBlock/float64(info.TotalPerforms))
+ maxDelay := float64(0)
+
+ if len(info.SortedAllDelays) > 0 {
+ maxDelay = info.SortedAllDelays[len(info.SortedAllDelays)-1]
+ }
+
+ if csv {
+ fmt.Printf("%s,%d,%f,%f,%f,%f,%f,%d,%f\n", id, info.TotalPerforms, p50, p90, p95, p99, maxDelay, uint64(info.TotalDelayBlock), info.TotalDelayBlock/float64(info.TotalPerforms))
+ } else {
+ log.Printf("upkeep ID %s has %d performs in total. p50: %f, p90: %f, p95: %f, p99: %f, max delay: %f, total delay blocks: %d, average perform delay: %f\n", id, info.TotalPerforms, p50, p90, p95, p99, maxDelay, uint64(info.TotalDelayBlock), info.TotalDelayBlock/float64(info.TotalPerforms))
+ }
resultsChan <- info
}
}
diff --git a/core/scripts/common/helpers.go b/core/scripts/common/helpers.go
index d03dcec097f..c141e8a29c4 100644
--- a/core/scripts/common/helpers.go
+++ b/core/scripts/common/helpers.go
@@ -219,6 +219,11 @@ func explorerLinkPrefix(chainID int64) (prefix string) {
case 8453:
prefix = "https://basescan.org"
+ case 280: // zkSync Goerli testnet
+ prefix = "https://goerli.explorer.zksync.io"
+ case 324: // zkSync mainnet
+ prefix = "https://explorer.zksync.io"
+
default: // Unknown chain, return prefix as-is
prefix = ""
}
diff --git a/core/scripts/common/vrf/model/model.go b/core/scripts/common/vrf/model/model.go
index bd0e3bbe364..42deb424536 100644
--- a/core/scripts/common/vrf/model/model.go
+++ b/core/scripts/common/vrf/model/model.go
@@ -44,3 +44,8 @@ type ContractAddresses struct {
CoordinatorAddress common.Address
BatchCoordinatorAddress common.Address
}
+
+type VRFKeyRegistrationConfig struct {
+ VRFKeyUncompressedPubKey string
+ RegisterAgainstAddress string
+}
diff --git a/core/scripts/common/vrf/setup-envs/README.md b/core/scripts/common/vrf/setup-envs/README.md
index 33515338a24..f3b391f0eed 100644
--- a/core/scripts/common/vrf/setup-envs/README.md
+++ b/core/scripts/common/vrf/setup-envs/README.md
@@ -35,7 +35,9 @@ go run . \
--min-confs=3 \
--num-eth-keys=1 \
--num-vrf-keys=1 \
---sending-key-funding-amount="1e17"
+--sending-key-funding-amount="1e17" \
+--register-vrf-key-against-address=
```
Optional parameters - will not be deployed if specified (NOT WORKING YET)
diff --git a/core/scripts/common/vrf/setup-envs/main.go b/core/scripts/common/vrf/setup-envs/main.go
index 6748408f476..7c2530ffd47 100644
--- a/core/scripts/common/vrf/setup-envs/main.go
+++ b/core/scripts/common/vrf/setup-envs/main.go
@@ -85,6 +85,8 @@ func main() {
batchBHSAddressString := flag.String("batch-bhs-address", "", "address of Batch BHS contract")
coordinatorAddressString := flag.String("coordinator-address", "", "address of VRF Coordinator contract")
batchCoordinatorAddressString := flag.String("batch-coordinator-address", "", "address Batch VRF Coordinator contract")
+ registerVRFKeyAgainstAddress := flag.String("register-vrf-key-against-address", "", "VRF Key registration against address - "+
+ "from this address you can perform `coordinator.oracleWithdraw` to withdraw earned funds from rand request fulfilments")
e := helpers.SetupEnv(false)
flag.Parse()
@@ -171,6 +173,11 @@ func main() {
BatchCoordinatorAddress: common.HexToAddress(*batchCoordinatorAddressString),
}
+ vrfKeyRegistrationConfig := model.VRFKeyRegistrationConfig{
+ VRFKeyUncompressedPubKey: nodesMap[model.VRFPrimaryNodeName].VrfKeys[0],
+ RegisterAgainstAddress: *registerVRFKeyAgainstAddress,
+ }
+
var jobSpecs model.JobSpecs
switch *vrfVersion {
@@ -188,10 +195,10 @@ func main() {
}
coordinatorConfigV2 := v2scripts.CoordinatorConfigV2{
- MinConfs: minConfs,
- MaxGasLimit: &constants.MaxGasLimit,
- StalenessSeconds: &constants.StalenessSeconds,
- GasAfterPayment: &constants.GasAfterPayment,
+ MinConfs: *minConfs,
+ MaxGasLimit: constants.MaxGasLimit,
+ StalenessSeconds: constants.StalenessSeconds,
+ GasAfterPayment: constants.GasAfterPayment,
FallbackWeiPerUnitLink: constants.FallbackWeiPerUnitLink,
FeeConfig: feeConfigV2,
}
@@ -199,7 +206,7 @@ func main() {
jobSpecs = v2scripts.VRFV2DeployUniverse(
e,
subscriptionBalanceJuels,
- &nodesMap[model.VRFPrimaryNodeName].VrfKeys[0],
+ vrfKeyRegistrationConfig,
contractAddresses,
coordinatorConfigV2,
*batchFulfillmentEnabled,
@@ -211,10 +218,10 @@ func main() {
FulfillmentFlatFeeNativePPM: uint32(constants.FlatFeeNativePPM),
}
coordinatorConfigV2Plus := v2plusscripts.CoordinatorConfigV2Plus{
- MinConfs: minConfs,
- MaxGasLimit: &constants.MaxGasLimit,
- StalenessSeconds: &constants.StalenessSeconds,
- GasAfterPayment: &constants.GasAfterPayment,
+ MinConfs: *minConfs,
+ MaxGasLimit: constants.MaxGasLimit,
+ StalenessSeconds: constants.StalenessSeconds,
+ GasAfterPayment: constants.GasAfterPayment,
FallbackWeiPerUnitLink: constants.FallbackWeiPerUnitLink,
FeeConfig: feeConfigV2Plus,
}
@@ -223,7 +230,7 @@ func main() {
e,
subscriptionBalanceJuels,
subscriptionBalanceNativeWei,
- &nodesMap[model.VRFPrimaryNodeName].VrfKeys[0],
+ vrfKeyRegistrationConfig,
contractAddresses,
coordinatorConfigV2Plus,
*batchFulfillmentEnabled,
diff --git a/core/scripts/functions/templates/oracle.toml b/core/scripts/functions/templates/oracle.toml
index 4739252d68e..d21fe4a5e87 100644
--- a/core/scripts/functions/templates/oracle.toml
+++ b/core/scripts/functions/templates/oracle.toml
@@ -36,6 +36,7 @@ requestTimeoutSec = 300
maxRequestSizesList = [30_720, 51_200, 102_400, 204_800, 512_000, 1_048_576, 2_097_152, 3_145_728, 5_242_880, 10_485_760]
maxSecretsSizesList = [10_240, 20_480, 51_200, 102_400, 307_200, 512_000, 1_048_576, 2_097_152]
minimumSubscriptionBalance = "2 link"
+pastBlocksToPoll = 25
[pluginConfig.OnchainAllowlist]
diff --git a/core/scripts/go.mod b/core/scripts/go.mod
index 690a8d189cc..eb41312a6ab 100644
--- a/core/scripts/go.mod
+++ b/core/scripts/go.mod
@@ -13,6 +13,7 @@ require (
github.com/ethereum/go-ethereum v1.12.0
github.com/google/go-cmp v0.5.9
github.com/google/uuid v1.3.1
+ github.com/jmoiron/sqlx v1.3.5
github.com/joho/godotenv v1.4.0
github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f
github.com/montanaflynn/stats v0.7.1
@@ -21,10 +22,9 @@ require (
github.com/pkg/errors v0.9.1
github.com/shopspring/decimal v1.3.1
github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000
- github.com/smartcontractkit/libocr v0.0.0-20231020123319-d255366a6545
- github.com/smartcontractkit/ocr2keepers v0.7.27
+ github.com/smartcontractkit/libocr v0.0.0-20231107151413-13e0202ae8d7
+ github.com/smartcontractkit/ocr2keepers v0.7.28
github.com/smartcontractkit/ocr2vrf v0.0.0-20230804151440-2f1eb1e20687
- github.com/smartcontractkit/sqlx v1.3.5-0.20210805004948-4be295aacbeb
github.com/spf13/cobra v1.6.1
github.com/spf13/viper v1.15.0
github.com/stretchr/testify v1.8.4
@@ -44,6 +44,7 @@ require (
filippo.io/edwards25519 v1.0.0 // indirect
github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect
github.com/99designs/keyring v1.2.1 // indirect
+ github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d // indirect
github.com/CosmWasm/wasmd v0.40.1 // indirect
github.com/CosmWasm/wasmvm v1.2.4 // indirect
@@ -105,7 +106,7 @@ require (
github.com/fatih/color v1.15.0 // indirect
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
- github.com/fxamacker/cbor/v2 v2.4.0 // indirect
+ github.com/fxamacker/cbor/v2 v2.5.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gagliardetto/binary v0.7.1 // indirect
github.com/gagliardetto/solana-go v1.4.1-0.20220428092759-5250b4abbb27 // indirect
@@ -119,8 +120,10 @@ require (
github.com/gin-contrib/size v0.0.0-20230212012657-e14a14094dc4 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/gin-gonic/gin v1.9.1 // indirect
+ github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect
github.com/go-kit/kit v0.12.0 // indirect
github.com/go-kit/log v0.2.1 // indirect
+ github.com/go-ldap/ldap/v3 v3.4.5 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@@ -129,13 +132,13 @@ require (
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.14.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect
- github.com/go-webauthn/revoke v0.1.9 // indirect
- github.com/go-webauthn/webauthn v0.8.2 // indirect
+ github.com/go-webauthn/webauthn v0.8.6 // indirect
+ github.com/go-webauthn/x v0.1.4 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/protobuf v1.3.3 // indirect
- github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
+ github.com/golang-jwt/jwt/v5 v5.0.0 // indirect
github.com/golang/glog v1.1.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/mock v1.6.0 // indirect
@@ -143,7 +146,7 @@ require (
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/go-querystring v1.1.0 // indirect
- github.com/google/go-tpm v0.3.3 // indirect
+ github.com/google/go-tpm v0.9.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 // indirect
@@ -196,7 +199,6 @@ require (
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/jbenet/goprocess v0.1.4 // indirect
github.com/jmhodges/levigo v1.0.0 // indirect
- github.com/jmoiron/sqlx v1.3.5 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.0 // indirect
@@ -251,7 +253,7 @@ require (
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
- github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
+ github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect
github.com/minio/sha256-simd v0.1.1 // indirect
@@ -286,9 +288,9 @@ require (
github.com/pressly/goose/v3 v3.15.1 // indirect
github.com/prometheus/client_golang v1.17.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
- github.com/prometheus/common v0.44.0 // indirect
+ github.com/prometheus/common v0.45.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
- github.com/prometheus/prometheus v0.46.0 // indirect
+ github.com/prometheus/prometheus v0.47.2 // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.3 // indirect
@@ -301,8 +303,8 @@ require (
github.com/shirou/gopsutil/v3 v3.23.9 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 // indirect
- github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231030134738-81a5a89699a0 // indirect
- github.com/smartcontractkit/chainlink-relay v0.1.7-0.20231031114820-e9826d481111 // indirect
+ github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231109141932-cb1ea9020255 // indirect
+ github.com/smartcontractkit/chainlink-relay v0.1.7-0.20231113174149-046d4ddaca1a // indirect
github.com/smartcontractkit/chainlink-solana v1.0.3-0.20231023133638-72f4e799ab05 // indirect
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20231024133459-1ef3a11319eb // indirect
github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 // indirect
@@ -322,7 +324,7 @@ require (
github.com/teris-io/shortid v0.0.0-20201117134242-e59966efd125 // indirect
github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a // indirect
github.com/tidwall/btree v1.6.0 // indirect
- github.com/tidwall/gjson v1.16.0 // indirect
+ github.com/tidwall/gjson v1.17.0 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
@@ -364,7 +366,7 @@ require (
golang.org/x/text v0.13.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.14.0 // indirect
- gonum.org/v1/gonum v0.13.0 // indirect
+ gonum.org/v1/gonum v0.14.0 // indirect
google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 // indirect
diff --git a/core/scripts/go.sum b/core/scripts/go.sum
index 5cbdb37427d..35e85fe2c97 100644
--- a/core/scripts/go.sum
+++ b/core/scripts/go.sum
@@ -79,6 +79,8 @@ github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOv
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
+github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
@@ -124,6 +126,8 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA=
+github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc=
github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
@@ -387,8 +391,8 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
-github.com/fxamacker/cbor/v2 v2.4.0 h1:ri0ArlOR+5XunOP8CRUowT0pSJOwhW098ZCUyskZD88=
-github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo=
+github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE=
+github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gagliardetto/binary v0.6.1/go.mod h1:aOfYkc20U0deHaHn/LVZXiqlkDbFAX0FpTlDhsXa0S0=
@@ -424,6 +428,8 @@ github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/
github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
+github.com/go-asn1-ber/asn1-ber v1.5.4 h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A=
+github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
@@ -438,6 +444,8 @@ github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEai
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-ldap/ldap/v3 v3.4.5 h1:ekEKmaDrpvR2yf5Nc/DClsGG9lAmdDixe44mLzlW5r8=
+github.com/go-ldap/ldap/v3 v3.4.5/go.mod h1:bMGIq3AGbytbaMwf8wdv5Phdxz0FWHTIYMSzyrYgnQs=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
@@ -470,10 +478,10 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/go-webauthn/revoke v0.1.9 h1:gSJ1ckA9VaKA2GN4Ukp+kiGTk1/EXtaDb1YE8RknbS0=
-github.com/go-webauthn/revoke v0.1.9/go.mod h1:j6WKPnv0HovtEs++paan9g3ar46gm1NarktkXBaPR+w=
-github.com/go-webauthn/webauthn v0.8.2 h1:8KLIbpldjz9KVGHfqEgJNbkhd7bbRXhNw4QWFJE15oA=
-github.com/go-webauthn/webauthn v0.8.2/go.mod h1:d+ezx/jMCNDiqSMzOchuynKb9CVU1NM9BumOnokfcVQ=
+github.com/go-webauthn/webauthn v0.8.6 h1:bKMtL1qzd2WTFkf1mFTVbreYrwn7dsYmEPjTq6QN90E=
+github.com/go-webauthn/webauthn v0.8.6/go.mod h1:emwVLMCI5yx9evTTvr0r+aOZCdWJqMfbRhF0MufyUog=
+github.com/go-webauthn/x v0.1.4 h1:sGmIFhcY70l6k7JIDfnjVBiAAFEssga5lXIUXe0GtAs=
+github.com/go-webauthn/x v0.1.4/go.mod h1:75Ug0oK6KYpANh5hDOanfDI+dvPWHk788naJVG/37H8=
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
@@ -491,9 +499,12 @@ github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q8
github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0=
github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM=
+github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE=
+github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
@@ -559,12 +570,8 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
-github.com/google/go-tpm v0.1.2-0.20190725015402-ae6dd98980d4/go.mod h1:H9HbmUG2YgV/PHITkO7p6wxEEj/v5nlsVWIwumwH2NI=
-github.com/google/go-tpm v0.3.0/go.mod h1:iVLWvrPp/bHeEkxTFi9WG6K9w0iy2yIszHwZGHPbzAw=
-github.com/google/go-tpm v0.3.3 h1:P/ZFNBZYXRxc+z7i5uyd8VP7MaDteuLZInzrH2idRGo=
-github.com/google/go-tpm v0.3.3/go.mod h1:9Hyn3rgnzWF9XBWVk6ml6A6hNkbWjNFlDQL51BeghL4=
-github.com/google/go-tpm-tools v0.0.0-20190906225433-1614c142f845/go.mod h1:AVfHadzbdzHo54inR2x1v640jdi1YSi3NauM2DUsxk0=
-github.com/google/go-tpm-tools v0.2.0/go.mod h1:npUd03rQ60lxN7tzeBJreG38RvWwme2N1reF/eeiBk4=
+github.com/google/go-tpm v0.9.0 h1:sQF6YqWMi+SCXpsmS3fd21oPy/vSddwZry4JnmltHVk=
+github.com/google/go-tpm v0.9.0/go.mod h1:FkNVkc6C+IsvDI9Jw1OveJmxGZUUaKxtrpOS47QWKfU=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
@@ -618,7 +625,6 @@ github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyC
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
@@ -1172,8 +1178,8 @@ github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJK
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
-github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
+github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
@@ -1380,16 +1386,16 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
-github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
+github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
+github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
-github.com/prometheus/prometheus v0.46.0 h1:9JSdXnsuT6YsbODEhSQMwxNkGwPExfmzqG73vCMk/Kw=
-github.com/prometheus/prometheus v0.46.0/go.mod h1:10L5IJE5CEsjee1FnOcVswYXlPIscDWWt3IJ2UDYrz4=
+github.com/prometheus/prometheus v0.47.2 h1:jWcnuQHz1o1Wu3MZ6nMJDuTI0kU5yJp9pkxh8XEkNvI=
+github.com/prometheus/prometheus v0.47.2/go.mod h1:J/bmOSjgH7lFxz2gZhrWEZs2i64vMS+HIuZfmYNhJ/M=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ=
github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc=
@@ -1456,10 +1462,10 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 h1:T3lFWumvbfM1u/etVq42Afwq/jtNSBSOA8n5jntnNPo=
github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704/go.mod h1:2QuJdEouTWjh5BDy5o/vgGXQtR4Gz8yH1IYB5eT7u4M=
-github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231030134738-81a5a89699a0 h1:YrJ3moRDu2kgdv4o3Hym/FWVF4MS5cIZ7o7wk+43pvk=
-github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231030134738-81a5a89699a0/go.mod h1:fxtwgVZzTgoU1CpdSxNvFXecIY2r8DhH2JCzPO4e9G0=
-github.com/smartcontractkit/chainlink-relay v0.1.7-0.20231031114820-e9826d481111 h1:CElKhWq0WIa9Rmg5Ssajs5Hp3m3u/nYIQdXtpj2gbcc=
-github.com/smartcontractkit/chainlink-relay v0.1.7-0.20231031114820-e9826d481111/go.mod h1:M9U1JV7IQi8Sfj4JR1qSi1tIh6omgW78W/8SHN/8BUQ=
+github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231109141932-cb1ea9020255 h1:Pt6c7bJU9wIN6PQQnmN8UmYYH6lpfiQ6U/B8yEC2s5s=
+github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231109141932-cb1ea9020255/go.mod h1:EHppaccd/LTlTMI2o4dmBHe4BknEgEFFDjDGMNuGb3k=
+github.com/smartcontractkit/chainlink-relay v0.1.7-0.20231113174149-046d4ddaca1a h1:G/pD8uI1PULRJU8Y3eLLzjqQBp9ruG9hj+wWxtyrgTo=
+github.com/smartcontractkit/chainlink-relay v0.1.7-0.20231113174149-046d4ddaca1a/go.mod h1:M9U1JV7IQi8Sfj4JR1qSi1tIh6omgW78W/8SHN/8BUQ=
github.com/smartcontractkit/chainlink-solana v1.0.3-0.20231023133638-72f4e799ab05 h1:DaPSVnxe7oz1QJ+AVIhQWs1W3ubQvwvGo9NbHpMs1OQ=
github.com/smartcontractkit/chainlink-solana v1.0.3-0.20231023133638-72f4e799ab05/go.mod h1:o0Pn1pbaUluboaK6/yhf8xf7TiFCkyFl6WUOdwqamuU=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20231024133459-1ef3a11319eb h1:HiluOfEVGOQTM6BTDImOqYdMZZ7qq7fkZ3TJdmItNr8=
@@ -1468,14 +1474,12 @@ github.com/smartcontractkit/go-plugin v0.0.0-20231003134350-e49dad63b306 h1:ko88
github.com/smartcontractkit/go-plugin v0.0.0-20231003134350-e49dad63b306/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4=
github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f h1:hgJif132UCdjo8u43i7iPN1/MFnu49hv7lFGFftCHKU=
github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f/go.mod h1:MvMXoufZAtqExNexqi4cjrNYE9MefKddKylxjS+//n0=
-github.com/smartcontractkit/libocr v0.0.0-20231020123319-d255366a6545 h1:qOsw2ETQD/Sb/W2xuYn2KPWjvvsWA0C+l19rWFq8iNg=
-github.com/smartcontractkit/libocr v0.0.0-20231020123319-d255366a6545/go.mod h1:2lyRkw/qLQgUWlrWWmq5nj0y90rWeO6Y+v+fCakRgb0=
-github.com/smartcontractkit/ocr2keepers v0.7.27 h1:kwqMrzmEdq6gH4yqNuLQCbdlED0KaIjwZzu3FF+Gves=
-github.com/smartcontractkit/ocr2keepers v0.7.27/go.mod h1:1QGzJURnoWpysguPowOe2bshV0hNp1YX10HHlhDEsas=
+github.com/smartcontractkit/libocr v0.0.0-20231107151413-13e0202ae8d7 h1:21V61XOYSxpFmFqlhr5IaEh1uQ1F6CewJ30D/U/P34c=
+github.com/smartcontractkit/libocr v0.0.0-20231107151413-13e0202ae8d7/go.mod h1:2lyRkw/qLQgUWlrWWmq5nj0y90rWeO6Y+v+fCakRgb0=
+github.com/smartcontractkit/ocr2keepers v0.7.28 h1:dufAiYl4+uly9aH0+6GkS2jYzHGujq7tg0LYQE+x6JU=
+github.com/smartcontractkit/ocr2keepers v0.7.28/go.mod h1:1QGzJURnoWpysguPowOe2bshV0hNp1YX10HHlhDEsas=
github.com/smartcontractkit/ocr2vrf v0.0.0-20230804151440-2f1eb1e20687 h1:NwC3SOc25noBTe1KUQjt45fyTIuInhoE2UfgcHAdihM=
github.com/smartcontractkit/ocr2vrf v0.0.0-20230804151440-2f1eb1e20687/go.mod h1:YYZq52t4wcHoMQeITksYsorD+tZcOyuVU5+lvot3VFM=
-github.com/smartcontractkit/sqlx v1.3.5-0.20210805004948-4be295aacbeb h1:OMaBUb4X9IFPLbGbCHsMU+kw/BPCrewaVwWGIBc0I4A=
-github.com/smartcontractkit/sqlx v1.3.5-0.20210805004948-4be295aacbeb/go.mod h1:HNUu4cJekUdsJbwRBCiOybtkPJEfGRELQPe2tkoDEyk=
github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 h1:yiKnypAqP8l0OX0P3klzZ7SCcBUxy5KqTAKZmQOvSQE=
github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1/go.mod h1:q6f4fe39oZPdsh1i57WznEZgxd8siidMaSFq3wdPmVg=
github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 h1:Dai1bn+Q5cpeGMQwRdjOdVjG8mmFFROVkSKuUgBErRQ=
@@ -1499,7 +1503,6 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU
github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
-github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
@@ -1510,7 +1513,6 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
-github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU=
@@ -1557,8 +1559,8 @@ github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a/go.mod h1:/sfW47
github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg=
github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
github.com/tidwall/gjson v1.9.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
-github.com/tidwall/gjson v1.16.0 h1:SyXa+dsSPpUlcwEDuKuEBJEz5vzTvOea+9rjyYodQFg=
-github.com/tidwall/gjson v1.16.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
+github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM=
+github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
@@ -1745,6 +1747,7 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
+golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1785,6 +1788,7 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1804,7 +1808,6 @@ golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -1846,6 +1849,7 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1858,8 +1862,8 @@ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8=
-golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
+golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4=
+golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1872,6 +1876,7 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1945,7 +1950,6 @@ golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210629170331-7dc0b73dc9fb/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1975,6 +1979,7 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1987,6 +1992,7 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -2063,6 +2069,7 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -2074,8 +2081,8 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM=
-gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU=
+gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0=
+gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -2161,7 +2168,6 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753/go.
google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
diff --git a/core/scripts/vrfv2/testnet/README.md b/core/scripts/vrfv2/testnet/README.md
index 1b2d986f554..b527c576fd0 100644
--- a/core/scripts/vrfv2/testnet/README.md
+++ b/core/scripts/vrfv2/testnet/README.md
@@ -57,11 +57,19 @@ To deploy a full VRF environment on-chain, run:
```shell
go run . deploy-universe \
---sending-key-funding-amount 100000000000000000 \
---subscription-balance=10000000000000000000 \
+--subscription-balance=5000000000000000000 \ #5 LINK
--uncompressed-pub-key= \
---vrf-primary-node-sending-keys="" \
---batch-fulfillment-enabled false
+--vrf-primary-node-sending-keys="" \ #used to fund the keys and for sample VRF Job Spec generation
+--sending-key-funding-amount 100000000000000000 \ #0.1 ETH, fund addresses specified in vrf-primary-node-sending-keys
+--batch-fulfillment-enabled false \ #only used for sample VRF Job Spec generation
+--register-vrf-key-against-address=<"from this address you can perform `coordinator.oracleWithdraw` to withdraw earned funds from rand request fulfilments>
+```
+```shell
+go run . deploy-universe \
+--subscription-balance=5000000000000000000 \
+--uncompressed-pub-key="0xf3706e247a7b205c8a8bd25a6e8c4650474da496151371085d45beeead27e568c1a5e8330c7fa718f8a31226efbff6632ed6f8ed470b637aa9be2b948e9dcef6" \
+--batch-fulfillment-enabled false \
+--register-vrf-key-against-address="0x23b5613fc04949F4A53d1cc8d6BCCD21ffc38C11"
```
## Deploying the Consumer Contract
diff --git a/core/scripts/vrfv2/testnet/main.go b/core/scripts/vrfv2/testnet/main.go
index 5b216776bd9..677c0b105ea 100644
--- a/core/scripts/vrfv2/testnet/main.go
+++ b/core/scripts/vrfv2/testnet/main.go
@@ -21,7 +21,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/shopspring/decimal"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
helpers "github.com/smartcontractkit/chainlink/core/scripts/common"
"github.com/smartcontractkit/chainlink/v2/core/assets"
diff --git a/core/scripts/vrfv2/testnet/v2scripts/super_scripts.go b/core/scripts/vrfv2/testnet/v2scripts/super_scripts.go
index f5e37005690..b623ae63084 100644
--- a/core/scripts/vrfv2/testnet/v2scripts/super_scripts.go
+++ b/core/scripts/vrfv2/testnet/v2scripts/super_scripts.go
@@ -5,10 +5,6 @@ import (
"encoding/hex"
"flag"
"fmt"
- "github.com/smartcontractkit/chainlink/core/scripts/common/vrf/constants"
- "github.com/smartcontractkit/chainlink/core/scripts/common/vrf/jobs"
- "github.com/smartcontractkit/chainlink/core/scripts/common/vrf/model"
- "github.com/smartcontractkit/chainlink/core/scripts/common/vrf/util"
"math/big"
"os"
"strings"
@@ -18,6 +14,11 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/shopspring/decimal"
+ "github.com/smartcontractkit/chainlink/core/scripts/common/vrf/constants"
+ "github.com/smartcontractkit/chainlink/core/scripts/common/vrf/jobs"
+ "github.com/smartcontractkit/chainlink/core/scripts/common/vrf/model"
+ "github.com/smartcontractkit/chainlink/core/scripts/common/vrf/util"
+
helpers "github.com/smartcontractkit/chainlink/core/scripts/common"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/link_token_interface"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrf_coordinator_v2"
@@ -25,10 +26,10 @@ import (
)
type CoordinatorConfigV2 struct {
- MinConfs *int
- MaxGasLimit *int64
- StalenessSeconds *int64
- GasAfterPayment *int64
+ MinConfs int
+ MaxGasLimit int64
+ StalenessSeconds int64
+ GasAfterPayment int64
FallbackWeiPerUnitLink *big.Int
FeeConfig vrf_coordinator_v2.VRFCoordinatorV2FeeConfig
}
@@ -37,12 +38,12 @@ func DeployUniverseViaCLI(e helpers.Environment) {
deployCmd := flag.NewFlagSet("deploy-universe", flag.ExitOnError)
// required flags
- linkAddress := *deployCmd.String("link-address", "", "address of link token")
- linkEthAddress := *deployCmd.String("link-eth-feed", "", "address of link eth feed")
- bhsContractAddressString := *deployCmd.String("bhs-address", "", "address of BHS contract")
- batchBHSAddressString := *deployCmd.String("batch-bhs-address", "", "address of Batch BHS contract")
- coordinatorAddressString := *deployCmd.String("coordinator-address", "", "address of VRF Coordinator contract")
- batchCoordinatorAddressString := *deployCmd.String("batch-coordinator-address", "", "address Batch VRF Coordinator contract")
+ linkAddress := deployCmd.String("link-address", "", "address of link token")
+ linkEthAddress := deployCmd.String("link-eth-feed", "", "address of link eth feed")
+ bhsContractAddressString := deployCmd.String("bhs-address", "", "address of BHS contract")
+ batchBHSAddressString := deployCmd.String("batch-bhs-address", "", "address of Batch BHS contract")
+ coordinatorAddressString := deployCmd.String("coordinator-address", "", "address of VRF Coordinator contract")
+ batchCoordinatorAddressString := deployCmd.String("batch-coordinator-address", "", "address Batch VRF Coordinator contract")
subscriptionBalanceJuelsString := deployCmd.String("subscription-balance", constants.SubscriptionBalanceJuels, "amount to fund subscription")
nodeSendingKeyFundingAmount := deployCmd.String("sending-key-funding-amount", constants.NodeSendingKeyFundingAmount, "CL node sending key funding amount")
@@ -51,7 +52,10 @@ func DeployUniverseViaCLI(e helpers.Environment) {
// optional flags
fallbackWeiPerUnitLinkString := deployCmd.String("fallback-wei-per-unit-link", constants.FallbackWeiPerUnitLink.String(), "fallback wei/link ratio")
- registerKeyUncompressedPubKey := deployCmd.String("uncompressed-pub-key", "", "uncompressed public key")
+ registerVRFKeyUncompressedPubKey := deployCmd.String("uncompressed-pub-key", "", "uncompressed public key")
+ registerVRFKeyAgainstAddress := deployCmd.String("register-vrf-key-against-address", "", "VRF Key registration against address - "+
+ "from this address you can perform `coordinator.oracleWithdraw` to withdraw earned funds from rand request fulfilments")
+
vrfPrimaryNodeSendingKeysString := deployCmd.String("vrf-primary-node-sending-keys", "", "VRF Primary Node sending keys")
minConfs := deployCmd.Int("min-confs", constants.MinConfs, "min confs")
@@ -87,7 +91,10 @@ func DeployUniverseViaCLI(e helpers.Environment) {
ReqsForTier5: big.NewInt(*reqsForTier5),
}
- vrfPrimaryNodeSendingKeys := strings.Split(*vrfPrimaryNodeSendingKeysString, ",")
+ var vrfPrimaryNodeSendingKeys []string
+ if len(*vrfPrimaryNodeSendingKeysString) > 0 {
+ vrfPrimaryNodeSendingKeys = strings.Split(*vrfPrimaryNodeSendingKeysString, ",")
+ }
nodesMap := make(map[string]model.Node)
@@ -100,14 +107,14 @@ func DeployUniverseViaCLI(e helpers.Environment) {
SendingKeyFundingAmount: fundingAmount,
}
- bhsContractAddress := common.HexToAddress(bhsContractAddressString)
- batchBHSAddress := common.HexToAddress(batchBHSAddressString)
- coordinatorAddress := common.HexToAddress(coordinatorAddressString)
- batchCoordinatorAddress := common.HexToAddress(batchCoordinatorAddressString)
+ bhsContractAddress := common.HexToAddress(*bhsContractAddressString)
+ batchBHSAddress := common.HexToAddress(*batchBHSAddressString)
+ coordinatorAddress := common.HexToAddress(*coordinatorAddressString)
+ batchCoordinatorAddress := common.HexToAddress(*batchCoordinatorAddressString)
contractAddresses := model.ContractAddresses{
- LinkAddress: linkAddress,
- LinkEthAddress: linkEthAddress,
+ LinkAddress: *linkAddress,
+ LinkEthAddress: *linkEthAddress,
BhsContractAddress: bhsContractAddress,
BatchBHSAddress: batchBHSAddress,
CoordinatorAddress: coordinatorAddress,
@@ -115,18 +122,23 @@ func DeployUniverseViaCLI(e helpers.Environment) {
}
coordinatorConfig := CoordinatorConfigV2{
- MinConfs: minConfs,
- MaxGasLimit: maxGasLimit,
- StalenessSeconds: stalenessSeconds,
- GasAfterPayment: gasAfterPayment,
+ MinConfs: *minConfs,
+ MaxGasLimit: *maxGasLimit,
+ StalenessSeconds: *stalenessSeconds,
+ GasAfterPayment: *gasAfterPayment,
FallbackWeiPerUnitLink: fallbackWeiPerUnitLink,
FeeConfig: feeConfig,
}
+ vrfKeyRegistrationConfig := model.VRFKeyRegistrationConfig{
+ VRFKeyUncompressedPubKey: *registerVRFKeyUncompressedPubKey,
+ RegisterAgainstAddress: *registerVRFKeyAgainstAddress,
+ }
+
VRFV2DeployUniverse(
e,
subscriptionBalanceJuels,
- registerKeyUncompressedPubKey,
+ vrfKeyRegistrationConfig,
contractAddresses,
coordinatorConfig,
*batchFulfillmentEnabled,
@@ -143,35 +155,38 @@ func DeployUniverseViaCLI(e helpers.Environment) {
func VRFV2DeployUniverse(
e helpers.Environment,
subscriptionBalanceJuels *big.Int,
- registerKeyUncompressedPubKey *string,
+ vrfKeyRegistrationConfig model.VRFKeyRegistrationConfig,
contractAddresses model.ContractAddresses,
coordinatorConfig CoordinatorConfigV2,
batchFulfillmentEnabled bool,
nodesMap map[string]model.Node,
) model.JobSpecs {
-
- // Put key in ECDSA format
- if strings.HasPrefix(*registerKeyUncompressedPubKey, "0x") {
- *registerKeyUncompressedPubKey = strings.Replace(*registerKeyUncompressedPubKey, "0x", "04", 1)
- }
-
- // Generate compressed public key and key hash
- pubBytes, err := hex.DecodeString(*registerKeyUncompressedPubKey)
- helpers.PanicErr(err)
- pk, err := crypto.UnmarshalPubkey(pubBytes)
- helpers.PanicErr(err)
- var pkBytes []byte
- if big.NewInt(0).Mod(pk.Y, big.NewInt(2)).Uint64() != 0 {
- pkBytes = append(pk.X.Bytes(), 1)
- } else {
- pkBytes = append(pk.X.Bytes(), 0)
+ var compressedPkHex string
+ var keyHash common.Hash
+ if len(vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey) > 0 {
+ // Put key in ECDSA format
+ if strings.HasPrefix(vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey, "0x") {
+ vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey = strings.Replace(vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey, "0x", "04", 1)
+ }
+
+ // Generate compressed public key and key hash
+ pubBytes, err := hex.DecodeString(vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey)
+ helpers.PanicErr(err)
+ pk, err := crypto.UnmarshalPubkey(pubBytes)
+ helpers.PanicErr(err)
+ var pkBytes []byte
+ if big.NewInt(0).Mod(pk.Y, big.NewInt(2)).Uint64() != 0 {
+ pkBytes = append(pk.X.Bytes(), 1)
+ } else {
+ pkBytes = append(pk.X.Bytes(), 0)
+ }
+ var newPK secp256k1.PublicKey
+ copy(newPK[:], pkBytes)
+
+ compressedPkHex = hexutil.Encode(pkBytes)
+ keyHash, err = newPK.Hash()
+ helpers.PanicErr(err)
}
- var newPK secp256k1.PublicKey
- copy(newPK[:], pkBytes)
-
- compressedPkHex := hexutil.Encode(pkBytes)
- keyHash, err := newPK.Hash()
- helpers.PanicErr(err)
if len(contractAddresses.LinkAddress) == 0 {
fmt.Println("\nDeploying LINK Token...")
@@ -210,10 +225,10 @@ func VRFV2DeployUniverse(
SetCoordinatorConfig(
e,
*coordinator,
- uint16(*coordinatorConfig.MinConfs),
- uint32(*coordinatorConfig.MaxGasLimit),
- uint32(*coordinatorConfig.StalenessSeconds),
- uint32(*coordinatorConfig.GasAfterPayment),
+ uint16(coordinatorConfig.MinConfs),
+ uint32(coordinatorConfig.MaxGasLimit),
+ uint32(coordinatorConfig.StalenessSeconds),
+ uint32(coordinatorConfig.GasAfterPayment),
coordinatorConfig.FallbackWeiPerUnitLink,
coordinatorConfig.FeeConfig,
)
@@ -221,12 +236,12 @@ func VRFV2DeployUniverse(
fmt.Println("\nConfig set, getting current config from deployed contract...")
PrintCoordinatorConfig(coordinator)
- if len(*registerKeyUncompressedPubKey) > 0 {
+ if len(vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey) > 0 {
fmt.Println("\nRegistering proving key...")
//NOTE - register proving key against EOA account, and not against Oracle's sending address in other to be able
// easily withdraw funds from Coordinator contract back to EOA account
- RegisterCoordinatorProvingKey(e, *coordinator, *registerKeyUncompressedPubKey, e.Owner.From.String())
+ RegisterCoordinatorProvingKey(e, *coordinator, vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey, vrfKeyRegistrationConfig.RegisterAgainstAddress)
fmt.Println("\nProving key registered, getting proving key hashes from deployed contract...")
_, _, provingKeyHashes, configErr := coordinator.GetRequestConfig(nil)
@@ -264,11 +279,17 @@ func VRFV2DeployUniverse(
contractAddresses.BatchCoordinatorAddress, //batchCoordinatorAddress
batchFulfillmentEnabled, //batchFulfillmentEnabled
compressedPkHex, //publicKey
- *coordinatorConfig.MinConfs, //minIncomingConfirmations
+ coordinatorConfig.MinConfs, //minIncomingConfirmations
e.ChainID, //evmChainID
strings.Join(util.MapToAddressArr(nodesMap[model.VRFPrimaryNodeName].SendingKeys), "\",\""), //fromAddresses
contractAddresses.CoordinatorAddress,
- nodesMap[model.VRFPrimaryNodeName].SendingKeys[0].Address,
+ func() string {
+ if keys := nodesMap[model.VRFPrimaryNodeName].SendingKeys; len(keys) > 0 {
+ return keys[0].Address
+ } else {
+ return common.HexToAddress("0x0").String()
+ }
+ }(),
contractAddresses.CoordinatorAddress,
contractAddresses.CoordinatorAddress,
)
@@ -283,7 +304,13 @@ func VRFV2DeployUniverse(
e.ChainID, //evmChainID
strings.Join(util.MapToAddressArr(nodesMap[model.VRFBackupNodeName].SendingKeys), "\",\""), //fromAddresses
contractAddresses.CoordinatorAddress,
- nodesMap[model.VRFPrimaryNodeName].SendingKeys[0],
+ func() string {
+ if keys := nodesMap[model.VRFPrimaryNodeName].SendingKeys; len(keys) > 0 {
+ return keys[0].Address
+ } else {
+ return common.HexToAddress("0x0").String()
+ }
+ }(),
contractAddresses.CoordinatorAddress,
contractAddresses.CoordinatorAddress,
)
@@ -329,7 +356,7 @@ func VRFV2DeployUniverse(
"\nVRF Subscription Id:", subID,
"\nVRF Subscription Balance:", *subscriptionBalanceJuels,
"\nPossible VRF Request command: ",
- fmt.Sprintf("go run . eoa-load-test-request-with-metrics --consumer-address=%s --sub-id=%d --key-hash=%s --request-confirmations %d --requests 1 --runs 1 --cb-gas-limit 1_000_000", consumerAddress, subID, keyHash, *coordinatorConfig.MinConfs),
+ fmt.Sprintf("go run . eoa-load-test-request-with-metrics --consumer-address=%s --sub-id=%d --key-hash=%s --request-confirmations %d --requests 1 --runs 1 --cb-gas-limit 1_000_000", consumerAddress, subID, keyHash, coordinatorConfig.MinConfs),
"\nRetrieve Request Status: ",
fmt.Sprintf("go run . eoa-load-test-read-metrics --consumer-address=%s", consumerAddress),
"\nA node can now be configured to run a VRF job with the below job spec :\n",
diff --git a/core/scripts/vrfv2plus/testnet/README.md b/core/scripts/vrfv2plus/testnet/README.md
index b95ec99d5fb..6402569c560 100644
--- a/core/scripts/vrfv2plus/testnet/README.md
+++ b/core/scripts/vrfv2plus/testnet/README.md
@@ -58,7 +58,16 @@ cd /core/scripts/vrfv2/testnet
- Not specifying `--link-eth-feed` would make the super script deploy a new LINK-ETH feed contract and use it for funding VRF V2+ subscription
```shell
-go run . deploy-universe --link-address=$LINK --link-eth-feed=$LINK_ETH_FEED --subscription-balance= --uncompressed-pub-key=$PUB_KEY --oracle-address=$ORACLE_ADDRESS
+go run . deploy-universe \
+--link-address=$LINK \
+--link-eth-feed=$LINK_ETH_FEED \
+--subscription-balance=5000000000000000000 \ #5 LINK
+--subscription-balance-native=1000000000000000000 \ #1 ETH
+--uncompressed-pub-key= \
+--vrf-primary-node-sending-keys="" \ #used to fund the keys and for sample VRF Job Spec generation
+--sending-key-funding-amount 100000000000000000 \ #0.1 ETH, fund addresses specified in vrf-primary-node-sending-keys
+--batch-fulfillment-enabled false \ #only used for sample VRF Job Spec generation
+--register-vrf-key-against-address="" # from this address you can perform `coordinator.oracleWithdraw` to withdraw earned funds from rand request fulfilments
```
## Deploying the Consumer Contract
diff --git a/core/scripts/vrfv2plus/testnet/main.go b/core/scripts/vrfv2plus/testnet/main.go
index 0d1bf9a9481..b7940d6fda0 100644
--- a/core/scripts/vrfv2plus/testnet/main.go
+++ b/core/scripts/vrfv2plus/testnet/main.go
@@ -6,12 +6,13 @@ import (
"encoding/hex"
"flag"
"fmt"
- "github.com/smartcontractkit/chainlink/core/scripts/vrfv2plus/testnet/v2plusscripts"
"log"
"math/big"
"os"
"strings"
+ "github.com/smartcontractkit/chainlink/core/scripts/vrfv2plus/testnet/v2plusscripts"
+
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/chain_specific_util_helper"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrf_coordinator_v2_5"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrf_coordinator_v2plus_interface"
@@ -25,7 +26,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/shopspring/decimal"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
helpers "github.com/smartcontractkit/chainlink/core/scripts/common"
"github.com/smartcontractkit/chainlink/v2/core/assets"
diff --git a/core/scripts/vrfv2plus/testnet/v2plusscripts/super_scripts.go b/core/scripts/vrfv2plus/testnet/v2plusscripts/super_scripts.go
index f805e7b74f0..50584d885a2 100644
--- a/core/scripts/vrfv2plus/testnet/v2plusscripts/super_scripts.go
+++ b/core/scripts/vrfv2plus/testnet/v2plusscripts/super_scripts.go
@@ -6,15 +6,16 @@ import (
"encoding/hex"
"flag"
"fmt"
+ "math/big"
+ "os"
+ "strings"
+
"github.com/smartcontractkit/chainlink/core/scripts/common/vrf/constants"
"github.com/smartcontractkit/chainlink/core/scripts/common/vrf/jobs"
"github.com/smartcontractkit/chainlink/core/scripts/common/vrf/model"
"github.com/smartcontractkit/chainlink/core/scripts/common/vrf/util"
evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrf_coordinator_v2plus_interface"
- "math/big"
- "os"
- "strings"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
@@ -38,10 +39,10 @@ import (
var coordinatorV2PlusABI = evmtypes.MustGetABI(vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalABI)
type CoordinatorConfigV2Plus struct {
- MinConfs *int
- MaxGasLimit *int64
- StalenessSeconds *int64
- GasAfterPayment *int64
+ MinConfs int
+ MaxGasLimit int64
+ StalenessSeconds int64
+ GasAfterPayment int64
FallbackWeiPerUnitLink *big.Int
FeeConfig vrf_coordinator_v2_5.VRFCoordinatorV25FeeConfig
}
@@ -467,12 +468,12 @@ func DeployUniverseViaCLI(e helpers.Environment) {
deployCmd := flag.NewFlagSet("deploy-universe", flag.ExitOnError)
// required flags
- linkAddress := *deployCmd.String("link-address", "", "address of link token")
- linkEthAddress := *deployCmd.String("link-eth-feed", "", "address of link eth feed")
- bhsContractAddressString := *deployCmd.String("bhs-address", "", "address of BHS contract")
- batchBHSAddressString := *deployCmd.String("batch-bhs-address", "", "address of Batch BHS contract")
- coordinatorAddressString := *deployCmd.String("coordinator-address", "", "address of VRF Coordinator contract")
- batchCoordinatorAddressString := *deployCmd.String("batch-coordinator-address", "", "address Batch VRF Coordinator contract")
+ linkAddress := deployCmd.String("link-address", "", "address of link token")
+ linkEthAddress := deployCmd.String("link-eth-feed", "", "address of link eth feed")
+ bhsContractAddressString := deployCmd.String("bhs-address", "", "address of BHS contract")
+ batchBHSAddressString := deployCmd.String("batch-bhs-address", "", "address of Batch BHS contract")
+ coordinatorAddressString := deployCmd.String("coordinator-address", "", "address of VRF Coordinator contract")
+ batchCoordinatorAddressString := deployCmd.String("batch-coordinator-address", "", "address Batch VRF Coordinator contract")
subscriptionBalanceJuelsString := deployCmd.String("subscription-balance", "1e19", "amount to fund subscription with Link token (Juels)")
subscriptionBalanceNativeWeiString := deployCmd.String("subscription-balance-native", "1e18", "amount to fund subscription with native token (Wei)")
@@ -480,7 +481,10 @@ func DeployUniverseViaCLI(e helpers.Environment) {
// optional flags
fallbackWeiPerUnitLinkString := deployCmd.String("fallback-wei-per-unit-link", "6e16", "fallback wei/link ratio")
- registerKeyUncompressedPubKey := deployCmd.String("uncompressed-pub-key", "", "uncompressed public key")
+ registerVRFKeyUncompressedPubKey := deployCmd.String("uncompressed-pub-key", "", "uncompressed public key")
+ registerVRFKeyAgainstAddress := deployCmd.String("register-vrf-key-against-address", "", "VRF Key registration against address - "+
+ "from this address you can perform `coordinator.oracleWithdraw` to withdraw earned funds from rand request fulfilments")
+
vrfPrimaryNodeSendingKeysString := deployCmd.String("vrf-primary-node-sending-keys", "", "VRF Primary Node sending keys")
minConfs := deployCmd.Int("min-confs", constants.MinConfs, "min confs")
nodeSendingKeyFundingAmount := deployCmd.String("sending-key-funding-amount", constants.NodeSendingKeyFundingAmount, "CL node sending key funding amount")
@@ -504,23 +508,30 @@ func DeployUniverseViaCLI(e helpers.Environment) {
FulfillmentFlatFeeNativePPM: uint32(*flatFeeEthPPM),
}
- vrfPrimaryNodeSendingKeys := strings.Split(*vrfPrimaryNodeSendingKeysString, ",")
+ var vrfPrimaryNodeSendingKeys []string
+ if len(*vrfPrimaryNodeSendingKeysString) > 0 {
+ vrfPrimaryNodeSendingKeys = strings.Split(*vrfPrimaryNodeSendingKeysString, ",")
+ }
nodesMap := make(map[string]model.Node)
+ fundingAmount, ok := new(big.Int).SetString(*nodeSendingKeyFundingAmount, 10)
+ if !ok {
+ panic(fmt.Sprintf("failed to parse node sending key funding amount '%s'", *nodeSendingKeyFundingAmount))
+ }
nodesMap[model.VRFPrimaryNodeName] = model.Node{
SendingKeys: util.MapToSendingKeyArr(vrfPrimaryNodeSendingKeys),
SendingKeyFundingAmount: fundingAmount,
}
- bhsContractAddress := common.HexToAddress(bhsContractAddressString)
- batchBHSAddress := common.HexToAddress(batchBHSAddressString)
- coordinatorAddress := common.HexToAddress(coordinatorAddressString)
- batchCoordinatorAddress := common.HexToAddress(batchCoordinatorAddressString)
+ bhsContractAddress := common.HexToAddress(*bhsContractAddressString)
+ batchBHSAddress := common.HexToAddress(*batchBHSAddressString)
+ coordinatorAddress := common.HexToAddress(*coordinatorAddressString)
+ batchCoordinatorAddress := common.HexToAddress(*batchCoordinatorAddressString)
contractAddresses := model.ContractAddresses{
- LinkAddress: linkAddress,
- LinkEthAddress: linkEthAddress,
+ LinkAddress: *linkAddress,
+ LinkEthAddress: *linkEthAddress,
BhsContractAddress: bhsContractAddress,
BatchBHSAddress: batchBHSAddress,
CoordinatorAddress: coordinatorAddress,
@@ -528,19 +539,24 @@ func DeployUniverseViaCLI(e helpers.Environment) {
}
coordinatorConfig := CoordinatorConfigV2Plus{
- MinConfs: minConfs,
- MaxGasLimit: maxGasLimit,
- StalenessSeconds: stalenessSeconds,
- GasAfterPayment: gasAfterPayment,
+ MinConfs: *minConfs,
+ MaxGasLimit: *maxGasLimit,
+ StalenessSeconds: *stalenessSeconds,
+ GasAfterPayment: *gasAfterPayment,
FallbackWeiPerUnitLink: fallbackWeiPerUnitLink,
FeeConfig: feeConfig,
}
+ vrfKeyRegistrationConfig := model.VRFKeyRegistrationConfig{
+ VRFKeyUncompressedPubKey: *registerVRFKeyUncompressedPubKey,
+ RegisterAgainstAddress: *registerVRFKeyAgainstAddress,
+ }
+
VRFV2PlusDeployUniverse(
e,
subscriptionBalanceJuels,
subscriptionBalanceNativeWei,
- registerKeyUncompressedPubKey,
+ vrfKeyRegistrationConfig,
contractAddresses,
coordinatorConfig,
*batchFulfillmentEnabled,
@@ -557,34 +573,38 @@ func DeployUniverseViaCLI(e helpers.Environment) {
func VRFV2PlusDeployUniverse(e helpers.Environment,
subscriptionBalanceJuels *big.Int,
subscriptionBalanceNativeWei *big.Int,
- registerKeyUncompressedPubKey *string,
+ vrfKeyRegistrationConfig model.VRFKeyRegistrationConfig,
contractAddresses model.ContractAddresses,
coordinatorConfig CoordinatorConfigV2Plus,
batchFulfillmentEnabled bool,
nodesMap map[string]model.Node,
) model.JobSpecs {
- // Put key in ECDSA format
- if strings.HasPrefix(*registerKeyUncompressedPubKey, "0x") {
- *registerKeyUncompressedPubKey = strings.Replace(*registerKeyUncompressedPubKey, "0x", "04", 1)
- }
+ var compressedPkHex string
+ var keyHash common.Hash
+ if len(vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey) > 0 {
+ // Put key in ECDSA format
+ if strings.HasPrefix(vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey, "0x") {
+ vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey = strings.Replace(vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey, "0x", "04", 1)
+ }
- // Generate compressed public key and key hash
- pubBytes, err := hex.DecodeString(*registerKeyUncompressedPubKey)
- helpers.PanicErr(err)
- pk, err := crypto.UnmarshalPubkey(pubBytes)
- helpers.PanicErr(err)
- var pkBytes []byte
- if big.NewInt(0).Mod(pk.Y, big.NewInt(2)).Uint64() != 0 {
- pkBytes = append(pk.X.Bytes(), 1)
- } else {
- pkBytes = append(pk.X.Bytes(), 0)
- }
- var newPK secp256k1.PublicKey
- copy(newPK[:], pkBytes)
+ // Generate compressed public key and key hash
+ pubBytes, err := hex.DecodeString(vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey)
+ helpers.PanicErr(err)
+ pk, err := crypto.UnmarshalPubkey(pubBytes)
+ helpers.PanicErr(err)
+ var pkBytes []byte
+ if big.NewInt(0).Mod(pk.Y, big.NewInt(2)).Uint64() != 0 {
+ pkBytes = append(pk.X.Bytes(), 1)
+ } else {
+ pkBytes = append(pk.X.Bytes(), 0)
+ }
+ var newPK secp256k1.PublicKey
+ copy(newPK[:], pkBytes)
- compressedPkHex := hexutil.Encode(pkBytes)
- keyHash, err := newPK.Hash()
- helpers.PanicErr(err)
+ compressedPkHex = hexutil.Encode(pkBytes)
+ keyHash, err = newPK.Hash()
+ helpers.PanicErr(err)
+ }
if len(contractAddresses.LinkAddress) == 0 {
fmt.Println("\nDeploying LINK Token...")
@@ -623,10 +643,10 @@ func VRFV2PlusDeployUniverse(e helpers.Environment,
SetCoordinatorConfig(
e,
*coordinator,
- uint16(*coordinatorConfig.MinConfs),
- uint32(*coordinatorConfig.MaxGasLimit),
- uint32(*coordinatorConfig.StalenessSeconds),
- uint32(*coordinatorConfig.GasAfterPayment),
+ uint16(coordinatorConfig.MinConfs),
+ uint32(coordinatorConfig.MaxGasLimit),
+ uint32(coordinatorConfig.StalenessSeconds),
+ uint32(coordinatorConfig.GasAfterPayment),
coordinatorConfig.FallbackWeiPerUnitLink,
coordinatorConfig.FeeConfig,
)
@@ -634,12 +654,12 @@ func VRFV2PlusDeployUniverse(e helpers.Environment,
fmt.Println("\nConfig set, getting current config from deployed contract...")
PrintCoordinatorConfig(coordinator)
- if len(*registerKeyUncompressedPubKey) > 0 {
+ if len(vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey) > 0 {
fmt.Println("\nRegistering proving key...")
//NOTE - register proving key against EOA account, and not against Oracle's sending address in other to be able
// easily withdraw funds from Coordinator contract back to EOA account
- RegisterCoordinatorProvingKey(e, *coordinator, *registerKeyUncompressedPubKey, e.Owner.From.String())
+ RegisterCoordinatorProvingKey(e, *coordinator, vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey, vrfKeyRegistrationConfig.RegisterAgainstAddress)
fmt.Println("\nProving key registered, getting proving key hashes from deployed contract...")
_, _, provingKeyHashes, configErr := coordinator.GetRequestConfig(nil)
@@ -685,11 +705,17 @@ func VRFV2PlusDeployUniverse(e helpers.Environment,
contractAddresses.BatchCoordinatorAddress, //batchCoordinatorAddress
batchFulfillmentEnabled, //batchFulfillmentEnabled
compressedPkHex, //publicKey
- *coordinatorConfig.MinConfs, //minIncomingConfirmations
+ coordinatorConfig.MinConfs, //minIncomingConfirmations
e.ChainID, //evmChainID
strings.Join(util.MapToAddressArr(nodesMap[model.VRFPrimaryNodeName].SendingKeys), "\",\""), //fromAddresses
contractAddresses.CoordinatorAddress,
- nodesMap[model.VRFPrimaryNodeName].SendingKeys[0].Address,
+ func() string {
+ if keys := nodesMap[model.VRFPrimaryNodeName].SendingKeys; len(keys) > 0 {
+ return keys[0].Address
+ } else {
+ return common.HexToAddress("0x0").String()
+ }
+ }(),
contractAddresses.CoordinatorAddress,
contractAddresses.CoordinatorAddress,
)
@@ -704,7 +730,13 @@ func VRFV2PlusDeployUniverse(e helpers.Environment,
e.ChainID, //evmChainID
strings.Join(util.MapToAddressArr(nodesMap[model.VRFBackupNodeName].SendingKeys), "\",\""), //fromAddresses
contractAddresses.CoordinatorAddress,
- nodesMap[model.VRFPrimaryNodeName].SendingKeys[0],
+ func() string {
+ if keys := nodesMap[model.VRFPrimaryNodeName].SendingKeys; len(keys) > 0 {
+ return keys[0].Address
+ } else {
+ return common.HexToAddress("0x0").String()
+ }
+ }(),
contractAddresses.CoordinatorAddress,
contractAddresses.CoordinatorAddress,
)
@@ -751,7 +783,7 @@ func VRFV2PlusDeployUniverse(e helpers.Environment,
"\nVRF Subscription LINK Balance:", *subscriptionBalanceJuels,
"\nVRF Subscription Native Balance:", *subscriptionBalanceNativeWei,
"\nPossible VRF Request command: ",
- fmt.Sprintf("go run . eoa-load-test-request-with-metrics --consumer-address=%s --sub-id=%d --key-hash=%s --request-confirmations %d --requests 1 --runs 1 --cb-gas-limit 1_000_000", consumerAddress, subID, keyHash, *coordinatorConfig.MinConfs),
+ fmt.Sprintf("go run . eoa-load-test-request-with-metrics --consumer-address=%s --sub-id=%d --key-hash=%s --request-confirmations %d --requests 1 --runs 1 --cb-gas-limit 1_000_000", consumerAddress, subID, keyHash, coordinatorConfig.MinConfs),
"\nRetrieve Request Status: ",
fmt.Sprintf("go run . eoa-load-test-read-metrics --consumer-address=%s", consumerAddress),
"\nA node can now be configured to run a VRF job with the below job spec :\n",
diff --git a/core/services/chainlink/application.go b/core/services/chainlink/application.go
index 354f0479042..0d479b1f1ab 100644
--- a/core/services/chainlink/application.go
+++ b/core/services/chainlink/application.go
@@ -16,7 +16,7 @@ import (
"go.uber.org/multierr"
"go.uber.org/zap/zapcore"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink-relay/pkg/loop"
relayservices "github.com/smartcontractkit/chainlink-relay/pkg/services"
@@ -52,6 +52,8 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/vrf"
"github.com/smartcontractkit/chainlink/v2/core/services/webhook"
"github.com/smartcontractkit/chainlink/v2/core/sessions"
+ "github.com/smartcontractkit/chainlink/v2/core/sessions/ldapauth"
+ "github.com/smartcontractkit/chainlink/v2/core/sessions/localauth"
"github.com/smartcontractkit/chainlink/v2/core/utils"
"github.com/smartcontractkit/chainlink/v2/plugins"
)
@@ -82,7 +84,8 @@ type Application interface {
EVMORM() evmtypes.Configs
PipelineORM() pipeline.ORM
BridgeORM() bridges.ORM
- SessionORM() sessions.ORM
+ BasicAdminUsersORM() sessions.BasicAdminUsersORM
+ AuthenticationProvider() sessions.AuthenticationProvider
TxmStorageService() txmgr.EvmTxStore
AddJobV2(ctx context.Context, job *job.Job) error
DeleteJob(ctx context.Context, jobID int32) error
@@ -115,7 +118,8 @@ type ChainlinkApplication struct {
pipelineORM pipeline.ORM
pipelineRunner pipeline.Runner
bridgeORM bridges.ORM
- sessionORM sessions.ORM
+ localAdminUsersORM sessions.BasicAdminUsersORM
+ authenticationProvider sessions.AuthenticationProvider
txmStorageService txmgr.EvmTxStore
FeedsService feeds.Service
webhookJobRunner webhook.JobRunner
@@ -245,13 +249,39 @@ func NewApplication(opts ApplicationOpts) (Application, error) {
return nil, fmt.Errorf("no evm chains found")
}
+ // Initialize Local Users ORM and Authentication Provider specified in config
+ // BasicAdminUsersORM is initialized and required regardless of separate Authentication Provider
+ localAdminUsersORM := localauth.NewORM(db, cfg.WebServer().SessionTimeout().Duration(), globalLogger, cfg.Database(), auditLogger)
+
+ // Initialize Sessions ORM based on environment configured authenticator
+ // localDB auth or remote LDAP auth
+ authMethod := cfg.WebServer().AuthenticationMethod()
+ var authenticationProvider sessions.AuthenticationProvider
+ var sessionReaper utils.SleeperTask
+
+ switch sessions.AuthenticationProviderName(authMethod) {
+ case sessions.LDAPAuth:
+ var err error
+ authenticationProvider, err = ldapauth.NewLDAPAuthenticator(
+ db, cfg.Database(), cfg.WebServer().LDAP(), cfg.Insecure().DevWebServer(), globalLogger, auditLogger,
+ )
+ if err != nil {
+ return nil, errors.Wrap(err, "NewApplication: failed to initialize LDAP Authentication module")
+ }
+ sessionReaper = ldapauth.NewLDAPServerStateSync(db, cfg.Database(), cfg.WebServer().LDAP(), globalLogger)
+ case sessions.LocalAuth:
+ authenticationProvider = localauth.NewORM(db, cfg.WebServer().SessionTimeout().Duration(), globalLogger, cfg.Database(), auditLogger)
+ sessionReaper = localauth.NewSessionReaper(db.DB, cfg.WebServer(), globalLogger)
+ default:
+ return nil, errors.Errorf("NewApplication: Unexpected 'AuthenticationMethod': %s supported values: %s, %s", authMethod, sessions.LocalAuth, sessions.LDAPAuth)
+ }
+
var (
pipelineORM = pipeline.NewORM(db, globalLogger, cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns())
bridgeORM = bridges.NewORM(db, globalLogger, cfg.Database())
- sessionORM = sessions.NewORM(db, cfg.WebServer().SessionTimeout().Duration(), globalLogger, cfg.Database(), auditLogger)
mercuryORM = mercury.NewORM(db, globalLogger, cfg.Database())
pipelineRunner = pipeline.NewRunner(pipelineORM, bridgeORM, cfg.JobPipeline(), cfg.WebServer(), legacyEVMChains, keyStore.Eth(), keyStore.VRF(), globalLogger, restrictedHTTPClient, unrestrictedHTTPClient)
- jobORM = job.NewORM(db, legacyEVMChains, pipelineORM, bridgeORM, keyStore, globalLogger, cfg.Database())
+ jobORM = job.NewORM(db, pipelineORM, bridgeORM, keyStore, globalLogger, cfg.Database())
txmORM = txmgr.NewTxStore(db, globalLogger, cfg.Database())
)
@@ -440,13 +470,14 @@ func NewApplication(opts ApplicationOpts) (Application, error) {
pipelineRunner: pipelineRunner,
pipelineORM: pipelineORM,
bridgeORM: bridgeORM,
- sessionORM: sessionORM,
+ localAdminUsersORM: localAdminUsersORM,
+ authenticationProvider: authenticationProvider,
txmStorageService: txmORM,
FeedsService: feedsService,
Config: cfg,
webhookJobRunner: webhookJobRunner,
KeyStore: keyStore,
- SessionReaper: sessions.NewSessionReaper(db.DB, cfg.WebServer(), globalLogger),
+ SessionReaper: sessionReaper,
ExternalInitiatorManager: externalInitiatorManager,
HealthChecker: healthChecker,
Nurse: nurse,
@@ -612,8 +643,12 @@ func (app *ChainlinkApplication) BridgeORM() bridges.ORM {
return app.bridgeORM
}
-func (app *ChainlinkApplication) SessionORM() sessions.ORM {
- return app.sessionORM
+func (app *ChainlinkApplication) BasicAdminUsersORM() sessions.BasicAdminUsersORM {
+ return app.localAdminUsersORM
+}
+
+func (app *ChainlinkApplication) AuthenticationProvider() sessions.AuthenticationProvider {
+ return app.authenticationProvider
}
// TODO BCF-2516 remove this all together remove EVM specifics
diff --git a/core/services/chainlink/config.go b/core/services/chainlink/config.go
index 3f55a2dc00f..10598718f97 100644
--- a/core/services/chainlink/config.go
+++ b/core/services/chainlink/config.go
@@ -168,28 +168,32 @@ type Secrets struct {
}
func (s *Secrets) SetFrom(f *Secrets) (err error) {
- if err1 := s.Database.SetFrom(&f.Database); err1 != nil {
- err = multierr.Append(err, config.NamedMultiErrorList(err1, "Database"))
+ if err2 := s.Database.SetFrom(&f.Database); err2 != nil {
+ err = multierr.Append(err, config.NamedMultiErrorList(err2, "Database"))
}
if err2 := s.Password.SetFrom(&f.Password); err2 != nil {
err = multierr.Append(err, config.NamedMultiErrorList(err2, "Password"))
}
- if err3 := s.Pyroscope.SetFrom(&f.Pyroscope); err3 != nil {
- err = multierr.Append(err, config.NamedMultiErrorList(err3, "Pyroscope"))
+ if err2 := s.WebServer.SetFrom(&f.WebServer); err2 != nil {
+ err = multierr.Append(err, config.NamedMultiErrorList(err2, "WebServer"))
}
- if err4 := s.Prometheus.SetFrom(&f.Prometheus); err4 != nil {
- err = multierr.Append(err, config.NamedMultiErrorList(err4, "Prometheus"))
+ if err2 := s.Pyroscope.SetFrom(&f.Pyroscope); err2 != nil {
+ err = multierr.Append(err, config.NamedMultiErrorList(err2, "Pyroscope"))
}
- if err5 := s.Mercury.SetFrom(&f.Mercury); err5 != nil {
- err = multierr.Append(err, config.NamedMultiErrorList(err5, "Mercury"))
+ if err2 := s.Prometheus.SetFrom(&f.Prometheus); err2 != nil {
+ err = multierr.Append(err, config.NamedMultiErrorList(err2, "Prometheus"))
}
- if err6 := s.Threshold.SetFrom(&f.Threshold); err6 != nil {
- err = multierr.Append(err, config.NamedMultiErrorList(err6, "Threshold"))
+ if err2 := s.Mercury.SetFrom(&f.Mercury); err2 != nil {
+ err = multierr.Append(err, config.NamedMultiErrorList(err2, "Mercury"))
+ }
+
+ if err2 := s.Threshold.SetFrom(&f.Threshold); err2 != nil {
+ err = multierr.Append(err, config.NamedMultiErrorList(err2, "Threshold"))
}
_, err = utils.MultiErrorList(err)
diff --git a/core/services/chainlink/config_general.go b/core/services/chainlink/config_general.go
index 81e38833359..6a835e09c89 100644
--- a/core/services/chainlink/config_general.go
+++ b/core/services/chainlink/config_general.go
@@ -348,7 +348,7 @@ func (g *generalConfig) StarkNetEnabled() bool {
}
func (g *generalConfig) WebServer() config.WebServer {
- return &webServerConfig{c: g.c.WebServer, rootDir: g.RootDir}
+ return &webServerConfig{c: g.c.WebServer, s: g.secrets.WebServer, rootDir: g.RootDir}
}
func (g *generalConfig) AutoPprofBlockProfileRate() int {
diff --git a/core/services/chainlink/config_general_test.go b/core/services/chainlink/config_general_test.go
index 46931e53e2b..c122f8f968c 100644
--- a/core/services/chainlink/config_general_test.go
+++ b/core/services/chainlink/config_general_test.go
@@ -149,6 +149,9 @@ var mercurySecretsTOMLSplitTwo string
//go:embed testdata/mergingsecretsdata/secrets-threshold.toml
var thresholdSecretsTOML string
+//go:embed testdata/mergingsecretsdata/secrets-webserver-ldap.toml
+var WebServerLDAPSecretsTOML string
+
func TestConfig_SecretsMerging(t *testing.T) {
t.Run("verify secrets merging in GeneralConfigOpts.New()", func(t *testing.T) {
databaseSecrets, err := parseSecrets(databaseSecretsTOML)
@@ -165,6 +168,8 @@ func TestConfig_SecretsMerging(t *testing.T) {
require.NoErrorf(t, err6, "error: %s", err6)
thresholdSecrets, err7 := parseSecrets(thresholdSecretsTOML)
require.NoErrorf(t, err7, "error: %s", err7)
+ webserverLDAPSecrets, err8 := parseSecrets(WebServerLDAPSecretsTOML)
+ require.NoErrorf(t, err8, "error: %s", err8)
opts := new(GeneralConfigOpts)
configFiles := []string{
@@ -178,6 +183,7 @@ func TestConfig_SecretsMerging(t *testing.T) {
"testdata/mergingsecretsdata/secrets-mercury-split-one.toml",
"testdata/mergingsecretsdata/secrets-mercury-split-two.toml",
"testdata/mergingsecretsdata/secrets-threshold.toml",
+ "testdata/mergingsecretsdata/secrets-webserver-ldap.toml",
}
err = opts.Setup(configFiles, secretsFiles)
require.NoErrorf(t, err, "error: %s", err)
@@ -194,6 +200,10 @@ func TestConfig_SecretsMerging(t *testing.T) {
assert.Equal(t, (string)(*prometheusSecrets.Prometheus.AuthToken), (string)(*opts.Secrets.Prometheus.AuthToken))
assert.Equal(t, (string)(*thresholdSecrets.Threshold.ThresholdKeyShare), (string)(*opts.Secrets.Threshold.ThresholdKeyShare))
+ assert.Equal(t, webserverLDAPSecrets.WebServer.LDAP.ServerAddress.URL().String(), opts.Secrets.WebServer.LDAP.ServerAddress.URL().String())
+ assert.Equal(t, webserverLDAPSecrets.WebServer.LDAP.ReadOnlyUserLogin, opts.Secrets.WebServer.LDAP.ReadOnlyUserLogin)
+ assert.Equal(t, webserverLDAPSecrets.WebServer.LDAP.ReadOnlyUserPass, opts.Secrets.WebServer.LDAP.ReadOnlyUserPass)
+
err = assertDeepEqualityMercurySecrets(*merge(mercurySecrets_a.Mercury, mercurySecrets_b.Mercury), opts.Secrets.Mercury)
require.NoErrorf(t, err, "merged mercury secrets unequal")
})
diff --git a/core/services/chainlink/config_test.go b/core/services/chainlink/config_test.go
index 48fb8272ace..34fcc4bbe91 100644
--- a/core/services/chainlink/config_test.go
+++ b/core/services/chainlink/config_test.go
@@ -308,6 +308,7 @@ func TestConfig_Marshal(t *testing.T) {
},
}
full.WebServer = toml.WebServer{
+ AuthenticationMethod: ptr("local"),
AllowOrigins: ptr("*"),
BridgeResponseURL: mustURL("https://bridge.response"),
BridgeCacheTTL: models.MustNewDuration(10 * time.Second),
@@ -323,6 +324,25 @@ func TestConfig_Marshal(t *testing.T) {
RPID: ptr("test-rpid"),
RPOrigin: ptr("test-rp-origin"),
},
+ LDAP: toml.WebServerLDAP{
+ ServerTLS: ptr(true),
+ SessionTimeout: models.MustNewDuration(15 * time.Minute),
+ QueryTimeout: models.MustNewDuration(2 * time.Minute),
+ BaseUserAttr: ptr("uid"),
+ BaseDN: ptr("dc=custom,dc=example,dc=com"),
+ UsersDN: ptr("ou=users"),
+ GroupsDN: ptr("ou=groups"),
+ ActiveAttribute: ptr("organizationalStatus"),
+ ActiveAttributeAllowedValue: ptr("ACTIVE"),
+ AdminUserGroupCN: ptr("NodeAdmins"),
+ EditUserGroupCN: ptr("NodeEditors"),
+ RunUserGroupCN: ptr("NodeRunners"),
+ ReadUserGroupCN: ptr("NodeReadOnly"),
+ UserApiTokenEnabled: ptr(false),
+ UserAPITokenDuration: models.MustNewDuration(240 * time.Hour),
+ UpstreamSyncInterval: models.MustNewDuration(0 * time.Second),
+ UpstreamSyncRateLimit: models.MustNewDuration(2 * time.Minute),
+ },
RateLimit: toml.WebServerRateLimit{
Authenticated: ptr[int64](42),
AuthenticatedPeriod: models.MustNewDuration(time.Second),
@@ -468,7 +488,7 @@ func TestConfig_Marshal(t *testing.T) {
FlagsContractAddress: mustAddress("0xae4E781a6218A8031764928E88d457937A954fC3"),
GasEstimator: evmcfg.GasEstimator{
- Mode: ptr("L2Suggested"),
+ Mode: ptr("SuggestedPrice"),
EIP1559DynamicFees: ptr(true),
BumpPercent: ptr[uint16](10),
BumpThreshold: ptr[uint32](6),
@@ -738,6 +758,7 @@ MaxAgeDays = 17
MaxBackups = 9
`},
{"WebServer", Config{Core: toml.Core{WebServer: full.WebServer}}, `[WebServer]
+AuthenticationMethod = 'local'
AllowOrigins = '*'
BridgeResponseURL = 'https://bridge.response'
BridgeCacheTTL = '10s'
@@ -750,6 +771,25 @@ HTTPMaxSize = '32.77kb'
StartTimeout = '15s'
ListenIP = '192.158.1.37'
+[WebServer.LDAP]
+ServerTLS = true
+SessionTimeout = '15m0s'
+QueryTimeout = '2m0s'
+BaseUserAttr = 'uid'
+BaseDN = 'dc=custom,dc=example,dc=com'
+UsersDN = 'ou=users'
+GroupsDN = 'ou=groups'
+ActiveAttribute = 'organizationalStatus'
+ActiveAttributeAllowedValue = 'ACTIVE'
+AdminUserGroupCN = 'NodeAdmins'
+EditUserGroupCN = 'NodeEditors'
+RunUserGroupCN = 'NodeRunners'
+ReadUserGroupCN = 'NodeReadOnly'
+UserApiTokenEnabled = false
+UserAPITokenDuration = '240h0m0s'
+UpstreamSyncInterval = '0s'
+UpstreamSyncRateLimit = '2m0s'
+
[WebServer.MFA]
RPID = 'test-rpid'
RPOrigin = 'test-rp-origin'
@@ -912,7 +952,7 @@ ResendAfterThreshold = '1h0m0s'
Enabled = true
[EVM.GasEstimator]
-Mode = 'L2Suggested'
+Mode = 'SuggestedPrice'
PriceDefault = '9.223372036854775807 ether'
PriceMax = '281.474976710655 micro'
PriceMin = '13 wei'
@@ -1118,8 +1158,17 @@ func TestConfig_Validate(t *testing.T) {
toml string
exp string
}{
- {name: "invalid", toml: invalidTOML, exp: `invalid configuration: 5 errors:
+ {name: "invalid", toml: invalidTOML, exp: `invalid configuration: 6 errors:
- Database.Lock.LeaseRefreshInterval: invalid value (6s): must be less than or equal to half of LeaseDuration (10s)
+ - WebServer: 8 errors:
+ - LDAP.BaseDN: invalid value (): LDAP BaseDN can not be empty
+ - LDAP.BaseUserAttr: invalid value (): LDAP BaseUserAttr can not be empty
+ - LDAP.UsersDN: invalid value (): LDAP UsersDN can not be empty
+ - LDAP.GroupsDN: invalid value (): LDAP GroupsDN can not be empty
+ - LDAP.AdminUserGroupCN: invalid value (): LDAP AdminUserGroupCN can not be empty
+ - LDAP.EditUserGroupCN: invalid value (): LDAP EditUserGroupCN can not be empty
+ - LDAP.RunUserGroupCN: invalid value (): LDAP RunUserGroupCN can not be empty
+ - LDAP.ReadUserGroupCN: invalid value (): LDAP ReadUserGroupCN can not be empty
- EVM: 8 errors:
- 1.ChainID: invalid value (1): duplicate - must be unique
- 0.Nodes.1.Name: invalid value (foo): duplicate - must be unique
@@ -1141,7 +1190,7 @@ func TestConfig_Validate(t *testing.T) {
- 1: 6 errors:
- ChainType: invalid value (Foo): must not be set with this chain id
- Nodes: missing: must have at least one node
- - ChainType: invalid value (Foo): must be one of arbitrum, metis, xdai, optimismBedrock, celo or omitted
+ - ChainType: invalid value (Foo): must be one of arbitrum, metis, xdai, optimismBedrock, celo, kroma, wemix, zksync or omitted
- HeadTracker.HistoryDepth: invalid value (30): must be equal to or greater than FinalityDepth
- GasEstimator: 2 errors:
- FeeCapDefault: invalid value (101 wei): must be equal to PriceMax (99 wei) since you are using FixedPrice estimation with gas bumping disabled in EIP1559 mode - PriceMax will be used as the FeeCap for transactions instead of FeeCapDefault
@@ -1150,7 +1199,7 @@ func TestConfig_Validate(t *testing.T) {
- 2: 5 errors:
- ChainType: invalid value (Arbitrum): only "optimismBedrock" can be used with this chain id
- Nodes: missing: must have at least one node
- - ChainType: invalid value (Arbitrum): must be one of arbitrum, metis, xdai, optimismBedrock, celo or omitted
+ - ChainType: invalid value (Arbitrum): must be one of arbitrum, metis, xdai, optimismBedrock, celo, kroma, wemix, zksync or omitted
- FinalityDepth: invalid value (0): must be greater than or equal to 1
- MinIncomingConfirmations: invalid value (0): must be greater than or equal to 1
- 3.Nodes: 5 errors:
diff --git a/core/services/chainlink/config_web_server.go b/core/services/chainlink/config_web_server.go
index a931d67f386..06db398e2ea 100644
--- a/core/services/chainlink/config_web_server.go
+++ b/core/services/chainlink/config_web_server.go
@@ -98,6 +98,7 @@ func (m *mfaConfig) RPOrigin() string {
type webServerConfig struct {
c toml.WebServer
+ s toml.WebServerSecrets
rootDir func() string
}
@@ -113,6 +114,14 @@ func (w *webServerConfig) MFA() config.MFA {
return &mfaConfig{c: w.c.MFA}
}
+func (w *webServerConfig) LDAP() config.LDAP {
+ return &ldapConfig{c: w.c.LDAP, s: w.s.LDAP}
+}
+
+func (w *webServerConfig) AuthenticationMethod() string {
+ return *w.c.AuthenticationMethod
+}
+
func (w *webServerConfig) AllowOrigins() string {
return *w.c.AllowOrigins
}
@@ -168,3 +177,139 @@ func (w *webServerConfig) SessionTimeout() models.Duration {
func (w *webServerConfig) ListenIP() net.IP {
return *w.c.ListenIP
}
+
+type ldapConfig struct {
+ c toml.WebServerLDAP
+ s toml.WebServerLDAPSecrets
+}
+
+func (l *ldapConfig) ServerAddress() string {
+ if l.s.ServerAddress == nil {
+ return ""
+ }
+ return l.s.ServerAddress.URL().String()
+}
+
+func (l *ldapConfig) ReadOnlyUserLogin() string {
+ if l.s.ReadOnlyUserLogin == nil {
+ return ""
+ }
+ return string(*l.s.ReadOnlyUserLogin)
+}
+
+func (l *ldapConfig) ReadOnlyUserPass() string {
+ if l.s.ReadOnlyUserPass == nil {
+ return ""
+ }
+ return string(*l.s.ReadOnlyUserPass)
+}
+
+func (l *ldapConfig) ServerTLS() bool {
+ if l.c.ServerTLS == nil {
+ return false
+ }
+ return *l.c.ServerTLS
+}
+
+func (l *ldapConfig) SessionTimeout() models.Duration {
+ return *l.c.SessionTimeout
+}
+
+func (l *ldapConfig) QueryTimeout() time.Duration {
+ return l.c.QueryTimeout.Duration()
+}
+
+func (l *ldapConfig) UserAPITokenDuration() models.Duration {
+ return *l.c.UserAPITokenDuration
+}
+
+func (l *ldapConfig) BaseUserAttr() string {
+ if l.c.BaseUserAttr == nil {
+ return ""
+ }
+ return *l.c.BaseUserAttr
+}
+
+func (l *ldapConfig) BaseDN() string {
+ if l.c.BaseDN == nil {
+ return ""
+ }
+ return *l.c.BaseDN
+}
+
+func (l *ldapConfig) UsersDN() string {
+ if l.c.UsersDN == nil {
+ return ""
+ }
+ return *l.c.UsersDN
+}
+
+func (l *ldapConfig) GroupsDN() string {
+ if l.c.GroupsDN == nil {
+ return ""
+ }
+ return *l.c.GroupsDN
+}
+
+func (l *ldapConfig) ActiveAttribute() string {
+ if l.c.ActiveAttribute == nil {
+ return ""
+ }
+ return *l.c.ActiveAttribute
+}
+
+func (l *ldapConfig) ActiveAttributeAllowedValue() string {
+ if l.c.ActiveAttributeAllowedValue == nil {
+ return ""
+ }
+ return *l.c.ActiveAttributeAllowedValue
+}
+
+func (l *ldapConfig) AdminUserGroupCN() string {
+ if l.c.AdminUserGroupCN == nil {
+ return ""
+ }
+ return *l.c.AdminUserGroupCN
+}
+
+func (l *ldapConfig) EditUserGroupCN() string {
+ if l.c.EditUserGroupCN == nil {
+ return ""
+ }
+ return *l.c.EditUserGroupCN
+}
+
+func (l *ldapConfig) RunUserGroupCN() string {
+ if l.c.RunUserGroupCN == nil {
+ return ""
+ }
+ return *l.c.RunUserGroupCN
+}
+
+func (l *ldapConfig) ReadUserGroupCN() string {
+ if l.c.ReadUserGroupCN == nil {
+ return ""
+ }
+ return *l.c.ReadUserGroupCN
+}
+
+func (l *ldapConfig) UserApiTokenEnabled() bool {
+ if l.c.UserApiTokenEnabled == nil {
+ return false
+ }
+ return *l.c.UserApiTokenEnabled
+}
+
+func (l *ldapConfig) UpstreamSyncInterval() models.Duration {
+ if l.c.UpstreamSyncInterval == nil {
+ return models.Duration{}
+ }
+ return *l.c.UpstreamSyncInterval
+}
+
+func (l *ldapConfig) UpstreamSyncRateLimit() models.Duration {
+ if l.c.UpstreamSyncRateLimit == nil {
+ return models.Duration{}
+ }
+ return *l.c.UpstreamSyncRateLimit
+}
diff --git a/core/services/chainlink/mocks/relayer_chain_interoperators.go b/core/services/chainlink/mocks/relayer_chain_interoperators.go
index 0a8758f6d4b..81f112f7663 100644
--- a/core/services/chainlink/mocks/relayer_chain_interoperators.go
+++ b/core/services/chainlink/mocks/relayer_chain_interoperators.go
@@ -1,248 +1,61 @@
-// Code generated by mockery v2.28.1. DO NOT EDIT.
-
package mocks
import (
- context "context"
-
- chainlink "github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
-
- cosmos "github.com/smartcontractkit/chainlink/v2/core/chains/cosmos"
+ "context"
+ "slices"
- evm "github.com/smartcontractkit/chainlink/v2/core/chains/evm"
+ services2 "github.com/smartcontractkit/chainlink/v2/core/services"
+ "github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
- // Manually edited. mockery generates the wrong dependency. edited to use `loop` rather than `loop/internal`
- // seems to caused by incorrect alias resolution of the relayer dep
- internal "github.com/smartcontractkit/chainlink-relay/pkg/loop"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm"
- mock "github.com/stretchr/testify/mock"
+ "github.com/smartcontractkit/chainlink-relay/pkg/loop"
- relay "github.com/smartcontractkit/chainlink/v2/core/services/relay"
+ "github.com/smartcontractkit/chainlink/v2/core/services/relay"
- services "github.com/smartcontractkit/chainlink/v2/core/services"
-
- types "github.com/smartcontractkit/chainlink-relay/pkg/types"
+ "github.com/smartcontractkit/chainlink-relay/pkg/types"
)
-// RelayerChainInteroperators is an autogenerated mock type for the RelayerChainInteroperators type
-type RelayerChainInteroperators struct {
- mock.Mock
-}
-
-// ChainStatus provides a mock function with given fields: ctx, id
-func (_m *RelayerChainInteroperators) ChainStatus(ctx context.Context, id relay.ID) (types.ChainStatus, error) {
- ret := _m.Called(ctx, id)
-
- var r0 types.ChainStatus
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, relay.ID) (types.ChainStatus, error)); ok {
- return rf(ctx, id)
- }
- if rf, ok := ret.Get(0).(func(context.Context, relay.ID) types.ChainStatus); ok {
- r0 = rf(ctx, id)
- } else {
- r0 = ret.Get(0).(types.ChainStatus)
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, relay.ID) error); ok {
- r1 = rf(ctx, id)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
-}
-
-// ChainStatuses provides a mock function with given fields: ctx, offset, limit
-func (_m *RelayerChainInteroperators) ChainStatuses(ctx context.Context, offset int, limit int) ([]types.ChainStatus, int, error) {
- ret := _m.Called(ctx, offset, limit)
-
- var r0 []types.ChainStatus
- var r1 int
- var r2 error
- if rf, ok := ret.Get(0).(func(context.Context, int, int) ([]types.ChainStatus, int, error)); ok {
- return rf(ctx, offset, limit)
- }
- if rf, ok := ret.Get(0).(func(context.Context, int, int) []types.ChainStatus); ok {
- r0 = rf(ctx, offset, limit)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]types.ChainStatus)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, int, int) int); ok {
- r1 = rf(ctx, offset, limit)
- } else {
- r1 = ret.Get(1).(int)
- }
-
- if rf, ok := ret.Get(2).(func(context.Context, int, int) error); ok {
- r2 = rf(ctx, offset, limit)
- } else {
- r2 = ret.Error(2)
- }
-
- return r0, r1, r2
+// FakeRelayerChainInteroperators is a fake chainlink.RelayerChainInteroperators.
+// This exists because mockery generation doesn't understand how to produce an alias instead of the underlying type (which is not exported in this case).
+type FakeRelayerChainInteroperators struct {
+ EVMChains evm.LegacyChainContainer
+ Nodes []types.NodeStatus
+ NodesErr error
}
-// Get provides a mock function with given fields: id
-func (_m *RelayerChainInteroperators) Get(id relay.ID) (internal.Relayer, error) {
- ret := _m.Called(id)
-
- var r0 internal.Relayer
- var r1 error
- if rf, ok := ret.Get(0).(func(relay.ID) (internal.Relayer, error)); ok {
- return rf(id)
- }
- if rf, ok := ret.Get(0).(func(relay.ID) internal.Relayer); ok {
- r0 = rf(id)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(internal.Relayer)
- }
- }
-
- if rf, ok := ret.Get(1).(func(relay.ID) error); ok {
- r1 = rf(id)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
+func (f *FakeRelayerChainInteroperators) LegacyEVMChains() evm.LegacyChainContainer {
+ return f.EVMChains
}
-// LegacyCosmosChains provides a mock function with given fields:
-func (_m *RelayerChainInteroperators) LegacyCosmosChains() cosmos.LegacyChainContainer {
- ret := _m.Called()
-
- var r0 cosmos.LegacyChainContainer
- if rf, ok := ret.Get(0).(func() cosmos.LegacyChainContainer); ok {
- r0 = rf()
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(cosmos.LegacyChainContainer)
- }
- }
-
- return r0
+func (f *FakeRelayerChainInteroperators) NodeStatuses(ctx context.Context, offset, limit int, relayIDs ...relay.ID) (nodes []types.NodeStatus, count int, err error) {
+ return slices.Clone(f.Nodes), len(f.Nodes), f.NodesErr
}
-// LegacyEVMChains provides a mock function with given fields:
-func (_m *RelayerChainInteroperators) LegacyEVMChains() evm.LegacyChainContainer {
- ret := _m.Called()
-
- var r0 evm.LegacyChainContainer
- if rf, ok := ret.Get(0).(func() evm.LegacyChainContainer); ok {
- r0 = rf()
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(evm.LegacyChainContainer)
- }
- }
-
- return r0
+func (f *FakeRelayerChainInteroperators) Services() []services2.ServiceCtx {
+ panic("unimplemented")
}
-// List provides a mock function with given fields: filter
-func (_m *RelayerChainInteroperators) List(filter chainlink.FilterFn) chainlink.RelayerChainInteroperators {
- ret := _m.Called(filter)
-
- var r0 chainlink.RelayerChainInteroperators
- if rf, ok := ret.Get(0).(func(chainlink.FilterFn) chainlink.RelayerChainInteroperators); ok {
- r0 = rf(filter)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(chainlink.RelayerChainInteroperators)
- }
- }
-
- return r0
+func (f *FakeRelayerChainInteroperators) List(filter chainlink.FilterFn) chainlink.RelayerChainInteroperators {
+ panic("unimplemented")
}
-// NodeStatuses provides a mock function with given fields: ctx, offset, limit, relayIDs
-func (_m *RelayerChainInteroperators) NodeStatuses(ctx context.Context, offset int, limit int, relayIDs ...relay.ID) ([]types.NodeStatus, int, error) {
- _va := make([]interface{}, len(relayIDs))
- for _i := range relayIDs {
- _va[_i] = relayIDs[_i]
- }
- var _ca []interface{}
- _ca = append(_ca, ctx, offset, limit)
- _ca = append(_ca, _va...)
- ret := _m.Called(_ca...)
-
- var r0 []types.NodeStatus
- var r1 int
- var r2 error
- if rf, ok := ret.Get(0).(func(context.Context, int, int, ...relay.ID) ([]types.NodeStatus, int, error)); ok {
- return rf(ctx, offset, limit, relayIDs...)
- }
- if rf, ok := ret.Get(0).(func(context.Context, int, int, ...relay.ID) []types.NodeStatus); ok {
- r0 = rf(ctx, offset, limit, relayIDs...)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]types.NodeStatus)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, int, int, ...relay.ID) int); ok {
- r1 = rf(ctx, offset, limit, relayIDs...)
- } else {
- r1 = ret.Get(1).(int)
- }
-
- if rf, ok := ret.Get(2).(func(context.Context, int, int, ...relay.ID) error); ok {
- r2 = rf(ctx, offset, limit, relayIDs...)
- } else {
- r2 = ret.Error(2)
- }
-
- return r0, r1, r2
+func (f *FakeRelayerChainInteroperators) Get(id relay.ID) (loop.Relayer, error) {
+ panic("unimplemented")
}
-// Services provides a mock function with given fields:
-func (_m *RelayerChainInteroperators) Services() []services.ServiceCtx {
- ret := _m.Called()
-
- var r0 []services.ServiceCtx
- if rf, ok := ret.Get(0).(func() []services.ServiceCtx); ok {
- r0 = rf()
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]services.ServiceCtx)
- }
- }
-
- return r0
+func (f *FakeRelayerChainInteroperators) Slice() []loop.Relayer {
+ panic("unimplemented")
}
-// Slice provides a mock function with given fields:
-func (_m *RelayerChainInteroperators) Slice() []internal.Relayer {
- ret := _m.Called()
-
- var r0 []internal.Relayer
- if rf, ok := ret.Get(0).(func() []internal.Relayer); ok {
- r0 = rf()
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]internal.Relayer)
- }
- }
-
- return r0
+func (f *FakeRelayerChainInteroperators) LegacyCosmosChains() chainlink.LegacyCosmosContainer {
+ panic("unimplemented")
}
-type mockConstructorTestingTNewRelayerChainInteroperators interface {
- mock.TestingT
- Cleanup(func())
+func (f *FakeRelayerChainInteroperators) ChainStatus(ctx context.Context, id relay.ID) (types.ChainStatus, error) {
+ panic("unimplemented")
}
-// NewRelayerChainInteroperators creates a new instance of RelayerChainInteroperators. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewRelayerChainInteroperators(t mockConstructorTestingTNewRelayerChainInteroperators) *RelayerChainInteroperators {
- mock := &RelayerChainInteroperators{}
- mock.Mock.Test(t)
-
- t.Cleanup(func() { mock.AssertExpectations(t) })
-
- return mock
+func (f *FakeRelayerChainInteroperators) ChainStatuses(ctx context.Context, offset, limit int) ([]types.ChainStatus, int, error) {
+ panic("unimplemented")
}
diff --git a/core/services/chainlink/relayer_chain_interoperators.go b/core/services/chainlink/relayer_chain_interoperators.go
index e039afbfc91..b2ec0822d44 100644
--- a/core/services/chainlink/relayer_chain_interoperators.go
+++ b/core/services/chainlink/relayer_chain_interoperators.go
@@ -7,11 +7,12 @@ import (
"sort"
"sync"
+ "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos"
+ "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos/adapters"
"github.com/smartcontractkit/chainlink-relay/pkg/loop"
"github.com/smartcontractkit/chainlink-relay/pkg/types"
"github.com/smartcontractkit/chainlink/v2/core/chains"
- "github.com/smartcontractkit/chainlink/v2/core/chains/cosmos"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm"
"github.com/smartcontractkit/chainlink/v2/core/services"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2"
@@ -24,10 +25,6 @@ var ErrNoSuchRelayer = errors.New("relayer does not exist")
// encapsulates relayers and chains and is the primary entry point for
// the node to access relayers, get legacy chains associated to a relayer
// and get status about the chains and nodes
-//
-// note the generated mockery code incorrectly resolves dependencies and needs to be manually edited
-// therefore this interface is not auto-generated. for reference use and edit the result:
-// `go:generate mockery --quiet --name RelayerChainInteroperators --output ./mocks/ --case=underscore“`
type RelayerChainInteroperators interface {
Services() []services.ServiceCtx
@@ -50,7 +47,7 @@ type LoopRelayerStorer interface {
// on the relayer interface.
type LegacyChainer interface {
LegacyEVMChains() evm.LegacyChainContainer
- LegacyCosmosChains() cosmos.LegacyChainContainer
+ LegacyCosmosChains() LegacyCosmosContainer
}
type ChainStatuser interface {
@@ -135,7 +132,7 @@ func InitCosmos(ctx context.Context, factory RelayerFactory, config CosmosFactor
op.loopRelayers[id] = a
legacyMap[id.ChainID] = a.Chain()
}
- op.legacyChains.CosmosChains = cosmos.NewLegacyChains(legacyMap)
+ op.legacyChains.CosmosChains = NewLegacyCosmos(legacyMap)
return nil
}
@@ -196,7 +193,7 @@ func (rs *CoreRelayerChainInteroperators) LegacyEVMChains() evm.LegacyChainConta
// LegacyCosmosChains returns a container with all the cosmos chains
// TODO BCF-2511
-func (rs *CoreRelayerChainInteroperators) LegacyCosmosChains() cosmos.LegacyChainContainer {
+func (rs *CoreRelayerChainInteroperators) LegacyCosmosChains() LegacyCosmosContainer {
rs.mu.Lock()
defer rs.mu.Unlock()
return rs.legacyChains.CosmosChains
@@ -355,5 +352,44 @@ func (rs *CoreRelayerChainInteroperators) Services() (s []services.ServiceCtx) {
// deprecated when chain-specific logic is removed from products.
type legacyChains struct {
EVMChains evm.LegacyChainContainer
- CosmosChains cosmos.LegacyChainContainer
+ CosmosChains LegacyCosmosContainer
}
+
+// LegacyCosmosContainer is container interface for Cosmos chains
+type LegacyCosmosContainer interface {
+ Get(id string) (adapters.Chain, error)
+ Len() int
+ List(ids ...string) ([]adapters.Chain, error)
+ Slice() []adapters.Chain
+}
+
+type LegacyCosmos = chains.ChainsKV[adapters.Chain]
+
+var _ LegacyCosmosContainer = &LegacyCosmos{}
+
+func NewLegacyCosmos(m map[string]adapters.Chain) *LegacyCosmos {
+ return chains.NewChainsKV[adapters.Chain](m)
+}
+
+type CosmosLoopRelayerChainer interface {
+ loop.Relayer
+ Chain() adapters.Chain
+}
+
+type CosmosLoopRelayerChain struct {
+ loop.Relayer
+ chain adapters.Chain
+}
+
+func NewCosmosLoopRelayerChain(r *cosmos.Relayer, s adapters.Chain) *CosmosLoopRelayerChain {
+ ra := relay.NewServerAdapter(r, s)
+ return &CosmosLoopRelayerChain{
+ Relayer: ra,
+ chain: s,
+ }
+}
+func (r *CosmosLoopRelayerChain) Chain() adapters.Chain {
+ return r.chain
+}
+
+var _ CosmosLoopRelayerChainer = &CosmosLoopRelayerChain{}
diff --git a/core/services/chainlink/relayer_chain_interoperators_test.go b/core/services/chainlink/relayer_chain_interoperators_test.go
index cfc7dbadc18..87293069646 100644
--- a/core/services/chainlink/relayer_chain_interoperators_test.go
+++ b/core/services/chainlink/relayer_chain_interoperators_test.go
@@ -257,11 +257,10 @@ func TestCoreRelayerChainInteroperators(t *testing.T) {
name: "2 cosmos chains with 2 nodes",
initFuncs: []chainlink.CoreRelayerChainInitFunc{
chainlink.InitCosmos(testctx, factory, chainlink.CosmosFactoryConfig{
- Keystore: keyStore.Cosmos(),
- TOMLConfigs: cfg.CosmosConfigs(),
- EventBroadcaster: pg.NewNullEventBroadcaster(),
- DB: db,
- QConfig: cfg.Database()}),
+ Keystore: keyStore.Cosmos(),
+ TOMLConfigs: cfg.CosmosConfigs(),
+ DB: db,
+ QConfig: cfg.Database()}),
},
expectedCosmosChainCnt: 2,
expectedCosmosNodeCnt: 2,
@@ -290,11 +289,10 @@ func TestCoreRelayerChainInteroperators(t *testing.T) {
Keystore: keyStore.StarkNet(),
TOMLConfigs: cfg.StarknetConfigs()}),
chainlink.InitCosmos(testctx, factory, chainlink.CosmosFactoryConfig{
- Keystore: keyStore.Cosmos(),
- TOMLConfigs: cfg.CosmosConfigs(),
- EventBroadcaster: pg.NewNullEventBroadcaster(),
- DB: db,
- QConfig: cfg.Database(),
+ Keystore: keyStore.Cosmos(),
+ TOMLConfigs: cfg.CosmosConfigs(),
+ DB: db,
+ QConfig: cfg.Database(),
}),
},
expectedEVMChainCnt: 2,
diff --git a/core/services/chainlink/relayer_factory.go b/core/services/chainlink/relayer_factory.go
index a159ee7cd06..d452decda1e 100644
--- a/core/services/chainlink/relayer_factory.go
+++ b/core/services/chainlink/relayer_factory.go
@@ -7,9 +7,9 @@ import (
"github.com/pelletier/go-toml/v2"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
- pkgcosmos "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos"
+ "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos"
coscfg "github.com/smartcontractkit/chainlink-cosmos/pkg/cosmos/config"
"github.com/smartcontractkit/chainlink-relay/pkg/loop"
"github.com/smartcontractkit/chainlink-solana/pkg/solana"
@@ -17,7 +17,7 @@ import (
pkgstarknet "github.com/smartcontractkit/chainlink-starknet/relayer/pkg/chainlink"
starkchain "github.com/smartcontractkit/chainlink-starknet/relayer/pkg/chainlink/chain"
"github.com/smartcontractkit/chainlink-starknet/relayer/pkg/chainlink/config"
- "github.com/smartcontractkit/chainlink/v2/core/chains/cosmos"
+
"github.com/smartcontractkit/chainlink/v2/core/chains/evm"
"github.com/smartcontractkit/chainlink/v2/core/config/env"
"github.com/smartcontractkit/chainlink/v2/core/logger"
@@ -225,7 +225,6 @@ func (r *RelayerFactory) NewStarkNet(ks keystore.StarkNet, chainCfgs config.TOML
type CosmosFactoryConfig struct {
Keystore keystore.Cosmos
coscfg.TOMLConfigs
- EventBroadcaster pg.EventBroadcaster
*sqlx.DB
pg.QConfig
}
@@ -238,9 +237,6 @@ func (c CosmosFactoryConfig) Validate() error {
if len(c.TOMLConfigs) == 0 {
err = errors.Join(err, fmt.Errorf("no CosmosConfigs provided"))
}
- if c.EventBroadcaster == nil {
- err = errors.Join(err, fmt.Errorf("nil EventBroadcaster"))
- }
if c.DB == nil {
err = errors.Join(err, fmt.Errorf("nil DB"))
}
@@ -254,12 +250,12 @@ func (c CosmosFactoryConfig) Validate() error {
return err
}
-func (r *RelayerFactory) NewCosmos(ctx context.Context, config CosmosFactoryConfig) (map[relay.ID]cosmos.LoopRelayerChainer, error) {
+func (r *RelayerFactory) NewCosmos(ctx context.Context, config CosmosFactoryConfig) (map[relay.ID]CosmosLoopRelayerChainer, error) {
err := config.Validate()
if err != nil {
return nil, fmt.Errorf("cannot create Cosmos relayer: %w", err)
}
- relayers := make(map[relay.ID]cosmos.LoopRelayerChainer)
+ relayers := make(map[relay.ID]CosmosLoopRelayerChainer)
var (
cosmosLggr = r.Logger.Named("Cosmos")
@@ -273,11 +269,9 @@ func (r *RelayerFactory) NewCosmos(ctx context.Context, config CosmosFactoryConf
lggr := cosmosLggr.Named(relayID.ChainID)
opts := cosmos.ChainOpts{
- QueryConfig: config.QConfig,
- Logger: lggr,
- DB: config.DB,
- KeyStore: loopKs,
- EventBroadcaster: config.EventBroadcaster,
+ Logger: lggr,
+ DB: config.DB,
+ KeyStore: loopKs,
}
chain, err := cosmos.NewChain(chainCfg, opts)
@@ -285,7 +279,7 @@ func (r *RelayerFactory) NewCosmos(ctx context.Context, config CosmosFactoryConf
return nil, fmt.Errorf("failed to load Cosmos chain %q: %w", relayID, err)
}
- relayers[relayID] = cosmos.NewLoopRelayerChain(pkgcosmos.NewRelayer(lggr, chain), chain)
+ relayers[relayID] = NewCosmosLoopRelayerChain(cosmos.NewRelayer(lggr, chain), chain)
}
return relayers, nil
diff --git a/core/services/chainlink/testdata/config-empty-effective.toml b/core/services/chainlink/testdata/config-empty-effective.toml
index 48d432138a8..f5d775fe744 100644
--- a/core/services/chainlink/testdata/config-empty-effective.toml
+++ b/core/services/chainlink/testdata/config-empty-effective.toml
@@ -61,6 +61,7 @@ MaxAgeDays = 0
MaxBackups = 1
[WebServer]
+AuthenticationMethod = 'local'
AllowOrigins = 'http://localhost:3000,http://localhost:6688'
BridgeResponseURL = ''
BridgeCacheTTL = '0s'
@@ -73,6 +74,25 @@ HTTPMaxSize = '32.77kb'
StartTimeout = '15s'
ListenIP = '0.0.0.0'
+[WebServer.LDAP]
+ServerTLS = true
+SessionTimeout = '15m0s'
+QueryTimeout = '2m0s'
+BaseUserAttr = 'uid'
+BaseDN = ''
+UsersDN = 'ou=users'
+GroupsDN = 'ou=groups'
+ActiveAttribute = ''
+ActiveAttributeAllowedValue = ''
+AdminUserGroupCN = 'NodeAdmins'
+EditUserGroupCN = 'NodeEditors'
+RunUserGroupCN = 'NodeRunners'
+ReadUserGroupCN = 'NodeReadOnly'
+UserApiTokenEnabled = false
+UserAPITokenDuration = '240h0m0s'
+UpstreamSyncInterval = '0s'
+UpstreamSyncRateLimit = '2m0s'
+
[WebServer.MFA]
RPID = ''
RPOrigin = ''
diff --git a/core/services/chainlink/testdata/config-full.toml b/core/services/chainlink/testdata/config-full.toml
index 1534a411dc1..5ede10ef695 100644
--- a/core/services/chainlink/testdata/config-full.toml
+++ b/core/services/chainlink/testdata/config-full.toml
@@ -67,6 +67,7 @@ MaxAgeDays = 17
MaxBackups = 9
[WebServer]
+AuthenticationMethod = 'local'
AllowOrigins = '*'
BridgeResponseURL = 'https://bridge.response'
BridgeCacheTTL = '10s'
@@ -79,6 +80,25 @@ HTTPMaxSize = '32.77kb'
StartTimeout = '15s'
ListenIP = '192.158.1.37'
+[WebServer.LDAP]
+ServerTLS = true
+SessionTimeout = '15m0s'
+QueryTimeout = '2m0s'
+BaseUserAttr = 'uid'
+BaseDN = 'dc=custom,dc=example,dc=com'
+UsersDN = 'ou=users'
+GroupsDN = 'ou=groups'
+ActiveAttribute = 'organizationalStatus'
+ActiveAttributeAllowedValue = 'ACTIVE'
+AdminUserGroupCN = 'NodeAdmins'
+EditUserGroupCN = 'NodeEditors'
+RunUserGroupCN = 'NodeRunners'
+ReadUserGroupCN = 'NodeReadOnly'
+UserApiTokenEnabled = false
+UserAPITokenDuration = '240h0m0s'
+UpstreamSyncInterval = '0s'
+UpstreamSyncRateLimit = '2m0s'
+
[WebServer.MFA]
RPID = 'test-rpid'
RPOrigin = 'test-rp-origin'
@@ -257,7 +277,7 @@ ResendAfterThreshold = '1h0m0s'
Enabled = true
[EVM.GasEstimator]
-Mode = 'L2Suggested'
+Mode = 'SuggestedPrice'
PriceDefault = '9.223372036854775807 ether'
PriceMax = '281.474976710655 micro'
PriceMin = '13 wei'
diff --git a/core/services/chainlink/testdata/config-invalid.toml b/core/services/chainlink/testdata/config-invalid.toml
index 3b7e89299f6..4d8c9bc29a9 100644
--- a/core/services/chainlink/testdata/config-invalid.toml
+++ b/core/services/chainlink/testdata/config-invalid.toml
@@ -2,6 +2,28 @@
LeaseRefreshInterval='6s'
LeaseDuration='10s'
+[WebServer]
+AuthenticationMethod = 'ldap'
+
+[WebServer.LDAP]
+ServerTLS = true
+SessionTimeout = '15m0s'
+QueryTimeout = '2m0s'
+BaseUserAttr = ''
+BaseDN = ''
+UsersDN = ''
+GroupsDN = ''
+ActiveAttribute = ''
+ActiveAttributeAllowedValue = ''
+AdminUserGroupCN = ''
+EditUserGroupCN = ''
+RunUserGroupCN = ''
+ReadUserGroupCN = ''
+UserApiTokenEnabled = false
+UserAPITokenDuration = '240h0m0s'
+UpstreamSyncInterval = '0s'
+UpstreamSyncRateLimit = '2m0s'
+
[[EVM]]
ChainID = '1'
Transactions.MaxInFlight= 10
diff --git a/core/services/chainlink/testdata/config-multi-chain-effective.toml b/core/services/chainlink/testdata/config-multi-chain-effective.toml
index 1dcbfe3a830..9dd0be8f5d2 100644
--- a/core/services/chainlink/testdata/config-multi-chain-effective.toml
+++ b/core/services/chainlink/testdata/config-multi-chain-effective.toml
@@ -61,6 +61,7 @@ MaxAgeDays = 0
MaxBackups = 1
[WebServer]
+AuthenticationMethod = 'local'
AllowOrigins = 'http://localhost:3000,http://localhost:6688'
BridgeResponseURL = ''
BridgeCacheTTL = '0s'
@@ -73,6 +74,25 @@ HTTPMaxSize = '32.77kb'
StartTimeout = '15s'
ListenIP = '0.0.0.0'
+[WebServer.LDAP]
+ServerTLS = true
+SessionTimeout = '15m0s'
+QueryTimeout = '2m0s'
+BaseUserAttr = 'uid'
+BaseDN = ''
+UsersDN = 'ou=users'
+GroupsDN = 'ou=groups'
+ActiveAttribute = ''
+ActiveAttributeAllowedValue = ''
+AdminUserGroupCN = 'NodeAdmins'
+EditUserGroupCN = 'NodeEditors'
+RunUserGroupCN = 'NodeRunners'
+ReadUserGroupCN = 'NodeReadOnly'
+UserApiTokenEnabled = false
+UserAPITokenDuration = '240h0m0s'
+UpstreamSyncInterval = '0s'
+UpstreamSyncRateLimit = '2m0s'
+
[WebServer.MFA]
RPID = ''
RPOrigin = ''
diff --git a/core/services/chainlink/testdata/mergingsecretsdata/secrets-webserver-ldap.toml b/core/services/chainlink/testdata/mergingsecretsdata/secrets-webserver-ldap.toml
new file mode 100644
index 00000000000..f73efcff0cc
--- /dev/null
+++ b/core/services/chainlink/testdata/mergingsecretsdata/secrets-webserver-ldap.toml
@@ -0,0 +1,4 @@
+[WebServer.LDAP]
+ServerAddress = 'ldaps://127.0.0.1'
+ReadOnlyUserLogin = 'viewer@example.com'
+ReadOnlyUserPass = 'password'
\ No newline at end of file
diff --git a/core/services/chainlink/testdata/secrets-full-redacted.toml b/core/services/chainlink/testdata/secrets-full-redacted.toml
index 740c3250edb..9d91d79cb51 100644
--- a/core/services/chainlink/testdata/secrets-full-redacted.toml
+++ b/core/services/chainlink/testdata/secrets-full-redacted.toml
@@ -7,6 +7,12 @@ AllowSimplePasswords = false
Keystore = 'xxxxx'
VRF = 'xxxxx'
+[WebServer]
+[WebServer.LDAP]
+ServerAddress = 'xxxxx'
+ReadOnlyUserLogin = 'xxxxx'
+ReadOnlyUserPass = 'xxxxx'
+
[Pyroscope]
AuthToken = 'xxxxx'
diff --git a/core/services/chainlink/testdata/secrets-full.toml b/core/services/chainlink/testdata/secrets-full.toml
index 37e5dafc7d7..37a3e2e7dc2 100644
--- a/core/services/chainlink/testdata/secrets-full.toml
+++ b/core/services/chainlink/testdata/secrets-full.toml
@@ -6,6 +6,12 @@ BackupURL = "postgresql://user:pass@localhost:5432/backupdbname?sslmode=disable"
Keystore = "keystore_pass"
VRF = "VRF_pass"
+[WebServer]
+[WebServer.LDAP]
+ServerAddress = 'ldaps://127.0.0.1'
+ReadOnlyUserLogin = 'viewer@example.com'
+ReadOnlyUserPass = 'password'
+
[Pyroscope]
AuthToken = "pyroscope-token"
diff --git a/core/services/cron/cron_test.go b/core/services/cron/cron_test.go
index 19a51a30650..b561248eddb 100644
--- a/core/services/cron/cron_test.go
+++ b/core/services/cron/cron_test.go
@@ -12,14 +12,12 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/evmtest"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/cron"
"github.com/smartcontractkit/chainlink/v2/core/services/job"
"github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
pipelinemocks "github.com/smartcontractkit/chainlink/v2/core/services/pipeline/mocks"
- evmrelay "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
)
func TestCronV2Pipeline(t *testing.T) {
@@ -28,12 +26,10 @@ func TestCronV2Pipeline(t *testing.T) {
db := pgtest.NewSqlxDB(t)
keyStore := cltest.NewKeyStore(t, db, cfg.Database())
- relayerExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: cfg, Client: evmtest.NewEthClientMockWithDefaultChain(t), KeyStore: keyStore.Eth()})
lggr := logger.TestLogger(t)
orm := pipeline.NewORM(db, lggr, cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns())
btORM := bridges.NewORM(db, lggr, cfg.Database())
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayerExtenders)
- jobORM := job.NewORM(db, legacyChains, orm, btORM, keyStore, lggr, cfg.Database())
+ jobORM := job.NewORM(db, orm, btORM, keyStore, lggr, cfg.Database())
jb := &job.Job{
Type: job.Cron,
diff --git a/core/services/directrequest/delegate.go b/core/services/directrequest/delegate.go
index 174dca062aa..920f94b4d60 100644
--- a/core/services/directrequest/delegate.go
+++ b/core/services/directrequest/delegate.go
@@ -77,7 +77,7 @@ func (d *Delegate) ServicesForSpec(jb job.Job) ([]job.ServiceCtx, error) {
if err != nil {
return nil, err
}
- concreteSpec := job.LoadEnvConfigVarsDR(chain.Config().EVM(), *jb.DirectRequestSpec)
+ concreteSpec := job.SetDRMinIncomingConfirmations(chain.Config().EVM().MinIncomingConfirmations(), *jb.DirectRequestSpec)
oracle, err := operator_wrapper.NewOperator(concreteSpec.ContractAddress.Address(), chain.Client())
if err != nil {
diff --git a/core/services/directrequest/delegate_test.go b/core/services/directrequest/delegate_test.go
index e58dbaeb50c..34c79a0afbb 100644
--- a/core/services/directrequest/delegate_test.go
+++ b/core/services/directrequest/delegate_test.go
@@ -88,8 +88,8 @@ func NewDirectRequestUniverseWithConfig(t *testing.T, cfg chainlink.GeneralConfi
lggr := logger.TestLogger(t)
orm := pipeline.NewORM(db, lggr, cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns())
btORM := bridges.NewORM(db, lggr, cfg.Database())
+ jobORM := job.NewORM(db, orm, btORM, keyStore, lggr, cfg.Database())
legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- jobORM := job.NewORM(db, legacyChains, orm, btORM, keyStore, lggr, cfg.Database())
delegate := directrequest.NewDelegate(lggr, runner, orm, legacyChains, mailMon)
jb := cltest.MakeDirectRequestJobSpec(t)
diff --git a/core/services/feeds/orm.go b/core/services/feeds/orm.go
index 30b6ad632a6..24ed7b8b369 100644
--- a/core/services/feeds/orm.go
+++ b/core/services/feeds/orm.go
@@ -9,7 +9,7 @@ import (
"github.com/lib/pq"
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
diff --git a/core/services/feeds/orm_test.go b/core/services/feeds/orm_test.go
index 746956bbfcd..02b9e24739c 100644
--- a/core/services/feeds/orm_test.go
+++ b/core/services/feeds/orm_test.go
@@ -10,7 +10,7 @@ import (
"github.com/stretchr/testify/require"
"gopkg.in/guregu/null.v4"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/bridges"
"github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
@@ -1656,8 +1656,7 @@ func createJob(t *testing.T, db *sqlx.DB, externalJobID uuid.UUID) *job.Job {
bridgeORM = bridges.NewORM(db, lggr, config.Database())
relayExtenders = evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
)
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- orm := job.NewORM(db, legacyChains, pipelineORM, bridgeORM, keyStore, lggr, config.Database())
+ orm := job.NewORM(db, pipelineORM, bridgeORM, keyStore, lggr, config.Database())
require.NoError(t, keyStore.OCR().Add(cltest.DefaultOCRKey))
require.NoError(t, keyStore.P2P().Add(cltest.DefaultP2PKey))
@@ -1667,6 +1666,7 @@ func createJob(t *testing.T, db *sqlx.DB, externalJobID uuid.UUID) *job.Job {
_, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())
_, address := cltest.MustInsertRandomKey(t, keyStore.Eth())
+ legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
jb, err := ocr.ValidatedOracleSpecToml(legacyChains,
testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{
JobID: externalJobID.String(),
diff --git a/core/services/feeds/service.go b/core/services/feeds/service.go
index 20919606faf..f6e8952d6b1 100644
--- a/core/services/feeds/service.go
+++ b/core/services/feeds/service.go
@@ -15,9 +15,10 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
"gopkg.in/guregu/null.v4"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink-relay/pkg/services"
+
"github.com/smartcontractkit/chainlink/v2/core/chains/evm"
"github.com/smartcontractkit/chainlink/v2/core/logger"
pb "github.com/smartcontractkit/chainlink/v2/core/services/feeds/proto"
diff --git a/core/services/fluxmonitorv2/delegate.go b/core/services/fluxmonitorv2/delegate.go
index d380122f715..e63f3556726 100644
--- a/core/services/fluxmonitorv2/delegate.go
+++ b/core/services/fluxmonitorv2/delegate.go
@@ -3,7 +3,7 @@ package fluxmonitorv2
import (
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
txmgrcommon "github.com/smartcontractkit/chainlink/v2/common/txmgr"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm"
diff --git a/core/services/fluxmonitorv2/flux_monitor.go b/core/services/fluxmonitorv2/flux_monitor.go
index 99d33c42399..5dbeaeafc31 100644
--- a/core/services/fluxmonitorv2/flux_monitor.go
+++ b/core/services/fluxmonitorv2/flux_monitor.go
@@ -13,9 +13,10 @@ import (
"github.com/pkg/errors"
"github.com/shopspring/decimal"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink-relay/pkg/services"
+
"github.com/smartcontractkit/chainlink/v2/core/bridges"
evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/log"
diff --git a/core/services/fluxmonitorv2/flux_monitor_test.go b/core/services/fluxmonitorv2/flux_monitor_test.go
index 0d1eb085a84..e81e1ba9e63 100644
--- a/core/services/fluxmonitorv2/flux_monitor_test.go
+++ b/core/services/fluxmonitorv2/flux_monitor_test.go
@@ -3,7 +3,6 @@ package fluxmonitorv2_test
import (
"fmt"
"math/big"
- "strings"
"testing"
"time"
@@ -19,14 +18,13 @@ import (
"github.com/stretchr/testify/require"
"gopkg.in/guregu/null.v4"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
txmgrcommon "github.com/smartcontractkit/chainlink/v2/common/txmgr"
"github.com/smartcontractkit/chainlink/v2/core/assets"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/log"
logmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/log/mocks"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
- "github.com/smartcontractkit/chainlink/v2/core/cmd"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/flux_aggregator_wrapper"
"github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
"github.com/smartcontractkit/chainlink/v2/core/internal/cltest/heavyweight"
@@ -284,8 +282,8 @@ func setupStoreWithKey(t *testing.T) (*sqlx.DB, common.Address) {
}
// setupStoreWithKey setups a new store and adds a key to the keystore
-func setupFullDBWithKey(t *testing.T, name string) (*sqlx.DB, common.Address) {
- cfg, db := heavyweight.FullTestDBV2(t, name, nil)
+func setupFullDBWithKey(t *testing.T) (*sqlx.DB, common.Address) {
+ cfg, db := heavyweight.FullTestDBV2(t, nil)
ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
_, nodeAddr := cltest.MustInsertRandomKey(t, ethKeyStore)
@@ -906,18 +904,8 @@ func TestFluxMonitor_HibernationTickerFiresMultipleTimes(t *testing.T) {
g.Eventually(func() int { return len(pollOccured) }, testutils.WaitTimeout(t)).Should(gomega.Equal(3))
}
-// chainlink_test_TestFluxMonitor_HibernationIsEnteredAndRetryTickerStopped
-// 63 bytes is max and chainlink_test_ takes up 15, plus 4 for a random hex suffix.
-func dbName(s string) string {
- diff := len(cmd.TestDBNamePrefix) + len("_FFF")
- if len(s) <= diff {
- return strings.ReplaceAll(strings.ToLower(s), "/", "")
- }
- return strings.ReplaceAll(strings.ToLower(s[len(s)-diff:]), "/", "")
-}
-
func TestFluxMonitor_HibernationIsEnteredAndRetryTickerStopped(t *testing.T) {
- db, nodeAddr := setupFullDBWithKey(t, "hibernation")
+ db, nodeAddr := setupFullDBWithKey(t)
oracles := []common.Address{nodeAddr, testutils.NewAddress()}
const (
diff --git a/core/services/fluxmonitorv2/integrations_test.go b/core/services/fluxmonitorv2/integrations_test.go
index 2c45ed5ad89..38c73d3ad74 100644
--- a/core/services/fluxmonitorv2/integrations_test.go
+++ b/core/services/fluxmonitorv2/integrations_test.go
@@ -24,7 +24,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/assets"
"github.com/smartcontractkit/chainlink/v2/core/bridges"
@@ -206,7 +206,7 @@ func startApplication(
fa fluxAggregatorUniverse,
overrides func(c *chainlink.Config, s *chainlink.Secrets),
) *cltest.TestApplication {
- config, _ := heavyweight.FullTestDBV2(t, dbName(t.Name()), overrides)
+ config, _ := heavyweight.FullTestDBV2(t, overrides)
app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, fa.backend, fa.key)
require.NoError(t, app.Start(testutils.Context(t)))
return app
diff --git a/core/services/fluxmonitorv2/orm.go b/core/services/fluxmonitorv2/orm.go
index 61395e8708a..f85ab146c7e 100644
--- a/core/services/fluxmonitorv2/orm.go
+++ b/core/services/fluxmonitorv2/orm.go
@@ -7,7 +7,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/common/txmgr/types"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
diff --git a/core/services/fluxmonitorv2/orm_test.go b/core/services/fluxmonitorv2/orm_test.go
index 0bb08032617..6e06a1e65b8 100644
--- a/core/services/fluxmonitorv2/orm_test.go
+++ b/core/services/fluxmonitorv2/orm_test.go
@@ -17,13 +17,11 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/evmtest"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/fluxmonitorv2"
"github.com/smartcontractkit/chainlink/v2/core/services/job"
"github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
- evmrelay "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
"github.com/smartcontractkit/chainlink/v2/core/utils"
)
@@ -96,11 +94,9 @@ func TestORM_UpdateFluxMonitorRoundStats(t *testing.T) {
pipelineORM := pipeline.NewORM(db, lggr, cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns())
bridgeORM := bridges.NewORM(db, lggr, cfg.Database())
- relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{GeneralConfig: cfg, DB: db, KeyStore: keyStore.Eth()})
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
// Instantiate a real job ORM because we need to create a job to satisfy
// a check in pipeline.CreateRun
- jobORM := job.NewORM(db, legacyChains, pipelineORM, bridgeORM, keyStore, lggr, cfg.Database())
+ jobORM := job.NewORM(db, pipelineORM, bridgeORM, keyStore, lggr, cfg.Database())
orm := newORM(t, db, cfg.Database(), nil)
address := testutils.NewAddress()
diff --git a/core/services/functions/connector_handler.go b/core/services/functions/connector_handler.go
index a018157a373..8a8710e6ea6 100644
--- a/core/services/functions/connector_handler.go
+++ b/core/services/functions/connector_handler.go
@@ -78,6 +78,13 @@ func (h *functionsConnectorHandler) HandleGatewayMessage(ctx context.Context, ga
}
if balance, err := h.subscriptions.GetMaxUserBalance(fromAddr); err != nil || balance.Cmp(h.minimumBalance.ToInt()) < 0 {
h.lggr.Errorw("user subscription has insufficient balance", "id", gatewayId, "address", fromAddr, "balance", balance, "minBalance", h.minimumBalance)
+ response := functions.SecretsResponseBase{
+ Success: false,
+ ErrorMessage: "user subscription has insufficient balance",
+ }
+ if err := h.sendResponse(ctx, gatewayId, body, response); err != nil {
+ h.lggr.Errorw("failed to send response to gateway", "id", gatewayId, "error", err)
+ }
return
}
diff --git a/core/services/functions/connector_handler_test.go b/core/services/functions/connector_handler_test.go
index bb3e2acbabd..7bf98d7501d 100644
--- a/core/services/functions/connector_handler_test.go
+++ b/core/services/functions/connector_handler_test.go
@@ -39,7 +39,7 @@ func TestFunctionsConnectorHandler(t *testing.T) {
allowlist.On("Close", mock.Anything).Return(nil)
subscriptions.On("Start", mock.Anything).Return(nil)
subscriptions.On("Close", mock.Anything).Return(nil)
- handler, err := functions.NewFunctionsConnectorHandler(addr.Hex(), privateKey, storage, allowlist, rateLimiter, subscriptions, *assets.NewLinkFromJuels(0), logger)
+ handler, err := functions.NewFunctionsConnectorHandler(addr.Hex(), privateKey, storage, allowlist, rateLimiter, subscriptions, *assets.NewLinkFromJuels(100), logger)
require.NoError(t, err)
handler.SetConnector(connector)
@@ -78,7 +78,7 @@ func TestFunctionsConnectorHandler(t *testing.T) {
}
storage.On("List", ctx, addr).Return(snapshot, nil).Once()
allowlist.On("Allow", addr).Return(true).Once()
- subscriptions.On("GetMaxUserBalance", mock.Anything).Return(big.NewInt(100), nil)
+ subscriptions.On("GetMaxUserBalance", mock.Anything).Return(big.NewInt(100), nil).Once()
connector.On("SendToGateway", ctx, "gw1", mock.Anything).Run(func(args mock.Arguments) {
msg, ok := args[2].(*api.Message)
require.True(t, ok)
@@ -91,6 +91,7 @@ func TestFunctionsConnectorHandler(t *testing.T) {
t.Run("orm error", func(t *testing.T) {
storage.On("List", ctx, addr).Return(nil, errors.New("boom")).Once()
allowlist.On("Allow", addr).Return(true).Once()
+ subscriptions.On("GetMaxUserBalance", mock.Anything).Return(big.NewInt(100), nil).Once()
connector.On("SendToGateway", ctx, "gw1", mock.Anything).Run(func(args mock.Arguments) {
msg, ok := args[2].(*api.Message)
require.True(t, ok)
@@ -135,7 +136,7 @@ func TestFunctionsConnectorHandler(t *testing.T) {
storage.On("Put", ctx, &key, &record, signature).Return(nil).Once()
allowlist.On("Allow", addr).Return(true).Once()
- subscriptions.On("GetMaxUserBalance", mock.Anything).Return(big.NewInt(100), nil)
+ subscriptions.On("GetMaxUserBalance", mock.Anything).Return(big.NewInt(100), nil).Once()
connector.On("SendToGateway", ctx, "gw1", mock.Anything).Run(func(args mock.Arguments) {
msg, ok := args[2].(*api.Message)
require.True(t, ok)
@@ -148,6 +149,7 @@ func TestFunctionsConnectorHandler(t *testing.T) {
t.Run("orm error", func(t *testing.T) {
storage.On("Put", ctx, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("boom")).Once()
allowlist.On("Allow", addr).Return(true).Once()
+ subscriptions.On("GetMaxUserBalance", mock.Anything).Return(big.NewInt(100), nil).Once()
connector.On("SendToGateway", ctx, "gw1", mock.Anything).Run(func(args mock.Arguments) {
msg, ok := args[2].(*api.Message)
require.True(t, ok)
@@ -163,6 +165,7 @@ func TestFunctionsConnectorHandler(t *testing.T) {
require.NoError(t, msg.Sign(privateKey))
storage.On("Put", ctx, mock.Anything, mock.Anything, mock.Anything).Return(s4.ErrWrongSignature).Once()
allowlist.On("Allow", addr).Return(true).Once()
+ subscriptions.On("GetMaxUserBalance", mock.Anything).Return(big.NewInt(100), nil).Once()
connector.On("SendToGateway", ctx, "gw1", mock.Anything).Run(func(args mock.Arguments) {
msg, ok := args[2].(*api.Message)
require.True(t, ok)
@@ -177,6 +180,7 @@ func TestFunctionsConnectorHandler(t *testing.T) {
msg.Body.Payload = json.RawMessage(`{sdfgdfgoscsicosd:sdf:::sdf ::; xx}`)
require.NoError(t, msg.Sign(privateKey))
allowlist.On("Allow", addr).Return(true).Once()
+ subscriptions.On("GetMaxUserBalance", mock.Anything).Return(big.NewInt(100), nil).Once()
connector.On("SendToGateway", ctx, "gw1", mock.Anything).Run(func(args mock.Arguments) {
msg, ok := args[2].(*api.Message)
require.True(t, ok)
@@ -186,6 +190,19 @@ func TestFunctionsConnectorHandler(t *testing.T) {
handler.HandleGatewayMessage(ctx, "gw1", &msg)
})
+
+ t.Run("insufficient balance", func(t *testing.T) {
+ allowlist.On("Allow", addr).Return(true).Once()
+ subscriptions.On("GetMaxUserBalance", mock.Anything).Return(big.NewInt(0), nil).Once()
+ connector.On("SendToGateway", ctx, "gw1", mock.Anything).Run(func(args mock.Arguments) {
+ msg, ok := args[2].(*api.Message)
+ require.True(t, ok)
+ require.Equal(t, `{"success":false,"error_message":"user subscription has insufficient balance"}`, string(msg.Body.Payload))
+
+ }).Return(nil).Once()
+
+ handler.HandleGatewayMessage(ctx, "gw1", &msg)
+ })
})
t.Run("unsupported method", func(t *testing.T) {
@@ -201,6 +218,7 @@ func TestFunctionsConnectorHandler(t *testing.T) {
require.NoError(t, msg.Sign(privateKey))
allowlist.On("Allow", addr).Return(true).Once()
+ subscriptions.On("GetMaxUserBalance", mock.Anything).Return(big.NewInt(100), nil).Once()
handler.HandleGatewayMessage(testutils.Context(t), "gw1", &msg)
})
})
diff --git a/core/services/functions/listener_test.go b/core/services/functions/listener_test.go
index 007a2a91688..3b7ed46988d 100644
--- a/core/services/functions/listener_test.go
+++ b/core/services/functions/listener_test.go
@@ -125,7 +125,7 @@ func NewFunctionsListenerUniverse(t *testing.T, timeoutSec int, pruneFrequencySe
ingressClient := sync_mocks.NewTelemetryService(t)
ingressAgent := telemetry.NewIngressAgentWrapper(ingressClient)
- monEndpoint := ingressAgent.GenMonitoringEndpoint(contractAddress, synchronization.FunctionsRequests, "test-network", "test-chainID")
+ monEndpoint := ingressAgent.GenMonitoringEndpoint("test-network", "test-chainID", contractAddress, synchronization.FunctionsRequests)
s4Storage := s4_mocks.NewStorage(t)
client := chain.Client()
diff --git a/core/services/functions/orm.go b/core/services/functions/orm.go
index b6f692019a1..7838c700858 100644
--- a/core/services/functions/orm.go
+++ b/core/services/functions/orm.go
@@ -7,7 +7,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
diff --git a/core/services/gateway/handlers/functions/handler.functions.go b/core/services/gateway/handlers/functions/handler.functions.go
index 01f450a4ea0..d0011145d40 100644
--- a/core/services/gateway/handlers/functions/handler.functions.go
+++ b/core/services/gateway/handlers/functions/handler.functions.go
@@ -5,6 +5,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "math/big"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -178,8 +179,15 @@ func (h *functionsHandler) HandleUserMessage(ctx context.Context, msg *api.Messa
return ErrRateLimited
}
if h.subscriptions != nil && h.minimumBalance != nil {
- if balance, err := h.subscriptions.GetMaxUserBalance(sender); err != nil || balance.Cmp(h.minimumBalance.ToInt()) < 0 {
- h.lggr.Debug("received a message from a user having insufficient balance", "sender", msg.Body.Sender, "balance", balance.String())
+ balance, err := h.subscriptions.GetMaxUserBalance(sender)
+ if err != nil {
+ h.lggr.Debugw("error getting max user balance", "sender", msg.Body.Sender, "err", err)
+ }
+ if balance == nil {
+ balance = big.NewInt(0)
+ }
+ if err != nil || balance.Cmp(h.minimumBalance.ToInt()) < 0 {
+ h.lggr.Debugw("received a message from a user having insufficient balance", "sender", msg.Body.Sender, "balance", balance.String())
return fmt.Errorf("sender has insufficient balance: %v juels", balance.String())
}
}
diff --git a/core/services/gateway/handlers/functions/subscriptions.go b/core/services/gateway/handlers/functions/subscriptions.go
index 79233b1031a..c7a6519e693 100644
--- a/core/services/gateway/handlers/functions/subscriptions.go
+++ b/core/services/gateway/handlers/functions/subscriptions.go
@@ -130,19 +130,16 @@ func (s *onchainSubscriptions) queryLoop() {
blockNumber := big.NewInt(0).Sub(latestBlockHeight, s.blockConfirmations)
- updateLastKnownCount := func() {
+ if lastKnownCount == 0 || start > lastKnownCount {
count, err := s.getSubscriptionsCount(ctx, blockNumber)
if err != nil {
- s.lggr.Errorw("Error getting subscriptions count", "err", err)
- return
+ s.lggr.Errorw("Error getting new subscriptions count", "err", err)
+ } else {
+ s.lggr.Infow("Updated subscriptions count", "count", count, "blockNumber", blockNumber.Int64())
+ lastKnownCount = count
}
- s.lggr.Infow("Updated subscriptions count", "err", err, "count", count, "blockNumber", blockNumber.Int64())
- lastKnownCount = count
}
- if lastKnownCount == 0 {
- updateLastKnownCount()
- }
if lastKnownCount == 0 {
s.lggr.Info("Router has no subscriptions yet")
return
@@ -152,12 +149,9 @@ func (s *onchainSubscriptions) queryLoop() {
start = 1
}
- end := start + uint64(s.config.UpdateRangeSize)
+ end := start + uint64(s.config.UpdateRangeSize) - 1
if end > lastKnownCount {
- updateLastKnownCount()
- if end > lastKnownCount {
- end = lastKnownCount
- }
+ end = lastKnownCount
}
if err := s.querySubscriptionsRange(ctx, blockNumber, start, end); err != nil {
s.lggr.Errorw("Error querying subscriptions", "err", err, "start", start, "end", end)
@@ -180,6 +174,8 @@ func (s *onchainSubscriptions) queryLoop() {
}
func (s *onchainSubscriptions) querySubscriptionsRange(ctx context.Context, blockNumber *big.Int, start, end uint64) error {
+ s.lggr.Debugw("Querying subscriptions", "blockNumber", blockNumber, "start", start, "end", end)
+
subscriptions, err := s.router.GetSubscriptionsInRange(&bind.CallOpts{
Pending: false,
BlockNumber: blockNumber,
diff --git a/core/services/gateway/handlers/functions/subscriptions_test.go b/core/services/gateway/handlers/functions/subscriptions_test.go
index 1e46bff5c0f..adbf637ad73 100644
--- a/core/services/gateway/handlers/functions/subscriptions_test.go
+++ b/core/services/gateway/handlers/functions/subscriptions_test.go
@@ -2,6 +2,7 @@ package functions_test
import (
"math/big"
+ "sync/atomic"
"testing"
"time"
@@ -24,9 +25,7 @@ const (
invalidUser = "0x6E2dc0F9DB014aE19888F539E59285D2Ea04244C"
)
-func TestSubscriptions(t *testing.T) {
- t.Parallel()
-
+func TestSubscriptions_OnePass(t *testing.T) {
getSubscriptionCount := hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000000003")
getSubscriptionsInRange := hexutil.MustDecode("0x00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000240000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000109e6e1b12098cc8f3a1e9719a817ec53ab9b35c000000000000000000000000000000000000000000000000000034e23f515cb0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000f5340f0968ee8b7dfd97e3327a6139273cc2c4fa000000000000000000000000000000000000000000000001158e460913d000000000000000000000000000009ed925d8206a4f88a2f643b28b3035b315753cd60000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001bc14b92364c75e20000000000000000000000009ed925d8206a4f88a2f643b28b3035b315753cd60000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000005439e5881a529f3ccbffc0e82d49f9db3950aefe")
@@ -46,7 +45,7 @@ func TestSubscriptions(t *testing.T) {
BlockConfirmations: 1,
UpdateFrequencySec: 1,
UpdateTimeoutSec: 1,
- UpdateRangeSize: 10,
+ UpdateRangeSize: 3,
}
subscriptions, err := functions.NewOnchainSubscriptions(client, config, logger.TestLogger(t))
require.NoError(t, err)
@@ -57,6 +56,7 @@ func TestSubscriptions(t *testing.T) {
assert.NoError(t, subscriptions.Close())
})
+ // Initially there are 3 subscriptions and the update range size is 3, so a single pass covers them all.
gomega.NewGomegaWithT(t).Eventually(func() bool {
expectedBalance := big.NewInt(0).SetBytes(hexutil.MustDecode("0x01158e460913d00000"))
balance, err1 := subscriptions.GetMaxUserBalance(common.HexToAddress(validUser))
@@ -64,3 +64,47 @@ func TestSubscriptions(t *testing.T) {
return err1 == nil && err2 != nil && balance.Cmp(expectedBalance) == 0
}, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue())
}
+
+func TestSubscriptions_MultiPass(t *testing.T) {
+ const ncycles int32 = 5
+ var currentCycle atomic.Int32
+ getSubscriptionCount := hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000000006")
+ getSubscriptionsInRange := hexutil.MustDecode("0x00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000240000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000109e6e1b12098cc8f3a1e9719a817ec53ab9b35c000000000000000000000000000000000000000000000000000034e23f515cb0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000f5340f0968ee8b7dfd97e3327a6139273cc2c4fa000000000000000000000000000000000000000000000001158e460913d000000000000000000000000000009ed925d8206a4f88a2f643b28b3035b315753cd60000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001bc14b92364c75e20000000000000000000000009ed925d8206a4f88a2f643b28b3035b315753cd60000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000005439e5881a529f3ccbffc0e82d49f9db3950aefe")
+
+ ctx := testutils.Context(t)
+ client := mocks.NewClient(t)
+ client.On("LatestBlockHeight", mock.Anything).Return(big.NewInt(42), nil)
+ client.On("CallContract", mock.Anything, ethereum.CallMsg{ // getSubscriptionCount; each call marks one full update cycle
+ To: &common.Address{},
+ Data: hexutil.MustDecode("0x66419970"),
+ }, mock.Anything).Run(func(args mock.Arguments) {
+ currentCycle.Add(1)
+ }).Return(getSubscriptionCount, nil)
+ client.On("CallContract", mock.Anything, ethereum.CallMsg{ // GetSubscriptionsInRange(1,3) — first pass
+ To: &common.Address{},
+ Data: hexutil.MustDecode("0xec2454e500000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000003"),
+ }, mock.Anything).Return(getSubscriptionsInRange, nil)
+ client.On("CallContract", mock.Anything, ethereum.CallMsg{ // GetSubscriptionsInRange(4,6) — second pass
+ To: &common.Address{},
+ Data: hexutil.MustDecode("0xec2454e500000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000006"),
+ }, mock.Anything).Return(getSubscriptionsInRange, nil)
+ config := functions.OnchainSubscriptionsConfig{
+ ContractAddress: common.Address{},
+ BlockConfirmations: 1,
+ UpdateFrequencySec: 1,
+ UpdateTimeoutSec: 1,
+ UpdateRangeSize: 3, // 6 subscriptions with range 3 forces two GetSubscriptionsInRange calls per cycle
+ }
+ subscriptions, err := functions.NewOnchainSubscriptions(client, config, logger.TestLogger(t))
+ require.NoError(t, err)
+
+ err = subscriptions.Start(ctx)
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ assert.NoError(t, subscriptions.Close())
+ })
+
+ gomega.NewGomegaWithT(t).Eventually(func() bool {
+ return currentCycle.Load() >= ncycles // ">=" not "==": the updater increments concurrently, so a 1s poll can skip the exact value
+ }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue())
+}
diff --git a/core/services/job/helpers_test.go b/core/services/job/helpers_test.go
index 167ed5297cc..4151ed401c8 100644
--- a/core/services/job/helpers_test.go
+++ b/core/services/job/helpers_test.go
@@ -15,7 +15,7 @@ import (
"github.com/stretchr/testify/require"
"gopkg.in/guregu/null.v4"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
diff --git a/core/services/job/job_orm_test.go b/core/services/job/job_orm_test.go
index 74416e68dce..f4471e75c68 100644
--- a/core/services/job/job_orm_test.go
+++ b/core/services/job/job_orm_test.go
@@ -16,8 +16,6 @@ import (
"github.com/smartcontractkit/chainlink-relay/pkg/types"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay"
-
"github.com/smartcontractkit/chainlink/v2/core/assets"
"github.com/smartcontractkit/chainlink/v2/core/bridges"
evmcfg "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml"
@@ -38,6 +36,7 @@ import (
ocr2validate "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/validate"
"github.com/smartcontractkit/chainlink/v2/core/services/ocrbootstrap"
"github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
+ "github.com/smartcontractkit/chainlink/v2/core/services/relay"
evmrelay "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
"github.com/smartcontractkit/chainlink/v2/core/services/vrf/vrfcommon"
"github.com/smartcontractkit/chainlink/v2/core/services/webhook"
@@ -83,9 +82,7 @@ func TestORM(t *testing.T) {
pipelineORM := pipeline.NewORM(db, logger.TestLogger(t), config.Database(), config.JobPipeline().MaxSuccessfulRuns())
bridgesORM := bridges.NewORM(db, logger.TestLogger(t), config.Database())
- relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: ethKeyStore})
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- orm := NewTestORM(t, db, legacyChains, pipelineORM, bridgesORM, keyStore, config.Database())
+ orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())
borm := bridges.NewORM(db, logger.TestLogger(t), config.Database())
_, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())
@@ -331,9 +328,7 @@ func TestORM_DeleteJob_DeletesAssociatedRecords(t *testing.T) {
lggr := logger.TestLogger(t)
pipelineORM := pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns())
bridgesORM := bridges.NewORM(db, lggr, config.Database())
- relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- jobORM := NewTestORM(t, db, legacyChains, pipelineORM, bridgesORM, keyStore, config.Database())
+ jobORM := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())
scopedConfig := evmtest.NewChainScopedConfig(t, config)
korm := keeper.NewORM(db, logger.TestLogger(t), scopedConfig.Database())
@@ -342,6 +337,8 @@ func TestORM_DeleteJob_DeletesAssociatedRecords(t *testing.T) {
_, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())
_, address := cltest.MustInsertRandomKey(t, keyStore.Eth())
+ relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
+ legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
jb, err := ocr.ValidatedOracleSpecToml(legacyChains, testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{
TransmitterAddress: address.Hex(),
DS1BridgeName: bridge.Name.String(),
@@ -431,10 +428,8 @@ func TestORM_CreateJob_VRFV2(t *testing.T) {
lggr := logger.TestLogger(t)
pipelineORM := pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns())
bridgesORM := bridges.NewORM(db, lggr, config.Database())
- relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- jobORM := NewTestORM(t, db, legacyChains, pipelineORM, bridgesORM, keyStore, config.Database())
+ jobORM := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())
fromAddresses := []string{cltest.NewEIP55Address().String(), cltest.NewEIP55Address().String()}
jb, err := vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec(
@@ -514,9 +509,7 @@ func TestORM_CreateJob_VRFV2Plus(t *testing.T) {
lggr := logger.TestLogger(t)
pipelineORM := pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns())
bridgesORM := bridges.NewORM(db, lggr, config.Database())
- cc := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(cc)
- jobORM := NewTestORM(t, db, legacyChains, pipelineORM, bridgesORM, keyStore, config.Database())
+ jobORM := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())
fromAddresses := []string{cltest.NewEIP55Address().String(), cltest.NewEIP55Address().String()}
jb, err := vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec(
@@ -599,9 +592,7 @@ func TestORM_CreateJob_OCRBootstrap(t *testing.T) {
lggr := logger.TestLogger(t)
pipelineORM := pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns())
bridgesORM := bridges.NewORM(db, lggr, config.Database())
- relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- jobORM := NewTestORM(t, db, legacyChains, pipelineORM, bridgesORM, keyStore, config.Database())
+ jobORM := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())
jb, err := ocrbootstrap.ValidatedBootstrapSpecToml(testspecs.GetOCRBootstrapSpec())
require.NoError(t, err)
@@ -628,9 +619,7 @@ func TestORM_CreateJob_EVMChainID_Validation(t *testing.T) {
pipelineORM := pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns())
bridgesORM := bridges.NewORM(db, lggr, config.Database())
- relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- jobORM := NewTestORM(t, db, legacyChains, pipelineORM, bridgesORM, keyStore, config.Database())
+ jobORM := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())
t.Run("evm chain id validation for ocr works", func(t *testing.T) {
jb := job.Job{
@@ -725,9 +714,7 @@ func TestORM_CreateJob_OCR_DuplicatedContractAddress(t *testing.T) {
pipelineORM := pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns())
bridgesORM := bridges.NewORM(db, lggr, config.Database())
- relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- jobORM := NewTestORM(t, db, legacyChains, pipelineORM, bridgesORM, keyStore, config.Database())
+ jobORM := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())
// defaultChainID is deprecated
defaultChainID := customChainID
@@ -745,6 +732,8 @@ func TestORM_CreateJob_OCR_DuplicatedContractAddress(t *testing.T) {
TransmitterAddress: address.Hex(),
JobID: externalJobID.UUID.String(),
})
+ relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
+ legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
jb, err := ocr.ValidatedOracleSpecToml(legacyChains, spec.Toml())
require.NoError(t, err)
@@ -794,9 +783,7 @@ func TestORM_CreateJob_OCR2_DuplicatedContractAddress(t *testing.T) {
pipelineORM := pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns())
bridgesORM := bridges.NewORM(db, lggr, config.Database())
- relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- jobORM := NewTestORM(t, db, legacyChains, pipelineORM, bridgesORM, keyStore, config.Database())
+ jobORM := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())
_, address := cltest.MustInsertRandomKey(t, keyStore.Eth())
@@ -857,10 +844,7 @@ func TestORM_CreateJob_OCR2_Sending_Keys_Transmitter_Keys_Validations(t *testing
pipelineORM := pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns())
bridgesORM := bridges.NewORM(db, lggr, config.Database())
- relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
- require.True(t, relayExtenders.Len() > 0)
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- jobORM := NewTestORM(t, db, legacyChains, pipelineORM, bridgesORM, keyStore, config.Database())
+ jobORM := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())
jb, err := ocr2validate.ValidatedOracleSpecToml(config.OCR2(), config.Insecure(), testspecs.GetOCR2EVMSpecMinimal())
require.NoError(t, err)
@@ -974,14 +958,15 @@ func Test_FindJobs(t *testing.T) {
pipelineORM := pipeline.NewORM(db, logger.TestLogger(t), config.Database(), config.JobPipeline().MaxSuccessfulRuns())
bridgesORM := bridges.NewORM(db, logger.TestLogger(t), config.Database())
- relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- orm := NewTestORM(t, db, legacyChains, pipelineORM, bridgesORM, keyStore, config.Database())
+
+ orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())
_, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())
_, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())
_, address := cltest.MustInsertRandomKey(t, keyStore.Eth())
+ relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
+ legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
jb1, err := ocr.ValidatedOracleSpecToml(legacyChains,
testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{
JobID: uuid.New().String(),
@@ -1054,9 +1039,8 @@ func Test_FindJob(t *testing.T) {
pipelineORM := pipeline.NewORM(db, logger.TestLogger(t), config.Database(), config.JobPipeline().MaxSuccessfulRuns())
bridgesORM := bridges.NewORM(db, logger.TestLogger(t), config.Database())
- relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- orm := NewTestORM(t, db, legacyChains, pipelineORM, bridgesORM, keyStore, config.Database())
+
+ orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())
_, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())
_, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())
@@ -1065,6 +1049,8 @@ func Test_FindJob(t *testing.T) {
// Must uniquely name the OCR Specs to properly insert a new job in the job table.
externalJobID := uuid.New()
_, address := cltest.MustInsertRandomKey(t, keyStore.Eth())
+ relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
+ legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
job, err := ocr.ValidatedOracleSpecToml(legacyChains,
testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{
JobID: externalJobID.String(),
@@ -1232,9 +1218,7 @@ func Test_FindJobsByPipelineSpecIDs(t *testing.T) {
pipelineORM := pipeline.NewORM(db, logger.TestLogger(t), config.Database(), config.JobPipeline().MaxSuccessfulRuns())
bridgesORM := bridges.NewORM(db, logger.TestLogger(t), config.Database())
- relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- orm := NewTestORM(t, db, legacyChains, pipelineORM, bridgesORM, keyStore, config.Database())
+ orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())
jb, err := directrequest.ValidatedDirectRequestSpec(testspecs.GetDirectRequestSpec())
require.NoError(t, err)
@@ -1263,20 +1247,7 @@ func Test_FindJobsByPipelineSpecIDs(t *testing.T) {
})
t.Run("with chainID disabled", func(t *testing.T) {
- newCfg := configtest.NewGeneralConfig(t, func(c *chainlink.Config, s *chainlink.Secrets) {
- c.EVM[0] = &evmcfg.EVMConfig{
- ChainID: utils.NewBigI(0),
- Enabled: ptr(false),
- }
- c.EVM = append(c.EVM, &evmcfg.EVMConfig{
- ChainID: utils.NewBigI(123123123),
- Enabled: ptr(true),
- })
- })
- relayExtenders2 := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: newCfg, KeyStore: keyStore.Eth()})
- legacyChains2 := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders2)
-
- orm2 := NewTestORM(t, db, legacyChains2, pipelineORM, bridgesORM, keyStore, config.Database())
+ orm2 := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())
jbs, err2 := orm2.FindJobsByPipelineSpecIDs([]int32{jb.PipelineSpecID})
require.NoError(t, err2)
@@ -1297,7 +1268,7 @@ func Test_FindPipelineRuns(t *testing.T) {
bridgesORM := bridges.NewORM(db, logger.TestLogger(t), config.Database())
relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- orm := NewTestORM(t, db, legacyChains, pipelineORM, bridgesORM, keyStore, config.Database())
+ orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())
_, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())
_, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())
@@ -1358,7 +1329,7 @@ func Test_PipelineRunsByJobID(t *testing.T) {
bridgesORM := bridges.NewORM(db, logger.TestLogger(t), config.Database())
relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- orm := NewTestORM(t, db, legacyChains, pipelineORM, bridgesORM, keyStore, config.Database())
+ orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())
_, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())
_, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())
@@ -1419,7 +1390,7 @@ func Test_FindPipelineRunIDsByJobID(t *testing.T) {
bridgesORM := bridges.NewORM(db, lggr, config.Database())
relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- orm := NewTestORM(t, db, legacyChains, pipelineORM, bridgesORM, keyStore, config.Database())
+ orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())
_, address := cltest.MustInsertRandomKey(t, keyStore.Eth())
@@ -1527,7 +1498,7 @@ func Test_FindPipelineRunsByIDs(t *testing.T) {
bridgesORM := bridges.NewORM(db, logger.TestLogger(t), config.Database())
relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- orm := NewTestORM(t, db, legacyChains, pipelineORM, bridgesORM, keyStore, config.Database())
+ orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())
_, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())
_, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())
@@ -1583,9 +1554,7 @@ func Test_FindPipelineRunByID(t *testing.T) {
pipelineORM := pipeline.NewORM(db, logger.TestLogger(t), config.Database(), config.JobPipeline().MaxSuccessfulRuns())
bridgesORM := bridges.NewORM(db, logger.TestLogger(t), config.Database())
- relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- orm := NewTestORM(t, db, legacyChains, pipelineORM, bridgesORM, keyStore, config.Database())
+ orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())
jb, err := directrequest.ValidatedDirectRequestSpec(testspecs.GetDirectRequestSpec())
require.NoError(t, err)
@@ -1628,9 +1597,7 @@ func Test_FindJobWithoutSpecErrors(t *testing.T) {
pipelineORM := pipeline.NewORM(db, logger.TestLogger(t), config.Database(), config.JobPipeline().MaxSuccessfulRuns())
bridgesORM := bridges.NewORM(db, logger.TestLogger(t), config.Database())
- relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- orm := NewTestORM(t, db, legacyChains, pipelineORM, bridgesORM, keyStore, config.Database())
+ orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())
jb, err := directrequest.ValidatedDirectRequestSpec(testspecs.GetDirectRequestSpec())
require.NoError(t, err)
@@ -1667,9 +1634,7 @@ func Test_FindSpecErrorsByJobIDs(t *testing.T) {
pipelineORM := pipeline.NewORM(db, logger.TestLogger(t), config.Database(), config.JobPipeline().MaxSuccessfulRuns())
bridgesORM := bridges.NewORM(db, logger.TestLogger(t), config.Database())
- relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- orm := NewTestORM(t, db, legacyChains, pipelineORM, bridgesORM, keyStore, config.Database())
+ orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())
jb, err := directrequest.ValidatedDirectRequestSpec(testspecs.GetDirectRequestSpec())
require.NoError(t, err)
@@ -1705,7 +1670,7 @@ func Test_CountPipelineRunsByJobID(t *testing.T) {
bridgesORM := bridges.NewORM(db, logger.TestLogger(t), config.Database())
relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- orm := NewTestORM(t, db, legacyChains, pipelineORM, bridgesORM, keyStore, config.Database())
+ orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())
_, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())
_, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())
diff --git a/core/services/job/job_pipeline_orm_integration_test.go b/core/services/job/job_pipeline_orm_integration_test.go
index 1158fc46260..f1307753d29 100644
--- a/core/services/job/job_pipeline_orm_integration_test.go
+++ b/core/services/job/job_pipeline_orm_integration_test.go
@@ -7,7 +7,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/bridges"
"github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
@@ -156,7 +156,7 @@ func TestPipelineORM_Integration(t *testing.T) {
legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
runner := pipeline.NewRunner(orm, btORM, config.JobPipeline(), cfg.WebServer(), legacyChains, nil, nil, lggr, nil, nil)
- jobORM := NewTestORM(t, db, legacyChains, orm, btORM, keyStore, cfg.Database())
+ jobORM := NewTestORM(t, db, orm, btORM, keyStore, cfg.Database())
dbSpec := makeVoterTurnoutOCRJobSpec(t, transmitterAddress, bridge.Name.String(), bridge2.Name.String())
diff --git a/core/services/job/models.go b/core/services/job/models.go
index a3dfce59996..a474040dd41 100644
--- a/core/services/job/models.go
+++ b/core/services/job/models.go
@@ -15,6 +15,7 @@ import (
"gopkg.in/guregu/null.v4"
"github.com/smartcontractkit/chainlink-relay/pkg/types"
+
"github.com/smartcontractkit/chainlink/v2/core/assets"
"github.com/smartcontractkit/chainlink/v2/core/bridges"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml"
@@ -233,35 +234,25 @@ func (pr *PipelineRun) SetID(value string) error {
// OCROracleSpec defines the job spec for OCR jobs.
type OCROracleSpec struct {
- ID int32 `toml:"-"`
- ContractAddress ethkey.EIP55Address `toml:"contractAddress"`
- P2PBootstrapPeers pq.StringArray `toml:"p2pBootstrapPeers" db:"p2p_bootstrap_peers"`
- P2PV2Bootstrappers pq.StringArray `toml:"p2pv2Bootstrappers" db:"p2pv2_bootstrappers"`
- IsBootstrapPeer bool `toml:"isBootstrapPeer"`
- EncryptedOCRKeyBundleID *models.Sha256Hash `toml:"keyBundleID"`
- EncryptedOCRKeyBundleIDEnv bool
- TransmitterAddress *ethkey.EIP55Address `toml:"transmitterAddress"`
- TransmitterAddressEnv bool
- ObservationTimeout models.Interval `toml:"observationTimeout"`
- ObservationTimeoutEnv bool
- BlockchainTimeout models.Interval `toml:"blockchainTimeout"`
- BlockchainTimeoutEnv bool
- ContractConfigTrackerSubscribeInterval models.Interval `toml:"contractConfigTrackerSubscribeInterval"`
- ContractConfigTrackerSubscribeIntervalEnv bool
- ContractConfigTrackerPollInterval models.Interval `toml:"contractConfigTrackerPollInterval"`
- ContractConfigTrackerPollIntervalEnv bool
- ContractConfigConfirmations uint16 `toml:"contractConfigConfirmations"`
- ContractConfigConfirmationsEnv bool
- EVMChainID *utils.Big `toml:"evmChainID" db:"evm_chain_id"`
- DatabaseTimeout *models.Interval `toml:"databaseTimeout"`
- DatabaseTimeoutEnv bool
- ObservationGracePeriod *models.Interval `toml:"observationGracePeriod"`
- ObservationGracePeriodEnv bool
- ContractTransmitterTransmitTimeout *models.Interval `toml:"contractTransmitterTransmitTimeout"`
- ContractTransmitterTransmitTimeoutEnv bool
- CaptureEATelemetry bool `toml:"captureEATelemetry"`
- CreatedAt time.Time `toml:"-"`
- UpdatedAt time.Time `toml:"-"`
+ ID int32 `toml:"-"`
+ ContractAddress ethkey.EIP55Address `toml:"contractAddress"`
+ P2PBootstrapPeers pq.StringArray `toml:"p2pBootstrapPeers" db:"p2p_bootstrap_peers"`
+ P2PV2Bootstrappers pq.StringArray `toml:"p2pv2Bootstrappers" db:"p2pv2_bootstrappers"`
+ IsBootstrapPeer bool `toml:"isBootstrapPeer"`
+ EncryptedOCRKeyBundleID *models.Sha256Hash `toml:"keyBundleID"`
+ TransmitterAddress *ethkey.EIP55Address `toml:"transmitterAddress"`
+ ObservationTimeout models.Interval `toml:"observationTimeout"`
+ BlockchainTimeout models.Interval `toml:"blockchainTimeout"`
+ ContractConfigTrackerSubscribeInterval models.Interval `toml:"contractConfigTrackerSubscribeInterval"`
+ ContractConfigTrackerPollInterval models.Interval `toml:"contractConfigTrackerPollInterval"`
+ ContractConfigConfirmations uint16 `toml:"contractConfigConfirmations"`
+ EVMChainID *utils.Big `toml:"evmChainID" db:"evm_chain_id"`
+ DatabaseTimeout *models.Interval `toml:"databaseTimeout"`
+ ObservationGracePeriod *models.Interval `toml:"observationGracePeriod"`
+ ContractTransmitterTransmitTimeout *models.Interval `toml:"contractTransmitterTransmitTimeout"`
+ CaptureEATelemetry bool `toml:"captureEATelemetry"`
+ CreatedAt time.Time `toml:"-"`
+ UpdatedAt time.Time `toml:"-"`
}
// GetID is a getter function that returns the ID of the spec.
@@ -438,15 +429,14 @@ func (w *WebhookSpec) SetID(value string) error {
}
type DirectRequestSpec struct {
- ID int32 `toml:"-"`
- ContractAddress ethkey.EIP55Address `toml:"contractAddress"`
- MinIncomingConfirmations clnull.Uint32 `toml:"minIncomingConfirmations"`
- MinIncomingConfirmationsEnv bool `toml:"minIncomingConfirmationsEnv"`
- Requesters models.AddressCollection `toml:"requesters"`
- MinContractPayment *assets.Link `toml:"minContractPaymentLinkJuels"`
- EVMChainID *utils.Big `toml:"evmChainID"`
- CreatedAt time.Time `toml:"-"`
- UpdatedAt time.Time `toml:"-"`
+ ID int32 `toml:"-"`
+ ContractAddress ethkey.EIP55Address `toml:"contractAddress"`
+ MinIncomingConfirmations clnull.Uint32 `toml:"minIncomingConfirmations"`
+ Requesters models.AddressCollection `toml:"requesters"`
+ MinContractPayment *assets.Link `toml:"minContractPaymentLinkJuels"`
+ EVMChainID *utils.Big `toml:"evmChainID"`
+ CreatedAt time.Time `toml:"-"`
+ UpdatedAt time.Time `toml:"-"`
}
type CronSpec struct {
@@ -522,13 +512,11 @@ type VRFSpec struct {
CoordinatorAddress ethkey.EIP55Address `toml:"coordinatorAddress"`
PublicKey secp256k1.PublicKey `toml:"publicKey"`
MinIncomingConfirmations uint32 `toml:"minIncomingConfirmations"`
- ConfirmationsEnv bool `toml:"-"`
EVMChainID *utils.Big `toml:"evmChainID"`
FromAddresses []ethkey.EIP55Address `toml:"fromAddresses"`
- PollPeriod time.Duration `toml:"pollPeriod"` // For v2 jobs
- PollPeriodEnv bool
- RequestedConfsDelay int64 `toml:"requestedConfsDelay"` // For v2 jobs. Optional, defaults to 0 if not provided.
- RequestTimeout time.Duration `toml:"requestTimeout"` // Optional, defaults to 24hr if not provided.
+ PollPeriod time.Duration `toml:"pollPeriod"` // For v2 jobs
+ RequestedConfsDelay int64 `toml:"requestedConfsDelay"` // For v2 jobs. Optional, defaults to 0 if not provided.
+ RequestTimeout time.Duration `toml:"requestTimeout"` // Optional, defaults to 24hr if not provided.
// GasLanePrice specifies the gas lane price for this VRF job.
// If the specified keys in FromAddresses do not have the provided gas price the job
diff --git a/core/services/job/orm.go b/core/services/job/orm.go
index cbdd7ebfae6..fb897bc9281 100644
--- a/core/services/job/orm.go
+++ b/core/services/job/orm.go
@@ -16,13 +16,11 @@ import (
"github.com/pkg/errors"
"go.uber.org/multierr"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink-relay/pkg/types"
"github.com/smartcontractkit/chainlink/v2/core/bridges"
- "github.com/smartcontractkit/chainlink/v2/core/chains"
- "github.com/smartcontractkit/chainlink/v2/core/chains/evm"
evmconfig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config"
"github.com/smartcontractkit/chainlink/v2/core/config"
"github.com/smartcontractkit/chainlink/v2/core/logger"
@@ -85,35 +83,25 @@ type ORMConfig interface {
}
type orm struct {
- q pg.Q
- legacyChains evm.LegacyChainContainer
- keyStore keystore.Master
- pipelineORM pipeline.ORM
- lggr logger.SugaredLogger
- cfg pg.QConfig
- bridgeORM bridges.ORM
+ q pg.Q
+ keyStore keystore.Master
+ pipelineORM pipeline.ORM
+ lggr logger.SugaredLogger
+ cfg pg.QConfig
+ bridgeORM bridges.ORM
}
var _ ORM = (*orm)(nil)
-func NewORM(
- db *sqlx.DB,
- legacyChains evm.LegacyChainContainer,
- pipelineORM pipeline.ORM,
- bridgeORM bridges.ORM,
- keyStore keystore.Master, // needed to validation key properties on new job creation
- lggr logger.Logger,
- cfg pg.QConfig,
-) *orm {
+func NewORM(db *sqlx.DB, pipelineORM pipeline.ORM, bridgeORM bridges.ORM, keyStore keystore.Master, lggr logger.Logger, cfg pg.QConfig) *orm {
namedLogger := logger.Sugared(lggr.Named("JobORM"))
return &orm{
- q: pg.NewQ(db, namedLogger, cfg),
- legacyChains: legacyChains,
- keyStore: keyStore,
- pipelineORM: pipelineORM,
- bridgeORM: bridgeORM,
- lggr: namedLogger,
- cfg: cfg,
+ q: pg.NewQ(db, namedLogger, cfg),
+ keyStore: keyStore,
+ pipelineORM: pipelineORM,
+ bridgeORM: bridgeORM,
+ lggr: namedLogger,
+ cfg: cfg,
}
}
func (o *orm) Close() error {
@@ -704,62 +692,25 @@ func (o *orm) FindJobs(offset, limit int) (jobs []Job, count int, err error) {
if err != nil {
return err
}
- for i := range jobs {
- err = multierr.Combine(err, o.LoadEnvConfigVars(&jobs[i]))
- }
+
return nil
})
return jobs, int(count), err
}
-func (o *orm) LoadEnvConfigVars(jb *Job) error {
- if jb.OCROracleSpec != nil {
- ch, err := o.legacyChains.Get(jb.OCROracleSpec.EVMChainID.String())
- if err != nil {
- return err
- }
- newSpec, err := LoadEnvConfigVarsOCR(ch.Config().EVM().OCR(), ch.Config().OCR(), *jb.OCROracleSpec)
- if err != nil {
- return err
- }
- jb.OCROracleSpec = newSpec
- } else if jb.VRFSpec != nil {
- ch, err := o.legacyChains.Get(jb.VRFSpec.EVMChainID.String())
- if err != nil {
- return err
- }
- jb.VRFSpec = LoadEnvConfigVarsVRF(ch.Config().EVM(), *jb.VRFSpec)
- } else if jb.DirectRequestSpec != nil {
- ch, err := o.legacyChains.Get(jb.DirectRequestSpec.EVMChainID.String())
- if err != nil {
- return err
- }
- jb.DirectRequestSpec = LoadEnvConfigVarsDR(ch.Config().EVM(), *jb.DirectRequestSpec)
- }
- return nil
-}
-
-type DRSpecConfig interface {
- MinIncomingConfirmations() uint32
-}
-
-func LoadEnvConfigVarsVRF(cfg DRSpecConfig, vrfs VRFSpec) *VRFSpec {
+func LoadDefaultVRFPollPeriod(vrfs VRFSpec) *VRFSpec {
if vrfs.PollPeriod == 0 {
- vrfs.PollPeriodEnv = true
vrfs.PollPeriod = 5 * time.Second
}
return &vrfs
}
-func LoadEnvConfigVarsDR(cfg DRSpecConfig, drs DirectRequestSpec) *DirectRequestSpec {
- // Take the largest of the global vs specific.
- minIncomingConfirmations := cfg.MinIncomingConfirmations()
- if !drs.MinIncomingConfirmations.Valid || drs.MinIncomingConfirmations.Uint32 < minIncomingConfirmations {
- drs.MinIncomingConfirmationsEnv = true
- drs.MinIncomingConfirmations = null.Uint32From(minIncomingConfirmations)
+// SetDRMinIncomingConfirmations takes the largest of the global vs specific.
+func SetDRMinIncomingConfirmations(defaultMinIncomingConfirmations uint32, drs DirectRequestSpec) *DirectRequestSpec {
+ if !drs.MinIncomingConfirmations.Valid || drs.MinIncomingConfirmations.Uint32 < defaultMinIncomingConfirmations {
+ drs.MinIncomingConfirmations = null.Uint32From(defaultMinIncomingConfirmations)
}
-
return &drs
}
@@ -773,38 +724,30 @@ type OCRConfig interface {
TransmitterAddress() (ethkey.EIP55Address, error)
}
-// LoadEnvConfigVarsLocalOCR loads local OCR env vars into the OCROracleSpec.
-func LoadEnvConfigVarsLocalOCR(evmOcrCfg evmconfig.OCR, os OCROracleSpec, ocrCfg OCRConfig) *OCROracleSpec {
+// LoadConfigVarsLocalOCR loads local OCR vars into the OCROracleSpec.
+func LoadConfigVarsLocalOCR(evmOcrCfg evmconfig.OCR, os OCROracleSpec, ocrCfg OCRConfig) *OCROracleSpec {
if os.ObservationTimeout == 0 {
- os.ObservationTimeoutEnv = true
os.ObservationTimeout = models.Interval(ocrCfg.ObservationTimeout())
}
if os.BlockchainTimeout == 0 {
- os.BlockchainTimeoutEnv = true
os.BlockchainTimeout = models.Interval(ocrCfg.BlockchainTimeout())
}
if os.ContractConfigTrackerSubscribeInterval == 0 {
- os.ContractConfigTrackerSubscribeIntervalEnv = true
os.ContractConfigTrackerSubscribeInterval = models.Interval(ocrCfg.ContractSubscribeInterval())
}
if os.ContractConfigTrackerPollInterval == 0 {
- os.ContractConfigTrackerPollIntervalEnv = true
os.ContractConfigTrackerPollInterval = models.Interval(ocrCfg.ContractPollInterval())
}
if os.ContractConfigConfirmations == 0 {
- os.ContractConfigConfirmationsEnv = true
os.ContractConfigConfirmations = evmOcrCfg.ContractConfirmations()
}
if os.DatabaseTimeout == nil {
- os.DatabaseTimeoutEnv = true
os.DatabaseTimeout = models.NewInterval(evmOcrCfg.DatabaseTimeout())
}
if os.ObservationGracePeriod == nil {
- os.ObservationGracePeriodEnv = true
os.ObservationGracePeriod = models.NewInterval(evmOcrCfg.ObservationGracePeriod())
}
if os.ContractTransmitterTransmitTimeout == nil {
- os.ContractTransmitterTransmitTimeoutEnv = true
os.ContractTransmitterTransmitTimeout = models.NewInterval(evmOcrCfg.ContractTransmitterTransmitTimeout())
}
os.CaptureEATelemetry = ocrCfg.CaptureEATelemetry()
@@ -812,15 +755,14 @@ func LoadEnvConfigVarsLocalOCR(evmOcrCfg evmconfig.OCR, os OCROracleSpec, ocrCfg
return &os
}
-// LoadEnvConfigVarsOCR loads OCR env vars into the OCROracleSpec.
-func LoadEnvConfigVarsOCR(evmOcrCfg evmconfig.OCR, ocrCfg OCRConfig, os OCROracleSpec) (*OCROracleSpec, error) {
+// LoadConfigVarsOCR loads OCR config vars into the OCROracleSpec.
+func LoadConfigVarsOCR(evmOcrCfg evmconfig.OCR, ocrCfg OCRConfig, os OCROracleSpec) (*OCROracleSpec, error) {
if os.TransmitterAddress == nil {
ta, err := ocrCfg.TransmitterAddress()
if !errors.Is(errors.Cause(err), config.ErrEnvUnset) {
if err != nil {
return nil, err
}
- os.TransmitterAddressEnv = true
os.TransmitterAddress = &ta
}
}
@@ -834,11 +776,10 @@ func LoadEnvConfigVarsOCR(evmOcrCfg evmconfig.OCR, ocrCfg OCRConfig, os OCROracl
if err != nil {
return nil, err
}
- os.EncryptedOCRKeyBundleIDEnv = true
os.EncryptedOCRKeyBundleID = &encryptedOCRKeyBundleID
}
- return LoadEnvConfigVarsLocalOCR(evmOcrCfg, os, ocrCfg), nil
+ return LoadConfigVarsLocalOCR(evmOcrCfg, os, ocrCfg), nil
}
func (o *orm) FindJobTx(id int32) (Job, error) {
@@ -872,7 +813,7 @@ func (o *orm) FindJobWithoutSpecErrors(id int32) (jb Job, err error) {
return jb, errors.Wrap(err, "FindJobWithoutSpecErrors failed")
}
- return jb, o.LoadEnvConfigVars(&jb)
+ return jb, nil
}
// FindSpecErrorsByJobIDs returns all jobs spec errors by jobs IDs
@@ -961,7 +902,7 @@ func (o *orm) findJob(jb *Job, col string, arg interface{}, qopts ...pg.QOpt) er
if err != nil {
return errors.Wrap(err, "findJob failed")
}
- return o.LoadEnvConfigVars(jb)
+ return nil
}
func (o *orm) FindJobIDsWithBridge(name string) (jids []int32, err error) {
@@ -1051,11 +992,13 @@ func (o *orm) loadPipelineRunIDs(jobID *int32, offset, limit int, tx pg.Queryer)
// range minID <-> maxID.
for n := int64(1000); maxID > 0 && len(ids) < limit; n *= 2 {
+ var batch []int64
minID := maxID - n
- if err = tx.Select(&ids, stmt, offset, limit-len(ids), minID, maxID); err != nil {
+ if err = tx.Select(&batch, stmt, offset, limit-len(ids), minID, maxID); err != nil {
err = errors.Wrap(err, "error loading runs")
return
}
+ ids = append(ids, batch...)
if offset > 0 {
if len(ids) > 0 {
// If we're already receiving rows back, then we no longer need an offset
@@ -1204,13 +1147,6 @@ func (o *orm) FindJobsByPipelineSpecIDs(ids []int32) ([]Job, error) {
if err != nil {
return err
}
- for i := range jbs {
- err = o.LoadEnvConfigVars(&jbs[i])
- //We must return the jobs even if the chainID is disabled
- if err != nil && !errors.Is(err, chains.ErrNoSuchChainID) {
- return err
- }
- }
return nil
})
diff --git a/core/services/job/orm_test.go b/core/services/job/orm_test.go
index a6986d7fb32..41d02dba060 100644
--- a/core/services/job/orm_test.go
+++ b/core/services/job/orm_test.go
@@ -6,10 +6,9 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/bridges"
- "github.com/smartcontractkit/chainlink/v2/core/chains/evm"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/evmtest"
"github.com/smartcontractkit/chainlink/v2/core/logger"
@@ -19,34 +18,37 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/keystore"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
"github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
+ "github.com/smartcontractkit/chainlink/v2/core/store/models"
)
-func NewTestORM(t *testing.T, db *sqlx.DB, legacyChains evm.LegacyChainContainer, pipelineORM pipeline.ORM, bridgeORM bridges.ORM, keyStore keystore.Master, cfg pg.QConfig) job.ORM {
- o := job.NewORM(db, legacyChains, pipelineORM, bridgeORM, keyStore, logger.TestLogger(t), cfg)
+func NewTestORM(t *testing.T, db *sqlx.DB, pipelineORM pipeline.ORM, bridgeORM bridges.ORM, keyStore keystore.Master, cfg pg.QConfig) job.ORM {
+ o := job.NewORM(db, pipelineORM, bridgeORM, keyStore, logger.TestLogger(t), cfg)
t.Cleanup(func() { o.Close() })
return o
}
-func TestLoadEnvConfigVarsLocalOCR(t *testing.T) {
+func TestLoadConfigVarsLocalOCR(t *testing.T) {
t.Parallel()
config := configtest.NewTestGeneralConfig(t)
chainConfig := evmtest.NewChainScopedConfig(t, config)
jobSpec := &job.OCROracleSpec{}
- jobSpec = job.LoadEnvConfigVarsLocalOCR(chainConfig.EVM().OCR(), *jobSpec, chainConfig.OCR())
+ jobSpec = job.LoadConfigVarsLocalOCR(chainConfig.EVM().OCR(), *jobSpec, chainConfig.OCR())
- require.True(t, jobSpec.ObservationTimeoutEnv)
- require.True(t, jobSpec.BlockchainTimeoutEnv)
- require.True(t, jobSpec.ContractConfigTrackerSubscribeIntervalEnv)
- require.True(t, jobSpec.ContractConfigTrackerPollIntervalEnv)
- require.True(t, jobSpec.ContractConfigConfirmationsEnv)
- require.True(t, jobSpec.DatabaseTimeoutEnv)
- require.True(t, jobSpec.ObservationGracePeriodEnv)
- require.True(t, jobSpec.ContractTransmitterTransmitTimeoutEnv)
+ require.Equal(t, models.Interval(chainConfig.OCR().ObservationTimeout()), jobSpec.ObservationTimeout)
+ require.Equal(t, models.Interval(chainConfig.OCR().BlockchainTimeout()), jobSpec.BlockchainTimeout)
+ require.Equal(t, models.Interval(chainConfig.OCR().ContractSubscribeInterval()), jobSpec.ContractConfigTrackerSubscribeInterval)
+ require.Equal(t, models.Interval(chainConfig.OCR().ContractPollInterval()), jobSpec.ContractConfigTrackerPollInterval)
+ require.Equal(t, chainConfig.OCR().CaptureEATelemetry(), jobSpec.CaptureEATelemetry)
+
+ require.Equal(t, chainConfig.EVM().OCR().ContractConfirmations(), jobSpec.ContractConfigConfirmations)
+ require.Equal(t, models.Interval(chainConfig.EVM().OCR().DatabaseTimeout()), *jobSpec.DatabaseTimeout)
+ require.Equal(t, models.Interval(chainConfig.EVM().OCR().ObservationGracePeriod()), *jobSpec.ObservationGracePeriod)
+ require.Equal(t, models.Interval(chainConfig.EVM().OCR().ContractTransmitterTransmitTimeout()), *jobSpec.ContractTransmitterTransmitTimeout)
}
-func TestLoadEnvConfigVarsDR(t *testing.T) {
+func TestSetDRMinIncomingConfirmations(t *testing.T) {
t.Parallel()
config := configtest.NewGeneralConfig(t, func(c *chainlink.Config, s *chainlink.Secrets) {
@@ -59,15 +61,14 @@ func TestLoadEnvConfigVarsDR(t *testing.T) {
MinIncomingConfirmations: clnull.Uint32From(10),
}
- drs10 := job.LoadEnvConfigVarsDR(chainConfig.EVM(), jobSpec10)
- assert.True(t, drs10.MinIncomingConfirmationsEnv)
+ drs10 := job.SetDRMinIncomingConfirmations(chainConfig.EVM().MinIncomingConfirmations(), jobSpec10)
+ assert.Equal(t, uint32(100), drs10.MinIncomingConfirmations.Uint32)
jobSpec200 := job.DirectRequestSpec{
MinIncomingConfirmations: clnull.Uint32From(200),
}
- drs200 := job.LoadEnvConfigVarsDR(chainConfig.EVM(), jobSpec200)
- assert.False(t, drs200.MinIncomingConfirmationsEnv)
+ drs200 := job.SetDRMinIncomingConfirmations(chainConfig.EVM().MinIncomingConfirmations(), jobSpec200)
assert.True(t, drs200.MinIncomingConfirmations.Valid)
assert.Equal(t, uint32(200), drs200.MinIncomingConfirmations.Uint32)
}
diff --git a/core/services/job/runner_integration_test.go b/core/services/job/runner_integration_test.go
index c0fff1e560a..deb4bff6b08 100644
--- a/core/services/job/runner_integration_test.go
+++ b/core/services/job/runner_integration_test.go
@@ -85,7 +85,7 @@ func TestRunner(t *testing.T) {
c := clhttptest.NewTestLocalOnlyHTTPClient()
runner := pipeline.NewRunner(pipelineORM, btORM, config.JobPipeline(), config.WebServer(), legacyChains, nil, nil, logger.TestLogger(t), c, c)
- jobORM := NewTestORM(t, db, legacyChains, pipelineORM, btORM, keyStore, config.Database())
+ jobORM := NewTestORM(t, db, pipelineORM, btORM, keyStore, config.Database())
_, placeHolderAddress := cltest.MustInsertRandomKey(t, keyStore.Eth())
@@ -428,45 +428,7 @@ answer1 [type=median index=0];
}
})
- t.Run("missing required env vars", func(t *testing.T) {
- s := `
- type = "offchainreporting"
- schemaVersion = 1
- contractAddress = "%s"
- isBootstrapPeer = false
- evmChainID = "0"
- observationSource = """
-ds1 [type=http method=GET url="%s" allowunrestrictednetworkaccess="true" %s];
-ds1_parse [type=jsonparse path="USD" lax=true];
-ds1 -> ds1_parse;
-"""
-`
- s = fmt.Sprintf(s, cltest.NewEIP55Address(), "http://blah.com", "")
- jb, err := ocr.ValidatedOracleSpecToml(legacyChains, s)
- require.NoError(t, err)
- err = toml.Unmarshal([]byte(s), &jb)
- require.NoError(t, err)
- jb.MaxTaskDuration = models.Interval(cltest.MustParseDuration(t, "1s"))
- err = jobORM.CreateJob(&jb)
- require.NoError(t, err)
- sd := ocr.NewDelegate(
- db,
- jobORM,
- keyStore,
- nil,
- nil,
- nil,
- legacyChains,
- logger.TestLogger(t),
- config.Database(),
- srvctest.Start(t, utils.NewMailboxMonitor(t.Name())),
- )
- _, err = sd.ServicesForSpec(jb)
- // We expect this to fail as neither the required vars are not set either via the env nor the job itself.
- require.Error(t, err)
- })
-
- t.Run("use env for minimal bootstrap", func(t *testing.T) {
+ t.Run("minimal bootstrap", func(t *testing.T) {
s := `
type = "offchainreporting"
schemaVersion = 1
@@ -504,53 +466,6 @@ ds1 -> ds1_parse;
require.NoError(t, err)
})
- t.Run("use env for minimal non-bootstrap", func(t *testing.T) {
- s := `
- type = "offchainreporting"
- schemaVersion = 1
- contractAddress = "%s"
- isBootstrapPeer = false
- observationTimeout = "15s"
- evmChainID = "0"
- observationSource = """
-ds1 [type=http method=GET url="%s" allowunrestrictednetworkaccess="true" %s];
-ds1_parse [type=jsonparse path="USD" lax=true];
-ds1 -> ds1_parse;
-"""
-`
- s = fmt.Sprintf(s, cltest.NewEIP55Address(), "http://blah.com", "")
- jb, err := ocr.ValidatedOracleSpecToml(legacyChains, s)
- require.NoError(t, err)
- err = toml.Unmarshal([]byte(s), &jb)
- require.NoError(t, err)
- jb.MaxTaskDuration = models.Interval(cltest.MustParseDuration(t, "1s"))
- err = jobORM.CreateJob(&jb)
- require.NoError(t, err)
- // Assert the override
- assert.Equal(t, jb.OCROracleSpec.ObservationTimeout, models.Interval(cltest.MustParseDuration(t, "15s")))
- // Assert that this is default
- assert.Equal(t, models.Interval(20000000000), jb.OCROracleSpec.BlockchainTimeout)
- assert.Equal(t, models.Interval(cltest.MustParseDuration(t, "1s")), jb.MaxTaskDuration)
-
- lggr := logger.TestLogger(t)
- pw := ocrcommon.NewSingletonPeerWrapper(keyStore, config.P2P(), config.OCR(), config.Database(), db, lggr)
- require.NoError(t, pw.Start(testutils.Context(t)))
- sd := ocr.NewDelegate(
- db,
- jobORM,
- keyStore,
- nil,
- pw,
- monitoringEndpoint,
- legacyChains,
- lggr,
- config.Database(),
- srvctest.Start(t, utils.NewMailboxMonitor(t.Name())),
- )
- _, err = sd.ServicesForSpec(jb)
- require.NoError(t, err)
- })
-
t.Run("test min non-bootstrap", func(t *testing.T) {
kb, err := keyStore.OCR().Create()
require.NoError(t, err)
@@ -613,6 +528,74 @@ ds1 -> ds1_parse;
require.NoError(t, err)
})
+ t.Run("test enhanced telemetry service creation", func(t *testing.T) {
+ testCases := []struct {
+ jbCaptureEATelemetry bool
+ specCaptureEATelemetry bool
+ expected bool
+ }{{false, false, false},
+ {true, false, false},
+ {false, true, true},
+ {true, true, true},
+ }
+
+ for _, tc := range testCases {
+
+ config = configtest.NewGeneralConfig(t, func(c *chainlink.Config, s *chainlink.Secrets) {
+ c.P2P.V1.Enabled = ptr(true)
+ c.OCR.CaptureEATelemetry = ptr(tc.specCaptureEATelemetry)
+ })
+
+ relayExtenders = evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, Client: ethClient, GeneralConfig: config, KeyStore: ethKeyStore})
+ legacyChains = evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
+
+ kb, err := keyStore.OCR().Create()
+ require.NoError(t, err)
+
+ s := fmt.Sprintf(minimalNonBootstrapTemplate, cltest.NewEIP55Address(), transmitterAddress.Hex(), kb.ID(), "http://blah.com", "")
+ jb, err := ocr.ValidatedOracleSpecToml(legacyChains, s)
+ require.NoError(t, err)
+ err = toml.Unmarshal([]byte(s), &jb)
+ require.NoError(t, err)
+
+ jb.MaxTaskDuration = models.Interval(cltest.MustParseDuration(t, "1s"))
+ err = jobORM.CreateJob(&jb)
+ require.NoError(t, err)
+ assert.Equal(t, jb.MaxTaskDuration, models.Interval(cltest.MustParseDuration(t, "1s")))
+
+ lggr := logger.TestLogger(t)
+ pw := ocrcommon.NewSingletonPeerWrapper(keyStore, config.P2P(), config.OCR(), config.Database(), db, lggr)
+ require.NoError(t, pw.Start(testutils.Context(t)))
+ sd := ocr.NewDelegate(
+ db,
+ jobORM,
+ keyStore,
+ nil,
+ pw,
+ monitoringEndpoint,
+ legacyChains,
+ lggr,
+ config.Database(),
+ srvctest.Start(t, utils.NewMailboxMonitor(t.Name())),
+ )
+
+ jb.OCROracleSpec.CaptureEATelemetry = tc.jbCaptureEATelemetry
+ services, err := sd.ServicesForSpec(jb)
+ require.NoError(t, err)
+
+ enhancedTelemetryServiceCreated := false
+ for _, service := range services {
+ _, ok := service.(*ocrcommon.EnhancedTelemetryService[ocrcommon.EnhancedTelemetryData])
+ enhancedTelemetryServiceCreated = ok
+ if enhancedTelemetryServiceCreated {
+ break
+ }
+ }
+
+ require.Equal(t, tc.expected, enhancedTelemetryServiceCreated)
+ }
+ })
+
t.Run("test job spec error is created", func(t *testing.T) {
// Create a keystore with an ocr key bundle and p2p key.
kb, err := keyStore.OCR().Create()
@@ -765,9 +748,6 @@ func TestRunner_Success_Callback_AsyncJob(t *testing.T) {
})
app := cltest.NewApplicationWithConfig(t, cfg, ethClient, cltest.UseRealExternalInitiatorManager)
- keyStore := cltest.NewKeyStore(t, app.GetSqlxDB(), pgtest.NewQConfig(true))
- relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: app.GetSqlxDB(), Client: ethClient, GeneralConfig: cfg, KeyStore: keyStore.Eth()})
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
require.NoError(t, app.Start(testutils.Context(t)))
var (
@@ -898,7 +878,7 @@ func TestRunner_Success_Callback_AsyncJob(t *testing.T) {
pipelineORM := pipeline.NewORM(app.GetSqlxDB(), logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns())
bridgesORM := bridges.NewORM(app.GetSqlxDB(), logger.TestLogger(t), cfg.Database())
- jobORM := NewTestORM(t, app.GetSqlxDB(), legacyChains, pipelineORM, bridgesORM, app.KeyStore, cfg.Database())
+ jobORM := NewTestORM(t, app.GetSqlxDB(), pipelineORM, bridgesORM, app.KeyStore, cfg.Database())
// Trigger v2/resume
select {
@@ -947,10 +927,6 @@ func TestRunner_Error_Callback_AsyncJob(t *testing.T) {
})
app := cltest.NewApplicationWithConfig(t, cfg, ethClient, cltest.UseRealExternalInitiatorManager)
- keyStore := cltest.NewKeyStore(t, app.GetSqlxDB(), pgtest.NewQConfig(true))
- relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: app.GetSqlxDB(), Client: ethClient, GeneralConfig: cfg, KeyStore: keyStore.Eth()})
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
-
require.NoError(t, app.Start(testutils.Context(t)))
var (
@@ -1079,7 +1055,7 @@ func TestRunner_Error_Callback_AsyncJob(t *testing.T) {
pipelineORM := pipeline.NewORM(app.GetSqlxDB(), logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns())
bridgesORM := bridges.NewORM(app.GetSqlxDB(), logger.TestLogger(t), cfg.Database())
- jobORM := NewTestORM(t, app.GetSqlxDB(), legacyChains, pipelineORM, bridgesORM, app.KeyStore, cfg.Database())
+ jobORM := NewTestORM(t, app.GetSqlxDB(), pipelineORM, bridgesORM, app.KeyStore, cfg.Database())
// Trigger v2/resume
select {
diff --git a/core/services/job/spawner.go b/core/services/job/spawner.go
index b2a8dad68f0..03ee8cee13a 100644
--- a/core/services/job/spawner.go
+++ b/core/services/job/spawner.go
@@ -9,7 +9,7 @@ import (
pkgerrors "github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
relayservices "github.com/smartcontractkit/chainlink-relay/pkg/services"
diff --git a/core/services/job/spawner_test.go b/core/services/job/spawner_test.go
index be4a480a6c9..cfe646d8660 100644
--- a/core/services/job/spawner_test.go
+++ b/core/services/job/spawner_test.go
@@ -9,10 +9,11 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink-relay/pkg/loop"
"github.com/smartcontractkit/chainlink-relay/pkg/services"
+
"github.com/smartcontractkit/chainlink/v2/core/bridges"
mocklp "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
@@ -97,7 +98,7 @@ func TestSpawner_CreateJobDeleteJob(t *testing.T) {
legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
t.Run("should respect its dependents", func(t *testing.T) {
lggr := logger.TestLogger(t)
- orm := NewTestORM(t, db, legacyChains, pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()), bridges.NewORM(db, lggr, config.Database()), keyStore, config.Database())
+ orm := NewTestORM(t, db, pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()), bridges.NewORM(db, lggr, config.Database()), keyStore, config.Database())
a := utils.NewDependentAwaiter()
a.AddDependents(1)
spawner := job.NewSpawner(orm, config.Database(), noopChecker{}, map[job.Type]job.Delegate{}, db, lggr, []utils.DependentAwaiter{a})
@@ -120,7 +121,7 @@ func TestSpawner_CreateJobDeleteJob(t *testing.T) {
jobB := makeOCRJobSpec(t, address, bridge.Name.String(), bridge2.Name.String())
lggr := logger.TestLogger(t)
- orm := NewTestORM(t, db, legacyChains, pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()), bridges.NewORM(db, lggr, config.Database()), keyStore, config.Database())
+ orm := NewTestORM(t, db, pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()), bridges.NewORM(db, lggr, config.Database()), keyStore, config.Database())
eventuallyA := cltest.NewAwaiter()
serviceA1 := mocks.NewServiceCtx(t)
@@ -185,7 +186,7 @@ func TestSpawner_CreateJobDeleteJob(t *testing.T) {
serviceA2.On("Start", mock.Anything).Return(nil).Once().Run(func(mock.Arguments) { eventually.ItHappened() })
lggr := logger.TestLogger(t)
- orm := NewTestORM(t, db, legacyChains, pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()), bridges.NewORM(db, lggr, config.Database()), keyStore, config.Database())
+ orm := NewTestORM(t, db, pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()), bridges.NewORM(db, lggr, config.Database()), keyStore, config.Database())
mailMon := srvctest.Start(t, utils.NewMailboxMonitor(t.Name()))
d := ocr.NewDelegate(nil, orm, nil, nil, nil, monitoringEndpoint, legacyChains, logger.TestLogger(t), config.Database(), mailMon)
delegateA := &delegate{jobA.Type, []job.ServiceCtx{serviceA1, serviceA2}, 0, nil, d}
@@ -219,7 +220,7 @@ func TestSpawner_CreateJobDeleteJob(t *testing.T) {
serviceA2.On("Start", mock.Anything).Return(nil).Once().Run(func(mock.Arguments) { eventuallyStart.ItHappened() })
lggr := logger.TestLogger(t)
- orm := NewTestORM(t, db, legacyChains, pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()), bridges.NewORM(db, lggr, config.Database()), keyStore, config.Database())
+ orm := NewTestORM(t, db, pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()), bridges.NewORM(db, lggr, config.Database()), keyStore, config.Database())
mailMon := srvctest.Start(t, utils.NewMailboxMonitor(t.Name()))
d := ocr.NewDelegate(nil, orm, nil, nil, nil, monitoringEndpoint, legacyChains, logger.TestLogger(t), config.Database(), mailMon)
delegateA := &delegate{jobA.Type, []job.ServiceCtx{serviceA1, serviceA2}, 0, nil, d}
@@ -297,7 +298,7 @@ func TestSpawner_CreateJobDeleteJob(t *testing.T) {
jobOCR2VRF := makeOCR2VRFJobSpec(t, keyStore, config, address, chain.ID(), 2)
- orm := NewTestORM(t, db, legacyChains, pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()), bridges.NewORM(db, lggr, config.Database()), keyStore, config.Database())
+ orm := NewTestORM(t, db, pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()), bridges.NewORM(db, lggr, config.Database()), keyStore, config.Database())
mailMon := srvctest.Start(t, utils.NewMailboxMonitor(t.Name()))
processConfig := plugins.NewRegistrarConfig(loop.GRPCOpts{}, func(name string) (*plugins.RegisteredLoop, error) { return nil, nil })
diff --git a/core/services/keeper/delegate.go b/core/services/keeper/delegate.go
index 6c68203f843..6d413624969 100644
--- a/core/services/keeper/delegate.go
+++ b/core/services/keeper/delegate.go
@@ -3,7 +3,7 @@ package keeper
import (
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm"
"github.com/smartcontractkit/chainlink/v2/core/logger"
diff --git a/core/services/keeper/integration_test.go b/core/services/keeper/integration_test.go
index 39431063bcd..f76ef935741 100644
--- a/core/services/keeper/integration_test.go
+++ b/core/services/keeper/integration_test.go
@@ -236,7 +236,7 @@ func TestKeeperEthIntegration(t *testing.T) {
backend.Commit()
// setup app
- config, db := heavyweight.FullTestDBV2(t, fmt.Sprintf("keeper_eth_integration_%s", test.name), func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
c.EVM[0].GasEstimator.EIP1559DynamicFees = &test.eip1559
c.Keeper.MaxGracePeriod = ptr[int64](0) // avoid waiting to re-submit for upkeeps
c.Keeper.Registry.SyncInterval = models.MustNewDuration(24 * time.Hour) // disable full sync ticker for test
@@ -393,7 +393,7 @@ func TestKeeperForwarderEthIntegration(t *testing.T) {
backend.Commit()
// setup app
- config, db := heavyweight.FullTestDBV2(t, "keeper_forwarder_flow", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
c.Feature.LogPoller = ptr(true)
c.EVM[0].GasEstimator.EIP1559DynamicFees = ptr(true)
c.Keeper.MaxGracePeriod = ptr[int64](0) // avoid waiting to re-submit for upkeeps
@@ -540,7 +540,7 @@ func TestMaxPerformDataSize(t *testing.T) {
backend.Commit()
// setup app
- config, db := heavyweight.FullTestDBV2(t, "keeper_max_perform_data_test", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
c.Keeper.MaxGracePeriod = ptr[int64](0) // avoid waiting to re-submit for upkeeps
c.Keeper.Registry.SyncInterval = models.MustNewDuration(24 * time.Hour) // disable full sync ticker for test
c.Keeper.Registry.MaxPerformDataSize = ptr(uint32(maxPerformDataSize)) // set the max perform data size
diff --git a/core/services/keeper/orm.go b/core/services/keeper/orm.go
index e281d610644..91883f8056c 100644
--- a/core/services/keeper/orm.go
+++ b/core/services/keeper/orm.go
@@ -3,9 +3,9 @@ package keeper
import (
"math/rand"
+ "github.com/jmoiron/sqlx"
"github.com/lib/pq"
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ethkey"
diff --git a/core/services/keeper/orm_test.go b/core/services/keeper/orm_test.go
index d990effa103..d67baa09a06 100644
--- a/core/services/keeper/orm_test.go
+++ b/core/services/keeper/orm_test.go
@@ -12,7 +12,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
evmconfig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config"
"github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
diff --git a/core/services/keeper/registry_synchronizer_helper_test.go b/core/services/keeper/registry_synchronizer_helper_test.go
index 63dc6343535..966366b1069 100644
--- a/core/services/keeper/registry_synchronizer_helper_test.go
+++ b/core/services/keeper/registry_synchronizer_helper_test.go
@@ -8,7 +8,7 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
evmclimocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/log"
diff --git a/core/services/keeper/upkeep_executer_test.go b/core/services/keeper/upkeep_executer_test.go
index 7f9698435f8..32b1d2c191d 100644
--- a/core/services/keeper/upkeep_executer_test.go
+++ b/core/services/keeper/upkeep_executer_test.go
@@ -13,7 +13,7 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/assets"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm"
diff --git a/core/services/keystore/helpers_test.go b/core/services/keystore/helpers_test.go
index 13627dd0231..d0b2a21ab38 100644
--- a/core/services/keystore/helpers_test.go
+++ b/core/services/keystore/helpers_test.go
@@ -5,7 +5,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ethkey"
diff --git a/core/services/keystore/keystoretest.go b/core/services/keystore/keystoretest.go
index 0b5ce4e0057..6efc8e76bcf 100644
--- a/core/services/keystore/keystoretest.go
+++ b/core/services/keystore/keystoretest.go
@@ -4,7 +4,7 @@ import (
"errors"
"sync"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
diff --git a/core/services/keystore/master.go b/core/services/keystore/master.go
index fb28202b527..05f19495f9d 100644
--- a/core/services/keystore/master.go
+++ b/core/services/keystore/master.go
@@ -8,7 +8,7 @@ import (
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/cosmoskey"
diff --git a/core/services/keystore/orm.go b/core/services/keystore/orm.go
index 6f612105ea9..3d75d6f2369 100644
--- a/core/services/keystore/orm.go
+++ b/core/services/keystore/orm.go
@@ -7,8 +7,8 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ethkey"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
+ "github.com/jmoiron/sqlx"
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
)
func NewORM(db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig) ksORM {
diff --git a/core/services/ocr/config.go b/core/services/ocr/config.go
index e1bc997f269..53ec9f9cea9 100644
--- a/core/services/ocr/config.go
+++ b/core/services/ocr/config.go
@@ -14,7 +14,7 @@ type Config interface {
}
func toLocalConfig(cfg ValidationConfig, evmOcrConfig evmconfig.OCR, insecureCfg insecureConfig, spec job.OCROracleSpec, ocrConfig job.OCRConfig) ocrtypes.LocalConfig {
- concreteSpec := job.LoadEnvConfigVarsLocalOCR(evmOcrConfig, spec, ocrConfig)
+ concreteSpec := job.LoadConfigVarsLocalOCR(evmOcrConfig, spec, ocrConfig)
lc := ocrtypes.LocalConfig{
BlockchainTimeout: concreteSpec.BlockchainTimeout.Duration(),
ContractConfigConfirmations: concreteSpec.ContractConfigConfirmations,
diff --git a/core/services/ocr/contract_tracker.go b/core/services/ocr/contract_tracker.go
index c5f3e431e45..db19bdd4f0a 100644
--- a/core/services/ocr/contract_tracker.go
+++ b/core/services/ocr/contract_tracker.go
@@ -14,13 +14,14 @@ import (
gethTypes "github.com/ethereum/go-ethereum/core/types"
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/libocr/gethwrappers/offchainaggregator"
"github.com/smartcontractkit/libocr/offchainreporting/confighelper"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting/types"
"github.com/smartcontractkit/chainlink-relay/pkg/services"
+
evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
httypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker/types"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/log"
@@ -401,7 +402,7 @@ func (t *OCRContractTracker) LatestBlockHeight(ctx context.Context) (blockheight
// care about the block height; we have no way of getting the L1 block
// height anyway
return 0, nil
- case "", config.ChainArbitrum, config.ChainCelo, config.ChainOptimismBedrock, config.ChainXDai:
+ case "", config.ChainArbitrum, config.ChainCelo, config.ChainOptimismBedrock, config.ChainXDai, config.ChainKroma, config.ChainWeMix, config.ChainZkSync:
// continue
}
latestBlockHeight := t.getLatestBlockHeight()
diff --git a/core/services/ocr/database.go b/core/services/ocr/database.go
index cd8e584e39a..524dfa0e7bb 100644
--- a/core/services/ocr/database.go
+++ b/core/services/ocr/database.go
@@ -11,9 +11,9 @@ import (
"github.com/pkg/errors"
"go.uber.org/multierr"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/libocr/gethwrappers/offchainaggregator"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting/types"
- "github.com/smartcontractkit/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
diff --git a/core/services/ocr/delegate.go b/core/services/ocr/delegate.go
index dee349a9a0d..0559469abb4 100644
--- a/core/services/ocr/delegate.go
+++ b/core/services/ocr/delegate.go
@@ -9,7 +9,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
relaylogger "github.com/smartcontractkit/chainlink-relay/pkg/logger"
@@ -95,7 +95,7 @@ func (d *Delegate) ServicesForSpec(jb job.Job) (services []job.ServiceCtx, err e
if err != nil {
return nil, err
}
- concreteSpec, err := job.LoadEnvConfigVarsOCR(chain.Config().EVM().OCR(), chain.Config().OCR(), *jb.OCROracleSpec)
+ concreteSpec, err := job.LoadConfigVarsOCR(chain.Config().EVM().OCR(), chain.Config().OCR(), *jb.OCROracleSpec)
if err != nil {
return nil, err
}
@@ -295,10 +295,13 @@ func (d *Delegate) ServicesForSpec(jb job.Job) (services []job.ServiceCtx, err e
configOverrider = configOverriderService
}
+ jb.OCROracleSpec.CaptureEATelemetry = chain.Config().OCR().CaptureEATelemetry()
enhancedTelemChan := make(chan ocrcommon.EnhancedTelemetryData, 100)
if ocrcommon.ShouldCollectEnhancedTelemetry(&jb) {
- enhancedTelemService := ocrcommon.NewEnhancedTelemetryService(&jb, enhancedTelemChan, make(chan struct{}), d.monitoringEndpointGen.GenMonitoringEndpoint(concreteSpec.ContractAddress.String(), synchronization.EnhancedEA, "EVM", chain.ID().String()), lggr.Named("EnhancedTelemetry"))
+ enhancedTelemService := ocrcommon.NewEnhancedTelemetryService(&jb, enhancedTelemChan, make(chan struct{}), d.monitoringEndpointGen.GenMonitoringEndpoint("EVM", chain.ID().String(), concreteSpec.ContractAddress.String(), synchronization.EnhancedEA), lggr.Named("EnhancedTelemetry"))
services = append(services, enhancedTelemService)
+ } else {
+ lggr.Infow("Enhanced telemetry is disabled for job", "job", jb.Name)
}
oracle, err := ocr.NewOracle(ocr.OracleArgs{
@@ -319,7 +322,7 @@ func (d *Delegate) ServicesForSpec(jb job.Job) (services []job.ServiceCtx, err e
Logger: ocrLogger,
V1Bootstrappers: v1BootstrapPeers,
V2Bootstrappers: v2Bootstrappers,
- MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(concreteSpec.ContractAddress.String(), synchronization.OCR, "EVM", chain.ID().String()),
+ MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint("EVM", chain.ID().String(), concreteSpec.ContractAddress.String(), synchronization.OCR),
ConfigOverrider: configOverrider,
})
if err != nil {
diff --git a/core/services/ocr/helpers_internal_test.go b/core/services/ocr/helpers_internal_test.go
index 9a1f887986e..57b669ef401 100644
--- a/core/services/ocr/helpers_internal_test.go
+++ b/core/services/ocr/helpers_internal_test.go
@@ -3,7 +3,7 @@ package ocr
import (
"testing"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
"github.com/smartcontractkit/chainlink/v2/core/logger"
diff --git a/core/services/ocr2/database.go b/core/services/ocr2/database.go
index 7061ad0452f..5591f33fd40 100644
--- a/core/services/ocr2/database.go
+++ b/core/services/ocr2/database.go
@@ -6,11 +6,11 @@ import (
"encoding/binary"
"time"
+ "github.com/jmoiron/sqlx"
"github.com/lib/pq"
"github.com/pkg/errors"
ocrcommon "github.com/smartcontractkit/libocr/commontypes"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- "github.com/smartcontractkit/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
diff --git a/core/services/ocr2/database_test.go b/core/services/ocr2/database_test.go
index aabb2b33a79..b70ac629da1 100644
--- a/core/services/ocr2/database_test.go
+++ b/core/services/ocr2/database_test.go
@@ -10,8 +10,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "github.com/jmoiron/sqlx"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- "github.com/smartcontractkit/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
diff --git a/core/services/ocr2/delegate.go b/core/services/ocr2/delegate.go
index efb6f04fd3d..bbb3b5cf7ae 100644
--- a/core/services/ocr2/delegate.go
+++ b/core/services/ocr2/delegate.go
@@ -8,11 +8,13 @@ import (
"log"
"time"
+ "google.golang.org/grpc"
"gopkg.in/guregu/null.v4"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/libocr/commontypes"
libocr2 "github.com/smartcontractkit/libocr/offchainreporting2plus"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
@@ -26,10 +28,10 @@ import (
"github.com/smartcontractkit/ocr2vrf/altbn_128"
dkgpkg "github.com/smartcontractkit/ocr2vrf/dkg"
"github.com/smartcontractkit/ocr2vrf/ocr2vrf"
- "github.com/smartcontractkit/sqlx"
relaylogger "github.com/smartcontractkit/chainlink-relay/pkg/logger"
"github.com/smartcontractkit/chainlink-relay/pkg/loop"
+ "github.com/smartcontractkit/chainlink-relay/pkg/loop/reportingplugins"
"github.com/smartcontractkit/chainlink-relay/pkg/types"
"github.com/smartcontractkit/chainlink/v2/core/bridges"
@@ -43,6 +45,7 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/dkg"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/dkg/persistence"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/functions"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/generic"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/median"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/mercury"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper"
@@ -70,7 +73,28 @@ import (
"github.com/smartcontractkit/chainlink/v2/plugins"
)
-var ErrJobSpecNoRelayer = errors.New("OCR2 job spec could not get relayer id")
+type ErrJobSpecNoRelayer struct {
+ PluginName string
+ Err error
+}
+
+func (e ErrJobSpecNoRelayer) Unwrap() error { return e.Err }
+
+func (e ErrJobSpecNoRelayer) Error() string {
+ return fmt.Sprintf("%s services: OCR2 job spec could not get relayer ID: %s", e.PluginName, e.Err)
+}
+
+type ErrRelayNotEnabled struct {
+ PluginName string
+ Relay string
+ Err error
+}
+
+func (e ErrRelayNotEnabled) Unwrap() error { return e.Err }
+
+func (e ErrRelayNotEnabled) Error() string {
+ return fmt.Sprintf("%s services: failed to get relay %s, is it enabled? %s", e.PluginName, e.Relay, e.Err)
+}
type RelayGetter interface {
Get(id relay.ID) (loop.Relayer, error)
@@ -245,7 +269,7 @@ func (d *Delegate) OnDeleteJob(jb job.Job, q pg.Queryer) error {
rid, err := spec.RelayID()
if err != nil {
- d.lggr.Errorw("DeleteJob: "+ErrJobSpecNoRelayer.Error(), "err", err)
+ d.lggr.Errorw("DeleteJob", "err", ErrJobSpecNoRelayer{Err: err, PluginName: string(spec.PluginType)})
return nil
}
// we only have clean to do for the EVM
@@ -337,7 +361,7 @@ func (d *Delegate) ServicesForSpec(jb job.Job) ([]job.ServiceCtx, error) {
rid, err := spec.RelayID()
if err != nil {
- return nil, fmt.Errorf("ServicesForSpec: %w: %w", ErrJobSpecNoRelayer, err)
+ return nil, ErrJobSpecNoRelayer{Err: err, PluginName: string(spec.PluginType)}
}
if rid.Network == relay.EVM {
@@ -428,6 +452,9 @@ func (d *Delegate) ServicesForSpec(jb job.Job) ([]job.ServiceCtx, error) {
s4PluginDB := NewDB(d.db, spec.ID, s4PluginId, lggr, d.cfg.Database())
return d.newServicesOCR2Functions(lggr, jb, runResults, bootstrapPeers, kb, ocrDB, thresholdPluginDB, s4PluginDB, lc, ocrLogger)
+ case types.GenericPlugin:
+ return d.newServicesGenericPlugin(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc, ocrLogger)
+
default:
return nil, errors.Errorf("plugin type %s not supported", spec.PluginType)
}
@@ -473,6 +500,142 @@ func GetEVMEffectiveTransmitterID(jb *job.Job, chain evm.Chain, lggr logger.Suga
return spec.TransmitterID.String, nil
}
+type connProvider interface {
+ ClientConn() grpc.ClientConnInterface
+}
+
+func defaultPathFromPluginName(pluginName string) string {
+ // By default we install the command on the system path, in the
+ // form: `chainlink-`
+ return fmt.Sprintf("chainlink-%s", pluginName)
+}
+
+func (d *Delegate) newServicesGenericPlugin(
+ ctx context.Context,
+ lggr logger.SugaredLogger,
+ jb job.Job,
+ bootstrapPeers []commontypes.BootstrapperLocator,
+ kb ocr2key.KeyBundle,
+ ocrDB *db,
+ lc ocrtypes.LocalConfig,
+ ocrLogger commontypes.Logger,
+) (srvs []job.ServiceCtx, err error) {
+ spec := jb.OCR2OracleSpec
+
+ p := validate.OCR2GenericPluginConfig{}
+ err = json.Unmarshal(spec.PluginConfig.Bytes(), &p)
+ if err != nil {
+ return nil, err
+ }
+ cconf := p.CoreConfig
+
+ command := cconf.Command
+ if command == "" {
+ command = defaultPathFromPluginName(cconf.PluginName)
+ }
+
+ // NOTE: we don't need to validate this config, since that happens as part of creating the job.
+ // See: validate/validate.go's `validateSpec`.
+
+ rid, err := spec.RelayID()
+ if err != nil {
+ return nil, ErrJobSpecNoRelayer{PluginName: cconf.PluginName, Err: err}
+ }
+
+ relayer, err := d.RelayGetter.Get(rid)
+ if err != nil {
+ return nil, ErrRelayNotEnabled{Err: err, Relay: spec.Relay, PluginName: p.CoreConfig.PluginName}
+ }
+
+ provider, err := relayer.NewPluginProvider(ctx, types.RelayArgs{
+ ExternalJobID: jb.ExternalJobID,
+ JobID: spec.ID,
+ ContractID: spec.ContractID,
+ New: d.isNewlyCreatedJob,
+ RelayConfig: spec.RelayConfig.Bytes(),
+ ProviderType: cconf.ProviderType,
+ }, types.PluginArgs{
+ TransmitterID: spec.TransmitterID.String,
+ PluginConfig: spec.PluginConfig.Bytes(),
+ })
+ if err != nil {
+ return nil, err
+ }
+ srvs = append(srvs, provider)
+
+ oracleEndpoint := d.monitoringEndpointGen.GenMonitoringEndpoint(
+ rid.Network,
+ rid.ChainID,
+ spec.ContractID,
+ synchronization.TelemetryType(cconf.TelemetryType),
+ )
+ oracleArgs := libocr2.OCR2OracleArgs{
+ BinaryNetworkEndpointFactory: d.peerWrapper.Peer2,
+ V2Bootstrappers: bootstrapPeers,
+ Database: ocrDB,
+ LocalConfig: lc,
+ Logger: ocrLogger,
+ MonitoringEndpoint: oracleEndpoint,
+ OffchainKeyring: kb,
+ OnchainKeyring: kb,
+ ContractTransmitter: provider.ContractTransmitter(),
+ ContractConfigTracker: provider.ContractConfigTracker(),
+ OffchainConfigDigester: provider.OffchainConfigDigester(),
+ }
+
+ pluginLggr := lggr.Named(cconf.PluginName).Named(spec.ContractID).Named(spec.GetID())
+ cmdFn, grpcOpts, err := d.cfg.RegisterLOOP(fmt.Sprintf("%s-%s-%s", cconf.PluginName, spec.ContractID, spec.GetID()), command)
+ if err != nil {
+ return nil, fmt.Errorf("failed to register loop: %w", err)
+ }
+
+ errorLog := &errorLog{jobID: jb.ID, recordError: d.jobORM.RecordError}
+ var providerClientConn grpc.ClientConnInterface
+ providerConn, ok := provider.(connProvider)
+ if ok {
+ providerClientConn = providerConn.ClientConn()
+ } else {
+ //We chose to deal with the difference between a LOOP provider and an embedded provider here rather than
+ //in NewServerAdapter because this has a smaller blast radius, as the scope of this workaround is to
+ //enable the medianpoc for EVM and not touch the other providers.
+ //TODO: remove this workaround when the EVM relayer is running inside of an LOOPP
+ d.lggr.Info("provider is not a LOOPP provider, switching to provider server")
+
+ ps, err2 := relay.NewProviderServer(provider, types.OCR2PluginType(cconf.ProviderType), d.lggr)
+ if err2 != nil {
+ return nil, fmt.Errorf("cannot start EVM provider server: %s", err)
+ }
+ providerClientConn, err2 = ps.GetConn()
+ if err2 != nil {
+ return nil, fmt.Errorf("cannot connect to EVM provider server: %s", err)
+ }
+ srvs = append(srvs, ps)
+ }
+
+ pluginConfig := types.ReportingPluginServiceConfig{
+ PluginName: cconf.PluginName,
+ Command: command,
+ ProviderType: cconf.ProviderType,
+ TelemetryType: cconf.TelemetryType,
+ PluginConfig: string(p.PluginConfig),
+ }
+
+ pr := generic.NewPipelineRunnerAdapter(pluginLggr, jb, d.pipelineRunner)
+ ta := generic.NewTelemetryAdapter(d.monitoringEndpointGen)
+
+ plugin := reportingplugins.NewLOOPPService(pluginLggr, grpcOpts, cmdFn, pluginConfig, providerClientConn, pr, ta, errorLog)
+ oracleArgs.ReportingPluginFactory = plugin
+ srvs = append(srvs, plugin)
+
+ oracle, err := libocr2.NewOracle(oracleArgs)
+ if err != nil {
+ return nil, err
+ }
+
+ srvs = append(srvs, job.NewServiceAdapter(oracle))
+ return srvs, nil
+}
+
func (d *Delegate) newServicesMercury(
ctx context.Context,
lggr logger.SugaredLogger,
@@ -498,18 +661,14 @@ func (d *Delegate) newServicesMercury(
rid, err := spec.RelayID()
if err != nil {
- return nil, fmt.Errorf("mercury services: %w: %w", ErrJobSpecNoRelayer, err)
+ return nil, ErrJobSpecNoRelayer{Err: err, PluginName: "mercury"}
}
if rid.Network != relay.EVM {
return nil, fmt.Errorf("mercury services: expected EVM relayer got %s", rid.Network)
}
relayer, err := d.RelayGetter.Get(rid)
if err != nil {
- return nil, fmt.Errorf("failed to get relay %s is it enabled?: %w", spec.Relay, err)
- }
- chain, err := d.legacyChains.Get(rid.ChainID)
- if err != nil {
- return nil, fmt.Errorf("mercury services: failed to get chain %s: %w", rid.ChainID, err)
+ return nil, ErrRelayNotEnabled{Err: err, Relay: spec.Relay, PluginName: "mercury"}
}
provider, err2 := relayer.NewPluginProvider(ctx,
@@ -541,7 +700,7 @@ func (d *Delegate) newServicesMercury(
Database: ocrDB,
LocalConfig: lc,
Logger: ocrLogger,
- MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(spec.FeedID.String(), synchronization.OCR3Mercury, rid.Network, rid.ChainID),
+ MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, spec.FeedID.String(), synchronization.OCR3Mercury),
OffchainConfigDigester: mercuryProvider.OffchainConfigDigester(),
OffchainKeyring: kb,
OnchainKeyring: kb,
@@ -549,11 +708,13 @@ func (d *Delegate) newServicesMercury(
chEnhancedTelem := make(chan ocrcommon.EnhancedTelemetryMercuryData, 100)
- mercuryServices, err2 := mercury.NewServices(jb, mercuryProvider, d.pipelineRunner, runResults, lggr, oracleArgsNoPlugin, d.cfg.JobPipeline(), chEnhancedTelem, chain, d.mercuryORM, (mercuryutils.FeedID)(*spec.FeedID))
+ mercuryServices, err2 := mercury.NewServices(jb, mercuryProvider, d.pipelineRunner, runResults, lggr, oracleArgsNoPlugin, d.cfg.JobPipeline(), chEnhancedTelem, d.mercuryORM, (mercuryutils.FeedID)(*spec.FeedID))
if ocrcommon.ShouldCollectEnhancedTelemetryMercury(jb) {
- enhancedTelemService := ocrcommon.NewEnhancedTelemetryService(&jb, chEnhancedTelem, make(chan struct{}), d.monitoringEndpointGen.GenMonitoringEndpoint(spec.FeedID.String(), synchronization.EnhancedEAMercury, rid.Network, rid.ChainID), lggr.Named("EnhancedTelemetryMercury"))
+ enhancedTelemService := ocrcommon.NewEnhancedTelemetryService(&jb, chEnhancedTelem, make(chan struct{}), d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, spec.FeedID.String(), synchronization.EnhancedEAMercury), lggr.Named("EnhancedTelemetryMercury"))
mercuryServices = append(mercuryServices, enhancedTelemService)
+ } else {
+ lggr.Infow("Enhanced telemetry is disabled for mercury job", "job", jb.Name)
}
return mercuryServices, err2
@@ -574,7 +735,7 @@ func (d *Delegate) newServicesMedian(
rid, err := spec.RelayID()
if err != nil {
- return nil, fmt.Errorf("median services: %w: %w", ErrJobSpecNoRelayer, err)
+ return nil, ErrJobSpecNoRelayer{Err: err, PluginName: "median"}
}
oracleArgsNoPlugin := libocr2.OCR2OracleArgs{
@@ -583,7 +744,7 @@ func (d *Delegate) newServicesMedian(
Database: ocrDB,
LocalConfig: lc,
Logger: ocrLogger,
- MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(spec.ContractID, synchronization.OCR2Median, rid.Network, rid.ChainID),
+ MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, spec.ContractID, synchronization.OCR2Median),
OffchainKeyring: kb,
OnchainKeyring: kb,
}
@@ -593,14 +754,16 @@ func (d *Delegate) newServicesMedian(
relayer, err := d.RelayGetter.Get(rid)
if err != nil {
- return nil, fmt.Errorf("median services; failed to get relay %s is it enabled?: %w", spec.Relay, err)
+ return nil, ErrRelayNotEnabled{Err: err, PluginName: "median", Relay: spec.Relay}
}
medianServices, err2 := median.NewMedianServices(ctx, jb, d.isNewlyCreatedJob, relayer, d.pipelineRunner, runResults, lggr, oracleArgsNoPlugin, mConfig, enhancedTelemChan, errorLog)
if ocrcommon.ShouldCollectEnhancedTelemetry(&jb) {
- enhancedTelemService := ocrcommon.NewEnhancedTelemetryService(&jb, enhancedTelemChan, make(chan struct{}), d.monitoringEndpointGen.GenMonitoringEndpoint(spec.ContractID, synchronization.EnhancedEA, rid.Network, rid.ChainID), lggr.Named("EnhancedTelemetry"))
+ enhancedTelemService := ocrcommon.NewEnhancedTelemetryService(&jb, enhancedTelemChan, make(chan struct{}), d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, spec.ContractID, synchronization.EnhancedEA), lggr.Named("EnhancedTelemetry"))
medianServices = append(medianServices, enhancedTelemService)
+ } else {
+ lggr.Infow("Enhanced telemetry is disabled for job", "job", jb.Name)
}
return medianServices, err2
@@ -618,7 +781,7 @@ func (d *Delegate) newServicesDKG(
spec := jb.OCR2OracleSpec
rid, err := spec.RelayID()
if err != nil {
- return nil, fmt.Errorf("DKG services: %w: %w", ErrJobSpecNoRelayer, err)
+ return nil, ErrJobSpecNoRelayer{Err: err, PluginName: "DKG"}
}
if rid.Network != relay.EVM {
return nil, fmt.Errorf("DKG services: expected EVM relayer got %s", rid.Network)
@@ -687,7 +850,7 @@ func (d *Delegate) newServicesOCR2VRF(
rid, err := spec.RelayID()
if err != nil {
- return nil, fmt.Errorf("VRF services: %w: %w", ErrJobSpecNoRelayer, err)
+ return nil, ErrJobSpecNoRelayer{Err: err, PluginName: "VRF"}
}
if rid.Network != relay.EVM {
return nil, fmt.Errorf("VRF services: expected EVM relayer got %s", rid.Network)
@@ -820,7 +983,7 @@ func (d *Delegate) newServicesOCR2VRF(
VRFContractTransmitter: vrfProvider.ContractTransmitter(),
VRFDatabase: ocrDB,
VRFLocalConfig: lc,
- VRFMonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(spec.ContractID, synchronization.OCR2VRF, rid.Network, rid.ChainID),
+ VRFMonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, spec.ContractID, synchronization.OCR2VRF),
DKGContractConfigTracker: dkgProvider.ContractConfigTracker(),
DKGOffchainConfigDigester: dkgProvider.OffchainConfigDigester(),
DKGContract: dkgpkg.NewOnchainContract(dkgContract, &altbn_128.G2{}),
@@ -912,7 +1075,7 @@ func (d *Delegate) newServicesOCR2Keepers21(
mc := d.cfg.Mercury().Credentials(credName)
rid, err := spec.RelayID()
if err != nil {
- return nil, fmt.Errorf("keeper2 services: %w: %w", ErrJobSpecNoRelayer, err)
+ return nil, ErrJobSpecNoRelayer{Err: err, PluginName: "keeper2"}
}
if rid.Network != relay.EVM {
return nil, fmt.Errorf("keeper2 services: expected EVM relayer got %s", rid.Network)
@@ -959,7 +1122,7 @@ func (d *Delegate) newServicesOCR2Keepers21(
ContractConfigTracker: keeperProvider.ContractConfigTracker(),
KeepersDatabase: ocrDB,
Logger: ocrLogger,
- MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(spec.ContractID, synchronization.OCR2Automation, rid.Network, rid.ChainID),
+ MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, spec.ContractID, synchronization.OCR3Automation),
OffchainConfigDigester: keeperProvider.OffchainConfigDigester(),
OffchainKeyring: kb,
OnchainKeyring: services.Keyring(),
@@ -1026,7 +1189,7 @@ func (d *Delegate) newServicesOCR2Keepers20(
rid, err := spec.RelayID()
if err != nil {
- return nil, fmt.Errorf("keepers2.0 services: %w: %w", ErrJobSpecNoRelayer, err)
+ return nil, ErrJobSpecNoRelayer{Err: err, PluginName: "keepers2.0"}
}
if rid.Network != relay.EVM {
return nil, fmt.Errorf("keepers2.0 services: expected EVM relayer got %s", rid.Network)
@@ -1104,7 +1267,7 @@ func (d *Delegate) newServicesOCR2Keepers20(
KeepersDatabase: ocrDB,
LocalConfig: lc,
Logger: ocrLogger,
- MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(spec.ContractID, synchronization.OCR2Automation, rid.Network, rid.ChainID),
+ MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, spec.ContractID, synchronization.OCR2Automation),
OffchainConfigDigester: keeperProvider.OffchainConfigDigester(),
OffchainKeyring: kb,
OnchainKeyring: kb,
@@ -1161,7 +1324,7 @@ func (d *Delegate) newServicesOCR2Functions(
rid, err := spec.RelayID()
if err != nil {
- return nil, fmt.Errorf("functions services: %w: %w", ErrJobSpecNoRelayer, err)
+ return nil, ErrJobSpecNoRelayer{Err: err, PluginName: "functions"}
}
if rid.Network != relay.EVM {
return nil, fmt.Errorf("functions services: expected EVM relayer got %s", rid.Network)
@@ -1213,7 +1376,7 @@ func (d *Delegate) newServicesOCR2Functions(
Database: functionsOcrDB,
LocalConfig: lc,
Logger: ocrLogger,
- MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(spec.ContractID, synchronization.OCR2Functions, rid.Network, rid.ChainID),
+ MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, spec.ContractID, synchronization.OCR2Functions),
OffchainConfigDigester: functionsProvider.OffchainConfigDigester(),
OffchainKeyring: kb,
OnchainKeyring: kb,
@@ -1277,7 +1440,7 @@ func (d *Delegate) newServicesOCR2Functions(
ContractID: spec.ContractID,
Logger: lggr,
MailMon: d.mailMon,
- URLsMonEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(spec.ContractID, synchronization.FunctionsRequests, rid.Network, rid.ChainID),
+ URLsMonEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, spec.ContractID, synchronization.FunctionsRequests),
EthKeystore: d.ethKs,
ThresholdKeyShare: thresholdKeyShare,
LogPollerWrapper: functionsProvider.LogPollerWrapper(),
diff --git a/core/services/ocr2/plugins/dkg/persistence/db.go b/core/services/ocr2/plugins/dkg/persistence/db.go
index 75fb3b391fa..c020a68cbe7 100644
--- a/core/services/ocr2/plugins/dkg/persistence/db.go
+++ b/core/services/ocr2/plugins/dkg/persistence/db.go
@@ -9,13 +9,13 @@ import (
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/jmoiron/sqlx"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
ocr2vrftypes "github.com/smartcontractkit/ocr2vrf/types"
"github.com/smartcontractkit/ocr2vrf/types/hash"
- "github.com/smartcontractkit/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
diff --git a/core/services/ocr2/plugins/dkg/persistence/db_test.go b/core/services/ocr2/plugins/dkg/persistence/db_test.go
index b830a8db3bc..4e029c1cb2a 100644
--- a/core/services/ocr2/plugins/dkg/persistence/db_test.go
+++ b/core/services/ocr2/plugins/dkg/persistence/db_test.go
@@ -7,9 +7,9 @@ import (
"testing"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/jmoiron/sqlx"
ocr2vrftypes "github.com/smartcontractkit/ocr2vrf/types"
"github.com/smartcontractkit/ocr2vrf/types/hash"
- "github.com/smartcontractkit/sqlx"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
diff --git a/core/services/ocr2/plugins/dkg/plugin.go b/core/services/ocr2/plugins/dkg/plugin.go
index 540518b553c..92910ff7bbe 100644
--- a/core/services/ocr2/plugins/dkg/plugin.go
+++ b/core/services/ocr2/plugins/dkg/plugin.go
@@ -6,12 +6,12 @@ import (
"fmt"
"math/big"
+ "github.com/jmoiron/sqlx"
"github.com/pkg/errors"
"github.com/smartcontractkit/libocr/commontypes"
libocr2 "github.com/smartcontractkit/libocr/offchainreporting2plus"
"github.com/smartcontractkit/ocr2vrf/altbn_128"
"github.com/smartcontractkit/ocr2vrf/dkg"
- "github.com/smartcontractkit/sqlx"
evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
"github.com/smartcontractkit/chainlink/v2/core/logger"
diff --git a/core/services/ocr2/plugins/functions/config/config.go b/core/services/ocr2/plugins/functions/config/config.go
index 3f35d1dba9b..0978500deb5 100644
--- a/core/services/ocr2/plugins/functions/config/config.go
+++ b/core/services/ocr2/plugins/functions/config/config.go
@@ -23,7 +23,11 @@ type PluginConfig struct {
EnableRequestSignatureCheck bool `json:"enableRequestSignatureCheck"`
DONID string `json:"donID"`
ContractVersion uint32 `json:"contractVersion"`
+ MinRequestConfirmations uint32 `json:"minRequestConfirmations"`
+ MinResponseConfirmations uint32 `json:"minResponseConfirmations"`
MinIncomingConfirmations uint32 `json:"minIncomingConfirmations"`
+ PastBlocksToPoll uint32 `json:"pastBlocksToPoll"`
+ LogPollerCacheDurationSec uint32 `json:"logPollerCacheDurationSec"` // Duration to cache previously detected request or response logs such that they can be filtered when calling logpoller_wrapper.LatestEvents()
RequestTimeoutSec uint32 `json:"requestTimeoutSec"`
RequestTimeoutCheckFrequencySec uint32 `json:"requestTimeoutCheckFrequencySec"`
RequestTimeoutBatchLookupSize uint32 `json:"requestTimeoutBatchLookupSize"`
diff --git a/core/services/ocr2/plugins/functions/integration_tests/v1/internal/testutils.go b/core/services/ocr2/plugins/functions/integration_tests/v1/internal/testutils.go
index 5c824323eb6..9f63d60eef6 100644
--- a/core/services/ocr2/plugins/functions/integration_tests/v1/internal/testutils.go
+++ b/core/services/ocr2/plugins/functions/integration_tests/v1/internal/testutils.go
@@ -302,7 +302,6 @@ func StartNewNode(
t *testing.T,
owner *bind.TransactOpts,
port int,
- dbName string,
b *backends.SimulatedBackend,
maxGas uint32,
p2pV2Bootstrappers []commontypes.BootstrapperLocator,
@@ -310,7 +309,7 @@ func StartNewNode(
thresholdKeyShare string,
) *Node {
p2pKey := keystest.NewP2PKeyV2(t)
- config, _ := heavyweight.FullTestDBV2(t, fmt.Sprintf("%s%d", dbName, port), func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
c.Insecure.OCRDevelopmentMode = ptr(true)
c.Feature.LogPoller = ptr(true)
@@ -550,7 +549,7 @@ func CreateFunctionsNodes(
}
bootstrapPort := freeport.GetOne(t)
- bootstrapNode = StartNewNode(t, owner, bootstrapPort, "bootstrap", b, uint32(maxGas), nil, nil, "")
+ bootstrapNode = StartNewNode(t, owner, bootstrapPort, b, uint32(maxGas), nil, nil, "")
AddBootstrapJob(t, bootstrapNode.App, routerAddress)
// oracle nodes with jobs, bridges and mock EAs
@@ -568,7 +567,7 @@ func CreateFunctionsNodes(
} else {
ocr2Keystore = ocr2Keystores[i]
}
- oracleNode := StartNewNode(t, owner, ports[i], fmt.Sprintf("oracle%d", i), b, uint32(maxGas), []commontypes.BootstrapperLocator{
+ oracleNode := StartNewNode(t, owner, ports[i], b, uint32(maxGas), []commontypes.BootstrapperLocator{
{PeerID: bootstrapNode.PeerID, Addrs: []string{fmt.Sprintf("127.0.0.1:%d", bootstrapPort)}},
}, ocr2Keystore, thresholdKeyShare)
oracleNodes = append(oracleNodes, oracleNode.App)
diff --git a/core/services/ocr2/plugins/functions/plugin.go b/core/services/ocr2/plugins/functions/plugin.go
index 10f780371b2..26cffac5abf 100644
--- a/core/services/ocr2/plugins/functions/plugin.go
+++ b/core/services/ocr2/plugins/functions/plugin.go
@@ -7,8 +7,8 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/jmoiron/sqlx"
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
"github.com/smartcontractkit/libocr/commontypes"
libocr2 "github.com/smartcontractkit/libocr/offchainreporting2plus"
diff --git a/core/services/ocr2/plugins/generic/helpers_test.go b/core/services/ocr2/plugins/generic/helpers_test.go
new file mode 100644
index 00000000000..e23e8e46429
--- /dev/null
+++ b/core/services/ocr2/plugins/generic/helpers_test.go
@@ -0,0 +1,7 @@
+package generic
+
+import "github.com/smartcontractkit/libocr/commontypes"
+
+func (t *TelemetryAdapter) Endpoints() map[[4]string]commontypes.MonitoringEndpoint {
+ return t.endpoints
+}
diff --git a/core/services/ocr2/plugins/generic/merge_test.go b/core/services/ocr2/plugins/generic/merge_test.go
new file mode 100644
index 00000000000..9618c62357d
--- /dev/null
+++ b/core/services/ocr2/plugins/generic/merge_test.go
@@ -0,0 +1,32 @@
+package generic
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestMerge(t *testing.T) {
+ vars := map[string]interface{}{
+ "jb": map[string]interface{}{
+ "databaseID": "some-job-id",
+ },
+ }
+ addedVars := map[string]interface{}{
+ "jb": map[string]interface{}{
+ "some-other-var": "foo",
+ },
+ "val": 0,
+ }
+
+ merge(vars, addedVars)
+
+ assert.True(t, reflect.DeepEqual(vars, map[string]interface{}{
+ "jb": map[string]interface{}{
+ "databaseID": "some-job-id",
+ "some-other-var": "foo",
+ },
+ "val": 0,
+ }), vars)
+}
diff --git a/core/services/ocr2/plugins/generic/pipeline_runner_adapter.go b/core/services/ocr2/plugins/generic/pipeline_runner_adapter.go
index 6afb35ca758..def33114e8c 100644
--- a/core/services/ocr2/plugins/generic/pipeline_runner_adapter.go
+++ b/core/services/ocr2/plugins/generic/pipeline_runner_adapter.go
@@ -23,7 +23,7 @@ type PipelineRunnerAdapter struct {
logger logger.Logger
}
-func (p *PipelineRunnerAdapter) ExecuteRun(ctx context.Context, spec string, vars types.Vars, options types.Options) ([]types.TaskResult, error) {
+func (p *PipelineRunnerAdapter) ExecuteRun(ctx context.Context, spec string, vars types.Vars, options types.Options) (types.TaskResults, error) {
s := pipeline.Spec{
DotDagSource: spec,
CreatedAt: time.Now(),
@@ -54,9 +54,13 @@ func (p *PipelineRunnerAdapter) ExecuteRun(ctx context.Context, spec string, var
taskResults[i] = types.TaskResult{
ID: trr.ID.String(),
Type: string(trr.Task.Type()),
- Value: trr.Result.Value,
- Error: trr.Result.Error,
- Index: int(trr.TaskRun.Index),
+ Index: int(trr.Task.OutputIndex()),
+
+ TaskValue: types.TaskValue{
+ Value: trr.Result.Value,
+ Error: trr.Result.Error,
+ IsTerminal: len(trr.Task.Outputs()) == 0,
+ },
}
}
return taskResults, nil
diff --git a/core/services/ocr2/plugins/generic/pipeline_runner_adapter_test.go b/core/services/ocr2/plugins/generic/pipeline_runner_adapter_test.go
index ee0038232dc..ef0e7421b50 100644
--- a/core/services/ocr2/plugins/generic/pipeline_runner_adapter_test.go
+++ b/core/services/ocr2/plugins/generic/pipeline_runner_adapter_test.go
@@ -1,9 +1,8 @@
-package generic
+package generic_test
import (
"context"
"net/http"
- "reflect"
"testing"
"time"
@@ -20,6 +19,7 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/job"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore"
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/generic"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
"github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
"github.com/smartcontractkit/chainlink/v2/core/utils"
@@ -52,7 +52,7 @@ func TestAdapter_Integration(t *testing.T) {
http.DefaultClient,
http.DefaultClient,
)
- pra := NewPipelineRunnerAdapter(logger, job.Job{}, pr)
+ pra := generic.NewPipelineRunnerAdapter(logger, job.Job{}, pr)
results, err := pra.ExecuteRun(context.Background(), spec, types.Vars{Vars: map[string]interface{}{"val": 1}}, types.Options{})
require.NoError(t, err)
@@ -83,7 +83,7 @@ func TestAdapter_AddsDefaultVars(t *testing.T) {
logger := logger.TestLogger(t)
mpr := newMockPipelineRunner()
jobID, externalJobID, name := int32(100), uuid.New(), null.StringFrom("job-name")
- pra := NewPipelineRunnerAdapter(logger, job.Job{ID: jobID, ExternalJobID: externalJobID, Name: name}, mpr)
+ pra := generic.NewPipelineRunnerAdapter(logger, job.Job{ID: jobID, ExternalJobID: externalJobID, Name: name}, mpr)
_, err := pra.ExecuteRun(context.Background(), spec, types.Vars{}, types.Options{})
require.NoError(t, err)
@@ -105,7 +105,7 @@ func TestPipelineRunnerAdapter_SetsVarsOnSpec(t *testing.T) {
logger := logger.TestLogger(t)
mpr := newMockPipelineRunner()
jobID, externalJobID, name, jobType := int32(100), uuid.New(), null.StringFrom("job-name"), job.Type("generic")
- pra := NewPipelineRunnerAdapter(logger, job.Job{ID: jobID, ExternalJobID: externalJobID, Name: name, Type: jobType}, mpr)
+ pra := generic.NewPipelineRunnerAdapter(logger, job.Job{ID: jobID, ExternalJobID: externalJobID, Name: name, Type: jobType}, mpr)
maxDuration := time.Duration(100 * time.Second)
_, err := pra.ExecuteRun(context.Background(), spec, types.Vars{}, types.Options{MaxTaskDuration: maxDuration})
@@ -115,29 +115,4 @@ func TestPipelineRunnerAdapter_SetsVarsOnSpec(t *testing.T) {
assert.Equal(t, name.ValueOrZero(), mpr.spec.JobName)
assert.Equal(t, string(jobType), mpr.spec.JobType)
assert.Equal(t, maxDuration, mpr.spec.MaxTaskDuration.Duration())
-
-}
-
-func TestMerge(t *testing.T) {
- vars := map[string]interface{}{
- "jb": map[string]interface{}{
- "databaseID": "some-job-id",
- },
- }
- addedVars := map[string]interface{}{
- "jb": map[string]interface{}{
- "some-other-var": "foo",
- },
- "val": 0,
- }
-
- merge(vars, addedVars)
-
- assert.True(t, reflect.DeepEqual(vars, map[string]interface{}{
- "jb": map[string]interface{}{
- "databaseID": "some-job-id",
- "some-other-var": "foo",
- },
- "val": 0,
- }), vars)
}
diff --git a/core/services/ocr2/plugins/generic/telemetry_adapter.go b/core/services/ocr2/plugins/generic/telemetry_adapter.go
index a81befa9854..51d94f5cfe7 100644
--- a/core/services/ocr2/plugins/generic/telemetry_adapter.go
+++ b/core/services/ocr2/plugins/generic/telemetry_adapter.go
@@ -6,17 +6,20 @@ import (
"github.com/smartcontractkit/libocr/commontypes"
+ "github.com/smartcontractkit/chainlink/v2/core/services/synchronization"
+ "github.com/smartcontractkit/chainlink/v2/core/services/telemetry"
+
"github.com/smartcontractkit/chainlink-relay/pkg/types"
)
var _ types.TelemetryService = (*TelemetryAdapter)(nil)
type TelemetryAdapter struct {
- endpointGenerator types.MonitoringEndpointGenerator
+ endpointGenerator telemetry.MonitoringEndpointGenerator
endpoints map[[4]string]commontypes.MonitoringEndpoint
}
-func NewTelemetryAdapter(endpointGen types.MonitoringEndpointGenerator) *TelemetryAdapter {
+func NewTelemetryAdapter(endpointGen telemetry.MonitoringEndpointGenerator) *TelemetryAdapter {
return &TelemetryAdapter{
endpoints: make(map[[4]string]commontypes.MonitoringEndpoint),
endpointGenerator: endpointGen,
@@ -24,7 +27,7 @@ func NewTelemetryAdapter(endpointGen types.MonitoringEndpointGenerator) *Telemet
}
func (t *TelemetryAdapter) Send(ctx context.Context, network string, chainID string, contractID string, telemetryType string, payload []byte) error {
- e, err := t.getOrCreateEndpoint(contractID, telemetryType, network, chainID)
+ e, err := t.getOrCreateEndpoint(network, chainID, contractID, telemetryType)
if err != nil {
return err
}
@@ -32,7 +35,7 @@ func (t *TelemetryAdapter) Send(ctx context.Context, network string, chainID str
return nil
}
-func (t *TelemetryAdapter) getOrCreateEndpoint(contractID string, telemetryType string, network string, chainID string) (commontypes.MonitoringEndpoint, error) {
+func (t *TelemetryAdapter) getOrCreateEndpoint(network string, chainID string, contractID string, telemetryType string) (commontypes.MonitoringEndpoint, error) {
if contractID == "" {
return nil, errors.New("contractID cannot be empty")
}
@@ -49,7 +52,7 @@ func (t *TelemetryAdapter) getOrCreateEndpoint(contractID string, telemetryType
key := [4]string{network, chainID, contractID, telemetryType}
e, ok := t.endpoints[key]
if !ok {
- e = t.endpointGenerator.GenMonitoringEndpoint(network, chainID, contractID, telemetryType)
+ e = t.endpointGenerator.GenMonitoringEndpoint(network, chainID, contractID, synchronization.TelemetryType(telemetryType))
t.endpoints[key] = e
}
return e, nil
diff --git a/core/services/ocr2/plugins/generic/telemetry_adapter_test.go b/core/services/ocr2/plugins/generic/telemetry_adapter_test.go
index d430b889a4b..e137343f2b4 100644
--- a/core/services/ocr2/plugins/generic/telemetry_adapter_test.go
+++ b/core/services/ocr2/plugins/generic/telemetry_adapter_test.go
@@ -1,13 +1,15 @@
-package generic
+package generic_test
import (
"context"
- "fmt"
"testing"
"github.com/smartcontractkit/libocr/commontypes"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/generic"
+ "github.com/smartcontractkit/chainlink/v2/core/services/synchronization"
)
type mockEndpoint struct {
@@ -22,17 +24,17 @@ func (m *mockEndpoint) SendLog(payload []byte) { m.payload = payload }
type mockGenerator struct{}
-func (m *mockGenerator) GenMonitoringEndpoint(network, chainID, contractID, telemetryType string) commontypes.MonitoringEndpoint {
+func (m *mockGenerator) GenMonitoringEndpoint(network string, chainID string, contractID string, telemetryType synchronization.TelemetryType) commontypes.MonitoringEndpoint {
return &mockEndpoint{
network: network,
chainID: chainID,
contractID: contractID,
- telemetryType: telemetryType,
+ telemetryType: string(telemetryType),
}
}
func TestTelemetryAdapter(t *testing.T) {
- ta := NewTelemetryAdapter(&mockGenerator{})
+ ta := generic.NewTelemetryAdapter(&mockGenerator{})
tests := []struct {
name string
@@ -92,8 +94,7 @@ func TestTelemetryAdapter(t *testing.T) {
} else {
require.NoError(t, err)
key := [4]string{test.networkID, test.chainID, test.contractID, test.telemetryType}
- fmt.Printf("%+v", ta.endpoints)
- endpoint, ok := ta.endpoints[key]
+ endpoint, ok := ta.Endpoints()[key]
require.True(t, ok)
me := endpoint.(*mockEndpoint)
diff --git a/core/services/ocr2/plugins/mercury/helpers_test.go b/core/services/ocr2/plugins/mercury/helpers_test.go
index 60904b58139..588f772120e 100644
--- a/core/services/ocr2/plugins/mercury/helpers_test.go
+++ b/core/services/ocr2/plugins/mercury/helpers_test.go
@@ -163,7 +163,7 @@ func setupNode(
p2paddresses := []string{fmt.Sprintf("127.0.0.1:%d", port)}
- config, _ := heavyweight.FullTestDBV2(t, fmt.Sprintf("%s%d", dbName, port), func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
// [JobPipeline]
// MaxSuccessfulRuns = 0
c.JobPipeline.MaxSuccessfulRuns = ptr(uint64(0))
diff --git a/core/services/ocr2/plugins/mercury/plugin.go b/core/services/ocr2/plugins/mercury/plugin.go
index 69a3b53c284..ddef1374a4c 100644
--- a/core/services/ocr2/plugins/mercury/plugin.go
+++ b/core/services/ocr2/plugins/mercury/plugin.go
@@ -37,7 +37,6 @@ func NewServices(
argsNoPlugin libocr2.MercuryOracleArgs,
cfg Config,
chEnhancedTelem chan ocrcommon.EnhancedTelemetryMercuryData,
- chainHeadTracker types.ChainHeadTracker,
orm types.DataSourceORM,
feedID utils.FeedID,
) ([]job.ServiceCtx, error) {
@@ -66,7 +65,7 @@ func NewServices(
lggr,
runResults,
chEnhancedTelem,
- chainHeadTracker,
+ ocr2Provider.ChainReader(),
ocr2Provider.MercuryServerFetcher(),
pluginConfig.InitialBlockNumber.Ptr(),
feedID,
diff --git a/core/services/ocr2/plugins/ocr2keeper/evm21/core/utils.go b/core/services/ocr2/plugins/ocr2keeper/evm21/core/utils.go
index 6a31b938fc6..1da28c1ad09 100644
--- a/core/services/ocr2/plugins/ocr2keeper/evm21/core/utils.go
+++ b/core/services/ocr2/plugins/ocr2keeper/evm21/core/utils.go
@@ -3,7 +3,6 @@ package core
import (
"context"
"math/big"
- "strings"
"github.com/ethereum/go-ethereum/common"
@@ -14,20 +13,8 @@ import (
// GetTxBlock calls eth_getTransactionReceipt on the eth client to obtain a tx receipt
func GetTxBlock(ctx context.Context, client client.Client, txHash common.Hash) (*big.Int, common.Hash, error) {
receipt := types.Receipt{}
- err := client.CallContext(ctx, &receipt, "eth_getTransactionReceipt", txHash)
- if err != nil {
- if strings.Contains(err.Error(), "not yet been implemented") {
- // workaround for simulated chains
- // Exploratory: fix this properly (e.g. in the simulated backend)
- r, err1 := client.TransactionReceipt(ctx, txHash)
- if err1 != nil {
- return nil, common.Hash{}, err1
- }
- if r.Status != 1 {
- return nil, common.Hash{}, nil
- }
- return r.BlockNumber, r.BlockHash, nil
- }
+
+ if err := client.CallContext(ctx, &receipt, "eth_getTransactionReceipt", txHash); err != nil {
return nil, common.Hash{}, err
}
diff --git a/core/services/ocr2/plugins/ocr2keeper/evm21/logprovider/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/evm21/logprovider/integration_test.go
index dad35420398..9a1e99610c1 100644
--- a/core/services/ocr2/plugins/ocr2keeper/evm21/logprovider/integration_test.go
+++ b/core/services/ocr2/plugins/ocr2keeper/evm21/logprovider/integration_test.go
@@ -3,7 +3,6 @@ package logprovider_test
import (
"context"
"errors"
- "fmt"
"math/big"
"testing"
"time"
@@ -18,7 +17,7 @@ import (
"go.uber.org/zap/zapcore"
"golang.org/x/time/rate"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
ocr2keepers "github.com/smartcontractkit/ocr2keepers/pkg/v3/types"
@@ -693,7 +692,7 @@ func setupBackend(t *testing.T) (*backends.SimulatedBackend, func(), []*bind.Tra
func ptr[T any](v T) *T { return &v }
func setupDB(t *testing.T) *sqlx.DB {
- _, db := heavyweight.FullTestDBV2(t, fmt.Sprintf("%s%d", "chainlink_test", 5432), func(c *chainlink.Config, s *chainlink.Secrets) {
+ _, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
c.Feature.LogPoller = ptr(true)
c.OCR.Enabled = ptr(false)
diff --git a/core/services/ocr2/plugins/ocr2keeper/evm21/registry.go b/core/services/ocr2/plugins/ocr2keeper/evm21/registry.go
index 0ca20477f20..73e2bc0a9c0 100644
--- a/core/services/ocr2/plugins/ocr2keeper/evm21/registry.go
+++ b/core/services/ocr2/plugins/ocr2keeper/evm21/registry.go
@@ -20,6 +20,7 @@ import (
ocr2keepers "github.com/smartcontractkit/ocr2keepers/pkg/v3/types"
"github.com/smartcontractkit/chainlink-relay/pkg/services"
+
"github.com/smartcontractkit/chainlink/v2/core/chains/evm"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
@@ -35,11 +36,14 @@ import (
)
const (
+ defaultPluginRetryExpiration = 30 * time.Minute
// defaultAllowListExpiration decides how long an upkeep's allow list info will be valid for.
- defaultAllowListExpiration = 20 * time.Minute
- // allowListCleanupInterval decides when the expired items in allowList cache will be deleted.
- allowListCleanupInterval = 5 * time.Minute
+ defaultAllowListExpiration = 10 * time.Minute
+ // cleanupInterval decides when the expired items in cache will be deleted.
+ cleanupInterval = 5 * time.Minute
logTriggerRefreshBatchSize = 32
+ totalFastPluginRetries = 5
+ totalMediumPluginRetries = 10
)
var (
@@ -100,9 +104,10 @@ func NewEvmRegistry(
headFunc: func(ocr2keepers.BlockKey) {},
chLog: make(chan logpoller.Log, 1000),
mercury: &MercuryConfig{
- cred: mc,
- abi: core.StreamsCompatibleABI,
- allowListCache: cache.New(defaultAllowListExpiration, allowListCleanupInterval),
+ cred: mc,
+ abi: core.StreamsCompatibleABI,
+ allowListCache: cache.New(defaultAllowListExpiration, cleanupInterval),
+ pluginRetryCache: cache.New(defaultPluginRetryExpiration, cleanupInterval),
},
hc: http.DefaultClient,
logEventProvider: logEventProvider,
@@ -126,6 +131,8 @@ type MercuryConfig struct {
abi abi.ABI
// allowListCache stores the upkeeps privileges. In 2.1, this only includes a JSON bytes for allowed to use mercury
allowListCache *cache.Cache
+
+ pluginRetryCache *cache.Cache
}
type EvmRegistry struct {
diff --git a/core/services/ocr2/plugins/ocr2keeper/evm21/registry_check_pipeline.go b/core/services/ocr2/plugins/ocr2keeper/evm21/registry_check_pipeline.go
index 29b7c8b8700..ceae85728a0 100644
--- a/core/services/ocr2/plugins/ocr2keeper/evm21/registry_check_pipeline.go
+++ b/core/services/ocr2/plugins/ocr2keeper/evm21/registry_check_pipeline.go
@@ -41,7 +41,10 @@ func (r *EvmRegistry) CheckUpkeeps(ctx context.Context, keys ...ocr2keepers.Upke
}
chResult := make(chan checkResult, 1)
- go r.doCheck(ctx, keys, chResult)
+
+ r.threadCtrl.Go(func(ctx context.Context) {
+ r.doCheck(ctx, keys, chResult)
+ })
select {
case rs := <-chResult:
@@ -187,11 +190,11 @@ func (r *EvmRegistry) checkUpkeeps(ctx context.Context, payloads []ocr2keepers.U
continue
}
- // call gas estimator (ge) component to get L2 gas cost
- // l1GasCost = ge.getL1GasCost(chain_id, block_id, block_hash, tx_call_data)
- // fast_gas = ...
+ // call gas estimator (GE) component to get L2 gas cost
+ // estimatedL1GasCost = GE.getL1GasCost(chain_id, block_number, block_hash, estimated_tx_call_data)
+ // fast_gas = GE.getFastGas(chain_id, block_number)
// link_native = ...
- // results[i].l1GasCost = l1GasCost
+ // results[i].estimatedL1GasCost = estimatedL1GasCost
// results[i].fastGas = fastGas
// results[i].linkNative = linkNative
@@ -209,7 +212,7 @@ func (r *EvmRegistry) checkUpkeeps(ctx context.Context, payloads []ocr2keepers.U
}
// check data will include the log trigger config
- payload, err = r.abi.Pack("checkUpkeep", upkeepId, p.CheckData /* ChainConfig(l1GasCost, fast_gas, link_native) */)
+ payload, err = r.abi.Pack("checkUpkeep", upkeepId, p.CheckData /* ChainConfig(estimatedL1GasCost, fast_gas, link_native) */)
if err != nil {
// pack error, no retryable
r.lggr.Warnf("failed to pack log trigger checkUpkeep data for upkeepId %s with check data %s: %s", upkeepId, hexutil.Encode(p.CheckData), err)
@@ -219,7 +222,7 @@ func (r *EvmRegistry) checkUpkeeps(ctx context.Context, payloads []ocr2keepers.U
default:
// checkUpkeep is overloaded on the contract for conditionals and log upkeeps
// Need to use the first function (checkUpkeep0) for conditionals
- payload, err = r.abi.Pack("checkUpkeep0", upkeepId /* ChainConfig(l1GasCost, fast_gas, link_native) */)
+ payload, err = r.abi.Pack("checkUpkeep0", upkeepId /* ChainConfig(estimatedL1GasCost, fast_gas, link_native) */)
if err != nil {
// pack error, no retryable
r.lggr.Warnf("failed to pack conditional checkUpkeep data for upkeepId %s with check data %s: %s", upkeepId, hexutil.Encode(p.CheckData), err)
@@ -367,8 +370,11 @@ func (r *EvmRegistry) simulatePerformUpkeeps(ctx context.Context, checkResults [
checkResults[performToKeyIdx[i]].Eligible = false
checkResults[performToKeyIdx[i]].IneligibilityReason = uint8(encoding.UpkeepFailureReasonSimulationFailed)
} else {
- // actualL1GasCost = GE.getL1GasCost(checkResults[performToKeyIdx[i]].PerformData + bytes padding);
- // checkResults[performToKeyIdx[i]].executionL1GasCost = actualL1GasCost
+ // at this point, the core node knows the exact perform data of the upkeep and the call data to L1,
+ // so it can calculate a relatively accurate L1 gas cost.
+ // executionL1GasCost = GE.getL1GasCost(checkResults[performToKeyIdx[i]].PerformData + bytes padding);
+ // checkResults[performToKeyIdx[i]].fastGas = GE.getFastGas(chain_id, block_number)
+ // checkResults[performToKeyIdx[i]].executionL1GasCost = executionL1GasCost
}
}
diff --git a/core/services/ocr2/plugins/ocr2keeper/evm21/services.go b/core/services/ocr2/plugins/ocr2keeper/evm21/services.go
index f9d3dd92591..d178a9af574 100644
--- a/core/services/ocr2/plugins/ocr2keeper/evm21/services.go
+++ b/core/services/ocr2/plugins/ocr2keeper/evm21/services.go
@@ -4,11 +4,11 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/common"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3types"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
"github.com/smartcontractkit/ocr2keepers/pkg/v3/plugin"
ocr2keepers "github.com/smartcontractkit/ocr2keepers/pkg/v3/types"
- "github.com/smartcontractkit/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm"
iregistry21 "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1"
diff --git a/core/services/ocr2/plugins/ocr2keeper/evm21/streams_lookup.go b/core/services/ocr2/plugins/ocr2keeper/evm21/streams_lookup.go
index 28804985723..fb2821a74b7 100644
--- a/core/services/ocr2/plugins/ocr2keeper/evm21/streams_lookup.go
+++ b/core/services/ocr2/plugins/ocr2keeper/evm21/streams_lookup.go
@@ -149,10 +149,15 @@ func (r *EvmRegistry) streamsLookup(ctx context.Context, checkResults []ocr2keep
}
var wg sync.WaitGroup
+
for i, lookup := range lookups {
+ i := i
wg.Add(1)
- go r.doLookup(ctx, &wg, lookup, i, checkResults, lggr)
+ r.threadCtrl.Go(func(ctx context.Context) {
+ r.doLookup(ctx, &wg, lookup, i, checkResults, lggr)
+ })
}
+
wg.Wait()
// don't surface error to plugin bc StreamsLookup process should be self-contained.
@@ -162,10 +167,11 @@ func (r *EvmRegistry) streamsLookup(ctx context.Context, checkResults []ocr2keep
func (r *EvmRegistry) doLookup(ctx context.Context, wg *sync.WaitGroup, lookup *StreamsLookup, i int, checkResults []ocr2keepers.CheckResult, lggr logger.Logger) {
defer wg.Done()
- state, reason, values, retryable, err := r.doMercuryRequest(ctx, lookup, lggr)
+ state, reason, values, retryable, ri, err := r.doMercuryRequest(ctx, lookup, generatePluginRetryKey(checkResults[i].WorkID, lookup.block), lggr)
if err != nil {
- lggr.Errorf("upkeep %s retryable %v doMercuryRequest: %s", lookup.upkeepId, retryable, err.Error())
+ lggr.Errorf("upkeep %s retryable %v retryInterval %s doMercuryRequest: %s", lookup.upkeepId, retryable, ri, err.Error())
checkResults[i].Retryable = retryable
+ checkResults[i].RetryInterval = ri
checkResults[i].PipelineExecutionState = uint8(state)
checkResults[i].IneligibilityReason = uint8(reason)
return
@@ -278,29 +284,35 @@ func (r *EvmRegistry) checkCallback(ctx context.Context, values [][]byte, lookup
}
// doMercuryRequest sends requests to Mercury API to retrieve mercury data.
-func (r *EvmRegistry) doMercuryRequest(ctx context.Context, sl *StreamsLookup, lggr logger.Logger) (encoding.PipelineExecutionState, encoding.UpkeepFailureReason, [][]byte, bool, error) {
+func (r *EvmRegistry) doMercuryRequest(ctx context.Context, sl *StreamsLookup, prk string, lggr logger.Logger) (encoding.PipelineExecutionState, encoding.UpkeepFailureReason, [][]byte, bool, time.Duration, error) {
var isMercuryV03 bool
resultLen := len(sl.Feeds)
ch := make(chan MercuryData, resultLen)
if len(sl.Feeds) == 0 {
- return encoding.NoPipelineError, encoding.UpkeepFailureReasonInvalidRevertDataInput, [][]byte{}, false, fmt.Errorf("invalid revert data input: feed param key %s, time param key %s, feeds %s", sl.FeedParamKey, sl.TimeParamKey, sl.Feeds)
+ return encoding.NoPipelineError, encoding.UpkeepFailureReasonInvalidRevertDataInput, [][]byte{}, false, 0 * time.Second, fmt.Errorf("invalid revert data input: feed param key %s, time param key %s, feeds %s", sl.FeedParamKey, sl.TimeParamKey, sl.Feeds)
}
if sl.FeedParamKey == feedIdHex && sl.TimeParamKey == blockNumber {
// only mercury v0.2
for i := range sl.Feeds {
- go r.singleFeedRequest(ctx, ch, i, sl, lggr)
+ i := i
+ r.threadCtrl.Go(func(ctx context.Context) {
+ r.singleFeedRequest(ctx, ch, i, sl, lggr)
+ })
}
} else if sl.FeedParamKey == feedIDs {
// only mercury v0.3
resultLen = 1
isMercuryV03 = true
ch = make(chan MercuryData, resultLen)
- go r.multiFeedsRequest(ctx, ch, sl, lggr)
+ r.threadCtrl.Go(func(ctx context.Context) {
+ r.multiFeedsRequest(ctx, ch, sl, lggr)
+ })
} else {
- return encoding.NoPipelineError, encoding.UpkeepFailureReasonInvalidRevertDataInput, [][]byte{}, false, fmt.Errorf("invalid revert data input: feed param key %s, time param key %s, feeds %s", sl.FeedParamKey, sl.TimeParamKey, sl.Feeds)
+ return encoding.NoPipelineError, encoding.UpkeepFailureReasonInvalidRevertDataInput, [][]byte{}, false, 0 * time.Second, fmt.Errorf("invalid revert data input: feed param key %s, time param key %s, feeds %s", sl.FeedParamKey, sl.TimeParamKey, sl.Feeds)
}
var reqErr error
+ var ri time.Duration
results := make([][]byte, len(sl.Feeds))
retryable := true
allSuccess := true
@@ -323,8 +335,11 @@ func (r *EvmRegistry) doMercuryRequest(ctx context.Context, sl *StreamsLookup, l
results[m.Index] = m.Bytes[0]
}
}
+ if retryable && !allSuccess {
+ ri = r.calculateRetryConfig(prk)
+ }
// only retry when not all successful AND none are not retryable
- return state, encoding.UpkeepFailureReasonNone, results, retryable && !allSuccess, reqErr
+ return state, encoding.UpkeepFailureReasonNone, results, retryable && !allSuccess, ri, reqErr
}
// singleFeedRequest sends a v0.2 Mercury request for a single feed report.
@@ -378,7 +393,7 @@ func (r *EvmRegistry) singleFeedRequest(ctx context.Context, ch chan<- MercuryDa
return err1
}
- if resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusInternalServerError {
+ if resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusInternalServerError || resp.StatusCode == http.StatusBadGateway || resp.StatusCode == http.StatusServiceUnavailable || resp.StatusCode == http.StatusGatewayTimeout {
lggr.Warnf("at block %s upkeep %s received status code %d for feed %s", sl.Time.String(), sl.upkeepId.String(), resp.StatusCode, sl.Feeds[index])
retryable = true
state = encoding.MercuryFlakyFailure
@@ -415,9 +430,9 @@ func (r *EvmRegistry) singleFeedRequest(ctx context.Context, ch chan<- MercuryDa
sent = true
return nil
},
- // only retry when the error is 404 Not Found or 500 Internal Server Error
+ // only retry when the error is 404 Not Found, 500 Internal Server Error, 502 Bad Gateway, 503 Service Unavailable, 504 Gateway Timeout
retry.RetryIf(func(err error) bool {
- return err.Error() == fmt.Sprintf("%d", http.StatusNotFound) || err.Error() == fmt.Sprintf("%d", http.StatusInternalServerError)
+ return err.Error() == fmt.Sprintf("%d", http.StatusNotFound) || err.Error() == fmt.Sprintf("%d", http.StatusInternalServerError) || err.Error() == fmt.Sprintf("%d", http.StatusBadGateway) || err.Error() == fmt.Sprintf("%d", http.StatusServiceUnavailable) || err.Error() == fmt.Sprintf("%d", http.StatusGatewayTimeout)
}),
retry.Context(ctx),
retry.Delay(retryDelay),
@@ -504,15 +519,16 @@ func (r *EvmRegistry) multiFeedsRequest(ctx context.Context, ch chan<- MercuryDa
retryable = false
state = encoding.InvalidMercuryRequest
return fmt.Errorf("at timestamp %s upkeep %s received status code %d from mercury v0.3 with message: %s", sl.Time.String(), sl.upkeepId.String(), resp.StatusCode, string(body))
- } else if resp.StatusCode == http.StatusInternalServerError {
+ } else if resp.StatusCode == http.StatusInternalServerError || resp.StatusCode == http.StatusBadGateway || resp.StatusCode == http.StatusServiceUnavailable || resp.StatusCode == http.StatusGatewayTimeout {
retryable = true
state = encoding.MercuryFlakyFailure
- return fmt.Errorf("%d", http.StatusInternalServerError)
- } else if resp.StatusCode == 420 {
- // in 0.3, this will happen when missing/malformed query args, missing or bad required headers, non-existent feeds, or no permissions for feeds
- retryable = false
- state = encoding.InvalidMercuryRequest
- return fmt.Errorf("at timestamp %s upkeep %s received status code %d from mercury v0.3, most likely this is caused by missing/malformed query args, missing or bad required headers, non-existent feeds, or no permissions for feeds", sl.Time.String(), sl.upkeepId.String(), resp.StatusCode)
+ return fmt.Errorf("%d", resp.StatusCode)
+ } else if resp.StatusCode == http.StatusPartialContent {
+ // TODO (AUTO-5044): handle response code 206 entirely with errors field parsing
+ lggr.Warnf("at timestamp %s upkeep %s requested [%s] feeds but mercury v0.3 server returned 206 status, treating it as 404 and retrying", sl.Time.String(), sl.upkeepId.String(), sl.Feeds)
+ retryable = true
+ state = encoding.MercuryFlakyFailure
+ return fmt.Errorf("%d", http.StatusPartialContent)
} else if resp.StatusCode != http.StatusOK {
retryable = false
state = encoding.InvalidMercuryRequest
@@ -532,8 +548,11 @@ func (r *EvmRegistry) multiFeedsRequest(ctx context.Context, ch chan<- MercuryDa
// in v0.3, if some feeds are not available, the server will only return available feeds, but we need to make sure ALL feeds are retrieved before calling user contract
// hence, retry in this case. retry will help when we send a very new timestamp and reports are not yet generated
if len(response.Reports) != len(sl.Feeds) {
- // TODO: AUTO-5044: calculate what reports are missing and log a warning
- lggr.Warnf("at timestamp %s upkeep %s mercury v0.3 server returned 200 status with %d reports while we requested %d feeds, treating as 404 (not found) and retrying", sl.Time.String(), sl.upkeepId.String(), len(response.Reports), len(sl.Feeds))
+ var receivedFeeds []string
+ for _, f := range response.Reports {
+ receivedFeeds = append(receivedFeeds, f.FeedID)
+ }
+ lggr.Warnf("at timestamp %s upkeep %s mercury v0.3 server returned 206 status with [%s] reports while we requested [%s] feeds, retrying", sl.Time.String(), sl.upkeepId.String(), receivedFeeds, sl.Feeds)
retryable = true
state = encoding.MercuryFlakyFailure
return fmt.Errorf("%d", http.StatusNotFound)
@@ -558,9 +577,9 @@ func (r *EvmRegistry) multiFeedsRequest(ctx context.Context, ch chan<- MercuryDa
sent = true
return nil
},
- // only retry when the error is 404 Not Found or 500 Internal Server Error
+ // only retry when the error is 206 Partial Content, 404 Not Found, 500 Internal Server Error, 502 Bad Gateway, 503 Service Unavailable, 504 Gateway Timeout
retry.RetryIf(func(err error) bool {
- return err.Error() == fmt.Sprintf("%d", http.StatusNotFound) || err.Error() == fmt.Sprintf("%d", http.StatusInternalServerError)
+ return err.Error() == fmt.Sprintf("%d", http.StatusPartialContent) || err.Error() == fmt.Sprintf("%d", http.StatusNotFound) || err.Error() == fmt.Sprintf("%d", http.StatusInternalServerError) || err.Error() == fmt.Sprintf("%d", http.StatusBadGateway) || err.Error() == fmt.Sprintf("%d", http.StatusServiceUnavailable) || err.Error() == fmt.Sprintf("%d", http.StatusGatewayTimeout)
}),
retry.Context(ctx),
retry.Delay(retryDelay),
@@ -593,3 +612,29 @@ func (r *EvmRegistry) generateHMAC(method string, path string, body []byte, clie
userHmac := hex.EncodeToString(signedMessage.Sum(nil))
return userHmac
}
+
+// calculateRetryConfig returns the plugin retry interval based on how many times the plugin has retried this work.
+func (r *EvmRegistry) calculateRetryConfig(prk string) time.Duration {
+ var ri time.Duration
+ var retries int
+ totalAttempts, ok := r.mercury.pluginRetryCache.Get(prk)
+ if ok {
+ retries = totalAttempts.(int)
+ if retries < totalFastPluginRetries {
+ ri = 1 * time.Second
+ } else if retries < totalMediumPluginRetries {
+ ri = 5 * time.Second
+ }
+ // if the core node has already retried totalMediumPluginRetries times, do not set a retry interval;
+ // the plugin will fall back to its default interval
+ } else {
+ ri = 1 * time.Second
+ }
+ r.mercury.pluginRetryCache.Set(prk, retries+1, cache.DefaultExpiration)
+ return ri
+}
+
+// generatePluginRetryKey returns a plugin retry cache key
+func generatePluginRetryKey(workID string, block uint64) string {
+ return workID + "|" + fmt.Sprintf("%d", block)
+}
diff --git a/core/services/ocr2/plugins/ocr2keeper/evm21/streams_lookup_test.go b/core/services/ocr2/plugins/ocr2keeper/evm21/streams_lookup_test.go
index 6f7065ef875..145d701454d 100644
--- a/core/services/ocr2/plugins/ocr2keeper/evm21/streams_lookup_test.go
+++ b/core/services/ocr2/plugins/ocr2keeper/evm21/streams_lookup_test.go
@@ -10,6 +10,7 @@ import (
"net/http"
"strings"
"testing"
+ "time"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
@@ -24,6 +25,7 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evm21/encoding"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evm21/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
evmClientMocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
@@ -65,10 +67,12 @@ func setupEVMRegistry(t *testing.T) *EvmRegistry {
Username: "FakeClientID",
Password: "FakeClientKey",
},
- abi: streamsLookupCompatibleABI,
- allowListCache: cache.New(defaultAllowListExpiration, allowListCleanupInterval),
+ abi: streamsLookupCompatibleABI,
+ allowListCache: cache.New(defaultAllowListExpiration, cleanupInterval),
+ pluginRetryCache: cache.New(defaultPluginRetryExpiration, cleanupInterval),
},
- hc: mockHttpClient,
+ hc: mockHttpClient,
+ threadCtrl: utils.NewThreadControl(),
}
return r
}
@@ -218,6 +222,7 @@ func TestEvmRegistry_StreamsLookup(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
r := setupEVMRegistry(t)
+ defer r.Close()
client := new(evmClientMocks.Client)
r.client = client
@@ -360,6 +365,7 @@ func TestEvmRegistry_AllowedToUseMercury(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
r := setupEVMRegistry(t)
+ defer r.Close()
client := new(evmClientMocks.Client)
r.client = client
@@ -427,15 +433,18 @@ func TestEvmRegistry_DoMercuryRequestV02(t *testing.T) {
upkeepId, _ := new(big.Int).SetString("88786950015966611018675766524283132478093844178961698330929478019253453382042", 10)
tests := []struct {
- name string
- lookup *StreamsLookup
- mockHttpStatusCode int
- mockChainlinkBlobs []string
- expectedValues [][]byte
- expectedRetryable bool
- expectedError error
- state encoding.PipelineExecutionState
- reason encoding.UpkeepFailureReason
+ name string
+ lookup *StreamsLookup
+ mockHttpStatusCode int
+ mockChainlinkBlobs []string
+ pluginRetries int
+ pluginRetryKey string
+ expectedValues [][]byte
+ expectedRetryable bool
+ expectedRetryInterval time.Duration
+ expectedError error
+ state encoding.PipelineExecutionState
+ reason encoding.UpkeepFailureReason
}{
{
name: "success",
@@ -456,7 +465,7 @@ func TestEvmRegistry_DoMercuryRequestV02(t *testing.T) {
expectedError: nil,
},
{
- name: "failure - retryable",
+ name: "failure - retryable and interval is 1s",
lookup: &StreamsLookup{
StreamsLookupError: &encoding.StreamsLookupError{
FeedParamKey: feedIdHex,
@@ -467,6 +476,49 @@ func TestEvmRegistry_DoMercuryRequestV02(t *testing.T) {
},
upkeepId: upkeepId,
},
+ mockHttpStatusCode: http.StatusInternalServerError,
+ mockChainlinkBlobs: []string{"0x00066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000000081401000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000289ad8d367000000000000000000000000000000000000000000000000000000289acf0b38000000000000000000000000000000000000000000000000000000289b3da40000000000000000000000000000000000000000000000000000000000018ae7ce74d9fa252a8983976eab600dc7590c778d04813430841bc6e765c34cd81a168d00000000000000000000000000000000000000000000000000000000018ae7cb0000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000000000000260412b94e525ca6cedc9f544fd86f77606d52fe731a5d069dbe836a8bfc0fb8c911963b0ae7a14971f3b4621bffb802ef0605392b9a6c89c7fab1df8633a5ade00000000000000000000000000000000000000000000000000000000000000024500c2f521f83fba5efc2bf3effaaedde43d0a4adff785c1213b712a3aed0d8157642a84324db0cf9695ebd27708d4608eb0337e0dd87b0e43f0fa70c700d911"},
+ expectedValues: [][]byte{nil},
+ expectedRetryable: true,
+ pluginRetries: 0,
+ expectedRetryInterval: 1 * time.Second,
+ expectedError: errors.New("failed to request feed for 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000: All attempts fail:\n#1: 500\n#2: 500\n#3: 500"),
+ state: encoding.MercuryFlakyFailure,
+ },
+ {
+ name: "failure - retryable and interval is 5s",
+ lookup: &StreamsLookup{
+ StreamsLookupError: &encoding.StreamsLookupError{
+ FeedParamKey: feedIdHex,
+ Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"},
+ TimeParamKey: blockNumber,
+ Time: big.NewInt(25880526),
+ ExtraData: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100},
+ },
+ upkeepId: upkeepId,
+ },
+ pluginRetries: 5,
+ mockHttpStatusCode: http.StatusInternalServerError,
+ mockChainlinkBlobs: []string{"0x00066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000000081401000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000289ad8d367000000000000000000000000000000000000000000000000000000289acf0b38000000000000000000000000000000000000000000000000000000289b3da40000000000000000000000000000000000000000000000000000000000018ae7ce74d9fa252a8983976eab600dc7590c778d04813430841bc6e765c34cd81a168d00000000000000000000000000000000000000000000000000000000018ae7cb0000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000000000000260412b94e525ca6cedc9f544fd86f77606d52fe731a5d069dbe836a8bfc0fb8c911963b0ae7a14971f3b4621bffb802ef0605392b9a6c89c7fab1df8633a5ade00000000000000000000000000000000000000000000000000000000000000024500c2f521f83fba5efc2bf3effaaedde43d0a4adff785c1213b712a3aed0d8157642a84324db0cf9695ebd27708d4608eb0337e0dd87b0e43f0fa70c700d911"},
+ expectedValues: [][]byte{nil},
+ expectedRetryable: true,
+ expectedRetryInterval: 5 * time.Second,
+ expectedError: errors.New("failed to request feed for 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000: All attempts fail:\n#1: 500\n#2: 500\n#3: 500"),
+ state: encoding.MercuryFlakyFailure,
+ },
+ {
+ name: "failure - not retryable because there are many plugin retries already",
+ lookup: &StreamsLookup{
+ StreamsLookupError: &encoding.StreamsLookupError{
+ FeedParamKey: feedIdHex,
+ Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"},
+ TimeParamKey: blockNumber,
+ Time: big.NewInt(25880526),
+ ExtraData: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100},
+ },
+ upkeepId: upkeepId,
+ },
+ pluginRetries: 10,
mockHttpStatusCode: http.StatusInternalServerError,
mockChainlinkBlobs: []string{"0x00066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000000081401000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000289ad8d367000000000000000000000000000000000000000000000000000000289acf0b38000000000000000000000000000000000000000000000000000000289b3da40000000000000000000000000000000000000000000000000000000000018ae7ce74d9fa252a8983976eab600dc7590c778d04813430841bc6e765c34cd81a168d00000000000000000000000000000000000000000000000000000000018ae7cb0000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000000000000260412b94e525ca6cedc9f544fd86f77606d52fe731a5d069dbe836a8bfc0fb8c911963b0ae7a14971f3b4621bffb802ef0605392b9a6c89c7fab1df8633a5ade00000000000000000000000000000000000000000000000000000000000000024500c2f521f83fba5efc2bf3effaaedde43d0a4adff785c1213b712a3aed0d8157642a84324db0cf9695ebd27708d4608eb0337e0dd87b0e43f0fa70c700d911"},
expectedValues: [][]byte{nil},
@@ -486,11 +538,11 @@ func TestEvmRegistry_DoMercuryRequestV02(t *testing.T) {
},
upkeepId: upkeepId,
},
- mockHttpStatusCode: http.StatusBadGateway,
+ mockHttpStatusCode: http.StatusTooManyRequests,
mockChainlinkBlobs: []string{"0x00066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000000081401000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000289ad8d367000000000000000000000000000000000000000000000000000000289acf0b38000000000000000000000000000000000000000000000000000000289b3da40000000000000000000000000000000000000000000000000000000000018ae7ce74d9fa252a8983976eab600dc7590c778d04813430841bc6e765c34cd81a168d00000000000000000000000000000000000000000000000000000000018ae7cb0000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000000000000260412b94e525ca6cedc9f544fd86f77606d52fe731a5d069dbe836a8bfc0fb8c911963b0ae7a14971f3b4621bffb802ef0605392b9a6c89c7fab1df8633a5ade00000000000000000000000000000000000000000000000000000000000000024500c2f521f83fba5efc2bf3effaaedde43d0a4adff785c1213b712a3aed0d8157642a84324db0cf9695ebd27708d4608eb0337e0dd87b0e43f0fa70c700d911"},
expectedValues: [][]byte{nil},
expectedRetryable: false,
- expectedError: errors.New("failed to request feed for 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000: All attempts fail:\n#1: at block 25880526 upkeep 88786950015966611018675766524283132478093844178961698330929478019253453382042 received status code 502 for feed 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"),
+ expectedError: errors.New("failed to request feed for 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000: All attempts fail:\n#1: at block 25880526 upkeep 88786950015966611018675766524283132478093844178961698330929478019253453382042 received status code 429 for feed 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"),
state: encoding.InvalidMercuryRequest,
},
{
@@ -528,6 +580,12 @@ func TestEvmRegistry_DoMercuryRequestV02(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
r := setupEVMRegistry(t)
+ defer r.Close()
+
+ if tt.pluginRetries != 0 {
+ r.mercury.pluginRetryCache.Set(tt.pluginRetryKey, tt.pluginRetries, cache.DefaultExpiration)
+ }
+
hc := mocks.NewHttpClient(t)
for _, blob := range tt.mockChainlinkBlobs {
@@ -539,7 +597,7 @@ func TestEvmRegistry_DoMercuryRequestV02(t *testing.T) {
StatusCode: tt.mockHttpStatusCode,
Body: io.NopCloser(bytes.NewReader(b)),
}
- if tt.expectedError != nil && tt.expectedRetryable {
+ if tt.expectedError != nil && tt.expectedRetryable || tt.pluginRetries > 0 {
hc.On("Do", mock.Anything).Return(resp, nil).Times(totalAttempt)
} else {
hc.On("Do", mock.Anything).Return(resp, nil).Once()
@@ -547,13 +605,18 @@ func TestEvmRegistry_DoMercuryRequestV02(t *testing.T) {
}
r.hc = hc
- state, reason, values, retryable, reqErr := r.doMercuryRequest(context.Background(), tt.lookup, r.lggr)
+ state, reason, values, retryable, ri, reqErr := r.doMercuryRequest(context.Background(), tt.lookup, tt.pluginRetryKey, r.lggr)
assert.Equal(t, tt.expectedValues, values)
assert.Equal(t, tt.expectedRetryable, retryable)
+ if retryable {
+ newRetries, _ := r.mercury.pluginRetryCache.Get(tt.pluginRetryKey)
+ assert.Equal(t, tt.pluginRetries+1, newRetries.(int))
+ }
+ assert.Equal(t, tt.expectedRetryInterval, ri)
assert.Equal(t, tt.state, state)
assert.Equal(t, tt.reason, reason)
if tt.expectedError != nil {
- assert.Equal(t, tt.expectedError.Error(), reqErr.Error())
+ assert.True(t, strings.HasPrefix(reqErr.Error(), "failed to request feed for 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"))
}
})
}
@@ -563,15 +626,17 @@ func TestEvmRegistry_DoMercuryRequestV03(t *testing.T) {
upkeepId, _ := new(big.Int).SetString("88786950015966611018675766524283132478093844178961698330929478019253453382042", 10)
tests := []struct {
- name string
- lookup *StreamsLookup
- mockHttpStatusCode int
- mockChainlinkBlobs []string
- expectedValues [][]byte
- expectedRetryable bool
- expectedError error
- state encoding.PipelineExecutionState
- reason encoding.UpkeepFailureReason
+ name string
+ lookup *StreamsLookup
+ mockHttpStatusCode int
+ mockChainlinkBlobs []string
+ pluginRetryKey string
+ expectedValues [][]byte
+ expectedRetryable bool
+ expectedRetryInterval time.Duration
+ expectedError error
+ state encoding.PipelineExecutionState
+ reason encoding.UpkeepFailureReason
}{
{
name: "success v0.3",
@@ -622,9 +687,10 @@ func TestEvmRegistry_DoMercuryRequestV03(t *testing.T) {
}
r.hc = hc
- state, reason, values, retryable, reqErr := r.doMercuryRequest(context.Background(), tt.lookup, r.lggr)
+ state, reason, values, retryable, ri, reqErr := r.doMercuryRequest(context.Background(), tt.lookup, tt.pluginRetryKey, r.lggr)
assert.Equal(t, tt.expectedValues, values)
assert.Equal(t, tt.expectedRetryable, retryable)
+ assert.Equal(t, tt.expectedRetryInterval, ri)
assert.Equal(t, tt.state, state)
assert.Equal(t, tt.reason, reason)
if tt.expectedError != nil {
@@ -640,6 +706,7 @@ func TestEvmRegistry_SingleFeedRequest(t *testing.T) {
name string
index int
lookup *StreamsLookup
+ pluginRetryKey string
blob string
statusCode int
lastStatusCode int
@@ -728,8 +795,8 @@ func TestEvmRegistry_SingleFeedRequest(t *testing.T) {
blob: "0xab2123dc",
retryNumber: 1,
statusCode: http.StatusNotFound,
- lastStatusCode: http.StatusBadGateway,
- errorMessage: "failed to request feed for 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000: All attempts fail:\n#1: 404\n#2: at block 123456 upkeep 123456789 received status code 502 for feed 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000",
+ lastStatusCode: http.StatusTooManyRequests,
+ errorMessage: "failed to request feed for 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000: All attempts fail:\n#1: 404\n#2: at block 123456 upkeep 123456789 received status code 429 for feed 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000",
},
{
name: "failure - returns not retryable",
@@ -744,14 +811,16 @@ func TestEvmRegistry_SingleFeedRequest(t *testing.T) {
upkeepId: upkeepId,
},
blob: "0xab2123dc",
- statusCode: http.StatusBadGateway,
- errorMessage: "failed to request feed for 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000: All attempts fail:\n#1: at block 123456 upkeep 123456789 received status code 502 for feed 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000",
+ statusCode: http.StatusConflict,
+ errorMessage: "failed to request feed for 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000: All attempts fail:\n#1: at block 123456 upkeep 123456789 received status code 409 for feed 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
r := setupEVMRegistry(t)
+ defer r.Close()
+
hc := mocks.NewHttpClient(t)
mr := MercuryV02Response{ChainlinkBlob: tt.blob}
@@ -819,6 +888,8 @@ func TestEvmRegistry_MultiFeedRequest(t *testing.T) {
lookup *StreamsLookup
statusCode int
lastStatusCode int
+ pluginRetries int
+ pluginRetryKey string
retryNumber int
retryable bool
errorMessage string
@@ -883,6 +954,47 @@ func TestEvmRegistry_MultiFeedRequest(t *testing.T) {
},
statusCode: http.StatusOK,
},
+ {
+ name: "success - retry 206",
+ lookup: &StreamsLookup{
+ StreamsLookupError: &encoding.StreamsLookupError{
+ FeedParamKey: feedIDs,
+ Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000"},
+ TimeParamKey: timestamp,
+ Time: big.NewInt(123456),
+ },
+ upkeepId: upkeepId,
+ },
+ firstResponse: &MercuryV03Response{
+ Reports: []MercuryV03Report{
+ {
+ FeedID: "0x4554482d5553442d415242495452554d2d544553544e45540000000000000000",
+ ValidFromTimestamp: 123456,
+ ObservationsTimestamp: 123456,
+ FullReport: "0xab2123dc00000012",
+ },
+ },
+ },
+ response: &MercuryV03Response{
+ Reports: []MercuryV03Report{
+ {
+ FeedID: "0x4554482d5553442d415242495452554d2d544553544e45540000000000000000",
+ ValidFromTimestamp: 123456,
+ ObservationsTimestamp: 123456,
+ FullReport: "0xab2123dc00000012",
+ },
+ {
+ FeedID: "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000",
+ ValidFromTimestamp: 123458,
+ ObservationsTimestamp: 123458,
+ FullReport: "0xab2123dc00000019",
+ },
+ },
+ },
+ retryNumber: 1,
+ statusCode: http.StatusPartialContent,
+ lastStatusCode: http.StatusOK,
+ },
{
name: "success - retry for 500",
lookup: &StreamsLookup{
@@ -946,7 +1058,7 @@ func TestEvmRegistry_MultiFeedRequest(t *testing.T) {
errorMessage: "All attempts fail:\n#1: hex string without 0x prefix",
},
{
- name: "failure - returns retryable",
+ name: "failure - returns retryable with 1s plugin retry interval",
lookup: &StreamsLookup{
StreamsLookupError: &encoding.StreamsLookupError{
FeedParamKey: feedIDs,
@@ -962,7 +1074,7 @@ func TestEvmRegistry_MultiFeedRequest(t *testing.T) {
errorMessage: "All attempts fail:\n#1: 500\n#2: 500\n#3: 500",
},
{
- name: "failure - returns retryable and then non-retryable",
+ name: "failure - returns retryable with 5s plugin retry interval",
lookup: &StreamsLookup{
StreamsLookupError: &encoding.StreamsLookupError{
FeedParamKey: feedIDs,
@@ -972,27 +1084,30 @@ func TestEvmRegistry_MultiFeedRequest(t *testing.T) {
},
upkeepId: upkeepId,
},
- retryNumber: 1,
- statusCode: http.StatusInternalServerError,
- lastStatusCode: http.StatusUnauthorized,
- errorMessage: "All attempts fail:\n#1: 500\n#2: at timestamp 123456 upkeep 123456789 received status code 401 from mercury v0.3, most likely this is caused by unauthorized upkeep",
+ pluginRetries: 6,
+ retryNumber: totalAttempt,
+ statusCode: http.StatusInternalServerError,
+ retryable: true,
+ errorMessage: "All attempts fail:\n#1: 500\n#2: 500\n#3: 500",
},
{
- name: "failure - returns status code 420 not retryable",
+ name: "failure - returns retryable and then non-retryable",
lookup: &StreamsLookup{
StreamsLookupError: &encoding.StreamsLookupError{
FeedParamKey: feedIDs,
- Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"},
+ Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000"},
TimeParamKey: timestamp,
Time: big.NewInt(123456),
},
upkeepId: upkeepId,
},
- statusCode: 420,
- errorMessage: "All attempts fail:\n#1: at timestamp 123456 upkeep 123456789 received status code 420 from mercury v0.3, most likely this is caused by missing/malformed query args, missing or bad required headers, non-existent feeds, or no permissions for feeds",
+ retryNumber: 1,
+ statusCode: http.StatusInternalServerError,
+ lastStatusCode: http.StatusUnauthorized,
+ errorMessage: "All attempts fail:\n#1: 500\n#2: at timestamp 123456 upkeep 123456789 received status code 401 from mercury v0.3, most likely this is caused by unauthorized upkeep",
},
{
- name: "failure - returns status code 502 not retryable",
+ name: "failure - returns status code 422 not retryable",
lookup: &StreamsLookup{
StreamsLookupError: &encoding.StreamsLookupError{
FeedParamKey: feedIDs,
@@ -1002,8 +1117,8 @@ func TestEvmRegistry_MultiFeedRequest(t *testing.T) {
},
upkeepId: upkeepId,
},
- statusCode: http.StatusBadGateway,
- errorMessage: "All attempts fail:\n#1: at timestamp 123456 upkeep 123456789 received status code 502 from mercury v0.3",
+ statusCode: http.StatusUnprocessableEntity,
+ errorMessage: "All attempts fail:\n#1: at timestamp 123456 upkeep 123456789 received status code 422 from mercury v0.3",
},
{
name: "success - retry when reports length does not match feeds length",
@@ -1042,14 +1157,21 @@ func TestEvmRegistry_MultiFeedRequest(t *testing.T) {
},
},
},
- retryNumber: 1,
- statusCode: http.StatusOK,
+ retryNumber: 1,
+ statusCode: http.StatusOK,
+ lastStatusCode: http.StatusOK,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
r := setupEVMRegistry(t)
+ defer r.Close()
+
+ if tt.pluginRetries != 0 {
+ r.mercury.pluginRetryCache.Set(tt.pluginRetryKey, tt.pluginRetries, cache.DefaultExpiration)
+ }
+
hc := mocks.NewHttpClient(t)
b, err := json.Marshal(tt.response)
assert.Nil(t, err)
@@ -1071,7 +1193,7 @@ func TestEvmRegistry_MultiFeedRequest(t *testing.T) {
b1, err := json.Marshal(tt.response)
assert.Nil(t, err)
resp1 := &http.Response{
- StatusCode: tt.statusCode,
+ StatusCode: tt.lastStatusCode,
Body: io.NopCloser(bytes.NewReader(b1)),
}
hc.On("Do", mock.Anything).Return(resp0, nil).Once().On("Do", mock.Anything).Return(resp1, nil).Once()
@@ -1208,6 +1330,8 @@ func TestEvmRegistry_CheckCallback(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
client := new(evmClientMocks.Client)
r := setupEVMRegistry(t)
+ defer r.Close()
+
payload, err := r.abi.Pack("checkCallback", tt.lookup.upkeepId, values, tt.lookup.ExtraData)
require.Nil(t, err)
args := map[string]interface{}{
diff --git a/core/services/ocr2/plugins/ocr2keeper/evm21/upkeepstate/orm.go b/core/services/ocr2/plugins/ocr2keeper/evm21/upkeepstate/orm.go
index 5db2f8bd0f3..c918ad595fa 100644
--- a/core/services/ocr2/plugins/ocr2keeper/evm21/upkeepstate/orm.go
+++ b/core/services/ocr2/plugins/ocr2keeper/evm21/upkeepstate/orm.go
@@ -4,8 +4,8 @@ import (
"math/big"
"time"
+ "github.com/jmoiron/sqlx"
"github.com/lib/pq"
- "github.com/smartcontractkit/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
diff --git a/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go b/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go
index 15280de73cf..562f972bc42 100644
--- a/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go
+++ b/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go
@@ -479,7 +479,7 @@ func setupNodes(t *testing.T, nodeKeys [5]ethkey.KeyV2, registry *iregistry21.IK
// Setup bootstrap + oracle nodes
bootstrapNodePort := freeport.GetOne(t)
- appBootstrap, bootstrapPeerID, bootstrapTransmitter, bootstrapKb := setupNode(t, bootstrapNodePort, "bootstrap_keeper_ocr", nodeKeys[0], backend, nil, mServer)
+ appBootstrap, bootstrapPeerID, bootstrapTransmitter, bootstrapKb := setupNode(t, bootstrapNodePort, nodeKeys[0], backend, nil, mServer)
bootstrapNode := Node{
appBootstrap, bootstrapTransmitter, bootstrapKb,
}
@@ -490,7 +490,7 @@ func setupNodes(t *testing.T, nodeKeys [5]ethkey.KeyV2, registry *iregistry21.IK
// Set up the minimum 4 oracles all funded
ports := freeport.GetN(t, 4)
for i := 0; i < 4; i++ {
- app, peerID, transmitter, kb := setupNode(t, ports[i], fmt.Sprintf("oracle_keeper%d", i), nodeKeys[i+1], backend, []commontypes.BootstrapperLocator{
+ app, peerID, transmitter, kb := setupNode(t, ports[i], nodeKeys[i+1], backend, []commontypes.BootstrapperLocator{
// Supply the bootstrap IP and port as a V2 peer address
{PeerID: bootstrapPeerID, Addrs: []string{fmt.Sprintf("127.0.0.1:%d", bootstrapNodePort)}},
}, mServer)
diff --git a/core/services/ocr2/plugins/ocr2keeper/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/integration_test.go
index 7c881a18eb9..f50321631ce 100644
--- a/core/services/ocr2/plugins/ocr2keeper/integration_test.go
+++ b/core/services/ocr2/plugins/ocr2keeper/integration_test.go
@@ -111,7 +111,6 @@ func deployKeeper20Registry(
func setupNode(
t *testing.T,
port int,
- dbName string,
nodeKey ethkey.KeyV2,
backend *backends.SimulatedBackend,
p2pV2Bootstrappers []commontypes.BootstrapperLocator,
@@ -119,7 +118,7 @@ func setupNode(
) (chainlink.Application, string, common.Address, ocr2key.KeyBundle) {
p2pKey := keystest.NewP2PKeyV2(t)
p2paddresses := []string{fmt.Sprintf("127.0.0.1:%d", port)}
- cfg, _ := heavyweight.FullTestDBV2(t, fmt.Sprintf("%s%d", dbName, port), func(c *chainlink.Config, s *chainlink.Secrets) {
+ cfg, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
c.Feature.LogPoller = ptr(true)
c.OCR.Enabled = ptr(false)
@@ -240,7 +239,7 @@ func TestIntegration_KeeperPluginBasic(t *testing.T) {
// Setup bootstrap + oracle nodes
bootstrapNodePort := freeport.GetOne(t)
- appBootstrap, bootstrapPeerID, bootstrapTransmitter, bootstrapKb := setupNode(t, bootstrapNodePort, "bootstrap_keeper_ocr", nodeKeys[0], backend, nil, NewSimulatedMercuryServer())
+ appBootstrap, bootstrapPeerID, bootstrapTransmitter, bootstrapKb := setupNode(t, bootstrapNodePort, nodeKeys[0], backend, nil, NewSimulatedMercuryServer())
bootstrapNode := Node{
appBootstrap, bootstrapTransmitter, bootstrapKb,
}
@@ -251,7 +250,7 @@ func TestIntegration_KeeperPluginBasic(t *testing.T) {
// Set up the minimum 4 oracles all funded
ports := freeport.GetN(t, 4)
for i := 0; i < 4; i++ {
- app, peerID, transmitter, kb := setupNode(t, ports[i], fmt.Sprintf("oracle_keeper%d", i), nodeKeys[i+1], backend, []commontypes.BootstrapperLocator{
+ app, peerID, transmitter, kb := setupNode(t, ports[i], nodeKeys[i+1], backend, []commontypes.BootstrapperLocator{
// Supply the bootstrap IP and port as a V2 peer address
{PeerID: bootstrapPeerID, Addrs: []string{fmt.Sprintf("127.0.0.1:%d", bootstrapNodePort)}},
}, NewSimulatedMercuryServer())
@@ -501,7 +500,7 @@ func TestIntegration_KeeperPluginForwarderEnabled(t *testing.T) {
effectiveTransmitters := make([]common.Address, 0)
// Setup bootstrap + oracle nodes
bootstrapNodePort := freeport.GetOne(t)
- appBootstrap, bootstrapPeerID, bootstrapTransmitter, bootstrapKb := setupNode(t, bootstrapNodePort, "bootstrap_keeper_ocr", nodeKeys[0], backend, nil, NewSimulatedMercuryServer())
+ appBootstrap, bootstrapPeerID, bootstrapTransmitter, bootstrapKb := setupNode(t, bootstrapNodePort, nodeKeys[0], backend, nil, NewSimulatedMercuryServer())
bootstrapNode := Node{
appBootstrap, bootstrapTransmitter, bootstrapKb,
@@ -513,7 +512,7 @@ func TestIntegration_KeeperPluginForwarderEnabled(t *testing.T) {
// Set up the minimum 4 oracles all funded
ports := freeport.GetN(t, 4)
for i := 0; i < 4; i++ {
- app, peerID, transmitter, kb := setupNode(t, ports[i], fmt.Sprintf("oracle_keeper%d", i), nodeKeys[i+1], backend, []commontypes.BootstrapperLocator{
+ app, peerID, transmitter, kb := setupNode(t, ports[i], nodeKeys[i+1], backend, []commontypes.BootstrapperLocator{
// Supply the bootstrap IP and port as a V2 peer address
{PeerID: bootstrapPeerID, Addrs: []string{fmt.Sprintf("127.0.0.1:%d", bootstrapNodePort)}},
}, NewSimulatedMercuryServer())
diff --git a/core/services/ocr2/plugins/ocr2keeper/util.go b/core/services/ocr2/plugins/ocr2keeper/util.go
index 132afd0d29d..fca98d87005 100644
--- a/core/services/ocr2/plugins/ocr2keeper/util.go
+++ b/core/services/ocr2/plugins/ocr2keeper/util.go
@@ -3,12 +3,12 @@ package ocr2keeper
import (
"fmt"
+ "github.com/jmoiron/sqlx"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
ocr2keepers20 "github.com/smartcontractkit/ocr2keepers/pkg/v2"
ocr2keepers20coordinator "github.com/smartcontractkit/ocr2keepers/pkg/v2/coordinator"
ocr2keepers20polling "github.com/smartcontractkit/ocr2keepers/pkg/v2/observer/polling"
ocr2keepers20runner "github.com/smartcontractkit/ocr2keepers/pkg/v2/runner"
- "github.com/smartcontractkit/sqlx"
"github.com/smartcontractkit/chainlink-relay/pkg/types"
diff --git a/core/services/ocr2/plugins/ocr2vrf/internal/ocr2vrf_integration_test.go b/core/services/ocr2/plugins/ocr2vrf/internal/ocr2vrf_integration_test.go
index cf7a408725d..0dbb6a5915e 100644
--- a/core/services/ocr2/plugins/ocr2vrf/internal/ocr2vrf_integration_test.go
+++ b/core/services/ocr2/plugins/ocr2vrf/internal/ocr2vrf_integration_test.go
@@ -226,7 +226,7 @@ func setupNodeOCR2(
p2pV2Bootstrappers []commontypes.BootstrapperLocator,
) *ocr2Node {
p2pKey := keystest.NewP2PKeyV2(t)
- config, _ := heavyweight.FullTestDBV2(t, fmt.Sprintf("%s%d", dbName, port), func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
c.Insecure.OCRDevelopmentMode = ptr(true) // Disables ocr spec validation so we can have fast polling for the test.
c.Feature.LogPoller = ptr(true)
diff --git a/core/services/ocr2/validate/validate.go b/core/services/ocr2/validate/validate.go
index cde1a1f9276..78802f6559c 100644
--- a/core/services/ocr2/validate/validate.go
+++ b/core/services/ocr2/validate/validate.go
@@ -114,6 +114,8 @@ func validateSpec(tree *toml.Tree, spec job.Job) error {
return nil
case types.Mercury:
return validateOCR2MercurySpec(spec.OCR2OracleSpec.PluginConfig, *spec.OCR2OracleSpec.FeedID)
+ case types.GenericPlugin:
+ return validateOCR2GenericPluginSpec(spec.OCR2OracleSpec.PluginConfig)
case "":
return errors.New("no plugin specified")
default:
@@ -123,6 +125,37 @@ func validateSpec(tree *toml.Tree, spec job.Job) error {
return nil
}
+type coreConfig struct {
+ Command string `json:"command"`
+ ProviderType string `json:"providerType"`
+ PluginName string `json:"pluginName"`
+ TelemetryType string `json:"telemetryType"`
+}
+
+type OCR2GenericPluginConfig struct {
+ CoreConfig coreConfig `json:"coreConfig"`
+ PluginConfig json.RawMessage
+}
+
+func validateOCR2GenericPluginSpec(jsonConfig job.JSONConfig) error {
+ p := OCR2GenericPluginConfig{}
+ err := json.Unmarshal(jsonConfig.Bytes(), &p)
+ if err != nil {
+ return err
+ }
+
+ cc := p.CoreConfig
+ if cc.PluginName == "" {
+ return errors.New("generic config invalid: must provide plugin name")
+ }
+
+ if cc.TelemetryType == "" {
+ return errors.New("generic config invalid: must provide telemetry type")
+ }
+
+ return nil
+}
+
func validateDKGSpec(jsonConfig job.JSONConfig) error {
if jsonConfig == nil {
return errors.New("pluginConfig is empty")
diff --git a/core/services/ocr2/validate/validate_test.go b/core/services/ocr2/validate/validate_test.go
index 4685ed745dd..5b40224a4bf 100644
--- a/core/services/ocr2/validate/validate_test.go
+++ b/core/services/ocr2/validate/validate_test.go
@@ -580,6 +580,89 @@ KeyID = "6f3b82406688b8ddb944c6f2e6d808f014c8fa8d568d639c25019568c
require.Contains(t, err.Error(), "validation error for keyID")
},
},
+ {
+ name: "Generic plugin config validation - nothing provided",
+ toml: `
+type = "offchainreporting2"
+schemaVersion = 1
+name = "dkg"
+externalJobID = "6d46d85f-d38c-4f4a-9f00-ac29a25b6330"
+maxTaskDuration = "1s"
+contractID = "0x3e54dCc49F16411A3aaa4cDbC41A25bCa9763Cee"
+ocrKeyBundleID = "08d14c6eed757414d72055d28de6caf06535806c6a14e450f3a2f1c854420e17"
+p2pv2Bootstrappers = [
+ "12D3KooWSbPRwXY4gxFRJT7LWCnjgGbR4S839nfCRCDgQUiNenxa@127.0.0.1:8000"
+]
+relay = "evm"
+pluginType = "plugin"
+transmitterID = "0x74103Cf8b436465870b26aa9Fa2F62AD62b22E35"
+
+[relayConfig]
+chainID = 4
+
+[pluginConfig.coreConfig]
+`,
+ assertion: func(t *testing.T, os job.Job, err error) {
+ require.Error(t, err)
+ require.ErrorContains(t, err, "must provide plugin name")
+ },
+ },
+ {
+ name: "Generic plugin config validation - plugin name provided",
+ toml: `
+type = "offchainreporting2"
+schemaVersion = 1
+name = "dkg"
+externalJobID = "6d46d85f-d38c-4f4a-9f00-ac29a25b6330"
+maxTaskDuration = "1s"
+contractID = "0x3e54dCc49F16411A3aaa4cDbC41A25bCa9763Cee"
+ocrKeyBundleID = "08d14c6eed757414d72055d28de6caf06535806c6a14e450f3a2f1c854420e17"
+p2pv2Bootstrappers = [
+ "12D3KooWSbPRwXY4gxFRJT7LWCnjgGbR4S839nfCRCDgQUiNenxa@127.0.0.1:8000"
+]
+relay = "evm"
+pluginType = "plugin"
+transmitterID = "0x74103Cf8b436465870b26aa9Fa2F62AD62b22E35"
+
+[relayConfig]
+chainID = 4
+
+[pluginConfig.coreConfig]
+pluginName = "median"
+`,
+ assertion: func(t *testing.T, os job.Job, err error) {
+ require.Error(t, err)
+ require.ErrorContains(t, err, "must provide telemetry type")
+ },
+ },
+ {
+ name: "Generic plugin config validation - all provided",
+ toml: `
+type = "offchainreporting2"
+schemaVersion = 1
+name = "dkg"
+externalJobID = "6d46d85f-d38c-4f4a-9f00-ac29a25b6330"
+maxTaskDuration = "1s"
+contractID = "0x3e54dCc49F16411A3aaa4cDbC41A25bCa9763Cee"
+ocrKeyBundleID = "08d14c6eed757414d72055d28de6caf06535806c6a14e450f3a2f1c854420e17"
+p2pv2Bootstrappers = [
+ "12D3KooWSbPRwXY4gxFRJT7LWCnjgGbR4S839nfCRCDgQUiNenxa@127.0.0.1:8000"
+]
+relay = "evm"
+pluginType = "plugin"
+transmitterID = "0x74103Cf8b436465870b26aa9Fa2F62AD62b22E35"
+
+[relayConfig]
+chainID = 4
+
+[pluginConfig.coreConfig]
+pluginName = "median"
+telemetryType = "median"
+`,
+ assertion: func(t *testing.T, os job.Job, err error) {
+ require.NoError(t, err)
+ },
+ },
}
for _, tc := range tt {
diff --git a/core/services/ocrbootstrap/database_test.go b/core/services/ocrbootstrap/database_test.go
index 2f160eff582..e00e318c69c 100644
--- a/core/services/ocrbootstrap/database_test.go
+++ b/core/services/ocrbootstrap/database_test.go
@@ -3,7 +3,7 @@ package ocrbootstrap_test
import (
"testing"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/stretchr/testify/require"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
diff --git a/core/services/ocrbootstrap/delegate.go b/core/services/ocrbootstrap/delegate.go
index 3910ca05d38..34e3ee0a710 100644
--- a/core/services/ocrbootstrap/delegate.go
+++ b/core/services/ocrbootstrap/delegate.go
@@ -7,8 +7,8 @@ import (
"github.com/pkg/errors"
+ "github.com/jmoiron/sqlx"
ocr "github.com/smartcontractkit/libocr/offchainreporting2plus"
- "github.com/smartcontractkit/sqlx"
relaylogger "github.com/smartcontractkit/chainlink-relay/pkg/logger"
"github.com/smartcontractkit/chainlink-relay/pkg/loop"
diff --git a/core/services/ocrcommon/data_source.go b/core/services/ocrcommon/data_source.go
index ed832e45fcf..0363a7124b6 100644
--- a/core/services/ocrcommon/data_source.go
+++ b/core/services/ocrcommon/data_source.go
@@ -144,6 +144,8 @@ func (ds *inMemoryDataSource) executeRun(ctx context.Context, timestamp Observat
FinalResults: finalResult,
RepTimestamp: timestamp,
})
+ } else {
+ ds.lggr.Infow("Enhanced telemetry is disabled for job", "job", ds.jb.Name)
}
return run, finalResult, err
diff --git a/core/services/ocrcommon/peer_wrapper.go b/core/services/ocrcommon/peer_wrapper.go
index 0781303275d..1daa84b7212 100644
--- a/core/services/ocrcommon/peer_wrapper.go
+++ b/core/services/ocrcommon/peer_wrapper.go
@@ -11,11 +11,11 @@ import (
"github.com/pkg/errors"
"go.uber.org/multierr"
+ "github.com/jmoiron/sqlx"
ocrnetworking "github.com/smartcontractkit/libocr/networking"
ocrnetworkingtypes "github.com/smartcontractkit/libocr/networking/types"
ocr1types "github.com/smartcontractkit/libocr/offchainreporting/types"
ocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- "github.com/smartcontractkit/sqlx"
relaylogger "github.com/smartcontractkit/chainlink-relay/pkg/logger"
"github.com/smartcontractkit/chainlink-relay/pkg/services"
diff --git a/core/services/ocrcommon/peerstore.go b/core/services/ocrcommon/peerstore.go
index f1c318a3bff..02a4d90f578 100644
--- a/core/services/ocrcommon/peerstore.go
+++ b/core/services/ocrcommon/peerstore.go
@@ -12,9 +12,10 @@ import (
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink-relay/pkg/services"
+
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/recovery"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey"
diff --git a/core/services/ocrcommon/telemetry_test.go b/core/services/ocrcommon/telemetry_test.go
index e6a798780b5..9e3dedce8a8 100644
--- a/core/services/ocrcommon/telemetry_test.go
+++ b/core/services/ocrcommon/telemetry_test.go
@@ -189,7 +189,7 @@ func TestSendEATelemetry(t *testing.T) {
wg := sync.WaitGroup{}
ingressClient := mocks.NewTelemetryService(t)
ingressAgent := telemetry.NewIngressAgentWrapper(ingressClient)
- monitoringEndpoint := ingressAgent.GenMonitoringEndpoint("0xa", synchronization.EnhancedEA, "test-network", "test-chainID")
+ monitoringEndpoint := ingressAgent.GenMonitoringEndpoint("test-network", "test-chainID", "0xa", synchronization.EnhancedEA)
var sentMessage []byte
ingressClient.On("Send", mock.Anything, mock.AnythingOfType("[]uint8"), mock.AnythingOfType("string"), mock.AnythingOfType("TelemetryType")).Return().Run(func(args mock.Arguments) {
@@ -305,7 +305,7 @@ func TestCollectAndSend(t *testing.T) {
wg := sync.WaitGroup{}
ingressClient := mocks.NewTelemetryService(t)
ingressAgent := telemetry.NewIngressAgentWrapper(ingressClient)
- monitoringEndpoint := ingressAgent.GenMonitoringEndpoint("0xa", synchronization.EnhancedEA, "test-network", "test-chainID")
+ monitoringEndpoint := ingressAgent.GenMonitoringEndpoint("test-network", "test-chainID", "0xa", synchronization.EnhancedEA)
ingressClient.On("Send", mock.Anything, mock.AnythingOfType("[]uint8"), mock.AnythingOfType("string"), mock.AnythingOfType("TelemetryType")).Return().Run(func(args mock.Arguments) {
wg.Done()
})
@@ -548,7 +548,7 @@ func TestCollectMercuryEnhancedTelemetryV1(t *testing.T) {
wg := sync.WaitGroup{}
ingressClient := mocks.NewTelemetryService(t)
ingressAgent := telemetry.NewIngressAgentWrapper(ingressClient)
- monitoringEndpoint := ingressAgent.GenMonitoringEndpoint("0xa", synchronization.EnhancedEAMercury, "test-network", "test-chainID")
+ monitoringEndpoint := ingressAgent.GenMonitoringEndpoint("test-network", "test-chainID", "0xa", synchronization.EnhancedEAMercury)
var sentMessage []byte
ingressClient.On("Send", mock.Anything, mock.AnythingOfType("[]uint8"), mock.AnythingOfType("string"), mock.AnythingOfType("TelemetryType")).Return().Run(func(args mock.Arguments) {
@@ -664,7 +664,7 @@ func TestCollectMercuryEnhancedTelemetryV2(t *testing.T) {
wg := sync.WaitGroup{}
ingressClient := mocks.NewTelemetryService(t)
ingressAgent := telemetry.NewIngressAgentWrapper(ingressClient)
- monitoringEndpoint := ingressAgent.GenMonitoringEndpoint("0xa", synchronization.EnhancedEAMercury, "test-network", "test-chainID")
+ monitoringEndpoint := ingressAgent.GenMonitoringEndpoint("test-network", "test-chainID", "0xa", synchronization.EnhancedEAMercury)
var sentMessage []byte
ingressClient.On("Send", mock.Anything, mock.AnythingOfType("[]uint8"), mock.AnythingOfType("string"), mock.AnythingOfType("TelemetryType")).Return().Run(func(args mock.Arguments) {
diff --git a/core/services/pg/channels.go b/core/services/pg/channels.go
index 736cd407962..aed132a7f2c 100644
--- a/core/services/pg/channels.go
+++ b/core/services/pg/channels.go
@@ -1,8 +1,4 @@
package pg
// Postgres channel to listen for new evm.txes
-const (
- ChannelInsertOnTx = "evm.insert_on_txes"
- ChannelInsertOnCosmosMsg = "insert_on_cosmos_msg"
- ChannelInsertOnEVMLogs = "evm.insert_on_logs"
-)
+const ChannelInsertOnEVMLogs = "evm.insert_on_logs"
diff --git a/core/services/pg/connection.go b/core/services/pg/connection.go
index 19c48a118b9..0bafd5dcd0f 100644
--- a/core/services/pg/connection.go
+++ b/core/services/pg/connection.go
@@ -6,8 +6,8 @@ import (
"github.com/google/uuid"
_ "github.com/jackc/pgx/v4/stdlib" // need to make sure pgx driver is registered before opening connection
+ "github.com/jmoiron/sqlx"
"github.com/scylladb/go-reflectx"
- "github.com/smartcontractkit/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/store/dialects"
)
diff --git a/core/services/pg/connection_test.go b/core/services/pg/connection_test.go
index 92781343c61..651bf9d2d9b 100644
--- a/core/services/pg/connection_test.go
+++ b/core/services/pg/connection_test.go
@@ -5,7 +5,7 @@ import (
"github.com/google/uuid"
_ "github.com/jackc/pgx/v4/stdlib"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/stretchr/testify/require"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
diff --git a/core/services/pg/event_broadcaster_test.go b/core/services/pg/event_broadcaster_test.go
index bea7dfb5a85..a82e26e0589 100644
--- a/core/services/pg/event_broadcaster_test.go
+++ b/core/services/pg/event_broadcaster_test.go
@@ -16,7 +16,7 @@ import (
)
func TestEventBroadcaster(t *testing.T) {
- config, _ := heavyweight.FullTestDBNoFixturesV2(t, "event_broadcaster", nil)
+ config, _ := heavyweight.FullTestDBNoFixturesV2(t, nil)
eventBroadcaster := cltest.NewEventBroadcaster(t, config.Database().URL())
require.NoError(t, eventBroadcaster.Start(testutils.Context(t)))
diff --git a/core/services/pg/helpers_test.go b/core/services/pg/helpers_test.go
index c5ccda6bd9a..52158535a2e 100644
--- a/core/services/pg/helpers_test.go
+++ b/core/services/pg/helpers_test.go
@@ -1,6 +1,6 @@
package pg
-import "github.com/smartcontractkit/sqlx"
+import "github.com/jmoiron/sqlx"
func SetConn(lock interface{}, conn *sqlx.Conn) {
switch v := lock.(type) {
diff --git a/core/services/pg/lease_lock.go b/core/services/pg/lease_lock.go
index 656005016ef..e21cec44bda 100644
--- a/core/services/pg/lease_lock.go
+++ b/core/services/pg/lease_lock.go
@@ -8,8 +8,8 @@ import (
"time"
"github.com/google/uuid"
+ "github.com/jmoiron/sqlx"
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
"go.uber.org/multierr"
"github.com/smartcontractkit/chainlink/v2/core/logger"
diff --git a/core/services/pg/lease_lock_test.go b/core/services/pg/lease_lock_test.go
index 9f857ffa20b..1b4116b5bf9 100644
--- a/core/services/pg/lease_lock_test.go
+++ b/core/services/pg/lease_lock_test.go
@@ -9,7 +9,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
"github.com/smartcontractkit/chainlink/v2/core/internal/cltest/heavyweight"
@@ -24,7 +24,7 @@ func newLeaseLock(t *testing.T, db *sqlx.DB, cfg pg.LeaseLockConfig) pg.LeaseLoc
}
func Test_LeaseLock(t *testing.T) {
- cfg, db := heavyweight.FullTestDBNoFixturesV2(t, "leaselock", func(c *chainlink.Config, s *chainlink.Secrets) {
+ cfg, db := heavyweight.FullTestDBNoFixturesV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
t := true
c.Database.Lock.Enabled = &t
})
@@ -207,7 +207,7 @@ func Test_LeaseLock(t *testing.T) {
require.NoError(t, db.Close())
t.Run("on virgin database", func(t *testing.T) {
- _, db := heavyweight.FullTestDBEmptyV2(t, "leaselock", nil)
+ _, db := heavyweight.FullTestDBEmptyV2(t, nil)
cfg := pg.LeaseLockConfig{
DefaultQueryTimeout: cfg.Database().DefaultQueryTimeout(),
LeaseDuration: 15 * time.Second,
diff --git a/core/services/pg/locked_db.go b/core/services/pg/locked_db.go
index af4481285ce..a9157fe1ae1 100644
--- a/core/services/pg/locked_db.go
+++ b/core/services/pg/locked_db.go
@@ -8,7 +8,7 @@ import (
"github.com/google/uuid"
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/config"
"github.com/smartcontractkit/chainlink/v2/core/logger"
diff --git a/core/services/pg/q.go b/core/services/pg/q.go
index 9c70d813ab6..470d39c825c 100644
--- a/core/services/pg/q.go
+++ b/core/services/pg/q.go
@@ -15,7 +15,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink-relay/pkg/logger"
)
diff --git a/core/services/pg/sqlx.go b/core/services/pg/sqlx.go
index cd5427463dd..820cd51712e 100644
--- a/core/services/pg/sqlx.go
+++ b/core/services/pg/sqlx.go
@@ -7,7 +7,7 @@ import (
"github.com/pkg/errors"
mapper "github.com/scylladb/go-reflectx"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink-relay/pkg/logger"
)
diff --git a/core/services/pg/transaction.go b/core/services/pg/transaction.go
index 932b1120859..d237c20d4c6 100644
--- a/core/services/pg/transaction.go
+++ b/core/services/pg/transaction.go
@@ -10,7 +10,7 @@ import (
"github.com/pkg/errors"
"go.uber.org/multierr"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink-relay/pkg/logger"
diff --git a/core/services/pipeline/orm.go b/core/services/pipeline/orm.go
index 148901bb36c..056a7deab28 100644
--- a/core/services/pipeline/orm.go
+++ b/core/services/pipeline/orm.go
@@ -11,7 +11,7 @@ import (
"github.com/google/uuid"
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink-relay/pkg/services"
@@ -306,13 +306,13 @@ func (o *orm) UpdateTaskRunResult(taskID uuid.UUID, result Result) (run Run, sta
WHERE pipeline_task_runs.id = $1 AND pipeline_runs.state in ('running', 'suspended')
FOR UPDATE`
if err = tx.Get(&run, sql, taskID); err != nil {
- return err
+ return fmt.Errorf("failed to find pipeline run for ID %s: %w", taskID.String(), err)
}
// Update the task with result
sql = `UPDATE pipeline_task_runs SET output = $2, error = $3, finished_at = $4 WHERE id = $1`
if _, err = tx.Exec(sql, taskID, result.OutputDB(), result.ErrorDB(), time.Now()); err != nil {
- return errors.Wrap(err, "UpdateTaskRunResult")
+ return fmt.Errorf("failed to update pipeline task run: %w", err)
}
if run.State == RunStatusSuspended {
@@ -321,7 +321,7 @@ func (o *orm) UpdateTaskRunResult(taskID uuid.UUID, result Result) (run Run, sta
sql = `UPDATE pipeline_runs SET state = $2 WHERE id = $1`
if _, err = tx.Exec(sql, run.ID, run.State); err != nil {
- return errors.Wrap(err, "UpdateTaskRunResult")
+ return fmt.Errorf("failed to update pipeline run state: %w", err)
}
}
diff --git a/core/services/pipeline/orm_test.go b/core/services/pipeline/orm_test.go
index a487c231fb8..dcbbfd9c97e 100644
--- a/core/services/pipeline/orm_test.go
+++ b/core/services/pipeline/orm_test.go
@@ -10,21 +10,19 @@ import (
"go.uber.org/zap/zapcore"
"gopkg.in/guregu/null.v4"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/bridges"
"github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
"github.com/smartcontractkit/chainlink/v2/core/internal/cltest/heavyweight"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/evmtest"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
"github.com/smartcontractkit/chainlink/v2/core/services/job"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
"github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
- evmrelay "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
"github.com/smartcontractkit/chainlink/v2/core/store/models"
"github.com/smartcontractkit/chainlink/v2/core/utils"
)
@@ -35,11 +33,11 @@ type ormconfig struct {
func (ormconfig) JobPipelineMaxSuccessfulRuns() uint64 { return 123456 }
-func setupORM(t *testing.T, name string) (db *sqlx.DB, orm pipeline.ORM) {
+func setupORM(t *testing.T, heavy bool) (db *sqlx.DB, orm pipeline.ORM) {
t.Helper()
- if name != "" {
- _, db = heavyweight.FullTestDBV2(t, name, nil)
+ if heavy {
+ _, db = heavyweight.FullTestDBV2(t, nil)
} else {
db = pgtest.NewSqlxDB(t)
}
@@ -49,12 +47,12 @@ func setupORM(t *testing.T, name string) (db *sqlx.DB, orm pipeline.ORM) {
return
}
-func setupHeavyORM(t *testing.T, name string) (db *sqlx.DB, orm pipeline.ORM) {
- return setupORM(t, name)
+func setupHeavyORM(t *testing.T) (db *sqlx.DB, orm pipeline.ORM) {
+ return setupORM(t, true)
}
func setupLiteORM(t *testing.T) (db *sqlx.DB, orm pipeline.ORM) {
- return setupORM(t, "")
+ return setupORM(t, false)
}
func Test_PipelineORM_CreateSpec(t *testing.T) {
@@ -466,7 +464,7 @@ func Test_PipelineORM_DeleteRun(t *testing.T) {
}
func Test_PipelineORM_DeleteRunsOlderThan(t *testing.T) {
- _, orm := setupHeavyORM(t, "pipeline_runs_reaper")
+ _, orm := setupHeavyORM(t)
var runsIds []int64
@@ -522,9 +520,7 @@ func Test_GetUnfinishedRuns_Keepers(t *testing.T) {
porm := pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns())
bridgeORM := bridges.NewORM(db, lggr, config.Database())
- relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- jorm := job.NewORM(db, legacyChains, porm, bridgeORM, keyStore, lggr, config.Database())
+ jorm := job.NewORM(db, porm, bridgeORM, keyStore, lggr, config.Database())
defer func() { assert.NoError(t, jorm.Close()) }()
timestamp := time.Now()
@@ -624,9 +620,7 @@ func Test_GetUnfinishedRuns_DirectRequest(t *testing.T) {
porm := pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns())
bridgeORM := bridges.NewORM(db, lggr, config.Database())
- relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
- legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- jorm := job.NewORM(db, legacyChains, porm, bridgeORM, keyStore, lggr, config.Database())
+ jorm := job.NewORM(db, porm, bridgeORM, keyStore, lggr, config.Database())
defer func() { assert.NoError(t, jorm.Close()) }()
timestamp := time.Now()
diff --git a/core/services/pipeline/runner.go b/core/services/pipeline/runner.go
index 3dbe94747e7..d33913b4753 100644
--- a/core/services/pipeline/runner.go
+++ b/core/services/pipeline/runner.go
@@ -609,7 +609,7 @@ func (r *runner) ResumeRun(taskID uuid.UUID, value interface{}, err error) error
Error: err,
})
if err != nil {
- return err
+ return fmt.Errorf("failed to update task run result: %w", err)
}
// TODO: Should probably replace this with a listener to update events
diff --git a/core/services/pipeline/runner_test.go b/core/services/pipeline/runner_test.go
index 3abcdbe0abe..695590e7bd0 100644
--- a/core/services/pipeline/runner_test.go
+++ b/core/services/pipeline/runner_test.go
@@ -37,7 +37,7 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/pipeline/mocks"
"github.com/smartcontractkit/chainlink/v2/core/utils"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
)
func newRunner(t testing.TB, db *sqlx.DB, bridgeORM bridges.ORM, cfg chainlink.GeneralConfig) (pipeline.Runner, *mocks.ORM) {
diff --git a/core/services/pipeline/task.eth_tx.go b/core/services/pipeline/task.eth_tx.go
index 57f1c0a7ed8..384c86446e7 100644
--- a/core/services/pipeline/task.eth_tx.go
+++ b/core/services/pipeline/task.eth_tx.go
@@ -155,6 +155,7 @@ func (t *ETHTxTask) Run(ctx context.Context, lggr logger.Logger, vars Vars, inpu
ForwarderAddress: forwarderAddress,
Strategy: strategy,
Checker: transmitChecker,
+ SignalCallback: true,
}
if minOutgoingConfirmations > 0 {
diff --git a/core/services/pipeline/task.eth_tx_test.go b/core/services/pipeline/task.eth_tx_test.go
index e5f50bc29e5..a0ff54d4448 100644
--- a/core/services/pipeline/task.eth_tx_test.go
+++ b/core/services/pipeline/task.eth_tx_test.go
@@ -95,6 +95,7 @@ func TestETHTxTask(t *testing.T) {
CheckerType: txmgr.TransmitCheckerTypeVRFV2,
VRFCoordinatorAddress: &addr,
},
+ SignalCallback: true,
}).Return(txmgr.Tx{}, nil)
},
nil, nil, "", pipeline.RunInfo{},
@@ -138,6 +139,7 @@ func TestETHTxTask(t *testing.T) {
FeeLimit: gasLimit,
Meta: txMeta,
Strategy: txmgrcommon.NewSendEveryStrategy(),
+ SignalCallback: true,
}).Return(txmgr.Tx{}, nil)
},
nil, nil, "", pipeline.RunInfo{},
@@ -215,6 +217,7 @@ func TestETHTxTask(t *testing.T) {
FeeLimit: gasLimit,
Meta: txMeta,
Strategy: txmgrcommon.NewSendEveryStrategy(),
+ SignalCallback: true,
}).Return(txmgr.Tx{}, nil)
},
nil, nil, "", pipeline.RunInfo{},
@@ -260,6 +263,7 @@ func TestETHTxTask(t *testing.T) {
FeeLimit: gasLimit,
Meta: txMeta,
Strategy: txmgrcommon.NewSendEveryStrategy(),
+ SignalCallback: true,
}).Return(txmgr.Tx{}, nil)
},
nil, nil, "", pipeline.RunInfo{},
@@ -290,6 +294,7 @@ func TestETHTxTask(t *testing.T) {
FeeLimit: gasLimit,
Meta: txMeta,
Strategy: txmgrcommon.NewSendEveryStrategy(),
+ SignalCallback: true,
}).Return(txmgr.Tx{}, nil)
},
nil, nil, "", pipeline.RunInfo{},
@@ -324,6 +329,7 @@ func TestETHTxTask(t *testing.T) {
FeeLimit: drJobTypeGasLimit,
Meta: txMeta,
Strategy: txmgrcommon.NewSendEveryStrategy(),
+ SignalCallback: true,
}).Return(txmgr.Tx{}, nil)
},
nil, nil, "", pipeline.RunInfo{},
@@ -358,6 +364,7 @@ func TestETHTxTask(t *testing.T) {
FeeLimit: specGasLimit,
Meta: txMeta,
Strategy: txmgrcommon.NewSendEveryStrategy(),
+ SignalCallback: true,
}).Return(txmgr.Tx{}, nil)
},
nil, nil, "", pipeline.RunInfo{},
@@ -423,6 +430,7 @@ func TestETHTxTask(t *testing.T) {
FeeLimit: gasLimit,
Meta: txMeta,
Strategy: txmgrcommon.NewSendEveryStrategy(),
+ SignalCallback: true,
}).Return(txmgr.Tx{}, errors.New("uh oh"))
},
nil, pipeline.ErrTaskRunFailed, "while creating transaction", pipeline.RunInfo{IsRetryable: true},
diff --git a/core/services/pipeline/test_helpers_test.go b/core/services/pipeline/test_helpers_test.go
index 8353f5fb5b4..3b72a1625be 100644
--- a/core/services/pipeline/test_helpers_test.go
+++ b/core/services/pipeline/test_helpers_test.go
@@ -16,7 +16,7 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
"github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
)
func fakeExternalAdapter(t *testing.T, expectedRequest, response interface{}) http.Handler {
diff --git a/core/services/relay/evm/evm.go b/core/services/relay/evm/evm.go
index 3f45b41f46e..aa1d1d774bd 100644
--- a/core/services/relay/evm/evm.go
+++ b/core/services/relay/evm/evm.go
@@ -10,6 +10,7 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
+ "github.com/jmoiron/sqlx"
pkgerrors "github.com/pkg/errors"
"go.uber.org/multierr"
@@ -18,7 +19,6 @@ import (
"github.com/smartcontractkit/libocr/offchainreporting2/reportingplugin/median/evmreportcodec"
"github.com/smartcontractkit/libocr/offchainreporting2plus/chains/evmutil"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- "github.com/smartcontractkit/sqlx"
"github.com/smartcontractkit/chainlink-relay/pkg/services"
relaytypes "github.com/smartcontractkit/chainlink-relay/pkg/types"
@@ -189,7 +189,8 @@ func (r *Relayer) NewMercuryProvider(rargs relaytypes.RelayArgs, pargs relaytype
}
transmitter := mercury.NewTransmitter(lggr, cw.ContractConfigTracker(), client, privKey.PublicKey, rargs.JobID, *relayConfig.FeedID, r.db, r.pgCfg, transmitterCodec)
- return NewMercuryProvider(cw, transmitter, reportCodecV1, reportCodecV2, reportCodecV3, lggr), nil
+ chainReader := NewChainReader(r.chain.HeadTracker())
+ return NewMercuryProvider(cw, transmitter, reportCodecV1, reportCodecV2, reportCodecV3, chainReader, lggr), nil
}
func (r *Relayer) NewFunctionsProvider(rargs relaytypes.RelayArgs, pargs relaytypes.PluginArgs) (relaytypes.FunctionsProvider, error) {
diff --git a/core/services/relay/evm/evm_test.go b/core/services/relay/evm/evm_test.go
index 4e9c44a7b93..8f49128ff2d 100644
--- a/core/services/relay/evm/evm_test.go
+++ b/core/services/relay/evm/evm_test.go
@@ -5,7 +5,7 @@ import (
"github.com/stretchr/testify/assert"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
diff --git a/core/services/relay/evm/functions/logpoller_wrapper.go b/core/services/relay/evm/functions/logpoller_wrapper.go
index d355bd6569b..6193f4ba862 100644
--- a/core/services/relay/evm/functions/logpoller_wrapper.go
+++ b/core/services/relay/evm/functions/logpoller_wrapper.go
@@ -24,21 +24,39 @@ import (
type logPollerWrapper struct {
services.StateMachine
- routerContract *functions_router.FunctionsRouter
- pluginConfig config.PluginConfig
- client client.Client
- logPoller logpoller.LogPoller
- subscribers map[string]evmRelayTypes.RouteUpdateSubscriber
- activeCoordinator common.Address
- proposedCoordinator common.Address
- blockOffset int64
- nextBlock int64
- mu sync.Mutex
- closeWait sync.WaitGroup
- stopCh utils.StopChan
- lggr logger.Logger
+ routerContract *functions_router.FunctionsRouter
+ pluginConfig config.PluginConfig
+ client client.Client
+ logPoller logpoller.LogPoller
+ subscribers map[string]evmRelayTypes.RouteUpdateSubscriber
+ activeCoordinator common.Address
+ proposedCoordinator common.Address
+ requestBlockOffset int64
+ responseBlockOffset int64
+ pastBlocksToPoll int64
+ logPollerCacheDurationSec int64
+ detectedRequests detectedEvents
+ detectedResponses detectedEvents
+ mu sync.Mutex
+ closeWait sync.WaitGroup
+ stopCh utils.StopChan
+ lggr logger.Logger
}
+type detectedEvent struct {
+ requestId [32]byte
+ timeDetected time.Time
+}
+
+type detectedEvents struct {
+ isPreviouslyDetected map[[32]byte]struct{}
+ detectedEventsOrdered []detectedEvent
+}
+
+const logPollerCacheDurationSecDefault = 300
+const pastBlocksToPollDefault = 50
+const maxLogsToProcess = 1000
+
var _ evmRelayTypes.LogPollerWrapper = &logPollerWrapper{}
func NewLogPollerWrapper(routerContractAddress common.Address, pluginConfig config.PluginConfig, client client.Client, logPoller logpoller.LogPoller, lggr logger.Logger) (evmRelayTypes.LogPollerWrapper, error) {
@@ -48,18 +66,48 @@ func NewLogPollerWrapper(routerContractAddress common.Address, pluginConfig conf
}
blockOffset := int64(pluginConfig.MinIncomingConfirmations) - 1
if blockOffset < 0 {
+ lggr.Warnw("invalid minIncomingConfirmations, using 1 instead", "minIncomingConfirmations", pluginConfig.MinIncomingConfirmations)
blockOffset = 0
}
+ requestBlockOffset := int64(pluginConfig.MinRequestConfirmations) - 1
+ if requestBlockOffset < 0 {
+ lggr.Warnw("invalid minRequestConfirmations, using minIncomingConfirmations instead", "minRequestConfirmations", pluginConfig.MinRequestConfirmations)
+ requestBlockOffset = blockOffset
+ }
+ responseBlockOffset := int64(pluginConfig.MinResponseConfirmations) - 1
+ if responseBlockOffset < 0 {
+ lggr.Warnw("invalid minResponseConfirmations, using minIncomingConfirmations instead", "minResponseConfirmations", pluginConfig.MinResponseConfirmations)
+ responseBlockOffset = blockOffset
+ }
+ logPollerCacheDurationSec := int64(pluginConfig.LogPollerCacheDurationSec)
+ if logPollerCacheDurationSec <= 0 {
+ lggr.Warnw("invalid logPollerCacheDuration, using 300 instead", "logPollerCacheDurationSec", logPollerCacheDurationSec)
+ logPollerCacheDurationSec = logPollerCacheDurationSecDefault
+ }
+ pastBlocksToPoll := int64(pluginConfig.PastBlocksToPoll)
+ if pastBlocksToPoll <= 0 {
+ lggr.Warnw("invalid pastBlocksToPoll, using 50 instead", "pastBlocksToPoll", pastBlocksToPoll)
+ pastBlocksToPoll = pastBlocksToPollDefault
+ }
+ if blockOffset >= pastBlocksToPoll || requestBlockOffset >= pastBlocksToPoll || responseBlockOffset >= pastBlocksToPoll {
+ lggr.Errorw("invalid config: number of required confirmation blocks >= pastBlocksToPoll", "pastBlocksToPoll", pastBlocksToPoll, "minIncomingConfirmations", pluginConfig.MinIncomingConfirmations, "minRequestConfirmations", pluginConfig.MinRequestConfirmations, "minResponseConfirmations", pluginConfig.MinResponseConfirmations)
+ return nil, errors.Errorf("invalid config: number of required confirmation blocks >= pastBlocksToPoll")
+ }
return &logPollerWrapper{
- routerContract: routerContract,
- pluginConfig: pluginConfig,
- blockOffset: blockOffset,
- logPoller: logPoller,
- client: client,
- subscribers: make(map[string]evmRelayTypes.RouteUpdateSubscriber),
- stopCh: make(utils.StopChan),
- lggr: lggr,
+ routerContract: routerContract,
+ pluginConfig: pluginConfig,
+ requestBlockOffset: requestBlockOffset,
+ responseBlockOffset: responseBlockOffset,
+ pastBlocksToPoll: pastBlocksToPoll,
+ logPollerCacheDurationSec: logPollerCacheDurationSec,
+ detectedRequests: detectedEvents{isPreviouslyDetected: make(map[[32]byte]struct{})},
+ detectedResponses: detectedEvents{isPreviouslyDetected: make(map[[32]byte]struct{})},
+ logPoller: logPoller,
+ client: client,
+ subscribers: make(map[string]evmRelayTypes.RouteUpdateSubscriber),
+ stopCh: make(utils.StopChan),
+ lggr: lggr,
}, nil
}
@@ -68,20 +116,11 @@ func (l *logPollerWrapper) Start(context.Context) error {
l.lggr.Infow("starting LogPollerWrapper", "routerContract", l.routerContract.Address().Hex(), "contractVersion", l.pluginConfig.ContractVersion)
l.mu.Lock()
defer l.mu.Unlock()
- if l.pluginConfig.ContractVersion == 0 {
- l.activeCoordinator = l.routerContract.Address()
- l.proposedCoordinator = l.routerContract.Address()
- } else if l.pluginConfig.ContractVersion == 1 {
- nextBlock, err := l.logPoller.LatestBlock()
- if err != nil {
- l.lggr.Errorw("LogPollerWrapper: LatestBlock() failed, starting from 0", "error", err)
- } else {
- l.lggr.Debugw("LogPollerWrapper: LatestBlock() got starting block", "block", nextBlock)
- l.nextBlock = nextBlock.BlockNumber - l.blockOffset
- }
- l.closeWait.Add(1)
- go l.checkForRouteUpdates()
+ if l.pluginConfig.ContractVersion != 1 {
+ return errors.New("only contract version 1 is supported")
}
+ l.closeWait.Add(1)
+ go l.checkForRouteUpdates()
return nil
})
}
@@ -117,16 +156,15 @@ func (l *logPollerWrapper) LatestEvents() ([]evmRelayTypes.OracleRequest, []evmR
if l.proposedCoordinator != (common.Address{}) && l.activeCoordinator != l.proposedCoordinator {
coordinators = append(coordinators, l.proposedCoordinator)
}
- nextBlock := l.nextBlock
latest, err := l.logPoller.LatestBlock()
if err != nil {
l.mu.Unlock()
return nil, nil, err
}
- latestBlockNumber := latest.BlockNumber
- latestBlockNumber -= l.blockOffset
- if latestBlockNumber >= nextBlock {
- l.nextBlock = latestBlockNumber + 1
+ latestBlockNum := latest.BlockNumber
+ startBlockNum := latestBlockNum - l.pastBlocksToPoll
+ if startBlockNum < 0 {
+ startBlockNum = 0
}
l.mu.Unlock()
@@ -137,22 +175,24 @@ func (l *logPollerWrapper) LatestEvents() ([]evmRelayTypes.OracleRequest, []evmR
l.lggr.Debug("LatestEvents: no non-zero coordinators to check")
return resultsReq, resultsResp, errors.New("no non-zero coordinators to check")
}
- if latestBlockNumber < nextBlock {
- l.lggr.Debugw("LatestEvents: no new blocks to check", "latest", latest, "nextBlock", nextBlock)
- return resultsReq, resultsResp, nil
- }
for _, coordinator := range coordinators {
- requestLogs, err := l.logPoller.Logs(nextBlock, latestBlockNumber, functions_coordinator.FunctionsCoordinatorOracleRequest{}.Topic(), coordinator)
+ requestEndBlock := latestBlockNum - l.requestBlockOffset
+ requestLogs, err := l.logPoller.Logs(startBlockNum, requestEndBlock, functions_coordinator.FunctionsCoordinatorOracleRequest{}.Topic(), coordinator)
if err != nil {
- l.lggr.Errorw("LatestEvents: fetching request logs from LogPoller failed", "latest", latest, "nextBlock", nextBlock)
+ l.lggr.Errorw("LatestEvents: fetching request logs from LogPoller failed", "startBlock", startBlockNum, "endBlock", requestEndBlock)
return nil, nil, err
}
- responseLogs, err := l.logPoller.Logs(nextBlock, latestBlockNumber, functions_coordinator.FunctionsCoordinatorOracleResponse{}.Topic(), coordinator)
+ l.lggr.Debugw("LatestEvents: fetched request logs", "nRequestLogs", len(requestLogs), "latestBlock", latest, "startBlock", startBlockNum, "endBlock", requestEndBlock)
+ requestLogs = l.filterPreviouslyDetectedEvents(requestLogs, &l.detectedRequests, "requests")
+ responseEndBlock := latestBlockNum - l.responseBlockOffset
+ responseLogs, err := l.logPoller.Logs(startBlockNum, responseEndBlock, functions_coordinator.FunctionsCoordinatorOracleResponse{}.Topic(), coordinator)
if err != nil {
- l.lggr.Errorw("LatestEvents: fetching response logs from LogPoller failed", "latest", latest, "nextBlock", nextBlock)
+ l.lggr.Errorw("LatestEvents: fetching response logs from LogPoller failed", "startBlock", startBlockNum, "endBlock", responseEndBlock)
return nil, nil, err
}
+ l.lggr.Debugw("LatestEvents: fetched response logs", "nResponseLogs", len(responseLogs), "latestBlock", latest, "startBlock", startBlockNum, "endBlock", responseEndBlock)
+ responseLogs = l.filterPreviouslyDetectedEvents(responseLogs, &l.detectedResponses, "responses")
parsingContract, err := functions_coordinator.NewFunctionsCoordinator(coordinator, l.client)
if err != nil {
@@ -165,7 +205,7 @@ func (l *logPollerWrapper) LatestEvents() ([]evmRelayTypes.OracleRequest, []evmR
gethLog := log.ToGethLog()
oracleRequest, err := parsingContract.ParseOracleRequest(gethLog)
if err != nil {
- l.lggr.Errorw("LatestEvents: failed to parse a request log, skipping")
+ l.lggr.Errorw("LatestEvents: failed to parse a request log, skipping", "err", err)
continue
}
@@ -241,10 +281,46 @@ func (l *logPollerWrapper) LatestEvents() ([]evmRelayTypes.OracleRequest, []evmR
}
}
- l.lggr.Debugw("LatestEvents: done", "nRequestLogs", len(resultsReq), "nResponseLogs", len(resultsResp), "nextBlock", nextBlock, "latest", latest)
+ l.lggr.Debugw("LatestEvents: done", "nRequestLogs", len(resultsReq), "nResponseLogs", len(resultsResp), "startBlock", startBlockNum, "endBlock", latestBlockNum)
return resultsReq, resultsResp, nil
}
+func (l *logPollerWrapper) filterPreviouslyDetectedEvents(logs []logpoller.Log, detectedEvents *detectedEvents, filterType string) []logpoller.Log {
+ if len(logs) > maxLogsToProcess {
+ l.lggr.Errorw("filterPreviouslyDetectedEvents: too many logs to process, only processing latest maxLogsToProcess logs", "filterType", filterType, "nLogs", len(logs), "maxLogsToProcess", maxLogsToProcess)
+ logs = logs[len(logs)-maxLogsToProcess:]
+ }
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ filteredLogs := []logpoller.Log{}
+ for _, log := range logs {
+ var requestId [32]byte
+ if len(log.Topics) < 2 || len(log.Topics[1]) != 32 {
+ l.lggr.Errorw("filterPreviouslyDetectedEvents: invalid log, skipping", "filterType", filterType, "log", log)
+ continue
+ }
+ copy(requestId[:], log.Topics[1]) // requestId is the second topic (1st topic is the event signature)
+ if _, ok := detectedEvents.isPreviouslyDetected[requestId]; !ok {
+ filteredLogs = append(filteredLogs, log)
+ detectedEvents.isPreviouslyDetected[requestId] = struct{}{}
+ detectedEvents.detectedEventsOrdered = append(detectedEvents.detectedEventsOrdered, detectedEvent{requestId: requestId, timeDetected: time.Now()})
+ }
+ }
+ expiredRequests := 0
+ for _, detectedEvent := range detectedEvents.detectedEventsOrdered {
+ expirationTime := time.Now().Add(-time.Second * time.Duration(l.logPollerCacheDurationSec))
+ if detectedEvent.timeDetected.Before(expirationTime) {
+ delete(detectedEvents.isPreviouslyDetected, detectedEvent.requestId)
+ expiredRequests++
+ } else {
+ break
+ }
+ }
+ detectedEvents.detectedEventsOrdered = detectedEvents.detectedEventsOrdered[expiredRequests:]
+ l.lggr.Debugw("filterPreviouslyDetectedEvents: done", "filterType", filterType, "nLogs", len(logs), "nFilteredLogs", len(filteredLogs), "nExpiredRequests", expiredRequests, "previouslyDetectedCacheSize", len(detectedEvents.detectedEventsOrdered))
+ return filteredLogs
+}
+
// "internal" method called only by EVM relayer components
func (l *logPollerWrapper) SubscribeToUpdates(subscriberName string, subscriber evmRelayTypes.RouteUpdateSubscriber) {
if l.pluginConfig.ContractVersion == 0 {
diff --git a/core/services/relay/evm/functions/logpoller_wrapper_test.go b/core/services/relay/evm/functions/logpoller_wrapper_test.go
index c91c3c49aad..2108e822d5e 100644
--- a/core/services/relay/evm/functions/logpoller_wrapper_test.go
+++ b/core/services/relay/evm/functions/logpoller_wrapper_test.go
@@ -1,22 +1,24 @@
-package functions_test
+package functions
import (
+ "crypto/rand"
"encoding/hex"
"sync"
"testing"
+ "time"
"github.com/ethereum/go-ethereum/common"
- gethcommon "github.com/ethereum/go-ethereum/common"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
evmclimocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
lpmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/functions/generated/functions_coordinator"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/functions/config"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/functions"
"github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types"
)
@@ -57,17 +59,34 @@ func setUp(t *testing.T, updateFrequencySec uint32) (*lpmocks.LogPoller, types.L
ContractUpdateCheckFrequencySec: updateFrequencySec,
ContractVersion: 1,
}
- lpWrapper, err := functions.NewLogPollerWrapper(gethcommon.Address{}, config, client, lp, lggr)
+ lpWrapper, err := NewLogPollerWrapper(common.Address{}, config, client, lp, lggr)
require.NoError(t, err)
- lp.On("LatestBlock").Return(logpoller.LogPollerBlock{BlockNumber: int64(100)}, nil)
-
return lp, lpWrapper, client
}
+func getMockedRequestLog(t *testing.T) logpoller.Log {
+ // NOTE: Change this to be a more readable log generation
+ data, err := hex.DecodeString("000000000000000000000000c113ba31b0080f940ca5812bbccc1e038ea9efb40000000000000000000000000000000000000000000000000000000000000001000000000000000000000000c113ba31b0080f940ca5812bbccc1e038ea9efb4000000000000000000000000000000000000000000000000000000000000024000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001117082cd81744eb9504dc37f53a86db7e3fb24929b8e7507b097d501ab5b315fb20e0000000000000000000000001b4f2b0e6363097f413c249910d5bc632993ed08000000000000000000000000000000000000000000000000015bcf880382c000000000000000000000000000665785a800593e8fa915208c1ce62f6e57fd75ba0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000001117000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004f588000000000000000000000000000000000000000000000000000000000000c350000000000000000000000000000000000000000000000000000000000000021c00000000000000000000000000000000000000000000000000000000000008866c636f64654c6f636174696f6ec258200000000000000000000000000000000000000000000000000000000000000000686c616e6775616765c25820000000000000000000000000000000000000000000000000000000000000000066736f757263657907d06338363666656432383265333131376364663038366339343966626131336438346663313761316563353539346563616430343531336466323261376235383563333637636331323262363731383063343837373034356162353830333734633530663138623465643861323461313234373835323637316230306330356632373731636630363636323336633332363939393231393638663238333464386264626162663066616431653132376138376432373639363238316439656565393261346462633163373561373161366563336131353564386332306166616430646234323833626134333537363037346530356334336335616530616564666433323238383465366132313864663234303
23630316436356437316131303061633065376563643037663565646364633535643562373932646130626632353665623038363139336463376431333965613764373965653531653831356465333834386565643363366330353837393265366461333434363738626436373239346636643639656564356132663836323835343965616530323235323835346232666361333635646265623032383433386537326234383465383864316136646563373933633739656265353834666465363465663831383363313365386231623735663037636532303963393138633532643637613735343862653236366433663964316439656132613162303166633838376231316162383739663164333861373833303563373031316533643938346130393863663634383931316536653065383038396365306130363230393136663134323935343036336630376239343931326435666331393366303138633764616135363136323562313966376463323036663930353365623234643036323234616164326338623430646162663631656166666635326234653831373239353837333830313561643730663739316663643864333739343035353737393563383937363164636665333639373938373437353439633234643530646464303563623337613465613863353162306530313032363738643433653766306563353039653434633564343764353335626261363831303936383264643864653439326532363633646336653133653532383539663664336565306533633430336236366362653338643236366137356163373639363863613465653331396166363965373431333137393162653630376537353832373430366164653038306335623239653665343262386563386137373761663865383166336234616337626263666531643066616633393338613664353061316561633835643933643234343066313863333037356237306433626134663930323836396439383937663266636562626262366263646439333436633336633663643838626434336265306562333134323562343665613765386338336638386230363933343836383666366134313839623535666132666431396634326264333730313634616339356530303635656461663130373761633131366632393930303833616631333839636661666336613433323439376531363437393762633738616633366335613435366136646661326636626430626639326136613930366130653930313130626266323265613066333163663364353132663466303331653236343330633831663935656431323362323938356266623830623161396432646337306232356264613961386261303
839323833666166663634383661316231646235613938353564346237363966623835663531353063393935306462303964373536326537353133633234653531636163366634366634633231636234373561613937363166666466626434656138613531626465613432383037313466363538393630656336643139656539373237626339316635313665346466306665346264613762623035343161393462326334396636323938616132396337656130646662653635346632306437663164323239633066303262356535326137363031376237306439383232643533383166623966613166393361353861376338383632326631326462643363623937323363626132313639633337643538303939336333663666393065323039336331336130363132323334303064393731363031656262313631343332613966666333373033396562663537326364326566666635636562323539346236346462336261616431633734663532653938343938353964383363313238353465376263393764363432363464653931343735386333386438383739343132333937653263643534653431366234373962363331623830626633306266653062366239353564393066356362303435346361373531303963393938366330636536316165356566376534653433353036313432633633646235363862383634353139623463306636366137633161376661336538666431323231376666336665383164663830643138386232646334343833356132663332323733666133353139633531343764643233353763326161346336326461386238353232306535386130333565373662633133316634623734376632663731643263663933376431303832356138316533623963323136663962316134646431663239383463656635656363656265353530363662363061373263363063323864303336653766386635323131343735386638326366323330646636363930636364617267739f64617267316461726732ff6f736563726574734c6f636174696f6ec2582000000000000000000000000000000000000000000000000000000000000000016773656372657473430102030000000000000000000000000000000000000000000000000000")
+ require.NoError(t, err)
+ topic0, err := hex.DecodeString("bf50768ccf13bd0110ca6d53a9c4f1f3271abdd4c24a56878863ed25b20598ff")
+ require.NoError(t, err)
+ // Create a random requestID
+ topic1 := make([]byte, 32)
+ _, err = rand.Read(topic1)
+ require.NoError(t, err)
+ topic2, err := hex.DecodeString("000000000000000000000000665785a800593e8fa915208c1ce62f6e57fd75ba")
+ require.NoError(t, err)
+ return logpoller.Log{
+ Topics: [][]byte{topic0, topic1, topic2},
+ Data: data,
+ }
+}
+
func TestLogPollerWrapper_SingleSubscriberEmptyEvents(t *testing.T) {
t.Parallel()
lp, lpWrapper, client := setUp(t, 100_000) // check only once
+ lp.On("LatestBlock").Return(logpoller.LogPollerBlock{BlockNumber: int64(100)}, nil)
lp.On("Logs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]logpoller.Log{}, nil)
client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(addr(t, "01"), nil)
@@ -87,7 +106,8 @@ func TestLogPollerWrapper_SingleSubscriberEmptyEvents(t *testing.T) {
func TestLogPollerWrapper_ErrorOnZeroAddresses(t *testing.T) {
t.Parallel()
- _, lpWrapper, client := setUp(t, 100_000) // check only once
+ lp, lpWrapper, client := setUp(t, 100_000) // check only once
+ lp.On("LatestBlock").Return(logpoller.LogPollerBlock{BlockNumber: int64(100)}, nil)
client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(addr(t, "00"), nil)
@@ -96,3 +116,96 @@ func TestLogPollerWrapper_ErrorOnZeroAddresses(t *testing.T) {
require.Error(t, err)
lpWrapper.Close()
}
+
+func TestLogPollerWrapper_LatestEvents_ReorgHandling(t *testing.T) {
+ t.Parallel()
+ lp, lpWrapper, client := setUp(t, 100_000)
+ lp.On("LatestBlock").Return(logpoller.LogPollerBlock{BlockNumber: int64(100)}, nil)
+ client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(addr(t, "01"), nil)
+ lp.On("RegisterFilter", mock.Anything).Return(nil)
+ subscriber := newSubscriber(1)
+ lpWrapper.SubscribeToUpdates("mock_subscriber", subscriber)
+ mockedLog := getMockedRequestLog(t)
+ // All logPoller queries for responses return none
+ lp.On("Logs", mock.Anything, mock.Anything, functions_coordinator.FunctionsCoordinatorOracleResponse{}.Topic(), mock.Anything).Return([]logpoller.Log{}, nil)
+ // On the first logPoller query for requests, the request log appears
+ lp.On("Logs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]logpoller.Log{mockedLog}, nil).Once()
+ // On the 2nd query, the request log disappears
+ lp.On("Logs", mock.Anything, mock.Anything, functions_coordinator.FunctionsCoordinatorOracleRequest{}.Topic(), mock.Anything).Return([]logpoller.Log{}, nil).Once()
+ // On the 3rd query, the original request log appears again
+ lp.On("Logs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]logpoller.Log{mockedLog}, nil).Once()
+
+ require.NoError(t, lpWrapper.Start(testutils.Context(t)))
+ subscriber.updates.Wait()
+
+ oracleRequests, _, err := lpWrapper.LatestEvents()
+ require.NoError(t, err)
+ assert.Equal(t, 1, len(oracleRequests))
+ oracleRequests, _, err = lpWrapper.LatestEvents()
+ require.NoError(t, err)
+ assert.Equal(t, 0, len(oracleRequests))
+ require.NoError(t, err)
+ oracleRequests, _, err = lpWrapper.LatestEvents()
+ require.NoError(t, err)
+ assert.Equal(t, 0, len(oracleRequests))
+}
+
+func TestLogPollerWrapper_FilterPreviouslyDetectedEvents_TruncatesLogs(t *testing.T) {
+ t.Parallel()
+ _, lpWrapper, _ := setUp(t, 100_000)
+
+ inputLogs := make([]logpoller.Log, maxLogsToProcess+100)
+ for i := 0; i < 1100; i++ {
+ inputLogs[i] = getMockedRequestLog(t)
+ }
+
+ functionsLpWrapper := lpWrapper.(*logPollerWrapper)
+ mockedDetectedEvents := detectedEvents{isPreviouslyDetected: make(map[[32]byte]struct{})}
+ outputLogs := functionsLpWrapper.filterPreviouslyDetectedEvents(inputLogs, &mockedDetectedEvents, "request")
+
+ assert.Equal(t, maxLogsToProcess, len(outputLogs))
+ assert.Equal(t, 1000, len(mockedDetectedEvents.detectedEventsOrdered))
+ assert.Equal(t, 1000, len(mockedDetectedEvents.isPreviouslyDetected))
+}
+
+func TestLogPollerWrapper_FilterPreviouslyDetectedEvents_SkipsInvalidLog(t *testing.T) {
+ t.Parallel()
+ _, lpWrapper, _ := setUp(t, 100_000)
+ inputLogs := []logpoller.Log{getMockedRequestLog(t)}
+ inputLogs[0].Topics = [][]byte{[]byte("invalid topic")}
+ mockedDetectedEvents := detectedEvents{isPreviouslyDetected: make(map[[32]byte]struct{})}
+
+ functionsLpWrapper := lpWrapper.(*logPollerWrapper)
+ outputLogs := functionsLpWrapper.filterPreviouslyDetectedEvents(inputLogs, &mockedDetectedEvents, "request")
+
+ assert.Equal(t, 0, len(outputLogs))
+ assert.Equal(t, 0, len(mockedDetectedEvents.detectedEventsOrdered))
+ assert.Equal(t, 0, len(mockedDetectedEvents.isPreviouslyDetected))
+}
+
+func TestLogPollerWrapper_FilterPreviouslyDetectedEvents_FiltersPreviouslyDetectedEvent(t *testing.T) {
+ t.Parallel()
+ _, lpWrapper, _ := setUp(t, 100_000)
+ mockedRequestLog := getMockedRequestLog(t)
+ inputLogs := []logpoller.Log{mockedRequestLog}
+ var mockedRequestId [32]byte
+ copy(mockedRequestId[:], mockedRequestLog.Topics[1])
+
+ mockedDetectedEvents := detectedEvents{
+ isPreviouslyDetected: make(map[[32]byte]struct{}),
+ detectedEventsOrdered: make([]detectedEvent, 1),
+ }
+ mockedDetectedEvents.isPreviouslyDetected[mockedRequestId] = struct{}{}
+ mockedDetectedEvents.detectedEventsOrdered[0] = detectedEvent{
+ requestId: mockedRequestId,
+ timeDetected: time.Now().Add(-time.Second * time.Duration(logPollerCacheDurationSecDefault+1)),
+ }
+
+ functionsLpWrapper := lpWrapper.(*logPollerWrapper)
+ outputLogs := functionsLpWrapper.filterPreviouslyDetectedEvents(inputLogs, &mockedDetectedEvents, "request")
+
+ assert.Equal(t, 0, len(outputLogs))
+ // Ensure that expired events are removed from the cache
+ assert.Equal(t, 0, len(mockedDetectedEvents.detectedEventsOrdered))
+ assert.Equal(t, 0, len(mockedDetectedEvents.isPreviouslyDetected))
+}
diff --git a/core/services/relay/evm/median.go b/core/services/relay/evm/median.go
index b7d751e01e1..5184326cf25 100644
--- a/core/services/relay/evm/median.go
+++ b/core/services/relay/evm/median.go
@@ -7,12 +7,12 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
+ "github.com/jmoiron/sqlx"
"github.com/pkg/errors"
"github.com/smartcontractkit/libocr/gethwrappers2/ocr2aggregator"
"github.com/smartcontractkit/libocr/offchainreporting2/reportingplugin/median"
"github.com/smartcontractkit/libocr/offchainreporting2plus/types"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- "github.com/smartcontractkit/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm"
offchain_aggregator_wrapper "github.com/smartcontractkit/chainlink/v2/core/internal/gethwrappers2/generated/offchainaggregator"
diff --git a/core/services/relay/evm/mercury/config_poller_test.go b/core/services/relay/evm/mercury/config_poller_test.go
index 6a692b0eacd..1b3ba72128d 100644
--- a/core/services/relay/evm/mercury/config_poller_test.go
+++ b/core/services/relay/evm/mercury/config_poller_test.go
@@ -115,6 +115,7 @@ func TestMercuryConfigPoller(t *testing.T) {
}
func TestNotify(t *testing.T) {
+ testutils.SkipFlakey(t, "https://smartcontract-it.atlassian.net/browse/BCF-2746")
feedIDStr := "8257737fdf4f79639585fd0ed01bea93c248a9ad940e98dd27f41c9b6230fed1"
feedIDBytes, err := hexutil.Decode("0x" + feedIDStr)
require.NoError(t, err)
diff --git a/core/services/relay/evm/mercury/mocks/chain_head_tracker.go b/core/services/relay/evm/mercury/mocks/chain_head_tracker.go
deleted file mode 100644
index 1a5a7e47c5b..00000000000
--- a/core/services/relay/evm/mercury/mocks/chain_head_tracker.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Code generated by mockery v2.35.4. DO NOT EDIT.
-
-package mocks
-
-import (
- common "github.com/ethereum/go-ethereum/common"
- client "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
-
- commontypes "github.com/smartcontractkit/chainlink/v2/common/types"
-
- evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
-
- mock "github.com/stretchr/testify/mock"
-)
-
-// ChainHeadTracker is an autogenerated mock type for the ChainHeadTracker type
-type ChainHeadTracker struct {
- mock.Mock
-}
-
-// Client provides a mock function with given fields:
-func (_m *ChainHeadTracker) Client() client.Client {
- ret := _m.Called()
-
- var r0 client.Client
- if rf, ok := ret.Get(0).(func() client.Client); ok {
- r0 = rf()
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(client.Client)
- }
- }
-
- return r0
-}
-
-// HeadTracker provides a mock function with given fields:
-func (_m *ChainHeadTracker) HeadTracker() commontypes.HeadTracker[*evmtypes.Head, common.Hash] {
- ret := _m.Called()
-
- var r0 commontypes.HeadTracker[*evmtypes.Head, common.Hash]
- if rf, ok := ret.Get(0).(func() commontypes.HeadTracker[*evmtypes.Head, common.Hash]); ok {
- r0 = rf()
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(commontypes.HeadTracker[*evmtypes.Head, common.Hash])
- }
- }
-
- return r0
-}
-
-// NewChainHeadTracker creates a new instance of ChainHeadTracker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-// The first argument is typically a *testing.T value.
-func NewChainHeadTracker(t interface {
- mock.TestingT
- Cleanup(func())
-}) *ChainHeadTracker {
- mock := &ChainHeadTracker{}
- mock.Mock.Test(t)
-
- t.Cleanup(func() { mock.AssertExpectations(t) })
-
- return mock
-}
diff --git a/core/services/relay/evm/mercury/orm.go b/core/services/relay/evm/mercury/orm.go
index 7273519f6b6..f8d4c8cb1ee 100644
--- a/core/services/relay/evm/mercury/orm.go
+++ b/core/services/relay/evm/mercury/orm.go
@@ -8,9 +8,9 @@ import (
"sync"
"github.com/ethereum/go-ethereum/common"
+ "github.com/jmoiron/sqlx"
"github.com/lib/pq"
pkgerrors "github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
diff --git a/core/services/relay/evm/mercury/persistence_manager_test.go b/core/services/relay/evm/mercury/persistence_manager_test.go
index d185a64a8f1..dbdb9777252 100644
--- a/core/services/relay/evm/mercury/persistence_manager_test.go
+++ b/core/services/relay/evm/mercury/persistence_manager_test.go
@@ -6,8 +6,8 @@ import (
"time"
"github.com/cometbft/cometbft/libs/rand"
+ "github.com/jmoiron/sqlx"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- "github.com/smartcontractkit/sqlx"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zapcore"
diff --git a/core/services/relay/evm/mercury/transmitter.go b/core/services/relay/evm/mercury/transmitter.go
index 88c3113abc6..269f28b122d 100644
--- a/core/services/relay/evm/mercury/transmitter.go
+++ b/core/services/relay/evm/mercury/transmitter.go
@@ -16,9 +16,9 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/libocr/offchainreporting2plus/chains/evmutil"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- "github.com/smartcontractkit/sqlx"
relaymercury "github.com/smartcontractkit/chainlink-relay/pkg/reportingplugins/mercury"
"github.com/smartcontractkit/chainlink-relay/pkg/services"
@@ -33,6 +33,7 @@ import (
var (
maxTransmitQueueSize = 10_000
+ maxDeleteQueueSize = 10_000
transmitTimeout = 5 * time.Second
)
@@ -60,6 +61,24 @@ var (
},
[]string{"feedID"},
)
+ transmitQueueDeleteErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{
+ Name: "mercury_transmit_queue_delete_error_count",
+ Help: "Running count of DB errors when trying to delete an item from the queue DB",
+ },
+ []string{"feedID"},
+ )
+ transmitQueueInsertErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{
+ Name: "mercury_transmit_queue_insert_error_count",
+ Help: "Running count of DB errors when trying to insert an item into the queue DB",
+ },
+ []string{"feedID"},
+ )
+ transmitQueuePushErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{
+ Name: "mercury_transmit_queue_push_error_count",
+ Help: "Running count of DB errors when trying to push an item onto the queue",
+ },
+ []string{"feedID"},
+ )
transmitServerErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "mercury_transmit_server_error_count",
Help: "Number of errored transmissions that failed due to an error returned by the mercury server",
@@ -99,9 +118,14 @@ type mercuryTransmitter struct {
queue *TransmitQueue
wg sync.WaitGroup
- transmitSuccessCount prometheus.Counter
- transmitDuplicateCount prometheus.Counter
- transmitConnectionErrorCount prometheus.Counter
+ deleteQueue chan *pb.TransmitRequest
+
+ transmitSuccessCount prometheus.Counter
+ transmitDuplicateCount prometheus.Counter
+ transmitConnectionErrorCount prometheus.Counter
+ transmitQueueDeleteErrorCount prometheus.Counter
+ transmitQueueInsertErrorCount prometheus.Counter
+ transmitQueuePushErrorCount prometheus.Counter
}
var PayloadTypes = getPayloadTypes()
@@ -139,9 +163,13 @@ func NewTransmitter(lggr logger.Logger, cfgTracker ConfigTracker, rpcClient wsrp
make(chan (struct{})),
nil,
sync.WaitGroup{},
+ make(chan *pb.TransmitRequest, maxDeleteQueueSize),
transmitSuccessCount.WithLabelValues(feedIDHex),
transmitDuplicateCount.WithLabelValues(feedIDHex),
transmitConnectionErrorCount.WithLabelValues(feedIDHex),
+ transmitQueueDeleteErrorCount.WithLabelValues(feedIDHex),
+ transmitQueueInsertErrorCount.WithLabelValues(feedIDHex),
+ transmitQueuePushErrorCount.WithLabelValues(feedIDHex),
}
}
@@ -164,6 +192,8 @@ func (mt *mercuryTransmitter) Start(ctx context.Context) (err error) {
return err
}
mt.wg.Add(1)
+ go mt.runDeleteQueueLoop()
+ mt.wg.Add(1)
go mt.runQueueLoop()
return nil
})
@@ -192,6 +222,46 @@ func (mt *mercuryTransmitter) HealthReport() map[string]error {
return report
}
+func (mt *mercuryTransmitter) runDeleteQueueLoop() {
+ defer mt.wg.Done()
+ runloopCtx, cancel := mt.stopCh.Ctx(context.Background())
+ defer cancel()
+
+ // Exponential backoff for very rarely occurring errors (DB disconnect etc)
+ b := backoff.Backoff{
+ Min: 1 * time.Second,
+ Max: 120 * time.Second,
+ Factor: 2,
+ Jitter: true,
+ }
+
+ for {
+ select {
+ case req := <-mt.deleteQueue:
+ for {
+ if err := mt.persistenceManager.Delete(runloopCtx, req); err != nil {
+ mt.lggr.Errorw("Failed to delete transmit request record", "error", err, "req", req)
+ mt.transmitQueueDeleteErrorCount.Inc()
+ select {
+ case <-time.After(b.Duration()):
+ // Wait a backoff duration before trying to delete again
+ continue
+ case <-mt.stopCh:
+ // abort and return immediately on stop even if items remain in queue
+ return
+ }
+ }
+ break
+ }
+ // success
+ b.Reset()
+ case <-mt.stopCh:
+ // abort and return immediately on stop even if items remain in queue
+ return
+ }
+ }
+}
+
func (mt *mercuryTransmitter) runQueueLoop() {
defer mt.wg.Done()
// Exponential backoff with very short retry interval (since latency is a priority)
@@ -253,9 +323,10 @@ func (mt *mercuryTransmitter) runQueueLoop() {
}
}
- if err := mt.persistenceManager.Delete(runloopCtx, t.Req); err != nil {
- mt.lggr.Errorw("Failed to delete transmit request record", "error", err, "reportCtx", t.ReportCtx)
- return
+ select {
+ case mt.deleteQueue <- t.Req:
+ default:
+ mt.lggr.Criticalw("Delete queue is full", "reportCtx", t.ReportCtx)
}
}
}
@@ -288,9 +359,11 @@ func (mt *mercuryTransmitter) Transmit(ctx context.Context, reportCtx ocrtypes.R
mt.lggr.Tracew("Transmit enqueue", "req", req, "report", report, "reportCtx", reportCtx, "signatures", signatures)
if err := mt.persistenceManager.Insert(ctx, req, reportCtx); err != nil {
+ mt.transmitQueueInsertErrorCount.Inc()
return err
}
if ok := mt.queue.Push(req, reportCtx); !ok {
+ mt.transmitQueuePushErrorCount.Inc()
return errors.New("transmit queue is closed")
}
return nil
diff --git a/core/services/relay/evm/mercury/types/types.go b/core/services/relay/evm/mercury/types/types.go
index ca266ca8ccd..49bffb6c290 100644
--- a/core/services/relay/evm/mercury/types/types.go
+++ b/core/services/relay/evm/mercury/types/types.go
@@ -8,17 +8,9 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
- httypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker/types"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
)
-//go:generate mockery --quiet --name ChainHeadTracker --output ../mocks/ --case=underscore
-type ChainHeadTracker interface {
- Client() evmclient.Client
- HeadTracker() httypes.HeadTracker
-}
-
type DataSourceORM interface {
LatestReport(ctx context.Context, feedID [32]byte, qopts ...pg.QOpt) (report []byte, err error)
}
diff --git a/core/services/relay/evm/mercury/v1/data_source.go b/core/services/relay/evm/mercury/v1/data_source.go
index d225dbee68e..0bdfb67de78 100644
--- a/core/services/relay/evm/mercury/v1/data_source.go
+++ b/core/services/relay/evm/mercury/v1/data_source.go
@@ -8,13 +8,14 @@ import (
"sync"
pkgerrors "github.com/pkg/errors"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
relaymercury "github.com/smartcontractkit/chainlink-relay/pkg/reportingplugins/mercury"
relaymercuryv1 "github.com/smartcontractkit/chainlink-relay/pkg/reportingplugins/mercury/v1"
- evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/job"
"github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon"
@@ -25,6 +26,23 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/utils"
)
+var (
+ insufficientBlocksCount = promauto.NewCounterVec(prometheus.CounterOpts{
+ Name: "mercury_insufficient_blocks_count",
+ Help: fmt.Sprintf("Count of times that there were not enough blocks in the chain during observation (need: %d)", nBlocksObservation),
+ },
+ []string{"feedID"},
+ )
+ zeroBlocksCount = promauto.NewCounterVec(prometheus.CounterOpts{
+ Name: "mercury_zero_blocks_count",
+ Help: "Count of times that there were zero blocks in the chain during observation",
+ },
+ []string{"feedID"},
+ )
+)
+
+const nBlocksObservation int = relaymercuryv1.MaxAllowedBlocks
+
type Runner interface {
ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars, l logger.Logger) (run *pipeline.Run, trrs pipeline.TaskRunResults, err error)
}
@@ -48,21 +66,38 @@ type datasource struct {
mu sync.RWMutex
chEnhancedTelem chan<- ocrcommon.EnhancedTelemetryMercuryData
- chainHeadTracker types.ChainHeadTracker
+ chainReader relaymercury.ChainReader
fetcher Fetcher
initialBlockNumber *int64
+
+ insufficientBlocksCounter prometheus.Counter
+ zeroBlocksCounter prometheus.Counter
}
var _ relaymercuryv1.DataSource = &datasource{}
-func NewDataSource(orm types.DataSourceORM, pr pipeline.Runner, jb job.Job, spec pipeline.Spec, lggr logger.Logger, rr chan *pipeline.Run, enhancedTelemChan chan ocrcommon.EnhancedTelemetryMercuryData, chainHeadTracker types.ChainHeadTracker, fetcher Fetcher, initialBlockNumber *int64, feedID [32]byte) *datasource {
- return &datasource{pr, jb, spec, lggr, rr, orm, reportcodec.ReportCodec{}, feedID, sync.RWMutex{}, enhancedTelemChan, chainHeadTracker, fetcher, initialBlockNumber}
+func NewDataSource(orm types.DataSourceORM, pr pipeline.Runner, jb job.Job, spec pipeline.Spec, lggr logger.Logger, rr chan *pipeline.Run, enhancedTelemChan chan ocrcommon.EnhancedTelemetryMercuryData, chainReader relaymercury.ChainReader, fetcher Fetcher, initialBlockNumber *int64, feedID mercuryutils.FeedID) *datasource {
+ return &datasource{pr, jb, spec, lggr, rr, orm, reportcodec.ReportCodec{}, feedID, sync.RWMutex{}, enhancedTelemChan, chainReader, fetcher, initialBlockNumber, insufficientBlocksCount.WithLabelValues(feedID.String()), zeroBlocksCount.WithLabelValues(feedID.String())}
+}
+
+type ErrEmptyLatestReport struct {
+ Err error
+}
+
+func (e ErrEmptyLatestReport) Unwrap() error { return e.Err }
+
+func (e ErrEmptyLatestReport) Error() string {
+ return fmt.Sprintf("FetchInitialMaxFinalizedBlockNumber returned empty LatestReport; this is a new feed. No initialBlockNumber was set, tried to use current block number to determine maxFinalizedBlockNumber but got error: %v", e.Err)
}
func (ds *datasource) Observe(ctx context.Context, repts ocrtypes.ReportTimestamp, fetchMaxFinalizedBlockNum bool) (obs relaymercuryv1.Observation, pipelineExecutionErr error) {
- // setCurrentBlock must come first, along with observationTimestamp, to
- // avoid front-running
- ds.setCurrentBlock(ctx, &obs)
+ // setLatestBlocks must come chronologically before observations, along
+ // with observationTimestamp, to avoid front-running
+
+ // Errors are not expected when reading from the underlying ChainReader
+ if err := ds.setLatestBlocks(ctx, &obs); err != nil {
+ return obs, err
+ }
var wg sync.WaitGroup
if fetchMaxFinalizedBlockNum {
@@ -89,7 +124,7 @@ func (ds *datasource) Observe(ctx context.Context, repts ocrtypes.ReportTimestam
}
if ds.initialBlockNumber == nil {
if obs.CurrentBlockNum.Err != nil {
- obs.MaxFinalizedBlockNumber.Err = fmt.Errorf("FetchInitialMaxFinalizedBlockNumber returned empty LatestReport; this is a new feed. No initialBlockNumber was set, tried to use current block number to determine maxFinalizedBlockNumber but got error: %w", obs.CurrentBlockNum.Err)
+ obs.MaxFinalizedBlockNumber.Err = ErrEmptyLatestReport{Err: obs.CurrentBlockNum.Err}
} else {
// Subract 1 here because we will later add 1 to the
// maxFinalizedBlockNumber to get the first validFromBlockNum, which
@@ -258,37 +293,36 @@ func (ds *datasource) executeRun(ctx context.Context) (*pipeline.Run, pipeline.T
return run, trrs, err
}
-func (ds *datasource) setCurrentBlock(ctx context.Context, obs *relaymercuryv1.Observation) {
- latestHead, err := ds.getCurrentBlock(ctx)
+func (ds *datasource) setLatestBlocks(ctx context.Context, obs *relaymercuryv1.Observation) error {
+ latestBlocks, err := ds.chainReader.LatestHeads(ctx, nBlocksObservation)
if err != nil {
- obs.CurrentBlockNum.Err = err
- obs.CurrentBlockHash.Err = err
- obs.CurrentBlockTimestamp.Err = err
- return
+ ds.lggr.Errorw("failed to read latest blocks", "error", err)
+ return err
+ }
+
+ if len(latestBlocks) < nBlocksObservation {
+ ds.insufficientBlocksCounter.Inc()
+ ds.lggr.Warnw("Insufficient blocks", "latestBlocks", latestBlocks, "lenLatestBlocks", len(latestBlocks), "nBlocksObservation", nBlocksObservation)
}
- obs.CurrentBlockNum.Val = latestHead.Number
- obs.CurrentBlockHash.Val = latestHead.Hash.Bytes()
- if latestHead.Timestamp.IsZero() {
- obs.CurrentBlockTimestamp.Val = 0
+ // TODO: remove with https://smartcontract-it.atlassian.net/browse/BCF-2209
+ if len(latestBlocks) == 0 {
+ obsErr := fmt.Errorf("no blocks available")
+ ds.zeroBlocksCounter.Inc()
+ obs.CurrentBlockNum.Err = obsErr
+ obs.CurrentBlockHash.Err = obsErr
+ obs.CurrentBlockTimestamp.Err = obsErr
} else {
- obs.CurrentBlockTimestamp.Val = uint64(latestHead.Timestamp.Unix())
+ obs.CurrentBlockNum.Val = int64(latestBlocks[0].Number)
+ obs.CurrentBlockHash.Val = latestBlocks[0].Hash
+ obs.CurrentBlockTimestamp.Val = latestBlocks[0].Timestamp
}
-}
-func (ds *datasource) getCurrentBlock(ctx context.Context) (*evmtypes.Head, error) {
- // Use the headtracker's view of the latest block, this is very fast since
- // it doesn't make any external network requests, and it is the
- // headtracker's job to ensure it has an up-to-date view of the chain based
- // on responses from all available RPC nodes
- latestHead := ds.chainHeadTracker.HeadTracker().LatestChain()
- if latestHead == nil {
- logger.Sugared(ds.lggr).AssumptionViolation("HeadTracker unexpectedly returned nil head, falling back to RPC call")
- var err error
- latestHead, err = ds.chainHeadTracker.Client().HeadByNumber(ctx, nil)
- if err != nil {
- return nil, err
- }
+ for _, block := range latestBlocks {
+ obs.LatestBlocks = append(
+ obs.LatestBlocks,
+ relaymercuryv1.NewBlock(int64(block.Number), block.Hash, block.Timestamp))
}
- return latestHead, nil
+
+ return nil
}
diff --git a/core/services/relay/evm/mercury/v1/data_source_test.go b/core/services/relay/evm/mercury/v1/data_source_test.go
index 6e460951301..40542c2631a 100644
--- a/core/services/relay/evm/mercury/v1/data_source_test.go
+++ b/core/services/relay/evm/mercury/v1/data_source_test.go
@@ -3,6 +3,7 @@ package mercury_v1
import (
"context"
"fmt"
+ "io"
"math/big"
"math/rand"
"testing"
@@ -11,7 +12,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/mock"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
@@ -19,17 +19,16 @@ import (
relaymercuryv1 "github.com/smartcontractkit/chainlink-relay/pkg/reportingplugins/mercury/v1"
commonmocks "github.com/smartcontractkit/chainlink/v2/common/mocks"
"github.com/smartcontractkit/chainlink/v2/core/assets"
- evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
- evmclimocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks"
- httypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker/types"
evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
"github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/job"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
"github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
+ "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
mercurymocks "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/mocks"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/types"
+ mercuryutils "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
reportcodecv1 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v1/reportcodec"
"github.com/smartcontractkit/chainlink/v2/core/utils"
)
@@ -53,16 +52,6 @@ func (m *mockFetcher) LatestTimestamp(context.Context) (int64, error) {
return 0, nil
}
-var _ types.ChainHeadTracker = &mockHeadTracker{}
-
-type mockHeadTracker struct {
- c evmclient.Client
- h httypes.HeadTracker
-}
-
-func (m *mockHeadTracker) Client() evmclient.Client { return m.c }
-func (m *mockHeadTracker) HeadTracker() httypes.HeadTracker { return m.h }
-
type mockORM struct {
report []byte
err error
@@ -72,9 +61,19 @@ func (m *mockORM) LatestReport(ctx context.Context, feedID [32]byte, qopts ...pg
return m.report, m.err
}
+type mockChainReader struct {
+ err error
+ obs []relaymercury.Head
+}
+
+func (m *mockChainReader) LatestHeads(context.Context, int) ([]relaymercury.Head, error) {
+ return m.obs, m.err
+}
+
func TestMercury_Observe(t *testing.T) {
orm := &mockORM{}
- ds := &datasource{lggr: logger.TestLogger(t), orm: orm, codec: (reportcodecv1.ReportCodec{})}
+ lggr := logger.TestLogger(t)
+ ds := NewDataSource(orm, nil, job.Job{}, pipeline.Spec{}, lggr, nil, nil, nil, nil, nil, mercuryutils.FeedID{})
ctx := testutils.Context(t)
repts := ocrtypes.ReportTimestamp{}
@@ -108,12 +107,7 @@ func TestMercury_Observe(t *testing.T) {
ds.spec = spec
h := commonmocks.NewHeadTracker[*evmtypes.Head, common.Hash](t)
- c := evmclimocks.NewClient(t)
- ht := &mockHeadTracker{
- c: c,
- h: h,
- }
- ds.chainHeadTracker = ht
+ ds.chainReader = evm.NewChainReader(h)
head := &evmtypes.Head{
Number: int64(rand.Int31()),
@@ -202,25 +196,21 @@ func TestMercury_Observe(t *testing.T) {
assert.NoError(t, obs.MaxFinalizedBlockNumber.Err)
assert.Equal(t, head.Number-1, obs.MaxFinalizedBlockNumber.Val)
})
- t.Run("if current block num errored", func(t *testing.T) {
+ t.Run("if no current block available", func(t *testing.T) {
h2 := commonmocks.NewHeadTracker[*evmtypes.Head, common.Hash](t)
h2.On("LatestChain").Return((*evmtypes.Head)(nil))
- ht.h = h2
- c2 := evmclimocks.NewClient(t)
- c2.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(nil, errors.New("head retrieval failed"))
- ht.c = c2
+ ds.chainReader = evm.NewChainReader(h2)
obs, err := ds.Observe(ctx, repts, true)
assert.NoError(t, err)
- assert.EqualError(t, obs.MaxFinalizedBlockNumber.Err, "FetchInitialMaxFinalizedBlockNumber returned empty LatestReport; this is a new feed. No initialBlockNumber was set, tried to use current block number to determine maxFinalizedBlockNumber but got error: head retrieval failed")
+ assert.EqualError(t, obs.MaxFinalizedBlockNumber.Err, "FetchInitialMaxFinalizedBlockNumber returned empty LatestReport; this is a new feed. No initialBlockNumber was set, tried to use current block number to determine maxFinalizedBlockNumber but got error: no blocks available")
})
})
})
})
- ht.h = h
- ht.c = c
+ ds.chainReader = evm.NewChainReader(h)
t.Run("when fetchMaxFinalizedBlockNum=false", func(t *testing.T) {
t.Run("when run execution fails, returns error", func(t *testing.T) {
@@ -322,52 +312,108 @@ func TestMercury_Observe(t *testing.T) {
t.Fatal("expected run on channel")
}
})
- t.Run("if head tracker returns nil, falls back to RPC method", func(t *testing.T) {
- t.Run("if call succeeds", func(t *testing.T) {
- h = commonmocks.NewHeadTracker[*evmtypes.Head, common.Hash](t)
- h.On("LatestChain").Return((*evmtypes.Head)(nil))
- ht.h = h
- c.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(head, nil).Once()
-
- obs, err := ds.Observe(ctx, repts, false)
- assert.NoError(t, err)
+ })
- assert.Equal(t, head.Number, obs.CurrentBlockNum.Val)
- assert.NoError(t, obs.CurrentBlockNum.Err)
- assert.Equal(t, fmt.Sprintf("%x", head.Hash), fmt.Sprintf("%x", obs.CurrentBlockHash.Val))
- assert.NoError(t, obs.CurrentBlockHash.Err)
- assert.Equal(t, uint64(head.Timestamp.Unix()), obs.CurrentBlockTimestamp.Val)
- assert.NoError(t, obs.CurrentBlockTimestamp.Err)
+ t.Run("LatestBlocks is populated correctly", func(t *testing.T) {
+ t.Run("when chain length is zero", func(t *testing.T) {
+ ht2 := commonmocks.NewHeadTracker[*evmtypes.Head, common.Hash](t)
+ ht2.On("LatestChain").Return((*evmtypes.Head)(nil))
+ ds.chainReader = evm.NewChainReader(ht2)
- h.AssertExpectations(t)
- c.AssertExpectations(t)
- })
- t.Run("if call fails, returns error for that observation", func(t *testing.T) {
- c = evmclimocks.NewClient(t)
- ht.c = c
- c.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(nil, errors.New("client call failed")).Once()
+ obs, err := ds.Observe(ctx, repts, true)
+ assert.NoError(t, err)
- obs, err := ds.Observe(ctx, repts, false)
- assert.NoError(t, err)
+ assert.Len(t, obs.LatestBlocks, 0)
- assert.Zero(t, obs.CurrentBlockNum.Val)
- assert.EqualError(t, obs.CurrentBlockNum.Err, "client call failed")
- assert.Zero(t, obs.CurrentBlockHash.Val)
- assert.EqualError(t, obs.CurrentBlockHash.Err, "client call failed")
- assert.Zero(t, obs.CurrentBlockTimestamp.Val)
- assert.EqualError(t, obs.CurrentBlockTimestamp.Err, "client call failed")
+ ht2.AssertExpectations(t)
+ })
+ t.Run("when chain is too short", func(t *testing.T) {
+ h4 := &evmtypes.Head{
+ Number: 4,
+ Parent: nil,
+ }
+ h5 := &evmtypes.Head{
+ Number: 5,
+ Parent: h4,
+ }
+ h6 := &evmtypes.Head{
+ Number: 6,
+ Parent: h5,
+ }
- c.AssertExpectations(t)
- })
+ ht2 := commonmocks.NewHeadTracker[*evmtypes.Head, common.Hash](t)
+ ht2.On("LatestChain").Return(h6)
+ ds.chainReader = evm.NewChainReader(ht2)
+
+ obs, err := ds.Observe(ctx, repts, true)
+ assert.NoError(t, err)
+
+ assert.Len(t, obs.LatestBlocks, 3)
+ assert.Equal(t, 6, int(obs.LatestBlocks[0].Num))
+ assert.Equal(t, 5, int(obs.LatestBlocks[1].Num))
+ assert.Equal(t, 4, int(obs.LatestBlocks[2].Num))
+
+ ht2.AssertExpectations(t)
+ })
+ t.Run("when chain is long enough", func(t *testing.T) {
+ h1 := &evmtypes.Head{
+ Number: 1,
+ }
+ h2 := &evmtypes.Head{
+ Number: 2,
+ Parent: h1,
+ }
+ h3 := &evmtypes.Head{
+ Number: 3,
+ Parent: h2,
+ }
+ h4 := &evmtypes.Head{
+ Number: 4,
+ Parent: h3,
+ }
+ h5 := &evmtypes.Head{
+ Number: 5,
+ Parent: h4,
+ }
+ h6 := &evmtypes.Head{
+ Number: 6,
+ Parent: h5,
+ }
+
+ ht2 := commonmocks.NewHeadTracker[*evmtypes.Head, common.Hash](t)
+ ht2.On("LatestChain").Return(h6)
+ ds.chainReader = evm.NewChainReader(ht2)
+
+ obs, err := ds.Observe(ctx, repts, true)
+ assert.NoError(t, err)
+
+ assert.Len(t, obs.LatestBlocks, 5)
+ assert.Equal(t, 6, int(obs.LatestBlocks[0].Num))
+ assert.Equal(t, 5, int(obs.LatestBlocks[1].Num))
+ assert.Equal(t, 4, int(obs.LatestBlocks[2].Num))
+ assert.Equal(t, 3, int(obs.LatestBlocks[3].Num))
+ assert.Equal(t, 2, int(obs.LatestBlocks[4].Num))
+
+ ht2.AssertExpectations(t)
+ })
+
+ t.Run("when chain reader returns an error", func(t *testing.T) {
+
+ ds.chainReader = &mockChainReader{
+ err: io.EOF,
+ obs: nil,
+ }
+
+ obs, err := ds.Observe(ctx, repts, true)
+ assert.Error(t, err)
+ assert.Equal(t, obs, relaymercuryv1.Observation{})
})
})
}
-func TestMercury_SetCurrentBlock(t *testing.T) {
+func TestMercury_SetLatestBlocks(t *testing.T) {
lggr := logger.TestLogger(t)
- ds := datasource{
- lggr: lggr,
- }
+ ds := NewDataSource(nil, nil, job.Job{}, pipeline.Spec{}, lggr, nil, nil, nil, nil, nil, mercuryutils.FeedID{})
h := evmtypes.Head{
Number: testutils.NewRandomPositiveInt64(),
@@ -382,72 +428,39 @@ func TestMercury_SetCurrentBlock(t *testing.T) {
t.Run("returns head from headtracker if present", func(t *testing.T) {
headTracker := commonmocks.NewHeadTracker[*evmtypes.Head, common.Hash](t)
- chainHeadTracker := mercurymocks.NewChainHeadTracker(t)
-
- chainHeadTracker.On("HeadTracker").Return(headTracker)
headTracker.On("LatestChain").Return(&h, nil)
-
- ds.chainHeadTracker = chainHeadTracker
+ ds.chainReader = evm.NewChainReader(headTracker)
obs := relaymercuryv1.Observation{}
- ds.setCurrentBlock(context.Background(), &obs)
+ err := ds.setLatestBlocks(context.Background(), &obs)
+ assert.NoError(t, err)
assert.Equal(t, h.Number, obs.CurrentBlockNum.Val)
assert.Equal(t, h.Hash.Bytes(), obs.CurrentBlockHash.Val)
assert.Equal(t, uint64(h.Timestamp.Unix()), obs.CurrentBlockTimestamp.Val)
- chainHeadTracker.AssertExpectations(t)
+ assert.Len(t, obs.LatestBlocks, 1)
headTracker.AssertExpectations(t)
})
- t.Run("if headtracker returns nil head and eth call succeeds", func(t *testing.T) {
- ethClient := evmclimocks.NewClient(t)
+ t.Run("if headtracker returns nil head", func(t *testing.T) {
headTracker := commonmocks.NewHeadTracker[*evmtypes.Head, common.Hash](t)
- chainHeadTracker := mercurymocks.NewChainHeadTracker(t)
-
- chainHeadTracker.On("Client").Return(ethClient)
- chainHeadTracker.On("HeadTracker").Return(headTracker)
// This can happen in some cases e.g. RPC node is offline
headTracker.On("LatestChain").Return((*evmtypes.Head)(nil))
- ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(&h, nil)
-
- ds.chainHeadTracker = chainHeadTracker
-
- obs := relaymercuryv1.Observation{}
- ds.setCurrentBlock(context.Background(), &obs)
-
- assert.Equal(t, h.Number, obs.CurrentBlockNum.Val)
- assert.Equal(t, h.Hash.Bytes(), obs.CurrentBlockHash.Val)
- assert.Equal(t, uint64(h.Timestamp.Unix()), obs.CurrentBlockTimestamp.Val)
-
- chainHeadTracker.AssertExpectations(t)
- ethClient.AssertExpectations(t)
- headTracker.AssertExpectations(t)
- })
-
- t.Run("if headtracker returns nil head and eth call fails", func(t *testing.T) {
- ethClient := evmclimocks.NewClient(t)
- headTracker := commonmocks.NewHeadTracker[*evmtypes.Head, common.Hash](t)
- chainHeadTracker := mercurymocks.NewChainHeadTracker(t)
-
- chainHeadTracker.On("Client").Return(ethClient)
- chainHeadTracker.On("HeadTracker").Return(headTracker)
- // This can happen in some cases e.g. RPC node is offline
- headTracker.On("LatestChain").Return((*evmtypes.Head)(nil))
- err := errors.New("foo")
- ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(nil, err)
-
- ds.chainHeadTracker = chainHeadTracker
+ ds.chainReader = evm.NewChainReader(headTracker)
obs := relaymercuryv1.Observation{}
- ds.setCurrentBlock(context.Background(), &obs)
+ err := ds.setLatestBlocks(context.Background(), &obs)
- assert.Equal(t, err, obs.CurrentBlockNum.Err)
- assert.Equal(t, err, obs.CurrentBlockHash.Err)
- assert.Equal(t, err, obs.CurrentBlockTimestamp.Err)
+ assert.NoError(t, err)
+ assert.Zero(t, obs.CurrentBlockNum.Val)
+ assert.Zero(t, obs.CurrentBlockHash.Val)
+ assert.Zero(t, obs.CurrentBlockTimestamp.Val)
+ assert.EqualError(t, obs.CurrentBlockNum.Err, "no blocks available")
+ assert.EqualError(t, obs.CurrentBlockHash.Err, "no blocks available")
+ assert.EqualError(t, obs.CurrentBlockTimestamp.Err, "no blocks available")
- chainHeadTracker.AssertExpectations(t)
- ethClient.AssertExpectations(t)
+ assert.Len(t, obs.LatestBlocks, 0)
headTracker.AssertExpectations(t)
})
}
diff --git a/core/services/relay/evm/mercury_provider.go b/core/services/relay/evm/mercury_provider.go
index 914401c0897..bba5e699bc6 100644
--- a/core/services/relay/evm/mercury_provider.go
+++ b/core/services/relay/evm/mercury_provider.go
@@ -12,6 +12,7 @@ import (
relaymercuryv3 "github.com/smartcontractkit/chainlink-relay/pkg/reportingplugins/mercury/v3"
"github.com/smartcontractkit/chainlink-relay/pkg/services"
relaytypes "github.com/smartcontractkit/chainlink-relay/pkg/types"
+ httypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker/types"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury"
@@ -25,6 +26,7 @@ type mercuryProvider struct {
reportCodecV1 relaymercuryv1.ReportCodec
reportCodecV2 relaymercuryv2.ReportCodec
reportCodecV3 relaymercuryv3.ReportCodec
+ chainReader relaymercury.ChainReader
logger logger.Logger
ms services.MultiStart
@@ -36,6 +38,7 @@ func NewMercuryProvider(
reportCodecV1 relaymercuryv1.ReportCodec,
reportCodecV2 relaymercuryv2.ReportCodec,
reportCodecV3 relaymercuryv3.ReportCodec,
+ chainReader relaymercury.ChainReader,
lggr logger.Logger,
) *mercuryProvider {
return &mercuryProvider{
@@ -44,6 +47,7 @@ func NewMercuryProvider(
reportCodecV1,
reportCodecV2,
reportCodecV3,
+ chainReader,
lggr,
services.MultiStart{},
}
@@ -103,3 +107,37 @@ func (p *mercuryProvider) ContractTransmitter() ocrtypes.ContractTransmitter {
func (p *mercuryProvider) MercuryServerFetcher() relaymercury.MercuryServerFetcher {
return p.transmitter
}
+
+func (p *mercuryProvider) ChainReader() relaymercury.ChainReader {
+ return p.chainReader
+}
+
+var _ relaymercury.ChainReader = (*chainReader)(nil)
+
+type chainReader struct {
+ tracker httypes.HeadTracker
+}
+
+func NewChainReader(h httypes.HeadTracker) relaymercury.ChainReader {
+ return &chainReader{
+ tracker: h,
+ }
+}
+
+func (r *chainReader) LatestHeads(ctx context.Context, k int) ([]relaymercury.Head, error) {
+ evmBlocks := r.tracker.LatestChain().AsSlice(k)
+ if len(evmBlocks) == 0 {
+ return nil, nil
+ }
+
+ blocks := make([]relaymercury.Head, len(evmBlocks))
+ for x := 0; x < len(evmBlocks); x++ {
+ blocks[x] = relaymercury.Head{
+ Number: uint64(evmBlocks[x].BlockNumber()),
+ Hash: evmBlocks[x].Hash.Bytes(),
+ Timestamp: uint64(evmBlocks[x].Timestamp.Unix()),
+ }
+ }
+
+ return blocks, nil
+}
diff --git a/core/services/relay/evm/ocr2keeper.go b/core/services/relay/evm/ocr2keeper.go
index baf98b9b006..a284d677ebf 100644
--- a/core/services/relay/evm/ocr2keeper.go
+++ b/core/services/relay/evm/ocr2keeper.go
@@ -8,12 +8,12 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
+ "github.com/jmoiron/sqlx"
"github.com/pkg/errors"
"github.com/smartcontractkit/libocr/gethwrappers2/ocr2aggregator"
"github.com/smartcontractkit/libocr/offchainreporting2plus/chains/evmutil"
"github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3types"
"github.com/smartcontractkit/ocr2keepers/pkg/v3/plugin"
- "github.com/smartcontractkit/sqlx"
relaytypes "github.com/smartcontractkit/chainlink-relay/pkg/types"
diff --git a/core/services/relay/evm/ocr2vrf.go b/core/services/relay/evm/ocr2vrf.go
index 14004d0b1aa..0c9414068e3 100644
--- a/core/services/relay/evm/ocr2vrf.go
+++ b/core/services/relay/evm/ocr2vrf.go
@@ -7,11 +7,11 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
+ "github.com/jmoiron/sqlx"
"github.com/pkg/errors"
"github.com/smartcontractkit/libocr/gethwrappers2/ocr2aggregator"
"github.com/smartcontractkit/libocr/offchainreporting2plus/chains/evmutil"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- "github.com/smartcontractkit/sqlx"
relaytypes "github.com/smartcontractkit/chainlink-relay/pkg/types"
diff --git a/core/services/relay/evm/request_round_tracker.go b/core/services/relay/evm/request_round_tracker.go
index 4e065f2dfdf..c1c3a49e0e4 100644
--- a/core/services/relay/evm/request_round_tracker.go
+++ b/core/services/relay/evm/request_round_tracker.go
@@ -9,11 +9,12 @@ import (
gethTypes "github.com/ethereum/go-ethereum/core/types"
"github.com/pkg/errors"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/libocr/gethwrappers2/ocr2aggregator"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- "github.com/smartcontractkit/sqlx"
"github.com/smartcontractkit/chainlink-relay/pkg/services"
+
evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/log"
offchain_aggregator_wrapper "github.com/smartcontractkit/chainlink/v2/core/internal/gethwrappers2/generated/offchainaggregator"
diff --git a/core/services/relay/grpc_provider_server.go b/core/services/relay/grpc_provider_server.go
new file mode 100644
index 00000000000..943af0e6362
--- /dev/null
+++ b/core/services/relay/grpc_provider_server.go
@@ -0,0 +1,68 @@
+package relay
+
+import (
+ "context"
+ "net"
+
+ "go.uber.org/multierr"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+
+ "github.com/smartcontractkit/chainlink-relay/pkg/loop"
+ "github.com/smartcontractkit/chainlink-relay/pkg/types"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+)
+
+type ProviderServer struct {
+ s *grpc.Server
+ lis net.Listener
+ lggr logger.Logger
+ conns []*grpc.ClientConn
+}
+
+func (p *ProviderServer) Start(ctx context.Context) error {
+ p.serve()
+ return nil
+}
+
+func (p *ProviderServer) Close() error {
+ var err error
+ for _, c := range p.conns {
+ err = multierr.Combine(err, c.Close())
+ }
+ p.s.Stop()
+ return err
+}
+
+func (p *ProviderServer) GetConn() (*grpc.ClientConn, error) {
+ cc, err := grpc.Dial(p.lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()))
+ p.conns = append(p.conns, cc)
+ return cc, err
+}
+
+// NewProviderServer creates a gRPC server that wraps a provider; this is a workaround to test the Node API PoC until the EVM relayer is loopified
+func NewProviderServer(p types.PluginProvider, pType types.OCR2PluginType, lggr logger.Logger) (*ProviderServer, error) {
+ lis, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return nil, err
+ }
+ ps := ProviderServer{
+ s: grpc.NewServer(),
+ lis: lis,
+ lggr: lggr.Named("EVM.ProviderServer"),
+ }
+ err = loop.RegisterStandAloneProvider(ps.s, p, pType)
+ if err != nil {
+ return nil, err
+ }
+
+ return &ps, nil
+}
+
+func (p *ProviderServer) serve() {
+ go func() {
+ if err := p.s.Serve(p.lis); err != nil {
+ p.lggr.Errorf("Failed to serve EVM provider server: %v", err)
+ }
+ }()
+}
diff --git a/core/services/relay/grpc_provider_server_test.go b/core/services/relay/grpc_provider_server_test.go
new file mode 100644
index 00000000000..e7ee8d7f150
--- /dev/null
+++ b/core/services/relay/grpc_provider_server_test.go
@@ -0,0 +1,27 @@
+package relay
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink-relay/pkg/types"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+)
+
+func TestProviderServer(t *testing.T) {
+ r := &mockRelayer{}
+ sa := NewServerAdapter(r, mockRelayerExt{})
+ mp, _ := sa.NewPluginProvider(context.Background(), types.RelayArgs{ProviderType: string(types.Median)}, types.PluginArgs{})
+
+ lggr := logger.TestLogger(t)
+ _, err := NewProviderServer(mp, "unsupported-type", lggr)
+ require.Error(t, err)
+
+ ps, err := NewProviderServer(staticMedianProvider{}, types.Median, lggr)
+ require.NoError(t, err)
+
+ _, err = ps.GetConn()
+ require.NoError(t, err)
+}
diff --git a/core/services/relay/relay_test.go b/core/services/relay/relay_test.go
index d3a94773498..5bcd14c64a0 100644
--- a/core/services/relay/relay_test.go
+++ b/core/services/relay/relay_test.go
@@ -4,6 +4,8 @@ import (
"context"
"testing"
+ "github.com/smartcontractkit/libocr/offchainreporting2/reportingplugin/median"
+ ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
"github.com/stretchr/testify/assert"
"github.com/smartcontractkit/chainlink-relay/pkg/loop"
@@ -61,6 +63,30 @@ type staticMedianProvider struct {
types.MedianProvider
}
+func (s staticMedianProvider) OffchainConfigDigester() ocrtypes.OffchainConfigDigester {
+ return nil
+}
+
+func (s staticMedianProvider) ContractConfigTracker() ocrtypes.ContractConfigTracker {
+ return nil
+}
+
+func (s staticMedianProvider) ContractTransmitter() ocrtypes.ContractTransmitter {
+ return nil
+}
+
+func (s staticMedianProvider) ReportCodec() median.ReportCodec {
+ return nil
+}
+
+func (s staticMedianProvider) MedianContract() median.MedianContract {
+ return nil
+}
+
+func (s staticMedianProvider) OnchainConfigCodec() median.OnchainConfigCodec {
+ return nil
+}
+
type staticFunctionsProvider struct {
types.FunctionsProvider
}
diff --git a/core/services/s4/postgres_orm.go b/core/services/s4/postgres_orm.go
index d0a79dba959..1f91270fd08 100644
--- a/core/services/s4/postgres_orm.go
+++ b/core/services/s4/postgres_orm.go
@@ -9,8 +9,8 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
"github.com/smartcontractkit/chainlink/v2/core/utils"
+ "github.com/jmoiron/sqlx"
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
)
const (
diff --git a/core/services/synchronization/common.go b/core/services/synchronization/common.go
index 32f3a86c6f9..584f5b24380 100644
--- a/core/services/synchronization/common.go
+++ b/core/services/synchronization/common.go
@@ -22,6 +22,7 @@ const (
OCR3Mercury TelemetryType = "ocr3-mercury"
OCR2VRF TelemetryType = "ocr2-vrf"
AutomationCustom TelemetryType = "automation-custom"
+ OCR3Automation TelemetryType = "ocr3-automation"
)
type TelemPayload struct {
diff --git a/core/services/telemetry/common.go b/core/services/telemetry/common.go
index 5a3f6706f7d..37a92f16c6d 100644
--- a/core/services/telemetry/common.go
+++ b/core/services/telemetry/common.go
@@ -7,5 +7,5 @@ import (
)
type MonitoringEndpointGenerator interface {
- GenMonitoringEndpoint(contractID string, telemType synchronization.TelemetryType, network string, chainID string) ocrtypes.MonitoringEndpoint
+ GenMonitoringEndpoint(network string, chainID string, contractID string, telemType synchronization.TelemetryType) ocrtypes.MonitoringEndpoint
}
diff --git a/core/services/telemetry/ingress.go b/core/services/telemetry/ingress.go
index 637fa0dd3ba..266155095bf 100644
--- a/core/services/telemetry/ingress.go
+++ b/core/services/telemetry/ingress.go
@@ -18,25 +18,25 @@ func NewIngressAgentWrapper(telemetryIngressClient synchronization.TelemetryServ
return &IngressAgentWrapper{telemetryIngressClient}
}
-func (t *IngressAgentWrapper) GenMonitoringEndpoint(contractID string, telemType synchronization.TelemetryType, network string, chainID string) ocrtypes.MonitoringEndpoint {
- return NewIngressAgent(t.telemetryIngressClient, contractID, telemType, network, chainID)
+func (t *IngressAgentWrapper) GenMonitoringEndpoint(network, chainID string, contractID string, telemType synchronization.TelemetryType) ocrtypes.MonitoringEndpoint {
+ return NewIngressAgent(t.telemetryIngressClient, network, chainID, contractID, telemType)
}
type IngressAgent struct {
telemetryIngressClient synchronization.TelemetryService
- contractID string
- telemType synchronization.TelemetryType
network string
chainID string
+ contractID string
+ telemType synchronization.TelemetryType
}
-func NewIngressAgent(telemetryIngressClient synchronization.TelemetryService, contractID string, telemType synchronization.TelemetryType, network string, chainID string) *IngressAgent {
+func NewIngressAgent(telemetryIngressClient synchronization.TelemetryService, network string, chainID string, contractID string, telemType synchronization.TelemetryType) *IngressAgent {
return &IngressAgent{
telemetryIngressClient,
- contractID,
- telemType,
network,
chainID,
+ contractID,
+ telemType,
}
}
diff --git a/core/services/telemetry/ingress_batch.go b/core/services/telemetry/ingress_batch.go
index df860853592..bb08c76d7e2 100644
--- a/core/services/telemetry/ingress_batch.go
+++ b/core/services/telemetry/ingress_batch.go
@@ -21,27 +21,27 @@ func NewIngressAgentBatchWrapper(telemetryIngressBatchClient synchronization.Tel
}
// GenMonitoringEndpoint returns a new ingress batch agent instantiated with the batch client and a contractID
-func (t *IngressAgentBatchWrapper) GenMonitoringEndpoint(contractID string, telemType synchronization.TelemetryType, network string, chainID string) ocrtypes.MonitoringEndpoint {
- return NewIngressAgentBatch(t.telemetryIngressBatchClient, contractID, telemType, network, chainID)
+func (t *IngressAgentBatchWrapper) GenMonitoringEndpoint(network string, chainID string, contractID string, telemType synchronization.TelemetryType) ocrtypes.MonitoringEndpoint {
+ return NewIngressAgentBatch(t.telemetryIngressBatchClient, network, chainID, contractID, telemType)
}
// IngressAgentBatch allows for sending batch telemetry for a given contractID
type IngressAgentBatch struct {
telemetryIngressBatchClient synchronization.TelemetryService
- contractID string
- telemType synchronization.TelemetryType
network string
chainID string
+ contractID string
+ telemType synchronization.TelemetryType
}
// NewIngressAgentBatch creates a new IngressAgentBatch with the given batch client and contractID
-func NewIngressAgentBatch(telemetryIngressBatchClient synchronization.TelemetryService, contractID string, telemType synchronization.TelemetryType, network string, chainID string) *IngressAgentBatch {
+func NewIngressAgentBatch(telemetryIngressBatchClient synchronization.TelemetryService, network string, chainID string, contractID string, telemType synchronization.TelemetryType) *IngressAgentBatch {
return &IngressAgentBatch{
telemetryIngressBatchClient,
- contractID,
- telemType,
network,
chainID,
+ contractID,
+ telemType,
}
}
diff --git a/core/services/telemetry/ingress_batch_test.go b/core/services/telemetry/ingress_batch_test.go
index 3923b569fed..91e6a07ad7f 100644
--- a/core/services/telemetry/ingress_batch_test.go
+++ b/core/services/telemetry/ingress_batch_test.go
@@ -14,7 +14,7 @@ import (
func TestIngressAgentBatch(t *testing.T) {
telemetryBatchClient := mocks.NewTelemetryService(t)
ingressAgentBatch := telemetry.NewIngressAgentWrapper(telemetryBatchClient)
- monitoringEndpoint := ingressAgentBatch.GenMonitoringEndpoint("0xa", synchronization.OCR, "test-network", "test-chainID")
+ monitoringEndpoint := ingressAgentBatch.GenMonitoringEndpoint("test-network", "test-chainID", "0xa", synchronization.OCR)
// Handle the Send call and store the telem
var telemPayload synchronization.TelemPayload
diff --git a/core/services/telemetry/ingress_test.go b/core/services/telemetry/ingress_test.go
index 31028f2f605..7e83384dc6c 100644
--- a/core/services/telemetry/ingress_test.go
+++ b/core/services/telemetry/ingress_test.go
@@ -14,7 +14,7 @@ import (
func TestIngressAgent(t *testing.T) {
telemetryClient := mocks.NewTelemetryService(t)
ingressAgent := telemetry.NewIngressAgentWrapper(telemetryClient)
- monitoringEndpoint := ingressAgent.GenMonitoringEndpoint("0xa", synchronization.OCR, "test-network", "test-chainID")
+ monitoringEndpoint := ingressAgent.GenMonitoringEndpoint("test-network", "test-chainID", "0xa", synchronization.OCR)
// Handle the Send call and store the telem
var telemPayload synchronization.TelemPayload
diff --git a/core/services/telemetry/manager.go b/core/services/telemetry/manager.go
index 2931ec71a13..cc14a956c12 100644
--- a/core/services/telemetry/manager.go
+++ b/core/services/telemetry/manager.go
@@ -150,7 +150,7 @@ func (m *Manager) HealthReport() map[string]error {
}
// GenMonitoringEndpoint creates a new monitoring endpoints based on the existing available endpoints defined in the core config TOML, if no endpoint for the network and chainID exists, a NOOP agent will be used and the telemetry will not be sent
-func (m *Manager) GenMonitoringEndpoint(contractID string, telemType synchronization.TelemetryType, network string, chainID string) commontypes.MonitoringEndpoint {
+func (m *Manager) GenMonitoringEndpoint(network string, chainID string, contractID string, telemType synchronization.TelemetryType) commontypes.MonitoringEndpoint {
e, found := m.getEndpoint(network, chainID)
@@ -160,10 +160,10 @@ func (m *Manager) GenMonitoringEndpoint(contractID string, telemType synchroniza
}
if m.useBatchSend {
- return NewIngressAgentBatch(e.client, contractID, telemType, network, chainID)
+ return NewIngressAgentBatch(e.client, network, chainID, contractID, telemType)
}
- return NewIngressAgent(e.client, contractID, telemType, network, chainID)
+ return NewIngressAgent(e.client, network, chainID, contractID, telemType)
}
diff --git a/core/services/telemetry/manager_test.go b/core/services/telemetry/manager_test.go
index 69746625ddd..2d51d9f4491 100644
--- a/core/services/telemetry/manager_test.go
+++ b/core/services/telemetry/manager_test.go
@@ -56,14 +56,14 @@ func TestManagerAgents(t *testing.T) {
tm := NewManager(tic, ks, lggr)
require.Equal(t, "*synchronization.telemetryIngressBatchClient", reflect.TypeOf(tm.endpoints[0].client).String())
- me := tm.GenMonitoringEndpoint("", "", "network-1", "network-1-chainID-1")
+ me := tm.GenMonitoringEndpoint("network-1", "network-1-chainID-1", "", "")
require.Equal(t, "*telemetry.IngressAgentBatch", reflect.TypeOf(me).String())
tic = setupMockConfig(t, false)
tic.On("Endpoints").Return([]config.TelemetryIngressEndpoint{te})
tm = NewManager(tic, ks, lggr)
require.Equal(t, "*synchronization.telemetryIngressClient", reflect.TypeOf(tm.endpoints[0].client).String())
- me = tm.GenMonitoringEndpoint("", "", "network-1", "network-1-chainID-1")
+ me = tm.GenMonitoringEndpoint("network-1", "network-1-chainID-1", "", "")
require.Equal(t, "*telemetry.IngressAgent", reflect.TypeOf(me).String())
}
@@ -254,17 +254,17 @@ func TestCorrectEndpointRouting(t *testing.T) {
}
//Unknown networks or chainID
- noopEndpoint := tm.GenMonitoringEndpoint("some-contractID", "some-type", "unknown-network", "unknown-chainID")
+ noopEndpoint := tm.GenMonitoringEndpoint("unknown-network", "unknown-chainID", "some-contractID", "some-type")
require.Equal(t, "*telemetry.NoopAgent", reflect.TypeOf(noopEndpoint).String())
require.Equal(t, 1, obsLogs.Len())
require.Contains(t, obsLogs.TakeAll()[0].Message, "no telemetry endpoint found")
- noopEndpoint = tm.GenMonitoringEndpoint("some-contractID", "some-type", "network-1", "unknown-chainID")
+ noopEndpoint = tm.GenMonitoringEndpoint("network-1", "unknown-chainID", "some-contractID", "some-type")
require.Equal(t, "*telemetry.NoopAgent", reflect.TypeOf(noopEndpoint).String())
require.Equal(t, 1, obsLogs.Len())
require.Contains(t, obsLogs.TakeAll()[0].Message, "no telemetry endpoint found")
- noopEndpoint = tm.GenMonitoringEndpoint("some-contractID", "some-type", "network-2", "network-1-chainID-1")
+ noopEndpoint = tm.GenMonitoringEndpoint("network-2", "network-1-chainID-1", "some-contractID", "some-type")
require.Equal(t, "*telemetry.NoopAgent", reflect.TypeOf(noopEndpoint).String())
require.Equal(t, 1, obsLogs.Len())
require.Contains(t, obsLogs.TakeAll()[0].Message, "no telemetry endpoint found")
@@ -274,10 +274,10 @@ func TestCorrectEndpointRouting(t *testing.T) {
telemType := fmt.Sprintf("TelemType_%s", e.chainID)
contractID := fmt.Sprintf("contractID_%s", e.chainID)
me := tm.GenMonitoringEndpoint(
- contractID,
- synchronization.TelemetryType(telemType),
e.network,
e.chainID,
+ contractID,
+ synchronization.TelemetryType(telemType),
)
me.SendLog([]byte(e.chainID))
require.Equal(t, 0, obsLogs.Len())
@@ -316,7 +316,7 @@ func TestLegacyMode(t *testing.T) {
})
tm.endpoints[0].client = clientMock
- e := tm.GenMonitoringEndpoint("some-contractID", "some-type", "unknown-network", "unknown-chainID")
+ e := tm.GenMonitoringEndpoint("unknown-network", "unknown-chainID", "some-contractID", "some-type")
require.Equal(t, "*telemetry.IngressAgentBatch", reflect.TypeOf(e).String())
e.SendLog([]byte("endpoint-1-message-1"))
@@ -324,7 +324,7 @@ func TestLegacyMode(t *testing.T) {
e.SendLog([]byte("endpoint-1-message-3"))
require.Len(t, clientSent, 3)
- e2 := tm.GenMonitoringEndpoint("another-contractID", "another-type", "another-unknown-network", "another-unknown-chainID")
+ e2 := tm.GenMonitoringEndpoint("another-unknown-network", "another-unknown-chainID", "another-contractID", "another-type")
require.Equal(t, "*telemetry.IngressAgentBatch", reflect.TypeOf(e).String())
e2.SendLog([]byte("endpoint-2-message-1"))
diff --git a/core/services/telemetry/noop.go b/core/services/telemetry/noop.go
index cbeb0387089..4da8868c8f0 100644
--- a/core/services/telemetry/noop.go
+++ b/core/services/telemetry/noop.go
@@ -16,6 +16,6 @@ func (t *NoopAgent) SendLog(log []byte) {
}
// GenMonitoringEndpoint creates a monitoring endpoint for telemetry
-func (t *NoopAgent) GenMonitoringEndpoint(contractID string, telemType synchronization.TelemetryType, network string, chainID string) ocrtypes.MonitoringEndpoint {
+func (t *NoopAgent) GenMonitoringEndpoint(network string, chainID string, contractID string, telemType synchronization.TelemetryType) ocrtypes.MonitoringEndpoint {
return t
}
diff --git a/core/services/versioning/orm.go b/core/services/versioning/orm.go
index 03bd64fdd2b..8ed745955dc 100644
--- a/core/services/versioning/orm.go
+++ b/core/services/versioning/orm.go
@@ -7,8 +7,8 @@ import (
"github.com/Masterminds/semver/v3"
"github.com/jackc/pgconn"
+ "github.com/jmoiron/sqlx"
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
diff --git a/core/services/vrf/delegate.go b/core/services/vrf/delegate.go
index f6b6a460b89..e976d01b995 100644
--- a/core/services/vrf/delegate.go
+++ b/core/services/vrf/delegate.go
@@ -1,10 +1,7 @@
package vrf
import (
- "encoding/hex"
"fmt"
- "math/big"
- "strings"
"time"
"github.com/avast/retry-go/v4"
@@ -13,7 +10,7 @@ import (
"github.com/theodesp/go-heaps/pairing"
"go.uber.org/multierr"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/assets"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm"
@@ -87,7 +84,6 @@ func (d *Delegate) ServicesForSpec(jb job.Job) ([]job.ServiceCtx, error) {
if err != nil {
return nil, err
}
- chainId := chain.Client().ConfiguredChainID()
coordinator, err := solidity_vrf_coordinator_interface.NewVRFCoordinator(jb.VRFSpec.CoordinatorAddress.Address(), chain.Client())
if err != nil {
return nil, err
@@ -168,23 +164,19 @@ func (d *Delegate) ServicesForSpec(jb job.Job) ([]job.ServiceCtx, error) {
chain.Config().EVM(),
chain.Config().EVM().GasEstimator(),
lV2Plus,
- chain.Client(),
+ chain,
chain.ID(),
- chain.LogBroadcaster(),
d.q,
v2.NewCoordinatorV2_5(coordinatorV2Plus),
batchCoordinatorV2,
vrfOwner,
aggregator,
- chain.TxManager(),
d.pr,
d.ks.Eth(),
jb,
d.mailMon,
utils.NewHighCapacityMailbox[log.Broadcast](),
func() {},
- GetStartingResponseCountsV2(d.q, lV2Plus, chainId.Uint64(), chain.Config().EVM().FinalityDepth()),
- chain.HeadBroadcaster(),
vrfcommon.NewLogDeduper(int(chain.Config().EVM().FinalityDepth())))}, nil
}
if _, ok := task.(*pipeline.VRFTaskV2); ok {
@@ -223,49 +215,42 @@ func (d *Delegate) ServicesForSpec(jb job.Job) ([]job.ServiceCtx, error) {
chain.Config().EVM(),
chain.Config().EVM().GasEstimator(),
lV2,
- chain.Client(),
+ chain,
chain.ID(),
- chain.LogBroadcaster(),
d.q,
v2.NewCoordinatorV2(coordinatorV2),
batchCoordinatorV2,
vrfOwner,
aggregator,
- chain.TxManager(),
d.pr,
d.ks.Eth(),
jb,
d.mailMon,
utils.NewHighCapacityMailbox[log.Broadcast](),
func() {},
- GetStartingResponseCountsV2(d.q, lV2, chainId.Uint64(), chain.Config().EVM().FinalityDepth()),
- chain.HeadBroadcaster(),
vrfcommon.NewLogDeduper(int(chain.Config().EVM().FinalityDepth())))}, nil
}
if _, ok := task.(*pipeline.VRFTask); ok {
return []job.ServiceCtx{&v1.Listener{
- Cfg: chain.Config().EVM(),
- FeeCfg: chain.Config().EVM().GasEstimator(),
- L: logger.Sugared(lV1),
- HeadBroadcaster: chain.HeadBroadcaster(),
- LogBroadcaster: chain.LogBroadcaster(),
- Q: d.q,
- Txm: chain.TxManager(),
- Coordinator: coordinator,
- PipelineRunner: d.pr,
- GethKs: d.ks.Eth(),
- Job: jb,
- MailMon: d.mailMon,
+ Cfg: chain.Config().EVM(),
+ FeeCfg: chain.Config().EVM().GasEstimator(),
+ L: logger.Sugared(lV1),
+ Q: d.q,
+ Coordinator: coordinator,
+ PipelineRunner: d.pr,
+ GethKs: d.ks.Eth(),
+ Job: jb,
+ MailMon: d.mailMon,
// Note the mailbox size effectively sets a limit on how many logs we can replay
// in the event of a VRF outage.
ReqLogs: utils.NewHighCapacityMailbox[log.Broadcast](),
ChStop: make(chan struct{}),
WaitOnStop: make(chan struct{}),
NewHead: make(chan struct{}, 1),
- ResponseCount: GetStartingResponseCountsV1(d.q, lV1, chainId.Uint64(), chain.Config().EVM().FinalityDepth()),
BlockNumberToReqID: pairing.New(),
ReqAdded: func() {},
Deduper: vrfcommon.NewLogDeduper(int(chain.Config().EVM().FinalityDepth())),
+ Chain: chain,
}}, nil
}
}
@@ -314,101 +299,3 @@ func FromAddressMaxGasPricesAllEqual(jb job.Job, keySpecificMaxGasPriceWei keySp
}
return
}
-
-func GetStartingResponseCountsV1(q pg.Q, l logger.Logger, chainID uint64, evmFinalityDepth uint32) map[[32]byte]uint64 {
- respCounts := map[[32]byte]uint64{}
-
- // Only check as far back as the evm finality depth for completed transactions.
- counts, err := getRespCounts(q, chainID, evmFinalityDepth)
- if err != nil {
- // Continue with an empty map, do not block job on this.
- l.Errorw("Unable to read previous confirmed fulfillments", "err", err)
- return respCounts
- }
-
- for _, c := range counts {
- // Remove the quotes from the json
- req := strings.Replace(c.RequestID, `"`, ``, 2)
- // Remove the 0x prefix
- b, err := hex.DecodeString(req[2:])
- if err != nil {
- l.Errorw("Unable to read fulfillment", "err", err, "reqID", c.RequestID)
- continue
- }
- var reqID [32]byte
- copy(reqID[:], b)
- respCounts[reqID] = uint64(c.Count)
- }
-
- return respCounts
-}
-
-func GetStartingResponseCountsV2(
- q pg.Q,
- l logger.Logger,
- chainID uint64,
- evmFinalityDepth uint32,
-) map[string]uint64 {
- respCounts := map[string]uint64{}
-
- // Only check as far back as the evm finality depth for completed transactions.
- counts, err := getRespCounts(q, chainID, evmFinalityDepth)
- if err != nil {
- // Continue with an empty map, do not block job on this.
- l.Errorw("Unable to read previous confirmed fulfillments", "err", err)
- return respCounts
- }
-
- for _, c := range counts {
- // Remove the quotes from the json
- req := strings.Replace(c.RequestID, `"`, ``, 2)
- // Remove the 0x prefix
- b, err := hex.DecodeString(req[2:])
- if err != nil {
- l.Errorw("Unable to read fulfillment", "err", err, "reqID", c.RequestID)
- continue
- }
- bi := new(big.Int).SetBytes(b)
- respCounts[bi.String()] = uint64(c.Count)
- }
- return respCounts
-}
-
-func getRespCounts(q pg.Q, chainID uint64, evmFinalityDepth uint32) (
- []struct {
- RequestID string
- Count int
- },
- error,
-) {
- counts := []struct {
- RequestID string
- Count int
- }{}
- // This query should use the idx_evm.txes_state_from_address_evm_chain_id
- // index, since the quantity of unconfirmed/unstarted/in_progress transactions _should_ be small
- // relative to the rest of the data.
- unconfirmedQuery := `
-SELECT meta->'RequestID' AS request_id, count(meta->'RequestID') AS count
-FROM evm.txes et
-WHERE et.meta->'RequestID' IS NOT NULL
-AND et.state IN ('unconfirmed', 'unstarted', 'in_progress')
-GROUP BY meta->'RequestID'
- `
- // Fetch completed transactions only as far back as the given cutoffBlockNumber. This avoids
- // a table scan of the evm.txes table, which could be large if it is unpruned.
- confirmedQuery := `
-SELECT meta->'RequestID' AS request_id, count(meta->'RequestID') AS count
-FROM evm.txes et JOIN evm.tx_attempts eta on et.id = eta.eth_tx_id
- join evm.receipts er on eta.hash = er.tx_hash
-WHERE et.meta->'RequestID' is not null
-AND er.block_number >= (SELECT number FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC LIMIT 1) - $2
-GROUP BY meta->'RequestID'
- `
- query := unconfirmedQuery + "\nUNION ALL\n" + confirmedQuery
- err := q.Select(&counts, query, chainID, evmFinalityDepth)
- if err != nil {
- return nil, err
- }
- return counts, nil
-}
diff --git a/core/services/vrf/delegate_test.go b/core/services/vrf/delegate_test.go
index 38b361716b6..389e1159be1 100644
--- a/core/services/vrf/delegate_test.go
+++ b/core/services/vrf/delegate_test.go
@@ -6,7 +6,7 @@ import (
"testing"
"time"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/assets"
"github.com/smartcontractkit/chainlink/v2/core/bridges"
@@ -17,9 +17,9 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/log"
log_mocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/log/mocks"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
- txmmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr/mocks"
evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/solidity_vrf_coordinator_interface"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/evmtest"
@@ -58,7 +58,7 @@ type vrfUniverse struct {
ks keystore.Master
vrfkey vrfkey.KeyV2
submitter common.Address
- txm *txmmocks.MockEvmTxManager
+ txm *txmgr.TxManager
hb httypes.HeadBroadcaster
legacyChains evm.LegacyChainContainer
cid big.Int
@@ -68,28 +68,33 @@ func buildVrfUni(t *testing.T, db *sqlx.DB, cfg chainlink.GeneralConfig) vrfUniv
// Mock all chain interactions
lb := log_mocks.NewBroadcaster(t)
lb.On("AddDependents", 1).Maybe()
+ lb.On("Register", mock.Anything, mock.Anything).Return(func() {}).Maybe()
ec := evmclimocks.NewClient(t)
ec.On("ConfiguredChainID").Return(testutils.FixtureChainID)
+ ec.On("LatestBlockHeight", mock.Anything).Return(big.NewInt(51), nil).Maybe()
lggr := logger.TestLogger(t)
hb := headtracker.NewHeadBroadcaster(lggr)
// Don't mock db interactions
prm := pipeline.NewORM(db, lggr, cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns())
btORM := bridges.NewORM(db, lggr, cfg.Database())
- txm := txmmocks.NewMockEvmTxManager(t)
ks := keystore.NewInMemory(db, utils.FastScryptParams, lggr, cfg.Database())
+ _, dbConfig, evmConfig := txmgr.MakeTestConfigs(t)
+ txm, err := txmgr.NewTxm(db, evmConfig, evmConfig.GasEstimator(), evmConfig.Transactions(), dbConfig, dbConfig.Listener(), ec, logger.TestLogger(t), nil, ks.Eth(), nil)
+ orm := headtracker.NewORM(db, lggr, cfg.Database(), *testutils.FixtureChainID)
+ require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), cltest.Head(51)))
+ jrm := job.NewORM(db, prm, btORM, ks, lggr, cfg.Database())
+ t.Cleanup(func() { assert.NoError(t, jrm.Close()) })
relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{LogBroadcaster: lb, KeyStore: ks.Eth(), Client: ec, DB: db, GeneralConfig: cfg, TxManager: txm})
legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
- jrm := job.NewORM(db, legacyChains, prm, btORM, ks, lggr, cfg.Database())
- t.Cleanup(func() { jrm.Close() })
pr := pipeline.NewRunner(prm, btORM, cfg.JobPipeline(), cfg.WebServer(), legacyChains, ks.Eth(), ks.VRF(), lggr, nil, nil)
require.NoError(t, ks.Unlock(testutils.Password))
- k, err := ks.Eth().Create(testutils.FixtureChainID)
- require.NoError(t, err)
+ k, err2 := ks.Eth().Create(testutils.FixtureChainID)
+ require.NoError(t, err2)
submitter := k.Address
require.NoError(t, err)
- vrfkey, err := ks.VRF().Create()
- require.NoError(t, err)
+ vrfkey, err3 := ks.VRF().Create()
+ require.NoError(t, err3)
return vrfUniverse{
jrm: jrm,
@@ -100,7 +105,7 @@ func buildVrfUni(t *testing.T, db *sqlx.DB, cfg chainlink.GeneralConfig) vrfUniv
ks: ks,
vrfkey: vrfkey,
submitter: submitter,
- txm: txm,
+ txm: &txm,
hb: hb,
legacyChains: legacyChains,
cid: *ec.ConfiguredChainID(),
@@ -172,6 +177,7 @@ func setup(t *testing.T) (vrfUniverse, *v1.Listener, job.Job) {
listener.RunHeadListener(func() {})
}()
t.Cleanup(func() { listener.Stop(t) })
+ require.NoError(t, listener.Start(testutils.Context(t)))
return vuni, listener, jb
}
@@ -302,20 +308,6 @@ func TestDelegate_ValidLog(t *testing.T) {
// Expect a call to check if the req is already fulfilled.
vuni.ec.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(generateCallbackReturnValues(t, false), nil)
- // Ensure we queue up a valid eth transaction
- // Linked to requestID
- vuni.txm.On("CreateTransaction",
- mock.Anything,
- mock.MatchedBy(func(txRequest txmgr.TxRequest) bool {
- meta := txRequest.Meta
- return txRequest.FromAddress == vuni.submitter &&
- txRequest.ToAddress == common.HexToAddress(jb.VRFSpec.CoordinatorAddress.String()) &&
- txRequest.FeeLimit == uint32(500000) &&
- meta.JobID != nil && meta.RequestID != nil && meta.RequestTxHash != nil &&
- (*meta.JobID > 0 && *meta.RequestID == tc.reqID && *meta.RequestTxHash == txHash)
- }),
- ).Once().Return(txmgr.Tx{}, nil)
-
listener.HandleLog(log.NewLogBroadcast(tc.log, vuni.cid, nil))
// Wait until the log is present
waitForChannel(t, added, time.Second, "request not added to the queue")
diff --git a/core/services/vrf/v1/integration_test.go b/core/services/vrf/v1/integration_test.go
index b7e6be43183..a7dca56776f 100644
--- a/core/services/vrf/v1/integration_test.go
+++ b/core/services/vrf/v1/integration_test.go
@@ -2,7 +2,6 @@ package v1_test
import (
"encoding/hex"
- "fmt"
"math/big"
"strings"
"testing"
@@ -46,7 +45,7 @@ func TestIntegration_VRF_JPV2(t *testing.T) {
for _, tt := range tests {
test := tt
t.Run(test.name, func(t *testing.T) {
- config, _ := heavyweight.FullTestDBV2(t, fmt.Sprintf("vrf_jpv2_%v", test.eip1559), func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
c.EVM[0].GasEstimator.EIP1559DynamicFees = &test.eip1559
c.EVM[0].ChainID = (*utils.Big)(testutils.SimulatedChainID)
})
@@ -129,7 +128,7 @@ func TestIntegration_VRF_JPV2(t *testing.T) {
func TestIntegration_VRF_WithBHS(t *testing.T) {
t.Parallel()
- config, _ := heavyweight.FullTestDBV2(t, "vrf_with_bhs", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
c.EVM[0].GasEstimator.EIP1559DynamicFees = ptr(true)
c.EVM[0].BlockBackfillDepth = ptr[uint32](500)
c.Feature.LogPoller = ptr(true)
diff --git a/core/services/vrf/v1/listener_v1.go b/core/services/vrf/v1/listener_v1.go
index 613c0d124df..b1f9bbb5034 100644
--- a/core/services/vrf/v1/listener_v1.go
+++ b/core/services/vrf/v1/listener_v1.go
@@ -3,20 +3,22 @@ package v1
import (
"context"
"encoding/hex"
+ "errors"
"fmt"
"math/big"
+ "strings"
"sync"
"time"
+ "github.com/avast/retry-go/v4"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
heaps "github.com/theodesp/go-heaps"
"github.com/theodesp/go-heaps/pairing"
"github.com/smartcontractkit/chainlink-relay/pkg/services"
- httypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker/types"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/log"
- "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/solidity_vrf_coordinator_interface"
"github.com/smartcontractkit/chainlink/v2/core/logger"
@@ -46,24 +48,22 @@ type request struct {
type Listener struct {
services.StateMachine
- Cfg vrfcommon.Config
- FeeCfg vrfcommon.FeeConfig
- L logger.SugaredLogger
- LogBroadcaster log.Broadcaster
- Coordinator *solidity_vrf_coordinator_interface.VRFCoordinator
- PipelineRunner pipeline.Runner
- Job job.Job
- Q pg.Q
- HeadBroadcaster httypes.HeadBroadcasterRegistry
- Txm txmgr.TxManager
- GethKs vrfcommon.GethKeyStore
- MailMon *utils.MailboxMonitor
- ReqLogs *utils.Mailbox[log.Broadcast]
- ChStop utils.StopChan
- WaitOnStop chan struct{}
- NewHead chan struct{}
- LatestHead uint64
- LatestHeadMu sync.RWMutex
+ Cfg vrfcommon.Config
+ FeeCfg vrfcommon.FeeConfig
+ L logger.SugaredLogger
+ Coordinator *solidity_vrf_coordinator_interface.VRFCoordinator
+ PipelineRunner pipeline.Runner
+ Job job.Job
+ Q pg.Q
+ GethKs vrfcommon.GethKeyStore
+ MailMon *utils.MailboxMonitor
+ ReqLogs *utils.Mailbox[log.Broadcast]
+ ChStop utils.StopChan
+ WaitOnStop chan struct{}
+ NewHead chan struct{}
+ LatestHead uint64
+ LatestHeadMu sync.RWMutex
+ Chain evm.Chain
// We can keep these pending logs in memory because we
// only mark them confirmed once we send a corresponding fulfillment transaction.
@@ -110,11 +110,11 @@ func (lsn *Listener) getLatestHead() uint64 {
}
// Start complies with job.Service
-func (lsn *Listener) Start(context.Context) error {
+func (lsn *Listener) Start(ctx context.Context) error {
return lsn.StartOnce("VRFListener", func() error {
- spec := job.LoadEnvConfigVarsVRF(lsn.Cfg, *lsn.Job.VRFSpec)
+ spec := job.LoadDefaultVRFPollPeriod(*lsn.Job.VRFSpec)
- unsubscribeLogs := lsn.LogBroadcaster.Register(lsn, log.ListenerOpts{
+ unsubscribeLogs := lsn.Chain.LogBroadcaster().Register(lsn, log.ListenerOpts{
Contract: lsn.Coordinator.Address(),
ParseLog: lsn.Coordinator.ParseLog,
LogsWithTopics: map[common.Hash][][]log.Topic{
@@ -136,10 +136,19 @@ func (lsn *Listener) Start(context.Context) error {
})
// Subscribe to the head broadcaster for handling
// per request conf requirements.
- latestHead, unsubscribeHeadBroadcaster := lsn.HeadBroadcaster.Subscribe(lsn)
+ latestHead, unsubscribeHeadBroadcaster := lsn.Chain.HeadBroadcaster().Subscribe(lsn)
if latestHead != nil {
lsn.setLatestHead(latestHead)
}
+
+ // Populate the response count map
+ lsn.RespCountMu.Lock()
+ defer lsn.RespCountMu.Unlock()
+ respCount, err := lsn.GetStartingResponseCountsV1(ctx)
+ if err != nil {
+ return err
+ }
+ lsn.ResponseCount = respCount
go lsn.RunLogListener([]func(){unsubscribeLogs}, spec.MinIncomingConfirmations)
go lsn.RunHeadListener(unsubscribeHeadBroadcaster)
@@ -148,6 +157,48 @@ func (lsn *Listener) Start(context.Context) error {
})
}
+func (lsn *Listener) GetStartingResponseCountsV1(ctx context.Context) (respCount map[[32]byte]uint64, err error) {
+ respCounts := make(map[[32]byte]uint64)
+ var latestBlockNum *big.Int
+ // Retry the client call for LatestBlockHeight if it fails, since we
+ // want to avoid failing startup due to a potentially faulty RPC call.
+ err = retry.Do(func() error {
+ latestBlockNum, err = lsn.Chain.Client().LatestBlockHeight(ctx)
+ return err
+ }, retry.Attempts(10), retry.Delay(500*time.Millisecond))
+ if err != nil {
+ return nil, err
+ }
+ if latestBlockNum == nil {
+ return nil, errors.New("LatestBlockHeight returned nil block num")
+ }
+ confirmedBlockNum := latestBlockNum.Int64() - int64(lsn.Chain.Config().EVM().FinalityDepth())
+ // Only check as far back as the evm finality depth for completed transactions.
+ var counts []vrfcommon.RespCountEntry
+ counts, err = vrfcommon.GetRespCounts(ctx, lsn.Chain.TxManager(), lsn.Chain.Client().ConfiguredChainID(), confirmedBlockNum)
+ if err != nil {
+ // Continue with an empty map, do not block job on this.
+ lsn.L.Errorw("Unable to read previous confirmed fulfillments", "err", err)
+ return respCounts, nil
+ }
+
+ for _, c := range counts {
+ // Remove the quotes from the json
+ req := strings.Replace(c.RequestID, `"`, ``, 2)
+ // Remove the 0x prefix
+ b, err := hex.DecodeString(req[2:])
+ if err != nil {
+ lsn.L.Errorw("Unable to read fulfillment", "err", err, "reqID", c.RequestID)
+ continue
+ }
+ var reqID [32]byte
+ copy(reqID[:], b)
+ respCounts[reqID] = uint64(c.Count)
+ }
+
+ return respCounts, nil
+}
+
// Removes and returns all the confirmed logs from
// the pending queue.
func (lsn *Listener) extractConfirmedLogs() []request {
@@ -314,7 +365,7 @@ func (lsn *Listener) handleLog(lb log.Broadcast, minConfs uint32) {
}
func (lsn *Listener) shouldProcessLog(lb log.Broadcast) bool {
- consumed, err := lsn.LogBroadcaster.WasAlreadyConsumed(lb)
+ consumed, err := lsn.Chain.LogBroadcaster().WasAlreadyConsumed(lb)
if err != nil {
lsn.L.Errorw("Could not determine if log was already consumed", "error", err, "txHash", lb.RawLog().TxHash)
// Do not process, let lb resend it as a retry mechanism.
@@ -324,7 +375,7 @@ func (lsn *Listener) shouldProcessLog(lb log.Broadcast) bool {
}
func (lsn *Listener) markLogAsConsumed(lb log.Broadcast) {
- err := lsn.LogBroadcaster.MarkConsumed(lb)
+ err := lsn.Chain.LogBroadcaster().MarkConsumed(lb)
lsn.L.ErrorIf(err, fmt.Sprintf("Unable to mark log %v as consumed", lb.String()))
}
@@ -432,7 +483,7 @@ func (lsn *Listener) ProcessRequest(ctx context.Context, req request) bool {
// The VRF pipeline has no async tasks, so we don't need to check for `incomplete`
if _, err = lsn.PipelineRunner.Run(ctx, run, lggr, true, func(tx pg.Queryer) error {
// Always mark consumed regardless of whether the proof failed or not.
- if err = lsn.LogBroadcaster.MarkConsumed(req.lb, pg.WithQueryer(tx)); err != nil {
+ if err = lsn.Chain.LogBroadcaster().MarkConsumed(req.lb, pg.WithQueryer(tx)); err != nil {
lggr.Errorw("Failed mark consumed", "err", err)
}
return nil
diff --git a/core/services/vrf/v2/bhs_feeder_test.go b/core/services/vrf/v2/bhs_feeder_test.go
index 0da28378d01..219fe1c8fd2 100644
--- a/core/services/vrf/v2/bhs_feeder_test.go
+++ b/core/services/vrf/v2/bhs_feeder_test.go
@@ -51,7 +51,7 @@ func TestStartHeartbeats(t *testing.T) {
keys = append(keys, ownerKey, vrfKey)
- config, _ := heavyweight.FullTestDBV2(t, "vrfv2_needs_blockhash_store", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
simulatedOverrides(t, gasLanePriceWei, keySpecificOverrides...)(c, s)
c.EVM[0].MinIncomingConfirmations = ptr[uint32](2)
c.Feature.LogPoller = ptr(true)
diff --git a/core/services/vrf/v2/integration_helpers_test.go b/core/services/vrf/v2/integration_helpers_test.go
index 09c9a0ed437..a086cbbb09f 100644
--- a/core/services/vrf/v2/integration_helpers_test.go
+++ b/core/services/vrf/v2/integration_helpers_test.go
@@ -1,7 +1,6 @@
package v2_test
import (
- "fmt"
"math/big"
"strings"
"testing"
@@ -62,7 +61,7 @@ func testSingleConsumerHappyPath(
key1 := cltest.MustGenerateRandomKey(t)
key2 := cltest.MustGenerateRandomKey(t)
gasLanePriceWei := assets.GWei(10)
- config, db := heavyweight.FullTestDBV2(t, "vrfv2_singleconsumer_happypath", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
simulatedOverrides(t, assets.GWei(10), toml.KeySpecific{
// Gas lane.
Key: ptr(key1.EIP55Address),
@@ -113,7 +112,7 @@ func testSingleConsumerHappyPath(
}, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue())
// Mine the fulfillment that was queued.
- mine(t, requestID1, subID, uni.backend, db, vrfVersion)
+ mine(t, requestID1, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID)
// Assert correct state of RandomWordsFulfilled event.
// In particular:
@@ -133,7 +132,7 @@ func testSingleConsumerHappyPath(
t.Log("runs", len(runs))
return len(runs) == 2
}, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue())
- mine(t, requestID2, subID, uni.backend, db, vrfVersion)
+ mine(t, requestID2, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID)
// Assert correct state of RandomWordsFulfilled event.
// In particular:
@@ -202,7 +201,7 @@ func testMultipleConsumersNeedBHS(
GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei},
})
- config, db := heavyweight.FullTestDBV2(t, "vrfv2_needs_blockhash_store", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
simulatedOverrides(t, assets.GWei(10), keySpecificOverrides...)(c, s)
c.EVM[0].MinIncomingConfirmations = ptr[uint32](2)
c.Feature.LogPoller = ptr(true)
@@ -285,7 +284,7 @@ func testMultipleConsumersNeedBHS(
return len(runs) == 1
}, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue())
- mine(t, requestID, subID, uni.backend, db, vrfVersion)
+ mine(t, requestID, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID)
rwfe := assertRandomWordsFulfilled(t, requestID, true, coordinator, nativePayment)
if len(assertions) > 0 {
@@ -349,7 +348,7 @@ func testMultipleConsumersNeedTrustedBHS(
uni.backend.Commit()
}
- config, db := heavyweight.FullTestDBV2(t, "vrfv2_needs_trusted_blockhash_store", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
simulatedOverrides(t, assets.GWei(10), keySpecificOverrides...)(c, s)
c.EVM[0].MinIncomingConfirmations = ptr[uint32](2)
c.EVM[0].GasEstimator.LimitDefault = ptr(uint32(5_000_000))
@@ -446,7 +445,7 @@ func testMultipleConsumersNeedTrustedBHS(
return len(runs) == 1
}, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue())
- mine(t, requestID, subID, uni.backend, db, vrfVersion)
+ mine(t, requestID, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID)
rwfe := assertRandomWordsFulfilled(t, requestID, true, coordinator, nativePayment)
if len(assertions) > 0 {
@@ -531,7 +530,7 @@ func testSingleConsumerHappyPathBatchFulfillment(
) {
key1 := cltest.MustGenerateRandomKey(t)
gasLanePriceWei := assets.GWei(10)
- config, db := heavyweight.FullTestDBV2(t, "vrfv2_singleconsumer_batch_happypath", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
simulatedOverrides(t, assets.GWei(10), toml.KeySpecific{
// Gas lane.
Key: ptr(key1.EIP55Address),
@@ -592,7 +591,7 @@ func testSingleConsumerHappyPathBatchFulfillment(
return len(runs) == numRequests
}, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue())
- mineBatch(t, reqIDs, subID, uni.backend, db, vrfVersion)
+ mineBatch(t, reqIDs, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID)
for i, requestID := range reqIDs {
// Assert correct state of RandomWordsFulfilled event.
@@ -635,7 +634,7 @@ func testSingleConsumerNeedsTopUp(
) {
key := cltest.MustGenerateRandomKey(t)
gasLanePriceWei := assets.GWei(1000)
- config, db := heavyweight.FullTestDBV2(t, "vrfv2_singleconsumer_needstopup", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
simulatedOverrides(t, assets.GWei(1000), toml.KeySpecific{
// Gas lane.
Key: ptr(key.EIP55Address),
@@ -694,7 +693,7 @@ func testSingleConsumerNeedsTopUp(
// Mine the fulfillment. Need to wait for Txm to mark the tx as confirmed
// so that we can actually see the event on the simulated chain.
- mine(t, requestID, subID, uni.backend, db, vrfVersion)
+ mine(t, requestID, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID)
// Assert the state of the RandomWordsFulfilled event.
rwfe := assertRandomWordsFulfilled(t, requestID, true, coordinator, nativePayment)
@@ -739,7 +738,7 @@ func testBlockHeaderFeeder(
gasLanePriceWei := assets.GWei(10)
- config, db := heavyweight.FullTestDBV2(t, "vrfv2_test_block_header_feeder", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
simulatedOverrides(t, gasLanePriceWei, toml.KeySpecific{
// Gas lane.
Key: ptr(vrfKey.EIP55Address),
@@ -818,7 +817,7 @@ func testBlockHeaderFeeder(
return len(runs) == 1
}, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue())
- mine(t, requestID, subID, uni.backend, db, vrfVersion)
+ mine(t, requestID, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID)
rwfe := assertRandomWordsFulfilled(t, requestID, true, coordinator, nativePayment)
if len(assertions) > 0 {
@@ -894,7 +893,7 @@ func testSingleConsumerForcedFulfillment(
key1 := cltest.MustGenerateRandomKey(t)
key2 := cltest.MustGenerateRandomKey(t)
gasLanePriceWei := assets.GWei(10)
- config, db := heavyweight.FullTestDBV2(t, fmt.Sprintf("vrfv2_singleconsumer_forcefulfill_%v", batchEnabled), func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
simulatedOverrides(t, assets.GWei(10), toml.KeySpecific{
// Gas lane.
Key: ptr(key1.EIP55Address),
@@ -1021,7 +1020,7 @@ func testSingleConsumerForcedFulfillment(
}, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue())
// Mine the fulfillment that was queued.
- mine(t, requestID, subID, uni.backend, db, vrfVersion)
+ mine(t, requestID, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID)
// Assert correct state of RandomWordsFulfilled event.
// In this particular case:
@@ -1061,7 +1060,7 @@ func testSingleConsumerEIP150(
key1 := cltest.MustGenerateRandomKey(t)
gasLanePriceWei := assets.GWei(10)
- config, _ := heavyweight.FullTestDBV2(t, "vrfv2_singleconsumer_eip150_happypath", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
simulatedOverrides(t, assets.GWei(10), v2.KeySpecific{
// Gas lane.
Key: ptr(key1.EIP55Address),
@@ -1129,7 +1128,7 @@ func testSingleConsumerEIP150Revert(
key1 := cltest.MustGenerateRandomKey(t)
gasLanePriceWei := assets.GWei(10)
- config, _ := heavyweight.FullTestDBV2(t, "vrfv2_singleconsumer_eip150_revert", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
simulatedOverrides(t, assets.GWei(10), v2.KeySpecific{
// Gas lane.
Key: ptr(key1.EIP55Address),
@@ -1192,7 +1191,7 @@ func testSingleConsumerBigGasCallbackSandwich(
) {
key1 := cltest.MustGenerateRandomKey(t)
gasLanePriceWei := assets.GWei(100)
- config, db := heavyweight.FullTestDBV2(t, "vrfv2_singleconsumer_bigcallback_sandwich", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
simulatedOverrides(t, assets.GWei(100), v2.KeySpecific{
// Gas lane.
Key: ptr(key1.EIP55Address),
@@ -1264,7 +1263,7 @@ func testSingleConsumerBigGasCallbackSandwich(
}, 3*time.Second, 1*time.Second).Should(gomega.BeTrue())
// Mine the fulfillment that was queued.
- mine(t, reqIDs[1], subID, uni.backend, db, vrfVersion)
+ mine(t, reqIDs[1], subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID)
// Assert the random word was fulfilled
assertRandomWordsFulfilled(t, reqIDs[1], false, uni.rootContract, nativePayment)
@@ -1308,7 +1307,7 @@ func testSingleConsumerMultipleGasLanes(
expensiveKey := cltest.MustGenerateRandomKey(t)
cheapGasLane := assets.GWei(10)
expensiveGasLane := assets.GWei(1000)
- config, db := heavyweight.FullTestDBV2(t, "vrfv2_singleconsumer_multiplegaslanes", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
simulatedOverrides(t, assets.GWei(10), v2.KeySpecific{
// Cheap gas lane.
Key: ptr(cheapKey.EIP55Address),
@@ -1365,7 +1364,7 @@ func testSingleConsumerMultipleGasLanes(
}, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue())
// Mine the fulfillment that was queued.
- mine(t, cheapRequestID, subID, uni.backend, db, vrfVersion)
+ mine(t, cheapRequestID, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID)
// Assert correct state of RandomWordsFulfilled event.
assertRandomWordsFulfilled(t, cheapRequestID, true, uni.rootContract, nativePayment)
@@ -1397,7 +1396,7 @@ func testSingleConsumerMultipleGasLanes(
}, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue())
// Mine the fulfillment that was queued.
- mine(t, expensiveRequestID, subID, uni.backend, db, vrfVersion)
+ mine(t, expensiveRequestID, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID)
// Assert correct state of RandomWordsFulfilled event.
assertRandomWordsFulfilled(t, expensiveRequestID, true, uni.rootContract, nativePayment)
@@ -1428,7 +1427,7 @@ func testSingleConsumerAlwaysRevertingCallbackStillFulfilled(
) {
key := cltest.MustGenerateRandomKey(t)
gasLanePriceWei := assets.GWei(10)
- config, db := heavyweight.FullTestDBV2(t, "vrfv2_singleconsumer_alwaysrevertingcallback", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
simulatedOverrides(t, assets.GWei(10), v2.KeySpecific{
// Gas lane.
Key: ptr(key.EIP55Address),
@@ -1477,7 +1476,7 @@ func testSingleConsumerAlwaysRevertingCallbackStillFulfilled(
}, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue())
// Mine the fulfillment that was queued.
- mine(t, requestID, subID, uni.backend, db, vrfVersion)
+ mine(t, requestID, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID)
// Assert correct state of RandomWordsFulfilled event.
assertRandomWordsFulfilled(t, requestID, false, uni.rootContract, nativePayment)
@@ -1496,7 +1495,7 @@ func testConsumerProxyHappyPath(
key1 := cltest.MustGenerateRandomKey(t)
key2 := cltest.MustGenerateRandomKey(t)
gasLanePriceWei := assets.GWei(10)
- config, db := heavyweight.FullTestDBV2(t, "vrfv2_consumerproxy_happypath", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
simulatedOverrides(t, assets.GWei(10), v2.KeySpecific{
// Gas lane.
Key: ptr(key1.EIP55Address),
@@ -1552,7 +1551,7 @@ func testConsumerProxyHappyPath(
}, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue())
// Mine the fulfillment that was queued.
- mine(t, requestID1, subID, uni.backend, db, vrfVersion)
+ mine(t, requestID1, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID)
// Assert correct state of RandomWordsFulfilled event.
assertRandomWordsFulfilled(t, requestID1, true, uni.rootContract, nativePayment)
@@ -1576,7 +1575,7 @@ func testConsumerProxyHappyPath(
t.Log("runs", len(runs))
return len(runs) == 2
}, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue())
- mine(t, requestID2, subID, uni.backend, db, vrfVersion)
+ mine(t, requestID2, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID)
assertRandomWordsFulfilled(t, requestID2, true, uni.rootContract, nativePayment)
// Assert correct number of random words sent by coordinator.
@@ -1624,7 +1623,7 @@ func testMaliciousConsumer(
batchEnabled bool,
vrfVersion vrfcommon.Version,
) {
- config, _ := heavyweight.FullTestDBV2(t, "vrf_v2plus_integration_malicious", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
c.EVM[0].GasEstimator.LimitDefault = ptr[uint32](2_000_000)
c.EVM[0].GasEstimator.PriceMax = assets.GWei(1)
c.EVM[0].GasEstimator.PriceDefault = assets.GWei(1)
diff --git a/core/services/vrf/v2/integration_v2_plus_test.go b/core/services/vrf/v2/integration_v2_plus_test.go
index f08c10c2004..75026423f4b 100644
--- a/core/services/vrf/v2/integration_v2_plus_test.go
+++ b/core/services/vrf/v2/integration_v2_plus_test.go
@@ -304,6 +304,7 @@ func newVRFCoordinatorV2PlusUniverse(t *testing.T, key ethkey.KeyV2, numConsumer
}
func TestVRFV2PlusIntegration_SingleConsumer_HappyPath_BatchFulfillment(t *testing.T) {
+ testutils.SkipFlakey(t, "https://smartcontract-it.atlassian.net/browse/BCF-2745")
t.Parallel()
ownerKey := cltest.MustGenerateRandomKey(t)
uni := newVRFCoordinatorV2PlusUniverse(t, ownerKey, 1, false)
@@ -456,6 +457,7 @@ func TestVRFV2PlusIntegration_SingleConsumer_HappyPath(t *testing.T) {
}
func TestVRFV2PlusIntegration_SingleConsumer_EOA_Request(t *testing.T) {
+ testutils.SkipFlakey(t, "https://smartcontract-it.atlassian.net/browse/BCF-2744")
t.Parallel()
ownerKey := cltest.MustGenerateRandomKey(t)
uni := newVRFCoordinatorV2PlusUniverse(t, ownerKey, 1, false)
@@ -1139,7 +1141,7 @@ func TestVRFV2PlusIntegration_Migration(t *testing.T) {
uni := newVRFCoordinatorV2PlusUniverse(t, ownerKey, 1, false)
key1 := cltest.MustGenerateRandomKey(t)
gasLanePriceWei := assets.GWei(10)
- config, db := heavyweight.FullTestDBV2(t, "vrfv2plus_migration", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
simulatedOverrides(t, assets.GWei(10), toml.KeySpecific{
// Gas lane.
Key: ptr(key1.EIP55Address),
@@ -1198,7 +1200,7 @@ func TestVRFV2PlusIntegration_Migration(t *testing.T) {
return len(runs) == 1
}, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue())
- mine(t, requestID, subID, uni.backend, db, vrfcommon.V2Plus)
+ mine(t, requestID, subID, uni.backend, db, vrfcommon.V2Plus, testutils.SimulatedChainID)
assertRandomWordsFulfilled(t, requestID, true, uni.rootContract, false)
// Assert correct number of random words sent by coordinator.
diff --git a/core/services/vrf/v2/integration_v2_test.go b/core/services/vrf/v2/integration_v2_test.go
index 093adc8eaaf..1f607da2f26 100644
--- a/core/services/vrf/v2/integration_v2_test.go
+++ b/core/services/vrf/v2/integration_v2_test.go
@@ -24,14 +24,17 @@ import (
"github.com/onsi/gomega"
"github.com/shopspring/decimal"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"gopkg.in/guregu/null.v4"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
txmgrcommon "github.com/smartcontractkit/chainlink/v2/common/txmgr"
txmgrtypes "github.com/smartcontractkit/chainlink/v2/common/txmgr/types"
"github.com/smartcontractkit/chainlink/v2/core/assets"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm"
+ evmclimocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
evmlogger "github.com/smartcontractkit/chainlink/v2/core/chains/evm/log"
@@ -58,6 +61,8 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/internal/cltest/heavyweight"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/evmtest"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
"github.com/smartcontractkit/chainlink/v2/core/services/job"
@@ -67,9 +72,10 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
"github.com/smartcontractkit/chainlink/v2/core/services/pg/datatypes"
"github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
+ evmrelay "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
"github.com/smartcontractkit/chainlink/v2/core/services/signatures/secp256k1"
- "github.com/smartcontractkit/chainlink/v2/core/services/vrf"
"github.com/smartcontractkit/chainlink/v2/core/services/vrf/proof"
+ v1 "github.com/smartcontractkit/chainlink/v2/core/services/vrf/v1"
v22 "github.com/smartcontractkit/chainlink/v2/core/services/vrf/v2"
"github.com/smartcontractkit/chainlink/v2/core/services/vrf/vrfcommon"
"github.com/smartcontractkit/chainlink/v2/core/services/vrf/vrftesthelpers"
@@ -127,24 +133,14 @@ type coordinatorV2Universe struct {
batchCoordinatorContractAddress common.Address
}
-const (
- ConfirmedEthTxesV2Query = `SELECT * FROM evm.txes
- WHERE evm.txes.state = 'confirmed'
- AND evm.txes.meta->>'RequestID' = $1
- AND CAST(evm.txes.meta->>'SubId' AS NUMERIC) = $2 LIMIT 1`
- ConfirmedEthTxesV2PlusQuery = `SELECT * FROM evm.txes
- WHERE evm.txes.state = 'confirmed'
- AND evm.txes.meta->>'RequestID' = $1
- AND CAST(evm.txes.meta->>'GlobalSubId' AS NUMERIC) = $2 LIMIT 1`
- ConfirmedEthTxesV2BatchQuery = `
- SELECT * FROM evm.txes
- WHERE evm.txes.state = 'confirmed'
- AND CAST(evm.txes.meta->>'SubId' AS NUMERIC) = $1`
- ConfirmedEthTxesV2PlusBatchQuery = `
- SELECT * FROM evm.txes
- WHERE evm.txes.state = 'confirmed'
- AND CAST(evm.txes.meta->>'GlobalSubId' AS NUMERIC) = $1`
-)
+func makeTestTxm(t *testing.T, txStore txmgr.TestEvmTxStore, keyStore keystore.Master, ec *evmclimocks.Client) txmgrcommon.TxManager[*big.Int, *evmtypes.Head, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] {
+ _, _, evmConfig := txmgr.MakeTestConfigs(t)
+ txmConfig := txmgr.NewEvmTxmConfig(evmConfig)
+ txm := txmgr.NewEvmTxm(ec.ConfiguredChainID(), txmConfig, evmConfig.Transactions(), keyStore.Eth(), logger.TestLogger(t), nil, nil,
+ nil, txStore, nil, nil, nil, nil)
+
+ return txm
+}
func newVRFCoordinatorV2Universe(t *testing.T, key ethkey.KeyV2, numConsumers int) coordinatorV2Universe {
testutils.SkipShort(t, "VRFCoordinatorV2Universe")
@@ -454,7 +450,7 @@ func sendEth(t *testing.T, key ethkey.KeyV2, ec *backends.SimulatedBackend, to c
nonce, err := ec.PendingNonceAt(testutils.Context(t), key.Address)
require.NoError(t, err)
tx := gethtypes.NewTx(&gethtypes.DynamicFeeTx{
- ChainID: big.NewInt(1337),
+ ChainID: testutils.SimulatedChainID,
Nonce: nonce,
GasTipCap: big.NewInt(1),
GasFeeCap: assets.GWei(10).ToInt(), // block base fee in sim
@@ -463,7 +459,7 @@ func sendEth(t *testing.T, key ethkey.KeyV2, ec *backends.SimulatedBackend, to c
Value: big.NewInt(0).Mul(big.NewInt(int64(eth)), big.NewInt(1e18)),
Data: nil,
})
- signedTx, err := gethtypes.SignTx(tx, gethtypes.NewLondonSigner(big.NewInt(1337)), key.ToEcdsaPrivKey())
+ signedTx, err := gethtypes.SignTx(tx, gethtypes.NewLondonSigner(testutils.SimulatedChainID), key.ToEcdsaPrivKey())
require.NoError(t, err)
err = ec.SendTransaction(testutils.Context(t), signedTx)
require.NoError(t, err)
@@ -762,32 +758,42 @@ func assertNumRandomWords(
}
}
-func mine(t *testing.T, requestID, subID *big.Int, backend *backends.SimulatedBackend, db *sqlx.DB, vrfVersion vrfcommon.Version) bool {
- var query string
+func mine(t *testing.T, requestID, subID *big.Int, backend *backends.SimulatedBackend, db *sqlx.DB, vrfVersion vrfcommon.Version, chainId *big.Int) bool {
+ cfg := pgtest.NewQConfig(false)
+ txstore := txmgr.NewTxStore(db, logger.TestLogger(t), cfg)
+ var metaField string
if vrfVersion == vrfcommon.V2Plus {
- query = ConfirmedEthTxesV2PlusQuery
+ metaField = "GlobalSubId"
} else if vrfVersion == vrfcommon.V2 {
- query = ConfirmedEthTxesV2Query
+ metaField = "SubId"
} else {
t.Errorf("unsupported vrf version %s", vrfVersion)
}
+
return gomega.NewWithT(t).Eventually(func() bool {
backend.Commit()
- var txs []txmgr.DbEthTx
- err := db.Select(&txs, query, common.BytesToHash(requestID.Bytes()).String(), subID.String())
+ txes, err := txstore.FindTxesByMetaFieldAndStates(testutils.Context(t), metaField, subID.String(), []txmgrtypes.TxState{txmgrcommon.TxConfirmed}, chainId)
require.NoError(t, err)
- t.Log("num txs", len(txs))
- return len(txs) == 1
+ for _, tx := range txes {
+ meta, err := tx.GetMeta()
+ require.NoError(t, err)
+ if meta.RequestID.String() == common.BytesToHash(requestID.Bytes()).String() {
+ return true
+ }
+ }
+ return false
}, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue())
}
-func mineBatch(t *testing.T, requestIDs []*big.Int, subID *big.Int, backend *backends.SimulatedBackend, db *sqlx.DB, vrfVersion vrfcommon.Version) bool {
+func mineBatch(t *testing.T, requestIDs []*big.Int, subID *big.Int, backend *backends.SimulatedBackend, db *sqlx.DB, vrfVersion vrfcommon.Version, chainId *big.Int) bool {
requestIDMap := map[string]bool{}
- var query string
+ cfg := pgtest.NewQConfig(false)
+ txstore := txmgr.NewTxStore(db, logger.TestLogger(t), cfg)
+ var metaField string
if vrfVersion == vrfcommon.V2Plus {
- query = ConfirmedEthTxesV2PlusBatchQuery
+ metaField = "GlobalSubId"
} else if vrfVersion == vrfcommon.V2 {
- query = ConfirmedEthTxesV2BatchQuery
+ metaField = "SubId"
} else {
t.Errorf("unsupported vrf version %s", vrfVersion)
}
@@ -796,12 +802,10 @@ func mineBatch(t *testing.T, requestIDs []*big.Int, subID *big.Int, backend *bac
}
return gomega.NewWithT(t).Eventually(func() bool {
backend.Commit()
- var txs []txmgr.DbEthTx
- require.NoError(t, db.Select(&txs, query, subID.String()))
- for _, tx := range txs {
- var evmTx txmgr.Tx
- tx.ToTx(&evmTx)
- meta, err := evmTx.GetMeta()
+ txes, err := txstore.FindTxesByMetaFieldAndStates(testutils.Context(t), metaField, subID.String(), []txmgrtypes.TxState{txmgrcommon.TxConfirmed}, chainId)
+ require.NoError(t, err)
+ for _, tx := range txes {
+ meta, err := tx.GetMeta()
require.NoError(t, err)
for _, requestID := range meta.RequestIDs {
if _, ok := requestIDMap[requestID.String()]; ok {
@@ -963,7 +967,7 @@ func testEoa(
key1 := cltest.MustGenerateRandomKey(t)
gasLanePriceWei := assets.GWei(10)
- config, _ := heavyweight.FullTestDBV2(t, "vrfv2_singleconsumer_eoa_request", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
simulatedOverrides(t, assets.GWei(10), toml.KeySpecific{
// Gas lane.
Key: ptr(key1.EIP55Address),
@@ -1124,7 +1128,7 @@ func TestVRFV2Integration_SingleConsumer_Wrapper(t *testing.T) {
callBackGasLimit := int64(100_000) // base callback gas.
key1 := cltest.MustGenerateRandomKey(t)
gasLanePriceWei := assets.GWei(10)
- config, db := heavyweight.FullTestDBV2(t, "vrfv2_singleconsumer_wrapper", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
simulatedOverrides(t, assets.GWei(10), toml.KeySpecific{
// Gas lane.
Key: ptr(key1.EIP55Address),
@@ -1188,7 +1192,7 @@ func TestVRFV2Integration_SingleConsumer_Wrapper(t *testing.T) {
}, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue())
// Mine the fulfillment that was queued.
- mine(t, requestID, new(big.Int).SetUint64(wrapperSubID), uni.backend, db, vrfcommon.V2)
+ mine(t, requestID, new(big.Int).SetUint64(wrapperSubID), uni.backend, db, vrfcommon.V2, testutils.SimulatedChainID)
// Assert correct state of RandomWordsFulfilled event.
assertRandomWordsFulfilled(t, requestID, true, uni.rootContract, false)
@@ -1204,7 +1208,7 @@ func TestVRFV2Integration_Wrapper_High_Gas(t *testing.T) {
key1 := cltest.MustGenerateRandomKey(t)
callBackGasLimit := int64(2_000_000) // base callback gas.
gasLanePriceWei := assets.GWei(10)
- config, db := heavyweight.FullTestDBV2(t, "vrfv2_wrapper_high_gas_revert", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
simulatedOverrides(t, assets.GWei(10), toml.KeySpecific{
// Gas lane.
Key: ptr(key1.EIP55Address),
@@ -1268,7 +1272,7 @@ func TestVRFV2Integration_Wrapper_High_Gas(t *testing.T) {
}, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue())
// Mine the fulfillment that was queued.
- mine(t, requestID, new(big.Int).SetUint64(wrapperSubID), uni.backend, db, vrfcommon.V2)
+ mine(t, requestID, new(big.Int).SetUint64(wrapperSubID), uni.backend, db, vrfcommon.V2, testutils.SimulatedChainID)
// Assert correct state of RandomWordsFulfilled event.
assertRandomWordsFulfilled(t, requestID, true, uni.rootContract, false)
@@ -1581,7 +1585,7 @@ func TestIntegrationVRFV2(t *testing.T) {
gasPrice := assets.GWei(1)
key := cltest.MustGenerateRandomKey(t)
gasLanePriceWei := assets.GWei(10)
- config, _ := heavyweight.FullTestDBV2(t, "vrf_v2_integration", func(c *chainlink.Config, s *chainlink.Secrets) {
+ config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
simulatedOverrides(t, gasPrice, toml.KeySpecific{
Key: &key.EIP55Address,
GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei},
@@ -1599,6 +1603,10 @@ func TestIntegrationVRFV2(t *testing.T) {
require.Zero(t, key.Cmp(keys[0]))
require.NoError(t, app.Start(testutils.Context(t)))
+ var chain evm.Chain
+ chain, err = app.GetRelayers().LegacyEVMChains().Get(testutils.SimulatedChainID.String())
+ require.NoError(t, err)
+ listenerV2 := v22.MakeTestListenerV2(chain)
jbs := createVRFJobs(
t,
@@ -1751,11 +1759,10 @@ func TestIntegrationVRFV2(t *testing.T) {
})
// We should see the response count present
- chain, err := app.GetRelayers().LegacyEVMChains().Get(big.NewInt(1337).String())
require.NoError(t, err)
-
- q := pg.NewQ(app.GetSqlxDB(), app.Logger, app.Config.Database())
- counts := vrf.GetStartingResponseCountsV2(q, app.Logger, chain.Client().ConfiguredChainID().Uint64(), chain.Config().EVM().FinalityDepth())
+ var counts map[string]uint64
+ counts, err = listenerV2.GetStartingResponseCountsV2(testutils.Context(t))
+ require.NoError(t, err)
t.Log(counts, rf[0].RequestID().String())
assert.Equal(t, uint64(1), counts[rf[0].RequestID().String()])
}
@@ -1996,20 +2003,31 @@ func TestFulfillmentCost(t *testing.T) {
}
func TestStartingCountsV1(t *testing.T) {
- cfg, db := heavyweight.FullTestDBNoFixturesV2(t, "vrf_test_starting_counts", nil)
- _, err := db.Exec(`INSERT INTO evm.heads (hash, number, parent_hash, created_at, timestamp, evm_chain_id)
- VALUES ($1, 4, $2, NOW(), NOW(), 1337)`, utils.NewHash(), utils.NewHash())
- require.NoError(t, err)
+ cfg, db := heavyweight.FullTestDBNoFixturesV2(t, nil)
lggr := logger.TestLogger(t)
- q := pg.NewQ(db, lggr, cfg.Database())
- finalityDepth := 3
- counts := vrf.GetStartingResponseCountsV1(q, lggr, 1337, uint32(finalityDepth))
- assert.Equal(t, 0, len(counts))
+ qCfg := pgtest.NewQConfig(false)
+ txStore := txmgr.NewTxStore(db, logger.TestLogger(t), qCfg)
ks := keystore.NewInMemory(db, utils.FastScryptParams, lggr, cfg.Database())
+ ec := evmclimocks.NewClient(t)
+ ec.On("ConfiguredChainID").Return(testutils.SimulatedChainID)
+ ec.On("LatestBlockHeight", mock.Anything).Return(big.NewInt(2), nil).Maybe()
+ txm := makeTestTxm(t, txStore, ks, ec)
+ relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{KeyStore: ks.Eth(), Client: ec, DB: db, GeneralConfig: cfg, TxManager: txm})
+ legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
+ chain, err := legacyChains.Get(testutils.SimulatedChainID.String())
+ require.NoError(t, err)
+ listenerV1 := &v1.Listener{
+ Chain: chain,
+ }
+ listenerV2 := v22.MakeTestListenerV2(chain)
+ var counts map[[32]byte]uint64
+ counts, err = listenerV1.GetStartingResponseCountsV1(testutils.Context(t))
+ require.NoError(t, err)
+ assert.Equal(t, 0, len(counts))
err = ks.Unlock(testutils.Password)
require.NoError(t, err)
- k, err := ks.Eth().Create(big.NewInt(1337))
+ k, err := ks.Eth().Create(testutils.SimulatedChainID)
require.NoError(t, err)
b := time.Now()
n1, n2, n3, n4 := evmtypes.Nonce(0), evmtypes.Nonce(1), evmtypes.Nonce(2), evmtypes.Nonce(3)
@@ -2027,7 +2045,7 @@ func TestStartingCountsV1(t *testing.T) {
md2, err := json.Marshal(&m2)
md2_ := datatypes.JSON(md2)
require.NoError(t, err)
- chainID := utils.NewBig(big.NewInt(1337))
+ chainID := utils.NewBig(testutils.SimulatedChainID)
confirmedTxes := []txmgr.Tx{
{
Sequence: &n1,
@@ -2100,16 +2118,13 @@ func TestStartingCountsV1(t *testing.T) {
ChainID: chainID.ToInt(),
})
}
- sql := `INSERT INTO evm.txes (nonce, from_address, to_address, encoded_payload, value, gas_limit, state, created_at, broadcast_at, initial_broadcast_at, meta, subject, evm_chain_id, min_confirmations, pipeline_task_run_id)
-VALUES (:nonce, :from_address, :to_address, :encoded_payload, :value, :gas_limit, :state, :created_at, :broadcast_at, :initial_broadcast_at, :meta, :subject, :evm_chain_id, :min_confirmations, :pipeline_task_run_id);`
- for _, tx := range append(confirmedTxes, unconfirmedTxes...) {
- var dbEtx txmgr.DbEthTx
- dbEtx.FromTx(&tx) //nolint:gosec // just copying fields
- _, err = db.NamedExec(sql, &dbEtx)
+ txList := append(confirmedTxes, unconfirmedTxes...)
+ for i := range txList {
+ err = txStore.InsertTx(&txList[i])
require.NoError(t, err)
}
- // add evm.tx_attempts for confirmed
+ // add tx attempt for confirmed
broadcastBlock := int64(1)
var txAttempts []txmgr.TxAttempt
for i := range confirmedTxes {
@@ -2124,7 +2139,7 @@ VALUES (:nonce, :from_address, :to_address, :encoded_payload, :value, :gas_limit
ChainSpecificFeeLimit: uint32(100),
})
}
- // add evm.tx_attempts for unconfirmed
+ // add tx attempt for unconfirmed
for i := range unconfirmedTxes {
txAttempts = append(txAttempts, txmgr.TxAttempt{
TxID: int64(i + 1 + len(confirmedTxes)),
@@ -2139,41 +2154,35 @@ VALUES (:nonce, :from_address, :to_address, :encoded_payload, :value, :gas_limit
for _, txAttempt := range txAttempts {
t.Log("tx attempt eth tx id: ", txAttempt.TxID)
}
- sql = `INSERT INTO evm.tx_attempts (eth_tx_id, gas_price, signed_raw_tx, hash, state, created_at, chain_specific_gas_limit)
- VALUES (:eth_tx_id, :gas_price, :signed_raw_tx, :hash, :state, :created_at, :chain_specific_gas_limit)`
- for _, attempt := range txAttempts {
- var dbAttempt txmgr.DbEthTxAttempt
- dbAttempt.FromTxAttempt(&attempt) //nolint:gosec // just copying fields
- _, err = db.NamedExec(sql, &dbAttempt)
+ for i := range txAttempts {
+ err = txStore.InsertTxAttempt(&txAttempts[i])
require.NoError(t, err)
}
// add evm.receipts
- receipts := []txmgr.Receipt{}
+ receipts := []evmtypes.Receipt{}
for i := 0; i < 4; i++ {
- receipts = append(receipts, txmgr.Receipt{
+ receipts = append(receipts, evmtypes.Receipt{
BlockHash: utils.NewHash(),
TxHash: txAttempts[i].Hash,
- BlockNumber: broadcastBlock,
+ BlockNumber: big.NewInt(broadcastBlock),
TransactionIndex: 1,
- Receipt: evmtypes.Receipt{},
- CreatedAt: time.Now(),
})
}
- sql = `INSERT INTO evm.receipts (block_hash, tx_hash, block_number, transaction_index, receipt, created_at)
- VALUES (:block_hash, :tx_hash, :block_number, :transaction_index, :receipt, :created_at)`
- for _, r := range receipts {
- _, err2 := db.NamedExec(sql, r)
- require.NoError(t, err2)
+ for i := range receipts {
+ _, err = txStore.InsertReceipt(&receipts[i])
+ require.NoError(t, err)
}
- counts = vrf.GetStartingResponseCountsV1(q, lggr, 1337, uint32(finalityDepth))
+ counts, err = listenerV1.GetStartingResponseCountsV1(testutils.Context(t))
+ require.NoError(t, err)
assert.Equal(t, 3, len(counts))
assert.Equal(t, uint64(1), counts[utils.PadByteToHash(0x10)])
assert.Equal(t, uint64(2), counts[utils.PadByteToHash(0x11)])
assert.Equal(t, uint64(2), counts[utils.PadByteToHash(0x12)])
- countsV2 := vrf.GetStartingResponseCountsV2(q, lggr, 1337, uint32(finalityDepth))
+ countsV2, err := listenerV2.GetStartingResponseCountsV2(testutils.Context(t))
+ require.NoError(t, err)
t.Log(countsV2)
assert.Equal(t, 3, len(countsV2))
assert.Equal(t, uint64(1), countsV2[big.NewInt(0x10).String()])
diff --git a/core/services/vrf/v2/listener_v2.go b/core/services/vrf/v2/listener_v2.go
index 7560baad3a2..17cb9ec96e4 100644
--- a/core/services/vrf/v2/listener_v2.go
+++ b/core/services/vrf/v2/listener_v2.go
@@ -4,6 +4,7 @@ import (
"cmp"
"context"
"database/sql"
+ "encoding/hex"
"fmt"
"math"
"math/big"
@@ -12,6 +13,7 @@ import (
"sync"
"time"
+ "github.com/avast/retry-go/v4"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
@@ -27,8 +29,7 @@ import (
txmgrcommon "github.com/smartcontractkit/chainlink/v2/common/txmgr"
txmgrtypes "github.com/smartcontractkit/chainlink/v2/common/txmgr/types"
"github.com/smartcontractkit/chainlink/v2/core/assets"
- evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client"
- httypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker/types"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/log"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
@@ -56,6 +57,8 @@ var (
batchCoordinatorV2ABI = evmtypes.MustGetABI(batch_vrf_coordinator_v2.BatchVRFCoordinatorV2ABI)
batchCoordinatorV2PlusABI = evmtypes.MustGetABI(batch_vrf_coordinator_v2plus.BatchVRFCoordinatorV2PlusABI)
vrfOwnerABI = evmtypes.MustGetABI(vrf_owner.VRFOwnerMetaData.ABI)
+ // These are the transaction states used when summing up already reserved subscription funds that are about to be used in in-flight transactions
+ reserveEthLinkQueryStates = []txmgrtypes.TxState{txmgrcommon.TxUnconfirmed, txmgrcommon.TxUnstarted, txmgrcommon.TxInProgress}
)
const (
@@ -73,29 +76,8 @@ const (
// backoffFactor is the factor by which to increase the delay each time a request fails.
backoffFactor = 1.3
- V2ReservedLinkQuery = `SELECT SUM(CAST(meta->>'MaxLink' AS NUMERIC(78, 0)))
- FROM evm.txes
- WHERE meta->>'MaxLink' IS NOT NULL
- AND evm_chain_id = $1
- AND CAST(meta->>'SubId' AS NUMERIC) = $2
- AND state IN ('unconfirmed', 'unstarted', 'in_progress')
- GROUP BY meta->>'SubId'`
-
- V2PlusReservedLinkQuery = `SELECT SUM(CAST(meta->>'MaxLink' AS NUMERIC(78, 0)))
- FROM evm.txes
- WHERE meta->>'MaxLink' IS NOT NULL
- AND evm_chain_id = $1
- AND CAST(meta->>'GlobalSubId' AS NUMERIC) = $2
- AND state IN ('unconfirmed', 'unstarted', 'in_progress')
- GROUP BY meta->>'GlobalSubId'`
-
- V2PlusReservedEthQuery = `SELECT SUM(CAST(meta->>'MaxEth' AS NUMERIC(78, 0)))
- FROM evm.txes
- WHERE meta->>'MaxEth' IS NOT NULL
- AND evm_chain_id = $1
- AND CAST(meta->>'GlobalSubId' AS NUMERIC) = $2
- AND state IN ('unconfirmed', 'unstarted', 'in_progress')
- GROUP BY meta->>'GlobalSubId'`
+ txMetaFieldSubId = "SubId"
+ txMetaGlobalSubId = "GlobalSubId"
CouldNotDetermineIfLogConsumedMsg = "Could not determine if log was already consumed"
)
@@ -116,33 +98,27 @@ func New(
cfg vrfcommon.Config,
feeCfg vrfcommon.FeeConfig,
l logger.Logger,
- ethClient evmclient.Client,
+ chain evm.Chain,
chainID *big.Int,
- logBroadcaster log.Broadcaster,
q pg.Q,
coordinator CoordinatorV2_X,
batchCoordinator batch_vrf_coordinator_v2.BatchVRFCoordinatorV2Interface,
vrfOwner vrf_owner.VRFOwnerInterface,
aggregator *aggregator_v3_interface.AggregatorV3Interface,
- txm txmgr.TxManager,
pipelineRunner pipeline.Runner,
gethks keystore.Eth,
job job.Job,
mailMon *utils.MailboxMonitor,
reqLogs *utils.Mailbox[log.Broadcast],
reqAdded func(),
- respCount map[string]uint64,
- headBroadcaster httypes.HeadBroadcasterRegistry,
deduper *vrfcommon.LogDeduper,
) job.ServiceCtx {
return &listenerV2{
cfg: cfg,
feeCfg: feeCfg,
l: logger.Sugared(l),
- ethClient: ethClient,
+ chain: chain,
chainID: chainID,
- logBroadcaster: logBroadcaster,
- txm: txm,
mailMon: mailMon,
coordinator: coordinator,
batchCoordinator: batchCoordinator,
@@ -154,9 +130,7 @@ func New(
reqLogs: reqLogs,
chStop: make(chan struct{}),
reqAdded: reqAdded,
- respCount: respCount,
blockNumberToReqID: pairing.New(),
- headBroadcaster: headBroadcaster,
latestHeadMu: sync.RWMutex{},
wg: &sync.WaitGroup{},
aggregator: aggregator,
@@ -193,14 +167,12 @@ type vrfPipelineResult struct {
type listenerV2 struct {
services.StateMachine
- cfg vrfcommon.Config
- feeCfg vrfcommon.FeeConfig
- l logger.SugaredLogger
- ethClient evmclient.Client
- chainID *big.Int
- logBroadcaster log.Broadcaster
- txm txmgr.TxManager
- mailMon *utils.MailboxMonitor
+ cfg vrfcommon.Config
+ feeCfg vrfcommon.FeeConfig
+ l logger.SugaredLogger
+ chain evm.Chain
+ chainID *big.Int
+ mailMon *utils.MailboxMonitor
coordinator CoordinatorV2_X
batchCoordinator batch_vrf_coordinator_v2.BatchVRFCoordinatorV2Interface
@@ -229,7 +201,6 @@ type listenerV2 struct {
blockNumberToReqID *pairing.PairHeap
// head tracking data structures
- headBroadcaster httypes.HeadBroadcasterRegistry
latestHeadMu sync.RWMutex
latestHeadNumber uint64
@@ -271,9 +242,9 @@ func (lsn *listenerV2) Start(ctx context.Context) error {
"proofVerificationGas", GasProofVerification)
}
- spec := job.LoadEnvConfigVarsVRF(lsn.cfg, *lsn.job.VRFSpec)
+ spec := job.LoadDefaultVRFPollPeriod(*lsn.job.VRFSpec)
- unsubscribeLogs := lsn.logBroadcaster.Register(lsn, log.ListenerOpts{
+ unsubscribeLogs := lsn.chain.LogBroadcaster().Register(lsn, log.ListenerOpts{
Contract: lsn.coordinator.Address(),
ParseLog: lsn.coordinator.ParseLog,
LogsWithTopics: lsn.coordinator.LogsWithTopics(spec.PublicKey.MustHash()),
@@ -284,11 +255,20 @@ func (lsn *listenerV2) Start(ctx context.Context) error {
ReplayStartedCallback: lsn.ReplayStartedCallback,
})
- latestHead, unsubscribeHeadBroadcaster := lsn.headBroadcaster.Subscribe(lsn)
+ latestHead, unsubscribeHeadBroadcaster := lsn.chain.HeadBroadcaster().Subscribe(lsn)
if latestHead != nil {
lsn.setLatestHead(latestHead)
}
+ lsn.respCountMu.Lock()
+ defer lsn.respCountMu.Unlock()
+ var respCount map[string]uint64
+ respCount, err = lsn.GetStartingResponseCountsV2(ctx)
+ if err != nil {
+ return err
+ }
+ lsn.respCount = respCount
+
// Log listener gathers request logs
lsn.wg.Add(1)
go func() {
@@ -306,6 +286,46 @@ func (lsn *listenerV2) Start(ctx context.Context) error {
})
}
+func (lsn *listenerV2) GetStartingResponseCountsV2(ctx context.Context) (respCount map[string]uint64, err error) {
+ respCounts := map[string]uint64{}
+ var latestBlockNum *big.Int
+ // Retry the client call for LatestBlockHeight if it fails.
+ // We want to avoid failing startup due to a potentially faulty RPC call.
+ err = retry.Do(func() error {
+ latestBlockNum, err = lsn.chain.Client().LatestBlockHeight(ctx)
+ return err
+ }, retry.Attempts(10), retry.Delay(500*time.Millisecond))
+ if err != nil {
+ return nil, err
+ }
+ if latestBlockNum == nil {
+ return nil, errors.New("LatestBlockHeight return nil block num")
+ }
+ confirmedBlockNum := latestBlockNum.Int64() - int64(lsn.chain.Config().EVM().FinalityDepth())
+ // Only check as far back as the EVM finality depth for completed transactions.
+ var counts []vrfcommon.RespCountEntry
+ counts, err = vrfcommon.GetRespCounts(ctx, lsn.chain.TxManager(), lsn.chainID, confirmedBlockNum)
+ if err != nil {
+ // Continue with an empty map, do not block job on this.
+ lsn.l.Errorw("Unable to read previous confirmed fulfillments", "err", err)
+ return respCounts, nil
+ }
+
+ for _, c := range counts {
+ // Remove the quotes from the JSON
+ req := strings.Replace(c.RequestID, `"`, ``, 2)
+ // Remove the 0x prefix
+ b, err := hex.DecodeString(req[2:])
+ if err != nil {
+ lsn.l.Errorw("Unable to read fulfillment", "err", err, "reqID", c.RequestID)
+ continue
+ }
+ bi := new(big.Int).SetBytes(b)
+ respCounts[bi.String()] = uint64(c.Count)
+ }
+ return respCounts, nil
+}
+
func (lsn *listenerV2) setLatestHead(head *evmtypes.Head) {
lsn.latestHeadMu.Lock()
defer lsn.latestHeadMu.Unlock()
@@ -520,68 +540,80 @@ func (lsn *listenerV2) processPendingVRFRequests(ctx context.Context) {
// MaybeSubtractReservedLink figures out how much LINK is reserved for other VRF requests that
// have not been fully confirmed yet on-chain, and subtracts that from the given startBalance,
// and returns that value if there are no errors.
-func MaybeSubtractReservedLink(q pg.Q, startBalance *big.Int, chainID uint64, subID *big.Int, vrfVersion vrfcommon.Version) (*big.Int, error) {
- var (
- reservedLink string
- query string
- )
+func (lsn *listenerV2) MaybeSubtractReservedLink(ctx context.Context, startBalance *big.Int, chainID *big.Int, subID *big.Int, vrfVersion vrfcommon.Version) (*big.Int, error) {
+ var metaField string
if vrfVersion == vrfcommon.V2Plus {
- query = V2PlusReservedLinkQuery
+ metaField = txMetaGlobalSubId
} else if vrfVersion == vrfcommon.V2 {
- query = V2ReservedLinkQuery
+ metaField = txMetaFieldSubId
} else {
return nil, errors.Errorf("unsupported vrf version %s", vrfVersion)
}
- err := q.Get(&reservedLink, query, chainID, subID.String())
+ txes, err := lsn.chain.TxManager().FindTxesByMetaFieldAndStates(ctx, metaField, subID.String(), reserveEthLinkQueryStates, chainID)
if err != nil && !errors.Is(err, sql.ErrNoRows) {
- return nil, errors.Wrap(err, "getting reserved LINK")
+ return nil, errors.Wrap(err, "TXM FindTxesByMetaFieldAndStates failed")
}
- if reservedLink != "" {
- reservedLinkInt, success := big.NewInt(0).SetString(reservedLink, 10)
- if !success {
- return nil, fmt.Errorf("converting reserved LINK %s", reservedLink)
+ reservedLinkSum := big.NewInt(0)
+ // Aggregate non-nil MaxLink values from all txes returned
+ for _, tx := range txes {
+ var meta *txmgrtypes.TxMeta[common.Address, common.Hash]
+ meta, err = tx.GetMeta()
+ if err != nil {
+ return nil, errors.Wrap(err, "GetMeta for Tx failed")
}
+ if meta != nil && meta.MaxLink != nil {
+ txMaxLink, success := new(big.Int).SetString(*meta.MaxLink, 10)
+ if !success {
+ return nil, fmt.Errorf("converting reserved LINK %s", *meta.MaxLink)
+ }
- return new(big.Int).Sub(startBalance, reservedLinkInt), nil
+ reservedLinkSum.Add(reservedLinkSum, txMaxLink)
+ }
}
- return new(big.Int).Set(startBalance), nil
+ return new(big.Int).Sub(startBalance, reservedLinkSum), nil
}
// MaybeSubtractReservedEth figures out how much ether is reserved for other VRF requests that
// have not been fully confirmed yet on-chain, and subtracts that from the given startBalance,
// and returns that value if there are no errors.
-func MaybeSubtractReservedEth(q pg.Q, startBalance *big.Int, chainID uint64, subID *big.Int, vrfVersion vrfcommon.Version) (*big.Int, error) {
- var (
- reservedEther string
- query string
- )
+func (lsn *listenerV2) MaybeSubtractReservedEth(ctx context.Context, startBalance *big.Int, chainID *big.Int, subID *big.Int, vrfVersion vrfcommon.Version) (*big.Int, error) {
+ var metaField string
if vrfVersion == vrfcommon.V2Plus {
- query = V2PlusReservedEthQuery
+ metaField = txMetaGlobalSubId
} else if vrfVersion == vrfcommon.V2 {
// native payment is not supported for v2, so returning 0 reserved ETH
return big.NewInt(0), nil
} else {
return nil, errors.Errorf("unsupported vrf version %s", vrfVersion)
}
- err := q.Get(&reservedEther, query, chainID, subID.String())
+ txes, err := lsn.chain.TxManager().FindTxesByMetaFieldAndStates(ctx, metaField, subID.String(), reserveEthLinkQueryStates, chainID)
if err != nil && !errors.Is(err, sql.ErrNoRows) {
- return nil, errors.Wrap(err, "getting reserved ether")
+ return nil, errors.Wrap(err, "TXM FindTxesByMetaFieldAndStates failed")
}
- if reservedEther != "" {
- reservedEtherInt, success := big.NewInt(0).SetString(reservedEther, 10)
- if !success {
- return nil, fmt.Errorf("converting reserved ether %s", reservedEther)
+ reservedEthSum := big.NewInt(0)
+ // Aggregate non-nil MaxEth values from all txes returned
+ for _, tx := range txes {
+ var meta *txmgrtypes.TxMeta[common.Address, common.Hash]
+ meta, err = tx.GetMeta()
+ if err != nil {
+ return nil, errors.Wrap(err, "GetMeta for Tx failed")
}
+ if meta != nil && meta.MaxEth != nil {
+ txMaxEth, success := new(big.Int).SetString(*meta.MaxEth, 10)
+ if !success {
+ return nil, fmt.Errorf("converting reserved ETH %s", *meta.MaxEth)
+ }
- return new(big.Int).Sub(startBalance, reservedEtherInt), nil
+ reservedEthSum.Add(reservedEthSum, txMaxEth)
+ }
}
if startBalance != nil {
- return new(big.Int).Set(startBalance), nil
+ return new(big.Int).Sub(startBalance, reservedEthSum), nil
}
return big.NewInt(0), nil
}
@@ -812,14 +844,14 @@ func (lsn *listenerV2) processRequestsPerSubBatch(
subIsActive bool,
) map[string]struct{} {
var processed = make(map[string]struct{})
- startBalanceNoReserveLink, err := MaybeSubtractReservedLink(
- lsn.q, startLinkBalance, lsn.chainID.Uint64(), subID, lsn.coordinator.Version())
+ startBalanceNoReserveLink, err := lsn.MaybeSubtractReservedLink(
+ ctx, startLinkBalance, lsn.chainID, subID, lsn.coordinator.Version())
if err != nil {
lsn.l.Errorw("Couldn't get reserved LINK for subscription", "sub", reqs[0].req.SubID(), "err", err)
return processed
}
- startBalanceNoReserveEth, err := MaybeSubtractReservedEth(
- lsn.q, startEthBalance, lsn.chainID.Uint64(), subID, lsn.coordinator.Version())
+ startBalanceNoReserveEth, err := lsn.MaybeSubtractReservedEth(
+ ctx, startEthBalance, lsn.chainID, subID, lsn.coordinator.Version())
if err != nil {
lsn.l.Errorw("Couldn't get reserved ether for subscription", "sub", reqs[0].req.SubID(), "err", err)
return processed
@@ -883,7 +915,7 @@ func (lsn *listenerV2) enqueueForceFulfillment(
// fulfill the request through the VRF owner
err = lsn.q.Transaction(func(tx pg.Queryer) error {
- if err = lsn.logBroadcaster.MarkConsumed(p.req.lb, pg.WithQueryer(tx)); err != nil {
+ if err = lsn.chain.LogBroadcaster().MarkConsumed(p.req.lb, pg.WithQueryer(tx)); err != nil {
return err
}
@@ -901,7 +933,7 @@ func (lsn *listenerV2) enqueueForceFulfillment(
if err != nil {
return errors.Wrap(err, "abi pack VRFOwner.fulfillRandomWords")
}
- estimateGasLimit, err := lsn.ethClient.EstimateGas(ctx, ethereum.CallMsg{
+ estimateGasLimit, err := lsn.chain.Client().EstimateGas(ctx, ethereum.CallMsg{
From: fromAddress,
To: &vrfOwnerAddressSpec,
Data: txData,
@@ -919,7 +951,7 @@ func (lsn *listenerV2) enqueueForceFulfillment(
requestID := common.BytesToHash(p.req.req.RequestID().Bytes())
subID := p.req.req.SubID()
requestTxHash := p.req.req.Raw().TxHash
- etx, err = lsn.txm.CreateTransaction(ctx, txmgr.TxRequest{
+ etx, err = lsn.chain.TxManager().CreateTransaction(ctx, txmgr.TxRequest{
FromAddress: fromAddress,
ToAddress: lsn.vrfOwner.Address(),
EncodedPayload: txData,
@@ -943,7 +975,7 @@ func (lsn *listenerV2) enqueueForceFulfillment(
func (lsn *listenerV2) isConsumerValidAfterFinalityDepthElapsed(ctx context.Context, req pendingRequest) bool {
latestHead := lsn.getLatestHead()
if latestHead-req.req.Raw().BlockNumber > uint64(lsn.cfg.FinalityDepth()) {
- code, err := lsn.ethClient.CodeAt(ctx, req.req.Sender(), big.NewInt(int64(latestHead)))
+ code, err := lsn.chain.Client().CodeAt(ctx, req.req.Sender(), big.NewInt(int64(latestHead)))
if err != nil {
lsn.l.Warnw("Failed to fetch contract code", "err", err)
return true // error fetching code, give the benefit of doubt to the consumer
@@ -1103,7 +1135,7 @@ func (lsn *listenerV2) processRequestsPerSubHelper(
if err = lsn.pipelineRunner.InsertFinishedRun(p.run, true, pg.WithQueryer(tx)); err != nil {
return err
}
- if err = lsn.logBroadcaster.MarkConsumed(p.req.lb, pg.WithQueryer(tx)); err != nil {
+ if err = lsn.chain.LogBroadcaster().MarkConsumed(p.req.lb, pg.WithQueryer(tx)); err != nil {
return err
}
@@ -1126,7 +1158,7 @@ func (lsn *listenerV2) processRequestsPerSubHelper(
requestID := common.BytesToHash(p.req.req.RequestID().Bytes())
coordinatorAddress := lsn.coordinator.Address()
requestTxHash := p.req.req.Raw().TxHash
- transaction, err = lsn.txm.CreateTransaction(ctx, txmgr.TxRequest{
+ transaction, err = lsn.chain.TxManager().CreateTransaction(ctx, txmgr.TxRequest{
FromAddress: fromAddress,
ToAddress: lsn.coordinator.Address(),
EncodedPayload: hexutil.MustDecode(p.payload),
@@ -1185,15 +1217,15 @@ func (lsn *listenerV2) processRequestsPerSub(
}
var processed = make(map[string]struct{})
- chainId := lsn.ethClient.ConfiguredChainID()
- startBalanceNoReserveLink, err := MaybeSubtractReservedLink(
- lsn.q, startLinkBalance, chainId.Uint64(), subID, lsn.coordinator.Version())
+ chainId := lsn.chain.Client().ConfiguredChainID()
+ startBalanceNoReserveLink, err := lsn.MaybeSubtractReservedLink(
+ ctx, startLinkBalance, chainId, subID, lsn.coordinator.Version())
if err != nil {
lsn.l.Errorw("Couldn't get reserved LINK for subscription", "sub", reqs[0].req.SubID(), "err", err)
return processed
}
- startBalanceNoReserveEth, err := MaybeSubtractReservedEth(
- lsn.q, startEthBalance, lsn.chainID.Uint64(), subID, lsn.coordinator.Version())
+ startBalanceNoReserveEth, err := lsn.MaybeSubtractReservedEth(
+ ctx, startEthBalance, lsn.chainID, subID, lsn.coordinator.Version())
if err != nil {
lsn.l.Errorw("Couldn't get reserved ETH for subscription", "sub", reqs[0].req.SubID(), "err", err)
return processed
@@ -1299,7 +1331,7 @@ func (lsn *listenerV2) checkReqsFulfilled(ctx context.Context, l logger.Logger,
}
}
- err := lsn.ethClient.BatchCallContext(ctx, calls)
+ err := lsn.chain.Client().BatchCallContext(ctx, calls)
if err != nil {
return fulfilled, errors.Wrap(err, "making batch call")
}
@@ -1582,7 +1614,7 @@ func (lsn *listenerV2) getConfirmedAt(req RandomWordsRequested, nodeMinConfs uin
func (lsn *listenerV2) handleLog(lb log.Broadcast, minConfs uint32) {
if v, ok := lb.DecodedLog().(*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled); ok {
lsn.l.Debugw("Received fulfilled log", "reqID", v.RequestId, "success", v.Success)
- consumed, err := lsn.logBroadcaster.WasAlreadyConsumed(lb)
+ consumed, err := lsn.chain.LogBroadcaster().WasAlreadyConsumed(lb)
if err != nil {
lsn.l.Errorw(CouldNotDetermineIfLogConsumedMsg, "err", err, "txHash", lb.RawLog().TxHash)
return
@@ -1602,7 +1634,7 @@ func (lsn *listenerV2) handleLog(lb log.Broadcast, minConfs uint32) {
if v, ok := lb.DecodedLog().(*vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalRandomWordsFulfilled); ok {
lsn.l.Debugw("Received fulfilled log", "reqID", v.RequestId, "success", v.Success)
- consumed, err := lsn.logBroadcaster.WasAlreadyConsumed(lb)
+ consumed, err := lsn.chain.LogBroadcaster().WasAlreadyConsumed(lb)
if err != nil {
lsn.l.Errorw(CouldNotDetermineIfLogConsumedMsg, "err", err, "txHash", lb.RawLog().TxHash)
return
@@ -1623,7 +1655,7 @@ func (lsn *listenerV2) handleLog(lb log.Broadcast, minConfs uint32) {
req, err := lsn.coordinator.ParseRandomWordsRequested(lb.RawLog())
if err != nil {
lsn.l.Errorw("Failed to parse log", "err", err, "txHash", lb.RawLog().TxHash)
- consumed, err := lsn.logBroadcaster.WasAlreadyConsumed(lb)
+ consumed, err := lsn.chain.LogBroadcaster().WasAlreadyConsumed(lb)
if err != nil {
lsn.l.Errorw(CouldNotDetermineIfLogConsumedMsg, "err", err, "txHash", lb.RawLog().TxHash)
return
@@ -1648,7 +1680,7 @@ func (lsn *listenerV2) handleLog(lb log.Broadcast, minConfs uint32) {
}
func (lsn *listenerV2) markLogAsConsumed(lb log.Broadcast) {
- err := lsn.logBroadcaster.MarkConsumed(lb)
+ err := lsn.chain.LogBroadcaster().MarkConsumed(lb)
lsn.l.ErrorIf(err, fmt.Sprintf("Unable to mark log %v as consumed", lb.String()))
}
diff --git a/core/services/vrf/v2/listener_v2_helpers_test.go b/core/services/vrf/v2/listener_v2_helpers_test.go
index 8ba900bdc3a..fc34a115b1c 100644
--- a/core/services/vrf/v2/listener_v2_helpers_test.go
+++ b/core/services/vrf/v2/listener_v2_helpers_test.go
@@ -7,7 +7,9 @@ import (
"github.com/stretchr/testify/require"
"github.com/smartcontractkit/chainlink/v2/core/assets"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
v2 "github.com/smartcontractkit/chainlink/v2/core/services/vrf/v2"
+ "github.com/smartcontractkit/chainlink/v2/core/services/vrf/vrfcommon"
)
func TestListener_EstimateFeeJuels(t *testing.T) {
@@ -29,3 +31,23 @@ func TestListener_EstimateFeeJuels(t *testing.T) {
require.Nil(t, actual)
require.Error(t, err)
}
+
+func Test_TxListDeduper(t *testing.T) {
+ tx1 := &txmgr.Tx{
+ ID: 1,
+ Value: *big.NewInt(0),
+ ChainID: big.NewInt(0),
+ }
+ tx2 := &txmgr.Tx{
+ ID: 1,
+ Value: *big.NewInt(1),
+ ChainID: big.NewInt(0),
+ }
+ tx3 := &txmgr.Tx{
+ ID: 2,
+ Value: *big.NewInt(1),
+ ChainID: big.NewInt(0),
+ }
+ txList := vrfcommon.DedupeTxList([]*txmgr.Tx{tx1, tx2, tx3})
+ require.Equal(t, len(txList), 2)
+}
diff --git a/core/services/vrf/v2/listener_v2_test.go b/core/services/vrf/v2/listener_v2_test.go
index 70d5b8154e0..17615feb63a 100644
--- a/core/services/vrf/v2/listener_v2_test.go
+++ b/core/services/vrf/v2/listener_v2_test.go
@@ -1,6 +1,7 @@
package v2
import (
+ "encoding/json"
"math/big"
"sync"
"testing"
@@ -13,39 +14,44 @@ import (
"github.com/stretchr/testify/require"
"github.com/theodesp/go-heaps/pairing"
- "github.com/smartcontractkit/sqlx"
-
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrf_coordinator_v2"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrf_coordinator_v2plus_interface"
"github.com/smartcontractkit/chainlink/v2/core/services/job"
+ "github.com/smartcontractkit/chainlink/v2/core/services/pg/datatypes"
"github.com/smartcontractkit/chainlink/v2/core/services/vrf/vrfcommon"
txmgrcommon "github.com/smartcontractkit/chainlink/v2/common/txmgr"
txmgrtypes "github.com/smartcontractkit/chainlink/v2/common/txmgr/types"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/log"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/log/mocks"
+ evmmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/mocks"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
+ evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/evmtest"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
"github.com/smartcontractkit/chainlink/v2/core/logger"
+ clnull "github.com/smartcontractkit/chainlink/v2/core/null"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore"
- "github.com/smartcontractkit/chainlink/v2/core/services/pg"
"github.com/smartcontractkit/chainlink/v2/core/testdata/testspecs"
"github.com/smartcontractkit/chainlink/v2/core/utils"
)
-const (
- addEthTxQuery = `INSERT INTO evm.txes (from_address, to_address, encoded_payload, value, gas_limit, state, created_at, meta, subject, evm_chain_id, min_confirmations, pipeline_task_run_id)
- VALUES (
- $1, $2, $3, $4, $5, $6, NOW(), $7, $8, $9, $10, $11
- )
- RETURNING "txes".*`
-
- addConfirmedEthTxQuery = `INSERT INTO evm.txes (nonce, broadcast_at, initial_broadcast_at, error, from_address, to_address, encoded_payload, value, gas_limit, state, created_at, meta, subject, evm_chain_id, min_confirmations, pipeline_task_run_id)
- VALUES (
- $1, NOW(), NOW(), NULL, $2, $3, $4, $5, $6, 'confirmed', NOW(), $7, $8, $9, $10, $11
- )
- RETURNING "txes".*`
-)
+func makeTestTxm(t *testing.T, txStore txmgr.TestEvmTxStore, keyStore keystore.Master) txmgrcommon.TxManager[*big.Int, *evmtypes.Head, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] {
+ _, _, evmConfig := txmgr.MakeTestConfigs(t)
+ ec := evmtest.NewEthClientMockWithDefaultChain(t)
+ txmConfig := txmgr.NewEvmTxmConfig(evmConfig)
+ txm := txmgr.NewEvmTxm(ec.ConfiguredChainID(), txmConfig, evmConfig.Transactions(), keyStore.Eth(), logger.TestLogger(t), nil, nil,
+ nil, txStore, nil, nil, nil, nil)
+
+ return txm
+}
+
+func MakeTestListenerV2(chain evm.Chain) *listenerV2 {
+ return &listenerV2{chainID: chain.Client().ConfiguredChainID(), chain: chain}
+}
func txMetaSubIDs(t *testing.T, vrfVersion vrfcommon.Version, subID *big.Int) (*uint64, *string) {
var (
@@ -62,89 +68,118 @@ func txMetaSubIDs(t *testing.T, vrfVersion vrfcommon.Version, subID *big.Int) (*
return txMetaSubID, txMetaGlobalSubID
}
-func addEthTx(t *testing.T, db *sqlx.DB, from common.Address, state txmgrtypes.TxState, maxLink string, subID *big.Int, reqTxHash common.Hash, vrfVersion vrfcommon.Version) {
+func addEthTx(t *testing.T, txStore txmgr.TestEvmTxStore, from common.Address, state txmgrtypes.TxState, maxLink string, subID *big.Int, reqTxHash common.Hash, vrfVersion vrfcommon.Version) {
txMetaSubID, txMetaGlobalSubID := txMetaSubIDs(t, vrfVersion, subID)
- _, err := db.Exec(addEthTxQuery,
- from, // from
- from, // to
- []byte(`blah`), // payload
- 0, // value
- 0, // limit
- state,
- txmgr.TxMeta{
- MaxLink: &maxLink,
- SubID: txMetaSubID,
- GlobalSubID: txMetaGlobalSubID,
- RequestTxHash: &reqTxHash,
- },
- uuid.NullUUID{},
- 1337,
- 0, // confs
- nil)
+ b, err := json.Marshal(txmgr.TxMeta{
+ MaxLink: &maxLink,
+ SubID: txMetaSubID,
+ GlobalSubID: txMetaGlobalSubID,
+ RequestTxHash: &reqTxHash,
+ })
+ require.NoError(t, err)
+ meta := datatypes.JSON(b)
+ tx := &txmgr.Tx{
+ FromAddress: from,
+ ToAddress: from,
+ EncodedPayload: []byte(`blah`),
+ Value: *big.NewInt(0),
+ FeeLimit: 0,
+ State: state,
+ Meta: &meta,
+ Subject: uuid.NullUUID{},
+ ChainID: testutils.SimulatedChainID,
+ MinConfirmations: clnull.Uint32{Uint32: 0},
+ PipelineTaskRunID: uuid.NullUUID{},
+ }
+ err = txStore.InsertTx(tx)
require.NoError(t, err)
}
-func addConfirmedEthTx(t *testing.T, db *sqlx.DB, from common.Address, maxLink string, subID *big.Int, nonce uint64, vrfVersion vrfcommon.Version) {
+func addConfirmedEthTx(t *testing.T, txStore txmgr.TestEvmTxStore, from common.Address, maxLink string, subID *big.Int, nonce evmtypes.Nonce, vrfVersion vrfcommon.Version) {
txMetaSubID, txMetaGlobalSubID := txMetaSubIDs(t, vrfVersion, subID)
- _, err := db.Exec(addConfirmedEthTxQuery,
- nonce, // nonce
- from, // from
- from, // to
- []byte(`blah`), // payload
- 0, // value
- 0, // limit
- txmgr.TxMeta{
- MaxLink: &maxLink,
- SubID: txMetaSubID,
- GlobalSubID: txMetaGlobalSubID,
- },
- uuid.NullUUID{},
- 1337,
- 0, // confs
- nil)
+ b, err := json.Marshal(txmgr.TxMeta{
+ MaxLink: &maxLink,
+ SubID: txMetaSubID,
+ GlobalSubID: txMetaGlobalSubID,
+ })
+ require.NoError(t, err)
+ meta := datatypes.JSON(b)
+ now := time.Now()
+
+ tx := &txmgr.Tx{
+ Sequence: &nonce,
+ FromAddress: from,
+ ToAddress: from,
+ EncodedPayload: []byte(`blah`),
+ Value: *big.NewInt(0),
+ FeeLimit: 0,
+ State: txmgrcommon.TxConfirmed,
+ Meta: &meta,
+ Subject: uuid.NullUUID{},
+ ChainID: testutils.SimulatedChainID,
+ MinConfirmations: clnull.Uint32{Uint32: 0},
+ PipelineTaskRunID: uuid.NullUUID{},
+ BroadcastAt: &now,
+ InitialBroadcastAt: &now,
+ }
+ err = txStore.InsertTx(tx)
require.NoError(t, err)
}
-func addEthTxNativePayment(t *testing.T, db *sqlx.DB, from common.Address, state txmgrtypes.TxState, maxNative string, subID *big.Int, reqTxHash common.Hash, vrfVersion vrfcommon.Version) {
+func addEthTxNativePayment(t *testing.T, txStore txmgr.TestEvmTxStore, from common.Address, state txmgrtypes.TxState, maxNative string, subID *big.Int, reqTxHash common.Hash, vrfVersion vrfcommon.Version) {
txMetaSubID, txMetaGlobalSubID := txMetaSubIDs(t, vrfVersion, subID)
- _, err := db.Exec(addEthTxQuery,
- from, // from
- from, // to
- []byte(`blah`), // payload
- 0, // value
- 0, // limit
- state,
- txmgr.TxMeta{
- MaxEth: &maxNative,
- SubID: txMetaSubID,
- GlobalSubID: txMetaGlobalSubID,
- RequestTxHash: &reqTxHash,
- },
- uuid.NullUUID{},
- 1337,
- 0, // confs
- nil)
+ b, err := json.Marshal(txmgr.TxMeta{
+ MaxEth: &maxNative,
+ SubID: txMetaSubID,
+ GlobalSubID: txMetaGlobalSubID,
+ RequestTxHash: &reqTxHash,
+ })
+ require.NoError(t, err)
+ meta := datatypes.JSON(b)
+ tx := &txmgr.Tx{
+ FromAddress: from,
+ ToAddress: from,
+ EncodedPayload: []byte(`blah`),
+ Value: *big.NewInt(0),
+ FeeLimit: 0,
+ State: state,
+ Meta: &meta,
+ Subject: uuid.NullUUID{},
+ ChainID: testutils.SimulatedChainID,
+ MinConfirmations: clnull.Uint32{Uint32: 0},
+ PipelineTaskRunID: uuid.NullUUID{},
+ }
+ err = txStore.InsertTx(tx)
require.NoError(t, err)
}
-func addConfirmedEthTxNativePayment(t *testing.T, db *sqlx.DB, from common.Address, maxNative string, subID *big.Int, nonce uint64, vrfVersion vrfcommon.Version) {
+func addConfirmedEthTxNativePayment(t *testing.T, txStore txmgr.TestEvmTxStore, from common.Address, maxNative string, subID *big.Int, nonce evmtypes.Nonce, vrfVersion vrfcommon.Version) {
txMetaSubID, txMetaGlobalSubID := txMetaSubIDs(t, vrfVersion, subID)
- _, err := db.Exec(addConfirmedEthTxQuery,
- nonce, // nonce
- from, // from
- from, // to
- []byte(`blah`), // payload
- 0, // value
- 0, // limit
- txmgr.TxMeta{
- MaxEth: &maxNative,
- SubID: txMetaSubID,
- GlobalSubID: txMetaGlobalSubID,
- },
- uuid.NullUUID{},
- 1337,
- 0, // confs
- nil)
+ b, err := json.Marshal(txmgr.TxMeta{
+ MaxEth: &maxNative,
+ SubID: txMetaSubID,
+ GlobalSubID: txMetaGlobalSubID,
+ })
+ require.NoError(t, err)
+ meta := datatypes.JSON(b)
+ now := time.Now()
+ tx := &txmgr.Tx{
+ Sequence: &nonce,
+ FromAddress: from,
+ ToAddress: from,
+ EncodedPayload: []byte(`blah`),
+ Value: *big.NewInt(0),
+ FeeLimit: 0,
+ State: txmgrcommon.TxConfirmed,
+ Meta: &meta,
+ Subject: uuid.NullUUID{},
+ ChainID: testutils.SimulatedChainID,
+ MinConfirmations: clnull.Uint32{Uint32: 0},
+ PipelineTaskRunID: uuid.NullUUID{},
+ BroadcastAt: &now,
+ InitialBroadcastAt: &now,
+ }
+ err = txStore.InsertTx(tx)
require.NoError(t, err)
}
@@ -152,57 +187,72 @@ func testMaybeSubtractReservedLink(t *testing.T, vrfVersion vrfcommon.Version) {
db := pgtest.NewSqlxDB(t)
lggr := logger.TestLogger(t)
cfg := pgtest.NewQConfig(false)
- q := pg.NewQ(db, lggr, cfg)
ks := keystore.NewInMemory(db, utils.FastScryptParams, lggr, cfg)
require.NoError(t, ks.Unlock("blah"))
- chainID := uint64(1337)
- k, err := ks.Eth().Create(big.NewInt(int64(chainID)))
+ chainID := testutils.SimulatedChainID
+ k, err := ks.Eth().Create(chainID)
require.NoError(t, err)
subID := new(big.Int).SetUint64(1)
reqTxHash := common.HexToHash("0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8")
+ j, err := vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{
+ RequestedConfsDelay: 10,
+ }).Toml())
+ require.NoError(t, err)
+ txstore := txmgr.NewTxStore(db, lggr, cfg)
+ txm := makeTestTxm(t, txstore, ks)
+ chain := evmmocks.NewChain(t)
+ chain.On("TxManager").Return(txm)
+ listener := &listenerV2{
+ respCount: map[string]uint64{},
+ job: j,
+ chain: chain,
+ }
+
+ ctx := testutils.Context(t)
+
// Insert an unstarted eth tx with link metadata
- addEthTx(t, db, k.Address, txmgrcommon.TxUnstarted, "10000", subID, reqTxHash, vrfVersion)
- start, err := MaybeSubtractReservedLink(q, big.NewInt(100_000), chainID, subID, vrfVersion)
+ addEthTx(t, txstore, k.Address, txmgrcommon.TxUnstarted, "10000", subID, reqTxHash, vrfVersion)
+ start, err := listener.MaybeSubtractReservedLink(ctx, big.NewInt(100_000), chainID, subID, vrfVersion)
require.NoError(t, err)
assert.Equal(t, "90000", start.String())
// A confirmed tx should not affect the starting balance
- addConfirmedEthTx(t, db, k.Address, "10000", subID, 1, vrfVersion)
- start, err = MaybeSubtractReservedLink(q, big.NewInt(100_000), chainID, subID, vrfVersion)
+ addConfirmedEthTx(t, txstore, k.Address, "10000", subID, 1, vrfVersion)
+ start, err = listener.MaybeSubtractReservedLink(ctx, big.NewInt(100_000), chainID, subID, vrfVersion)
require.NoError(t, err)
assert.Equal(t, "90000", start.String())
// An unconfirmed tx _should_ affect the starting balance.
- addEthTx(t, db, k.Address, txmgrcommon.TxUnstarted, "10000", subID, reqTxHash, vrfVersion)
- start, err = MaybeSubtractReservedLink(q, big.NewInt(100_000), chainID, subID, vrfVersion)
+ addEthTx(t, txstore, k.Address, txmgrcommon.TxUnstarted, "10000", subID, reqTxHash, vrfVersion)
+ start, err = listener.MaybeSubtractReservedLink(ctx, big.NewInt(100_000), chainID, subID, vrfVersion)
require.NoError(t, err)
assert.Equal(t, "80000", start.String())
// One subscriber's reserved link should not affect other subscribers prospective balance.
otherSubID := new(big.Int).SetUint64(2)
require.NoError(t, err)
- addEthTx(t, db, k.Address, txmgrcommon.TxUnstarted, "10000", otherSubID, reqTxHash, vrfVersion)
- start, err = MaybeSubtractReservedLink(q, big.NewInt(100_000), chainID, subID, vrfVersion)
+ addEthTx(t, txstore, k.Address, txmgrcommon.TxUnstarted, "10000", otherSubID, reqTxHash, vrfVersion)
+ start, err = listener.MaybeSubtractReservedLink(ctx, big.NewInt(100_000), chainID, subID, vrfVersion)
require.NoError(t, err)
require.Equal(t, "80000", start.String())
// One key's data should not affect other keys' data in the case of different subscribers.
- k2, err := ks.Eth().Create(big.NewInt(1337))
+ k2, err := ks.Eth().Create(testutils.SimulatedChainID)
require.NoError(t, err)
anotherSubID := new(big.Int).SetUint64(3)
- addEthTx(t, db, k2.Address, txmgrcommon.TxUnstarted, "10000", anotherSubID, reqTxHash, vrfVersion)
- start, err = MaybeSubtractReservedLink(q, big.NewInt(100_000), chainID, subID, vrfVersion)
+ addEthTx(t, txstore, k2.Address, txmgrcommon.TxUnstarted, "10000", anotherSubID, reqTxHash, vrfVersion)
+ start, err = listener.MaybeSubtractReservedLink(ctx, big.NewInt(100_000), chainID, subID, vrfVersion)
require.NoError(t, err)
require.Equal(t, "80000", start.String())
// A subscriber's balance is deducted with the link reserved across multiple keys,
// i.e, gas lanes.
- addEthTx(t, db, k2.Address, txmgrcommon.TxUnstarted, "10000", subID, reqTxHash, vrfVersion)
- start, err = MaybeSubtractReservedLink(q, big.NewInt(100_000), chainID, subID, vrfVersion)
+ addEthTx(t, txstore, k2.Address, txmgrcommon.TxUnstarted, "10000", subID, reqTxHash, vrfVersion)
+ start, err = listener.MaybeSubtractReservedLink(ctx, big.NewInt(100_000), chainID, subID, vrfVersion)
require.NoError(t, err)
require.Equal(t, "70000", start.String())
}
@@ -219,57 +269,73 @@ func testMaybeSubtractReservedNative(t *testing.T, vrfVersion vrfcommon.Version)
db := pgtest.NewSqlxDB(t)
lggr := logger.TestLogger(t)
cfg := pgtest.NewQConfig(false)
- q := pg.NewQ(db, lggr, cfg)
ks := keystore.NewInMemory(db, utils.FastScryptParams, lggr, cfg)
require.NoError(t, ks.Unlock("blah"))
- chainID := uint64(1337)
- k, err := ks.Eth().Create(big.NewInt(int64(chainID)))
+ chainID := testutils.SimulatedChainID
+ k, err := ks.Eth().Create(chainID)
require.NoError(t, err)
subID := new(big.Int).SetUint64(1)
reqTxHash := common.HexToHash("0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8")
+ j, err := vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{
+ RequestedConfsDelay: 10,
+ }).Toml())
+ require.NoError(t, err)
+ txstore := txmgr.NewTxStore(db, logger.TestLogger(t), cfg)
+ txm := makeTestTxm(t, txstore, ks)
+ require.NoError(t, err)
+ chain := evmmocks.NewChain(t)
+ chain.On("TxManager").Return(txm)
+ listener := &listenerV2{
+ respCount: map[string]uint64{},
+ job: j,
+ chain: chain,
+ }
+
+ ctx := testutils.Context(t)
+
// Insert an unstarted eth tx with native metadata
- addEthTxNativePayment(t, db, k.Address, txmgrcommon.TxUnstarted, "10000", subID, reqTxHash, vrfVersion)
- start, err := MaybeSubtractReservedEth(q, big.NewInt(100_000), chainID, subID, vrfVersion)
+ addEthTxNativePayment(t, txstore, k.Address, txmgrcommon.TxUnstarted, "10000", subID, reqTxHash, vrfVersion)
+ start, err := listener.MaybeSubtractReservedEth(ctx, big.NewInt(100_000), chainID, subID, vrfVersion)
require.NoError(t, err)
assert.Equal(t, "90000", start.String())
// A confirmed tx should not affect the starting balance
- addConfirmedEthTxNativePayment(t, db, k.Address, "10000", subID, 1, vrfVersion)
- start, err = MaybeSubtractReservedEth(q, big.NewInt(100_000), chainID, subID, vrfVersion)
+ addConfirmedEthTxNativePayment(t, txstore, k.Address, "10000", subID, 1, vrfVersion)
+ start, err = listener.MaybeSubtractReservedEth(ctx, big.NewInt(100_000), chainID, subID, vrfVersion)
require.NoError(t, err)
assert.Equal(t, "90000", start.String())
// An unconfirmed tx _should_ affect the starting balance.
- addEthTxNativePayment(t, db, k.Address, txmgrcommon.TxUnstarted, "10000", subID, reqTxHash, vrfVersion)
- start, err = MaybeSubtractReservedEth(q, big.NewInt(100_000), chainID, subID, vrfVersion)
+ addEthTxNativePayment(t, txstore, k.Address, txmgrcommon.TxUnstarted, "10000", subID, reqTxHash, vrfVersion)
+ start, err = listener.MaybeSubtractReservedEth(ctx, big.NewInt(100_000), chainID, subID, vrfVersion)
require.NoError(t, err)
assert.Equal(t, "80000", start.String())
// One subscriber's reserved native should not affect other subscribers prospective balance.
otherSubID := new(big.Int).SetUint64(2)
require.NoError(t, err)
- addEthTxNativePayment(t, db, k.Address, txmgrcommon.TxUnstarted, "10000", otherSubID, reqTxHash, vrfVersion)
- start, err = MaybeSubtractReservedEth(q, big.NewInt(100_000), chainID, subID, vrfVersion)
+ addEthTxNativePayment(t, txstore, k.Address, txmgrcommon.TxUnstarted, "10000", otherSubID, reqTxHash, vrfVersion)
+ start, err = listener.MaybeSubtractReservedEth(ctx, big.NewInt(100_000), chainID, subID, vrfVersion)
require.NoError(t, err)
require.Equal(t, "80000", start.String())
// One key's data should not affect other keys' data in the case of different subscribers.
- k2, err := ks.Eth().Create(big.NewInt(1337))
+ k2, err := ks.Eth().Create(testutils.SimulatedChainID)
require.NoError(t, err)
anotherSubID := new(big.Int).SetUint64(3)
- addEthTxNativePayment(t, db, k2.Address, txmgrcommon.TxUnstarted, "10000", anotherSubID, reqTxHash, vrfVersion)
- start, err = MaybeSubtractReservedEth(q, big.NewInt(100_000), chainID, subID, vrfVersion)
+ addEthTxNativePayment(t, txstore, k2.Address, txmgrcommon.TxUnstarted, "10000", anotherSubID, reqTxHash, vrfVersion)
+ start, err = listener.MaybeSubtractReservedEth(ctx, big.NewInt(100_000), chainID, subID, vrfVersion)
require.NoError(t, err)
require.Equal(t, "80000", start.String())
// A subscriber's balance is deducted with the native reserved across multiple keys,
// i.e, gas lanes.
- addEthTxNativePayment(t, db, k2.Address, txmgrcommon.TxUnstarted, "10000", subID, reqTxHash, vrfVersion)
- start, err = MaybeSubtractReservedEth(q, big.NewInt(100_000), chainID, subID, vrfVersion)
+ addEthTxNativePayment(t, txstore, k2.Address, txmgrcommon.TxUnstarted, "10000", subID, reqTxHash, vrfVersion)
+ start, err = listener.MaybeSubtractReservedEth(ctx, big.NewInt(100_000), chainID, subID, vrfVersion)
require.NoError(t, err)
require.Equal(t, "70000", start.String())
}
@@ -282,13 +348,26 @@ func TestMaybeSubtractReservedNativeV2(t *testing.T) {
db := pgtest.NewSqlxDB(t)
lggr := logger.TestLogger(t)
cfg := pgtest.NewQConfig(false)
- q := pg.NewQ(db, lggr, cfg)
ks := keystore.NewInMemory(db, utils.FastScryptParams, lggr, cfg)
require.NoError(t, ks.Unlock("blah"))
- chainID := uint64(1337)
+ chainID := testutils.SimulatedChainID
subID := new(big.Int).SetUint64(1)
+
+ j, err := vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{
+ RequestedConfsDelay: 10,
+ }).Toml())
+ require.NoError(t, err)
+ txstore := txmgr.NewTxStore(db, logger.TestLogger(t), cfg)
+ txm := makeTestTxm(t, txstore, ks)
+ chain := evmmocks.NewChain(t)
+ chain.On("TxManager").Return(txm).Maybe()
+ listener := &listenerV2{
+ respCount: map[string]uint64{},
+ job: j,
+ chain: chain,
+ }
// returns error because native payment is not supported for V2
- start, err := MaybeSubtractReservedEth(q, big.NewInt(100_000), chainID, subID, vrfcommon.V2)
+ start, err := listener.MaybeSubtractReservedEth(testutils.Context(t), big.NewInt(100_000), chainID, subID, vrfcommon.V2)
require.NoError(t, err)
assert.Equal(t, big.NewInt(0), start)
}
@@ -445,12 +524,14 @@ func TestListener_handleLog(tt *testing.T) {
lb.On("WasAlreadyConsumed", log).Return(false, nil).Once()
lb.On("MarkConsumed", log).Return(nil).Once()
defer lb.AssertExpectations(t)
+ chain := evmmocks.NewChain(t)
+ chain.On("LogBroadcaster").Return(lb)
listener := &listenerV2{
respCount: map[string]uint64{},
job: j,
blockNumberToReqID: pairing.New(),
latestHeadMu: sync.RWMutex{},
- logBroadcaster: lb,
+ chain: chain,
l: logger.TestLogger(t),
}
listener.handleLog(log, minConfs)
@@ -476,12 +557,14 @@ func TestListener_handleLog(tt *testing.T) {
lb.On("WasAlreadyConsumed", log).Return(false, nil).Once()
lb.On("MarkConsumed", log).Return(nil).Once()
defer lb.AssertExpectations(t)
+ chain := evmmocks.NewChain(t)
+ chain.On("LogBroadcaster").Return(lb)
listener := &listenerV2{
respCount: map[string]uint64{},
job: j,
blockNumberToReqID: pairing.New(),
latestHeadMu: sync.RWMutex{},
- logBroadcaster: lb,
+ chain: chain,
l: logger.TestLogger(t),
}
listener.handleLog(log, minConfs)
diff --git a/core/services/vrf/v2/listener_v2_types.go b/core/services/vrf/v2/listener_v2_types.go
index e0596abcd1a..5ad44c31a8b 100644
--- a/core/services/vrf/v2/listener_v2_types.go
+++ b/core/services/vrf/v2/listener_v2_types.go
@@ -170,7 +170,7 @@ func (lsn *listenerV2) processBatch(
return errors.Wrap(err, "inserting finished pipeline runs")
}
- if err = lsn.logBroadcaster.MarkManyConsumed(batch.lbs, pg.WithQueryer(tx)); err != nil {
+ if err = lsn.chain.LogBroadcaster().MarkManyConsumed(batch.lbs, pg.WithQueryer(tx)); err != nil {
return errors.Wrap(err, "mark logs consumed")
}
@@ -181,7 +181,7 @@ func (lsn *listenerV2) processBatch(
for _, reqID := range batch.reqIDs {
reqIDHashes = append(reqIDHashes, common.BytesToHash(reqID.Bytes()))
}
- ethTX, err = lsn.txm.CreateTransaction(ctx, txmgr.TxRequest{
+ ethTX, err = lsn.chain.TxManager().CreateTransaction(ctx, txmgr.TxRequest{
FromAddress: fromAddress,
ToAddress: lsn.batchCoordinator.Address(),
EncodedPayload: payload,
@@ -234,7 +234,7 @@ func (lsn *listenerV2) getUnconsumed(l logger.Logger, reqs []pendingRequest) (un
// This check to see if the log was consumed needs to be in the same
// goroutine as the mark consumed to avoid processing duplicates.
- consumed, err := lsn.logBroadcaster.WasAlreadyConsumed(req.lb)
+ consumed, err := lsn.chain.LogBroadcaster().WasAlreadyConsumed(req.lb)
if err != nil {
// Do not process for now, retry on next iteration.
l.Errorw("Could not determine if log was already consumed",
diff --git a/core/services/vrf/vrfcommon/utils.go b/core/services/vrf/vrfcommon/utils.go
new file mode 100644
index 00000000000..f9cc012d8fb
--- /dev/null
+++ b/core/services/vrf/vrfcommon/utils.go
@@ -0,0 +1,78 @@
+package vrfcommon
+
+import (
+ "context"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/pkg/errors"
+
+ txmgrcommon "github.com/smartcontractkit/chainlink/v2/common/txmgr"
+ txmgrtypes "github.com/smartcontractkit/chainlink/v2/common/txmgr/types"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr"
+)
+
+type RespCountEntry struct {
+ RequestID string
+ Count int
+}
+
+func GetRespCounts(ctx context.Context, txm txmgr.TxManager, chainID *big.Int, confirmedBlockNum int64) (
+ []RespCountEntry,
+ error,
+) {
+ counts := []RespCountEntry{}
+ metaField := "RequestID"
+ states := []txmgrtypes.TxState{txmgrcommon.TxUnconfirmed, txmgrcommon.TxUnstarted, txmgrcommon.TxInProgress}
+ // Search for txes with a non-null meta field in the provided states
+ unconfirmedTxes, err := txm.FindTxesWithMetaFieldByStates(ctx, metaField, states, chainID)
+ if err != nil {
+ return nil, errors.Wrap(err, "getRespCounts failed due to error in FindTxesWithMetaFieldByStates")
+ }
+ // Fetch completed transactions only as far back as the given confirmedBlockNum. This avoids
+ // a table scan of the whole table, which could be large if it is unpruned.
+ var confirmedTxes []*txmgr.Tx
+ confirmedTxes, err = txm.FindTxesWithMetaFieldByReceiptBlockNum(ctx, metaField, confirmedBlockNum, chainID)
+ if err != nil {
+ return nil, errors.Wrap(err, "getRespCounts failed due to error in FindTxesWithMetaFieldByReceiptBlockNum")
+ }
+ txes := DedupeTxList(append(unconfirmedTxes, confirmedTxes...))
+ respCountMap := make(map[string]int)
+ // Consolidate the number of txes for each meta RequestID
+ for _, tx := range txes {
+ var meta *txmgrtypes.TxMeta[common.Address, common.Hash]
+ meta, err = tx.GetMeta()
+ if err != nil {
+ return nil, errors.Wrap(err, "getRespCounts failed parsing tx meta field")
+ }
+ if meta != nil && meta.RequestID != nil {
+ requestId := meta.RequestID.String()
+ if _, exists := respCountMap[requestId]; !exists {
+ respCountMap[requestId] = 0
+ }
+ respCountMap[requestId]++
+ }
+ }
+
+ // Parse response count map into output
+ for key, value := range respCountMap {
+ respCountEntry := RespCountEntry{
+ RequestID: key,
+ Count: value,
+ }
+ counts = append(counts, respCountEntry)
+ }
+ return counts, nil
+}
+
+func DedupeTxList(txes []*txmgr.Tx) []*txmgr.Tx {
+ txIdMap := make(map[string]bool)
+ dedupedTxes := []*txmgr.Tx{}
+ for _, tx := range txes {
+ if _, found := txIdMap[tx.GetID()]; !found {
+ txIdMap[tx.GetID()] = true
+ dedupedTxes = append(dedupedTxes, tx)
+ }
+ }
+ return dedupedTxes
+}
diff --git a/core/services/webhook/authorizer_test.go b/core/services/webhook/authorizer_test.go
index 19dbf381408..b6eb2feaccb 100644
--- a/core/services/webhook/authorizer_test.go
+++ b/core/services/webhook/authorizer_test.go
@@ -3,7 +3,7 @@ package webhook_test
import (
"testing"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/bridges"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
diff --git a/core/services/webhook/external_initiator_manager.go b/core/services/webhook/external_initiator_manager.go
index 2e881ec42d6..01edf82b114 100644
--- a/core/services/webhook/external_initiator_manager.go
+++ b/core/services/webhook/external_initiator_manager.go
@@ -10,7 +10,7 @@ import (
"github.com/lib/pq"
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/bridges"
"github.com/smartcontractkit/chainlink/v2/core/logger"
diff --git a/core/sessions/authentication.go b/core/sessions/authentication.go
new file mode 100644
index 00000000000..0f0dda3bf33
--- /dev/null
+++ b/core/sessions/authentication.go
@@ -0,0 +1,66 @@
+package sessions
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/smartcontractkit/chainlink/v2/core/auth"
+ "github.com/smartcontractkit/chainlink/v2/core/bridges"
+)
+
+// Application config constant options
+type AuthenticationProviderName string
+
+const (
+ LocalAuth AuthenticationProviderName = "local"
+ LDAPAuth AuthenticationProviderName = "ldap"
+)
+
+// ErrUserSessionExpired defines the error triggered when the user session has expired
+var ErrUserSessionExpired = errors.New("session missing or expired, please login again")
+
+// ErrNotSupported defines the error where interface functionality doesn't align with the underlying Auth Provider
+var ErrNotSupported = fmt.Errorf("functionality not supported with current authentication provider: %w", errors.ErrUnsupported)
+
+// ErrEmptySessionID captures the empty case error message
+var ErrEmptySessionID = errors.New("session ID cannot be empty")
+
+//go:generate mockery --quiet --name BasicAdminUsersORM --output ./mocks/ --case=underscore
+
+// BasicAdminUsersORM is the interface that defines the functionality required for supporting basic admin functionality
+// adjacent to the identity provider authentication provider implementation. It is currently implemented by the local
+// users/sessions ORM containing local admin CLI actions. This is separate from the AuthenticationProvider,
+// as local admin management (ie initial core node setup, initial admin user creation), is always
+// required no matter what the pluggable AuthenticationProvider implementation is.
+type BasicAdminUsersORM interface {
+ ListUsers() ([]User, error)
+ CreateUser(user *User) error
+ FindUser(email string) (User, error)
+}
+
+//go:generate mockery --quiet --name AuthenticationProvider --output ./mocks/ --case=underscore
+
+// AuthenticationProvider is an interface that abstracts the required application calls to a user management backend
+// Currently localauth (users table DB) or LDAP server (readonly)
+type AuthenticationProvider interface {
+ FindUser(email string) (User, error)
+ FindUserByAPIToken(apiToken string) (User, error)
+ ListUsers() ([]User, error)
+ AuthorizedUserWithSession(sessionID string) (User, error)
+ DeleteUser(email string) error
+ DeleteUserSession(sessionID string) error
+ CreateSession(sr SessionRequest) (string, error)
+ ClearNonCurrentSessions(sessionID string) error
+ CreateUser(user *User) error
+ UpdateRole(email, newRole string) (User, error)
+ SetAuthToken(user *User, token *auth.Token) error
+ CreateAndSetAuthToken(user *User) (*auth.Token, error)
+ DeleteAuthToken(user *User) error
+ SetPassword(user *User, newPassword string) error
+ TestPassword(email, password string) error
+ Sessions(offset, limit int) ([]Session, error)
+ GetUserWebAuthn(email string) ([]WebAuthn, error)
+ SaveWebAuthn(token *WebAuthn) error
+
+ FindExternalInitiator(eia *auth.Token) (initiator *bridges.ExternalInitiator, err error)
+}
diff --git a/core/sessions/ldapauth/client.go b/core/sessions/ldapauth/client.go
new file mode 100644
index 00000000000..bb259f8c9a2
--- /dev/null
+++ b/core/sessions/ldapauth/client.go
@@ -0,0 +1,47 @@
+package ldapauth
+
+import (
+ "fmt"
+
+ "github.com/go-ldap/ldap/v3"
+
+ "github.com/smartcontractkit/chainlink/v2/core/config"
+)
+
+type ldapClient struct {
+ config config.LDAP
+}
+
+//go:generate mockery --quiet --name LDAPClient --output ./mocks/ --case=underscore
+
+// Wrapper for creating a handle to a *ldap.Conn/LDAPConn interface
+type LDAPClient interface {
+ CreateEphemeralConnection() (LDAPConn, error)
+}
+
+//go:generate mockery --quiet --name LDAPConn --output ./mocks/ --case=underscore
+
+// Wrapper for ldap connection and mock testing, implemented by *ldap.Conn
+type LDAPConn interface {
+ Search(searchRequest *ldap.SearchRequest) (*ldap.SearchResult, error)
+ Bind(username string, password string) error
+ Close() (err error)
+}
+
+func newLDAPClient(config config.LDAP) LDAPClient {
+ return &ldapClient{config}
+}
+
+// CreateEphemeralConnection returns a valid, active LDAP connection for upstream Search and Bind queries
+func (l *ldapClient) CreateEphemeralConnection() (LDAPConn, error) {
+ conn, err := ldap.DialURL(l.config.ServerAddress())
+ if err != nil {
+ return nil, fmt.Errorf("failed to Dial LDAP Server: %w", err)
+ }
+ // Root-level bind as the read-only user with credentials provided from config
+ bindStr := l.config.BaseUserAttr() + "=" + l.config.ReadOnlyUserLogin() + "," + l.config.BaseDN()
+ if err := conn.Bind(bindStr, l.config.ReadOnlyUserPass()); err != nil {
+ return nil, fmt.Errorf("unable to login as initial root LDAP user: %w", err)
+ }
+ return conn, nil
+}
diff --git a/core/sessions/ldapauth/helpers_test.go b/core/sessions/ldapauth/helpers_test.go
new file mode 100644
index 00000000000..3566ea84380
--- /dev/null
+++ b/core/sessions/ldapauth/helpers_test.go
@@ -0,0 +1,131 @@
+package ldapauth
+
+import (
+ "time"
+
+ "github.com/jmoiron/sqlx"
+
+ "github.com/smartcontractkit/chainlink/v2/core/config"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/logger/audit"
+ "github.com/smartcontractkit/chainlink/v2/core/services/pg"
+ "github.com/smartcontractkit/chainlink/v2/core/store/models"
+)
+
+// Returns an instantiated ldapAuthenticator struct without validation for testing
+func NewTestLDAPAuthenticator(
+ db *sqlx.DB,
+ pgCfg pg.QConfig,
+ ldapCfg config.LDAP,
+ dev bool,
+ lggr logger.Logger,
+ auditLogger audit.AuditLogger,
+) (*ldapAuthenticator, error) {
+ namedLogger := lggr.Named("LDAPAuthenticationProvider")
+ ldapAuth := ldapAuthenticator{
+ q: pg.NewQ(db, namedLogger, pgCfg),
+ ldapClient: newLDAPClient(ldapCfg),
+ config: ldapCfg,
+ lggr: lggr.Named("LDAPAuthenticationProvider"),
+ auditLogger: auditLogger,
+ }
+
+ return &ldapAuth, nil
+}
+
+// Default server group name mappings for test config and mocked ldap search results
+const (
+ NodeAdminsGroupCN = "NodeAdmins"
+ NodeEditorsGroupCN = "NodeEditors"
+ NodeRunnersGroupCN = "NodeRunners"
+ NodeReadOnlyGroupCN = "NodeReadOnly"
+)
+
+// Implement a setter function within the _test file so that the ldapauth_test module can set the unexported field with a mock
+func (l *ldapAuthenticator) SetLDAPClient(newClient LDAPClient) {
+ l.ldapClient = newClient
+}
+
+// Implements config.LDAP
+type TestConfig struct {
+}
+
+func (t *TestConfig) ServerAddress() string {
+ return "ldaps://MOCK"
+}
+
+func (t *TestConfig) ReadOnlyUserLogin() string {
+ return "mock-readonly"
+}
+
+func (t *TestConfig) ReadOnlyUserPass() string {
+ return "mock-password"
+}
+
+func (t *TestConfig) ServerTLS() bool {
+ return false
+}
+
+func (t *TestConfig) SessionTimeout() models.Duration {
+ return models.MustMakeDuration(time.Duration(0))
+}
+
+func (t *TestConfig) QueryTimeout() time.Duration {
+ return time.Duration(0)
+}
+
+func (t *TestConfig) UserAPITokenDuration() models.Duration {
+ return models.MustMakeDuration(time.Duration(0))
+}
+
+func (t *TestConfig) BaseUserAttr() string {
+ return "uid"
+}
+
+func (t *TestConfig) BaseDN() string {
+ return "dc=custom,dc=example,dc=com"
+}
+
+func (t *TestConfig) UsersDN() string {
+ return "ou=users"
+}
+
+func (t *TestConfig) GroupsDN() string {
+ return "ou=groups"
+}
+
+func (t *TestConfig) ActiveAttribute() string {
+ return "organizationalStatus"
+}
+
+func (t *TestConfig) ActiveAttributeAllowedValue() string {
+ return "ACTIVE"
+}
+
+func (t *TestConfig) AdminUserGroupCN() string {
+ return NodeAdminsGroupCN
+}
+
+func (t *TestConfig) EditUserGroupCN() string {
+ return NodeEditorsGroupCN
+}
+
+func (t *TestConfig) RunUserGroupCN() string {
+ return NodeRunnersGroupCN
+}
+
+func (t *TestConfig) ReadUserGroupCN() string {
+ return NodeReadOnlyGroupCN
+}
+
+func (t *TestConfig) UserApiTokenEnabled() bool {
+ return true
+}
+
+func (t *TestConfig) UpstreamSyncInterval() models.Duration {
+ return models.MustMakeDuration(time.Duration(0))
+}
+
+func (t *TestConfig) UpstreamSyncRateLimit() models.Duration {
+ return models.MustMakeDuration(time.Duration(0))
+}
diff --git a/core/sessions/ldapauth/ldap.go b/core/sessions/ldapauth/ldap.go
new file mode 100644
index 00000000000..04f6fbfbbb6
--- /dev/null
+++ b/core/sessions/ldapauth/ldap.go
@@ -0,0 +1,858 @@
+/*
+The LDAP authentication package forwards the credentials in the user session request
+for authentication with a configured upstream LDAP server
+
+This package relies on the two following local database tables:
+
+ ldap_sessions: Upon successful LDAP response, creates a keyed local copy of the user email
+ ldap_user_api_tokens: User created API tokens, tied to the node, storing user email.
+
+Note: user can have only one API token at a time, and token expiration is enforced
+
+User session and roles are cached and revalidated with the upstream service at the interval defined in
+the local LDAP config through the Application.sessionReaper implementation in reaper.go.
+
+Changes to the upstream identity server will propagate through and update local tables (web sessions, API tokens)
+by either removing the entries or updating the roles. This sync happens for every auth endpoint hit, and
+via the defined sync interval. One goroutine is created to coordinate the sync timing in the New function
+
+This implementation is read only; user mutation actions such as Delete are not supported.
+
+MFA is supported via the remote LDAP server implementation. A sufficient request timeout should accommodate
+a blocking auth call while the user responds to a potential push notification callback.
+*/
+package ldapauth
+
+import (
+ "crypto/subtle"
+ "database/sql"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/go-ldap/ldap/v3"
+ "github.com/jmoiron/sqlx"
+
+ "github.com/smartcontractkit/chainlink/v2/core/auth"
+ "github.com/smartcontractkit/chainlink/v2/core/bridges"
+ "github.com/smartcontractkit/chainlink/v2/core/config"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/logger/audit"
+ "github.com/smartcontractkit/chainlink/v2/core/services/pg"
+ "github.com/smartcontractkit/chainlink/v2/core/sessions"
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
+ "github.com/smartcontractkit/chainlink/v2/core/utils/mathutil"
+)
+
+const (
+ UniqueMemberAttribute = "uniqueMember"
+)
+
+var ErrUserNotInUpstream = errors.New("LDAP query returned no matching users")
+var ErrUserNoLDAPGroups = errors.New("user present in directory, but matching no role groups assigned")
+
+type ldapAuthenticator struct {
+ q pg.Q
+ ldapClient LDAPClient
+ config config.LDAP
+ lggr logger.Logger
+ auditLogger audit.AuditLogger
+}
+
+// ldapAuthenticator implements sessions.AuthenticationProvider interface
+var _ sessions.AuthenticationProvider = (*ldapAuthenticator)(nil)
+
+func NewLDAPAuthenticator(
+ db *sqlx.DB,
+ pgCfg pg.QConfig,
+ ldapCfg config.LDAP,
+ dev bool,
+ lggr logger.Logger,
+ auditLogger audit.AuditLogger,
+) (*ldapAuthenticator, error) {
+ namedLogger := lggr.Named("LDAPAuthenticationProvider")
+
+ // If not chainlink dev and not tls, error
+ if !dev && !ldapCfg.ServerTLS() {
+ return nil, errors.New("LDAP Authentication driver requires TLS when running in Production mode")
+ }
+
+ // Ensure all RBAC role mappings to LDAP Groups are defined, and required fields populated, or error on startup
+ if ldapCfg.AdminUserGroupCN() == "" || ldapCfg.EditUserGroupCN() == "" ||
+ ldapCfg.RunUserGroupCN() == "" || ldapCfg.ReadUserGroupCN() == "" {
+ return nil, errors.New("LDAP Group mapping from server group name for all local RBAC role required. Set group names for `_UserGroupCN` fields")
+ }
+ if ldapCfg.ServerAddress() == "" {
+ return nil, errors.New("LDAP ServerAddress config required")
+ }
+ if ldapCfg.ReadOnlyUserLogin() == "" {
+ return nil, errors.New("LDAP ReadOnlyUserLogin config required")
+ }
+
+ ldapAuth := ldapAuthenticator{
+ q: pg.NewQ(db, namedLogger, pgCfg),
+ ldapClient: newLDAPClient(ldapCfg),
+ config: ldapCfg,
+ lggr: lggr.Named("LDAPAuthenticationProvider"),
+ auditLogger: auditLogger,
+ }
+
+ // Single override of library defined global
+ ldap.DefaultTimeout = ldapCfg.QueryTimeout()
+
+ // Test initial connection and credentials
+ lggr.Infof("Attempting initial connection to configured LDAP server with bind as API user")
+ conn, err := ldapAuth.ldapClient.CreateEphemeralConnection()
+ if err != nil {
+ return nil, fmt.Errorf("unable to establish connection to LDAP server with provided URL and credentials: %w", err)
+ }
+ conn.Close()
+
+ // Store LDAP connection config for auth/new connection per request instead of persisted connection with reconnect
+ return &ldapAuth, nil
+}
+
+// FindUser will attempt to return an LDAP user with mapped role by email.
+func (l *ldapAuthenticator) FindUser(email string) (sessions.User, error) {
+ email = strings.ToLower(email)
+ foundUser := sessions.User{}
+
+ // First check for the supported local admin users table
+ var foundLocalAdminUser sessions.User
+ checkErr := l.q.Transaction(func(tx pg.Queryer) error {
+ sql := "SELECT * FROM users WHERE lower(email) = lower($1)"
+ return tx.Get(&foundLocalAdminUser, sql, email)
+ })
+ if checkErr != nil {
+ // If error is not nil, there was either an issue or no local users found
+ if !errors.Is(checkErr, sql.ErrNoRows) {
+ // If the error is not that no local user was found, log and exit
+ l.lggr.Errorf("error searching users table: %v", checkErr)
+ return sessions.User{}, errors.New("error Finding user")
+ }
+ } else {
+ // Error was nil, local user found. Return
+ return foundLocalAdminUser, nil
+ }
+
+ // Next, query for the user's "is active" property if defined
+ usersActive, err := l.validateUsersActive([]string{email})
+ if err != nil {
+ if errors.Is(err, ErrUserNotInUpstream) {
+ return foundUser, ErrUserNotInUpstream
+ }
+ l.lggr.Errorf("error in validateUsers call: %v", err)
+ return foundUser, errors.New("error running query to validate user active")
+ }
+ if !usersActive[0] {
+ return foundUser, errors.New("user not active")
+ }
+
+ conn, err := l.ldapClient.CreateEphemeralConnection()
+ if err != nil {
+ l.lggr.Errorf("error in LDAP dial: ", err)
+ return foundUser, errors.New("unable to establish connection to LDAP server with provided URL and credentials")
+ }
+ defer conn.Close()
+
+ // User email and role are the only upstream data that need to be queried for.
+ // Query the user's groups using the provided email; on success the result is the list of
+ // groups the uniqueMember belongs to
+ escapedEmail := ldap.EscapeFilter(email)
+ searchBaseDN := fmt.Sprintf("%s, %s", l.config.GroupsDN(), l.config.BaseDN())
+ filterQuery := fmt.Sprintf("(&(uniquemember=%s=%s,%s,%s))", l.config.BaseUserAttr(), escapedEmail, l.config.UsersDN(), l.config.BaseDN())
+ searchRequest := ldap.NewSearchRequest(
+ searchBaseDN,
+ ldap.ScopeWholeSubtree, ldap.NeverDerefAliases,
+ 0, int(l.config.QueryTimeout().Seconds()), false,
+ filterQuery,
+ []string{"cn"},
+ nil,
+ )
+
+ // Query the server
+ result, err := conn.Search(searchRequest)
+ if err != nil {
+ l.lggr.Errorf("error searching users in LDAP query: %v", err)
+ return foundUser, errors.New("error searching users in LDAP directory")
+ }
+
+ if len(result.Entries) == 0 {
+ // Provided email is not present in upstream LDAP server, local admin CLI auth is supported
+ // So query and check the users table as well before failing
+ if err = l.q.Transaction(func(tx pg.Queryer) error {
+ var localUserRole sessions.UserRole
+ if err = tx.Get(&localUserRole, "SELECT role FROM users WHERE email = $1", email); err != nil {
+ return err
+ }
+ foundUser = sessions.User{
+ Email: email,
+ Role: localUserRole,
+ }
+ return nil
+ }); err != nil {
+ // Above query for local user unsuccessful, return error
+ l.lggr.Warnf("No local users table user found with email %s", email)
+ return foundUser, errors.New("no users found with provided email")
+ }
+
+ // If the above query to the local users table was successful, return that local user's role
+ return foundUser, nil
+ }
+
+ // Populate found user by email and role based on matched group names
+ userRole, err := l.groupSearchResultsToUserRole(result.Entries)
+ if err != nil {
+ l.lggr.Warnf("User '%s' found but no matching assigned groups in LDAP to assume role", email)
+ return sessions.User{}, err
+ }
+
+ // Convert search result to sessions.User type with required fields
+ foundUser = sessions.User{
+ Email: email,
+ Role: userRole,
+ }
+
+ return foundUser, nil
+}
+
+// FindUserByAPIToken retrieves a possible stored user and role from the ldap_user_api_tokens table store
+func (l *ldapAuthenticator) FindUserByAPIToken(apiToken string) (sessions.User, error) {
+ if !l.config.UserApiTokenEnabled() {
+ return sessions.User{}, errors.New("API token is not enabled ")
+ }
+
+ var foundUser sessions.User
+ err := l.q.Transaction(func(tx pg.Queryer) error {
+ // Query the ldap user API token table for given token, user role and email are cached so
+ // no further upstream LDAP query is performed, sessions and tokens are synced against the upstream server
+ // via the UpstreamSyncInterval config and reaper.go sync implementation
+ var foundUserToken struct {
+ UserEmail string
+ UserRole sessions.UserRole
+ Valid bool
+ }
+ if err := tx.Get(&foundUserToken,
+ "SELECT user_email, user_role, created_at + $2 >= now() as valid FROM ldap_user_api_tokens WHERE token_key = $1",
+ apiToken, l.config.UserAPITokenDuration().Duration(),
+ ); err != nil {
+ return err
+ }
+ if !foundUserToken.Valid {
+ return sessions.ErrUserSessionExpired
+ }
+ foundUser = sessions.User{
+ Email: foundUserToken.UserEmail,
+ Role: foundUserToken.UserRole,
+ }
+ return nil
+ })
+ if err != nil {
+ if errors.Is(err, sessions.ErrUserSessionExpired) {
+ // API Token expired, purge
+ if _, execErr := l.q.Exec("DELETE FROM ldap_user_api_tokens WHERE token_key = $1", apiToken); err != nil {
+ l.lggr.Errorf("error purging stale ldap API token session: %v", execErr)
+ }
+ }
+ return sessions.User{}, err
+ }
+ return foundUser, nil
+}
+
+// ListUsers will load and return all active users in applicable LDAP groups, extended with local admin users as well
+func (l *ldapAuthenticator) ListUsers() ([]sessions.User, error) {
+ // For each defined role/group, query for the list of group members to gather the full list of possible users
+ users := []sessions.User{}
+ var err error
+
+ conn, err := l.ldapClient.CreateEphemeralConnection()
+ if err != nil {
+ l.lggr.Errorf("error in LDAP dial: ", err)
+ return users, errors.New("unable to establish connection to LDAP server with provided URL and credentials")
+ }
+ defer conn.Close()
+
+ // Query for list of uniqueMember IDs present in Admin group
+ adminUsers, err := l.ldapGroupMembersListToUser(conn, l.config.AdminUserGroupCN(), sessions.UserRoleAdmin)
+ if err != nil {
+ l.lggr.Errorf("error in ldapGroupMembersListToUser: ", err)
+ return users, errors.New("unable to list group users")
+ }
+ // Query for list of uniqueMember IDs present in Edit group
+ editUsers, err := l.ldapGroupMembersListToUser(conn, l.config.EditUserGroupCN(), sessions.UserRoleEdit)
+ if err != nil {
+ l.lggr.Errorf("error in ldapGroupMembersListToUser: ", err)
+ return users, errors.New("unable to list group users")
+ }
+ // Query for list of uniqueMember IDs present in Run group
+ runUsers, err := l.ldapGroupMembersListToUser(conn, l.config.RunUserGroupCN(), sessions.UserRoleRun)
+ if err != nil {
+ l.lggr.Errorf("error in ldapGroupMembersListToUser: ", err)
+ return users, errors.New("unable to list group users")
+ }
+ // Query for list of uniqueMember IDs present in Read group
+ readUsers, err := l.ldapGroupMembersListToUser(conn, l.config.ReadUserGroupCN(), sessions.UserRoleView)
+ if err != nil {
+ l.lggr.Errorf("error in ldapGroupMembersListToUser: ", err)
+ return users, errors.New("unable to list group users")
+ }
+
+ // Aggregate full list
+ users = append(users, adminUsers...)
+ users = append(users, editUsers...)
+ users = append(users, runUsers...)
+ users = append(users, readUsers...)
+
+ // Dedupe preserving order of highest role
+ uniqueRef := make(map[string]struct{})
+ dedupedUsers := []sessions.User{}
+ for _, user := range users {
+ if _, ok := uniqueRef[user.Email]; !ok {
+ uniqueRef[user.Email] = struct{}{}
+ dedupedUsers = append(dedupedUsers, user)
+ }
+ }
+
+ // If no active attribute to check is defined, the user simply being assigned the group is enough; return the full list
+ if l.config.ActiveAttribute() == "" {
+ return dedupedUsers, nil
+ }
+
+ // Now optionally validate that all uniqueMembers are active in the org/LDAP server
+ emails := []string{}
+ for _, user := range dedupedUsers {
+ emails = append(emails, user.Email)
+ }
+ activeUsers, err := l.validateUsersActive(emails)
+ if err != nil {
+ l.lggr.Errorf("error validating supplied user list: ", err)
+ return users, errors.New("error validating supplied user list")
+ }
+
+ // Filter non active users
+ returnUsers := []sessions.User{}
+ for i, active := range activeUsers {
+ if active {
+ returnUsers = append(returnUsers, dedupedUsers[i])
+ }
+ }
+
+ // Extend with local admin users
+ var localAdminUsers []sessions.User
+ if err := l.q.Transaction(func(tx pg.Queryer) error {
+ sql := "SELECT * FROM users ORDER BY email ASC;"
+ return tx.Select(&localAdminUsers, sql)
+ }); err != nil {
+ l.lggr.Errorf("error extending upstream LDAP users with local admin users in users table: ", err)
+ } else {
+ returnUsers = append(returnUsers, localAdminUsers...)
+ }
+
+ return returnUsers, nil
+}
+
+// ldapGroupMembersListToUser queries the LDAP server given a conn for a list of uniqueMember who are part of the parameterized group
+func (l *ldapAuthenticator) ldapGroupMembersListToUser(conn LDAPConn, groupNameCN string, roleToAssign sessions.UserRole) ([]sessions.User, error) {
+ users, err := ldapGroupMembersListToUser(
+ conn, groupNameCN, roleToAssign, l.config.GroupsDN(),
+ l.config.BaseDN(), l.config.QueryTimeout(),
+ l.lggr,
+ )
+ if err != nil {
+ l.lggr.Errorf("error listing members of group (%s): %v", groupNameCN, err)
+ return users, errors.New("error searching group members in LDAP directory")
+ }
+ return users, nil
+}
+
+// AuthorizedUserWithSession will return the API user associated with the Session ID if it
+// exists and hasn't expired, and update session's LastUsed field. The state of the upstream LDAP server
+// is polled and synced at the defined interval via a SleeperTask
+func (l *ldapAuthenticator) AuthorizedUserWithSession(sessionID string) (sessions.User, error) {
+ if len(sessionID) == 0 {
+ return sessions.User{}, errors.New("session ID cannot be empty")
+ }
+ var foundUser sessions.User
+ err := l.q.Transaction(func(tx pg.Queryer) error {
+ // Query the ldap_sessions table for given session ID, user role and email are cached so
+ // no further upstream LDAP query is performed
+ var foundSession struct {
+ UserEmail string
+ UserRole sessions.UserRole
+ Valid bool
+ }
+ if err := tx.Get(&foundSession,
+ "SELECT user_email, user_role, created_at + $2 >= now() as valid FROM ldap_sessions WHERE id = $1",
+ sessionID, l.config.SessionTimeout().Duration(),
+ ); err != nil {
+ return sessions.ErrUserSessionExpired
+ }
+ if !foundSession.Valid {
+ // Sessions expired, purge
+ return sessions.ErrUserSessionExpired
+ }
+ foundUser = sessions.User{
+ Email: foundSession.UserEmail,
+ Role: foundSession.UserRole,
+ }
+ return nil
+ })
+ if err != nil {
+ if errors.Is(err, sessions.ErrUserSessionExpired) {
+ if _, execErr := l.q.Exec("DELETE FROM ldap_sessions WHERE id = $1", sessionID); err != nil {
+ l.lggr.Errorf("error purging stale ldap session: %v", execErr)
+ }
+ }
+ return sessions.User{}, err
+ }
+ return foundUser, nil
+}
+
+// DeleteUser is not supported for read only LDAP: user lifecycle is owned by the
+// upstream directory, so this always returns sessions.ErrNotSupported.
+func (l *ldapAuthenticator) DeleteUser(email string) error {
+	return sessions.ErrNotSupported
+}
+
+// DeleteUserSession removes an ldap_sessions table entry by ID.
+// Deleting a non-existent session ID is not an error (DELETE affects zero rows).
+func (l *ldapAuthenticator) DeleteUserSession(sessionID string) error {
+	_, err := l.q.Exec("DELETE FROM ldap_sessions WHERE id = $1", sessionID)
+	return err
+}
+
+// GetUserWebAuthn returns an empty stub, MFA token prompt is handled either by the upstream
+// server blocking callback, or an error code to pass a OTP.
+// Always returns an empty slice and nil error so callers treat WebAuthn as unconfigured.
+func (l *ldapAuthenticator) GetUserWebAuthn(email string) ([]sessions.WebAuthn, error) {
+	return []sessions.WebAuthn{}, nil
+}
+
+// CreateSession will forward the session request credentials to the
+// LDAP server, querying for a user + role response if username and
+// password match. The API call is blocking with timeout, so a sufficient timeout
+// should allow the user to respond to potential MFA push notifications.
+// On upstream failure the local users table is tried as a fallback (local CLI admin).
+// Returns the new session ID on success.
+func (l *ldapAuthenticator) CreateSession(sr sessions.SessionRequest) (string, error) {
+	conn, err := l.ldapClient.CreateEphemeralConnection()
+	if err != nil {
+		return "", errors.New("unable to establish connection to LDAP server with provided URL and credentials")
+	}
+	defer conn.Close()
+
+	// Accumulates the upstream failure (bind or group lookup); checked before fallback
+	var returnErr error
+
+	// Attempt to LDAP Bind with user provided credentials
+	escapedEmail := ldap.EscapeFilter(strings.ToLower(sr.Email))
+	searchBaseDN := fmt.Sprintf("%s=%s,%s,%s", l.config.BaseUserAttr(), escapedEmail, l.config.UsersDN(), l.config.BaseDN())
+	if err = conn.Bind(searchBaseDN, sr.Password); err != nil {
+		l.lggr.Infof("Error binding user authentication request in LDAP Bind: %v", err)
+		returnErr = errors.New("unable to log in with LDAP server. Check credentials")
+	}
+
+	// Bind was successful meaning user and credentials are present in LDAP directory
+	// Reuse FindUser functionality to fetch user roles used to create ldap_session entry
+	// with cached user email and role
+	// NOTE(review): FindUser is called even when the Bind above failed (returnErr set);
+	// its result is discarded in that case. Also, escapedEmail is already filter-escaped
+	// here — confirm FindUser does not escape it a second time.
+	foundUser, err := l.FindUser(escapedEmail)
+	if err != nil {
+		l.lggr.Infof("Successful user login, but error querying for user groups: user: %s, error %v", escapedEmail, err)
+		returnErr = errors.New("log in successful, but no assigned groups to assume role")
+	}
+
+	isLocalUser := false
+	if returnErr != nil {
+		// Unable to log in against LDAP server, attempt fallback local auth with credentials, case of local CLI Admin account
+		// Successful local user sessions can not be managed by the upstream server and have expiration handled by the reaper sync module
+		foundUser, returnErr = l.localLoginFallback(sr)
+		isLocalUser = true
+	}
+
+	// If err is still populated, return
+	if returnErr != nil {
+		return "", returnErr
+	}
+
+	l.lggr.Infof("Successful LDAP login request for user %s - %s", sr.Email, foundUser.Role)
+
+	// Save session, user, and role to database. Given a session ID for future queries, the LDAP server will not be queried
+	// Sessions are set to expire after the duration + creation date elapsed, and are synced on an interval against the upstream
+	// LDAP server
+	session := sessions.NewSession()
+	_, err = l.q.Exec(
+		"INSERT INTO ldap_sessions (id, user_email, user_role, localauth_user, created_at) VALUES ($1, $2, $3, $4, now())",
+		session.ID,
+		strings.ToLower(sr.Email),
+		foundUser.Role,
+		isLocalUser,
+	)
+	if err != nil {
+		l.lggr.Errorf("unable to create new session in ldap_sessions table %v", err)
+		return "", fmt.Errorf("error creating local LDAP session: %w", err)
+	}
+
+	l.auditLogger.Audit(audit.AuthLoginSuccessNo2FA, map[string]interface{}{"email": sr.Email})
+
+	return session.ID, nil
+}
+
+// ClearNonCurrentSessions removes all ldap_sessions but the id passed in,
+// logging out every other active session for all users.
+func (l *ldapAuthenticator) ClearNonCurrentSessions(sessionID string) error {
+	_, err := l.q.Exec("DELETE FROM ldap_sessions where id != $1", sessionID)
+	return err
+}
+
+// CreateUser is not supported for read only LDAP: users are provisioned in the
+// upstream directory, so this always returns sessions.ErrNotSupported.
+func (l *ldapAuthenticator) CreateUser(user *sessions.User) error {
+	return sessions.ErrNotSupported
+}
+
+// UpdateRole is not supported for read only LDAP: roles derive from upstream group
+// membership, so this always returns sessions.ErrNotSupported.
+func (l *ldapAuthenticator) UpdateRole(email, newRole string) (sessions.User, error) {
+	return sessions.User{}, sessions.ErrNotSupported
+}
+
+// SetPassword for remote users is not supported via the read only LDAP implementation, however change password
+// in the context of updating a local admin user's password is required.
+// Returns sessions.ErrNotSupported if the user is not present in the local users table.
+func (l *ldapAuthenticator) SetPassword(user *sessions.User, newPassword string) error {
+	// Ensure specified user is part of the local admins user table
+	var localAdminUser sessions.User
+	if err := l.q.Transaction(func(tx pg.Queryer) error {
+		sql := "SELECT * FROM users WHERE lower(email) = lower($1)"
+		return tx.Get(&localAdminUser, sql, user.Email)
+	}); err != nil {
+		l.lggr.Infof("Can not change password, local user with email not found in users table: %s, err: %v", user.Email, err)
+		return sessions.ErrNotSupported
+	}
+
+	// User is local admin, save new password
+	hashedPassword, err := utils.HashPassword(newPassword)
+	if err != nil {
+		return err
+	}
+	if err := l.q.Transaction(func(tx pg.Queryer) error {
+		// Match emails case-insensitively, consistent with the lookup above;
+		// a case-sensitive match could miss the row the SELECT just found
+		sql := "UPDATE users SET hashed_password = $1, updated_at = now() WHERE lower(email) = lower($2) RETURNING *"
+		return tx.Get(user, sql, hashedPassword, user.Email)
+	}); err != nil {
+		l.lggr.Errorf("unable to set password for user: %s, err: %v", user.Email, err)
+		return errors.New("unable to save password")
+	}
+	return nil
+}
+
+// TestPassword tests if an LDAP login bind can be performed with provided credentials, returns nil if success
+func (l *ldapAuthenticator) TestPassword(email string, password string) error {
+	conn, err := l.ldapClient.CreateEphemeralConnection()
+	if err != nil {
+		return errors.New("unable to establish connection to LDAP server with provided URL and credentials")
+	}
+	defer conn.Close()
+
+	// First attempt an upstream LDAP Bind against the DN built from the supplied email
+	normalizedEmail := ldap.EscapeFilter(strings.ToLower(email))
+	userDN := fmt.Sprintf("%s=%s,%s,%s", l.config.BaseUserAttr(), normalizedEmail, l.config.UsersDN(), l.config.BaseDN())
+	bindErr := conn.Bind(userDN, password)
+	if bindErr == nil {
+		return nil
+	}
+	l.lggr.Infof("Error binding user authentication request in TestPassword call LDAP Bind: %v", bindErr)
+
+	// Fall back to test local users table in case of supported local CLI users as well
+	var storedHash string
+	if getErr := l.q.Get(&storedHash, "SELECT hashed_password FROM users WHERE lower(email) = lower($1)", email); getErr != nil {
+		return errors.New("invalid credentials")
+	}
+	if !utils.CheckPasswordHash(password, storedHash) {
+		return errors.New("invalid credentials")
+	}
+
+	return nil
+}
+
+// CreateAndSetAuthToken generates a new credential token with the user role
+func (l *ldapAuthenticator) CreateAndSetAuthToken(user *sessions.User) (*auth.Token, error) {
+	// Mint a fresh token and persist it; any storage failure aborts the whole operation
+	token := auth.NewToken()
+	if err := l.SetAuthToken(user, token); err != nil {
+		return nil, err
+	}
+	return token, nil
+}
+
+// SetAuthToken updates the user to use the given Authentication Token.
+// Any existing tokens for the user are replaced; the localauth_user flag is set for
+// users present in the local users table so the reaper/sync daemon does not purge them.
+func (l *ldapAuthenticator) SetAuthToken(user *sessions.User, token *auth.Token) error {
+	if !l.config.UserApiTokenEnabled() {
+		return errors.New("API token is not enabled ")
+	}
+
+	salt := utils.NewSecret(utils.DefaultSecretSize)
+	hashedSecret, err := auth.HashedSecret(token, salt)
+	if err != nil {
+		return fmt.Errorf("LDAPAuth SetAuthToken hashed secret error: %w", err)
+	}
+
+	err = l.q.Transaction(func(tx pg.Queryer) error {
+		// Is this user a local CLI Admin or upstream LDAP user?
+		// Check presence in local users table. Set localauth_user column true if present.
+		// This flag omits the session/token from being purged by the sync daemon/reaper.go
+		isLocalCLIAdmin := false
+		// Use the transaction queryer tx for every statement so they all run in one
+		// transaction; previously these went through l.q and escaped the transaction.
+		if err := tx.QueryRow("SELECT EXISTS (SELECT 1 FROM users WHERE email = $1)", user.Email).Scan(&isLocalCLIAdmin); err != nil {
+			return fmt.Errorf("error checking user presence in users table: %w", err)
+		}
+
+		// Remove any existing API tokens
+		if _, err := tx.Exec("DELETE FROM ldap_user_api_tokens WHERE user_email = $1", user.Email); err != nil {
+			return fmt.Errorf("error executing DELETE FROM ldap_user_api_tokens: %w", err)
+		}
+		// Create new API token for user
+		if _, err := tx.Exec(
+			"INSERT INTO ldap_user_api_tokens (user_email, user_role, localauth_user, token_key, token_salt, token_hashed_secret, created_at) VALUES ($1, $2, $3, $4, $5, $6, now())",
+			user.Email,
+			user.Role,
+			isLocalCLIAdmin,
+			token.AccessKey,
+			salt,
+			hashedSecret,
+		); err != nil {
+			return fmt.Errorf("failed insert into ldap_user_api_tokens: %w", err)
+		}
+		return nil
+	})
+	if err != nil {
+		return errors.New("error creating API token")
+	}
+
+	l.auditLogger.Audit(audit.APITokenCreated, map[string]interface{}{"user": user.Email})
+	return nil
+}
+
+// DeleteAuthToken clears and disables the users Authentication Token.
+func (l *ldapAuthenticator) DeleteAuthToken(user *sessions.User) error {
+	// Tokens are keyed on user_email in ldap_user_api_tokens (see SetAuthToken's INSERT).
+	// Previously this deleted on a non-existent `email` column and bound no argument
+	// for $1, so the statement always failed.
+	_, err := l.q.Exec("DELETE FROM ldap_user_api_tokens WHERE user_email = $1", user.Email)
+	return err
+}
+
+// SaveWebAuthn is not supported for read only LDAP: MFA is delegated upstream,
+// so this always returns sessions.ErrNotSupported.
+func (l *ldapAuthenticator) SaveWebAuthn(token *sessions.WebAuthn) error {
+	return sessions.ErrNotSupported
+}
+
+// Sessions returns all ldap_sessions rows ordered by creation time, limited by the
+// offset/limit parameters. Returns the underlying query error on failure.
+func (l *ldapAuthenticator) Sessions(offset, limit int) ([]sessions.Session, error) {
+	// Named sessionList to avoid shadowing the imported `sessions` package
+	var sessionList []sessions.Session
+	sql := `SELECT * FROM ldap_sessions ORDER BY created_at, id LIMIT $1 OFFSET $2;`
+	if err := l.q.Select(&sessionList, sql, limit, offset); err != nil {
+		// Propagate the error; previously it was silently swallowed (returned nil)
+		return sessionList, err
+	}
+	return sessionList, nil
+}
+
+// FindExternalInitiator supports the 'Run' role external initiator header auth functionality.
+// Looks up the external initiator row by the token's access key; returns the query error
+// (e.g. no-rows) unchanged for the caller to interpret.
+func (l *ldapAuthenticator) FindExternalInitiator(eia *auth.Token) (*bridges.ExternalInitiator, error) {
+	exi := &bridges.ExternalInitiator{}
+	err := l.q.Get(exi, `SELECT * FROM external_initiators WHERE access_key = $1`, eia.AccessKey)
+	return exi, err
+}
+
+// localLoginFallback tests the credentials provided against the 'local' authentication method
+// This covers the case of local CLI API calls requiring local login separate from the LDAP server
+func (l *ldapAuthenticator) localLoginFallback(sr sessions.SessionRequest) (sessions.User, error) {
+	var localUser sessions.User
+	if err := l.q.Get(&localUser, "SELECT * FROM users WHERE lower(email) = lower($1)", sr.Email); err != nil {
+		return localUser, err
+	}
+	// Constant-time email comparison to avoid timing side channels on lookup
+	if !constantTimeEmailCompare(strings.ToLower(sr.Email), strings.ToLower(localUser.Email)) {
+		l.auditLogger.Audit(audit.AuthLoginFailedEmail, map[string]interface{}{"email": sr.Email})
+		return localUser, errors.New("invalid email")
+	}
+	// Verify the supplied password against the stored hash
+	if !utils.CheckPasswordHash(sr.Password, localUser.HashedPassword) {
+		l.auditLogger.Audit(audit.AuthLoginFailedPassword, map[string]interface{}{"email": sr.Email})
+		return localUser, errors.New("invalid password")
+	}
+
+	return localUser, nil
+}
+
+// validateUsersActive performs an additional LDAP server query for the supplied emails, checking the
+// returned user data for an 'active' property defined optionally in the config.
+// Returns same length bool 'valid' array, indexed by sorted email.
+// Returns ErrUserNotInUpstream if the directory returns no entries at all.
+func (l *ldapAuthenticator) validateUsersActive(emails []string) ([]bool, error) {
+	validUsers := make([]bool, len(emails))
+	// If active attribute to check is not defined in config, skip
+	if l.config.ActiveAttribute() == "" {
+		// fill with valids
+		for i := range emails {
+			validUsers[i] = true
+		}
+		return validUsers, nil
+	}
+
+	conn, err := l.ldapClient.CreateEphemeralConnection()
+	if err != nil {
+		// Fixed: format string previously lacked a %v verb for err
+		l.lggr.Errorf("error in LDAP dial: %v", err)
+		return validUsers, errors.New("unable to establish connection to LDAP server with provided URL and credentials")
+	}
+	defer conn.Close()
+
+	// Build the full email list query to pull all 'isActive' information for each user specified in one query
+	filterQuery := "(|"
+	for _, email := range emails {
+		escapedEmail := ldap.EscapeFilter(email)
+		filterQuery = fmt.Sprintf("%s(%s=%s)", filterQuery, l.config.BaseUserAttr(), escapedEmail)
+	}
+	filterQuery = fmt.Sprintf("(&%s))", filterQuery)
+	searchBaseDN := fmt.Sprintf("%s,%s", l.config.UsersDN(), l.config.BaseDN())
+	searchRequest := ldap.NewSearchRequest(
+		searchBaseDN,
+		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases,
+		0, int(l.config.QueryTimeout().Seconds()), false,
+		filterQuery,
+		[]string{l.config.BaseUserAttr(), l.config.ActiveAttribute()},
+		nil,
+	)
+	// Query LDAP server for the ActiveAttribute property of each specified user
+	results, err := conn.Search(searchRequest)
+	if err != nil {
+		l.lggr.Errorf("error searching user in LDAP query: %v", err)
+		return validUsers, errors.New("error searching users in LDAP directory")
+	}
+
+	// Ensure user response entries
+	if len(results.Entries) == 0 {
+		return validUsers, ErrUserNotInUpstream
+	}
+
+	// Pull expected ActiveAttribute value from list of string possible values
+	// keyed on email for final step to return flag bool list where order is preserved
+	emailToActiveMap := make(map[string]bool)
+	for _, result := range results.Entries {
+		isActiveAttribute := result.GetAttributeValue(l.config.ActiveAttribute())
+		uidAttribute := result.GetAttributeValue(l.config.BaseUserAttr())
+		emailToActiveMap[uidAttribute] = isActiveAttribute == l.config.ActiveAttributeAllowedValue()
+	}
+	// Emails absent from the map (not returned by the server) remain false
+	for i, email := range emails {
+		active, ok := emailToActiveMap[email]
+		if ok && active {
+			validUsers[i] = true
+		}
+	}
+
+	return validUsers, nil
+}
+
+// ldapGroupMembersListToUser queries the LDAP server given a conn for a list of uniqueMember who are part of the parameterized group. Reused by sync.go
+// Each member found is mapped to a sessions.User carrying roleToAssign; entries that
+// cannot be parsed for a uid component are logged and skipped rather than failing the call.
+func ldapGroupMembersListToUser(
+	conn LDAPConn,
+	groupNameCN string,
+	roleToAssign sessions.UserRole,
+	groupsDN string,
+	baseDN string,
+	queryTimeout time.Duration,
+	lggr logger.Logger,
+) ([]sessions.User, error) {
+	users := []sessions.User{}
+	// Prepare and query the GroupsDN for the specified group name
+	// NOTE(review): this DN joins with ", " while validateUsersActive uses "%s,%s" —
+	// servers generally tolerate the space, but confirm and make consistent
+	searchBaseDN := fmt.Sprintf("%s, %s", groupsDN, baseDN)
+	filterQuery := fmt.Sprintf("(&(cn=%s))", groupNameCN)
+	searchRequest := ldap.NewSearchRequest(
+		searchBaseDN,
+		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases,
+		0, int(queryTimeout.Seconds()), false,
+		filterQuery,
+		[]string{UniqueMemberAttribute},
+		nil,
+	)
+	result, err := conn.Search(searchRequest)
+	if err != nil {
+		lggr.Errorf("error searching group members in LDAP query: %v", err)
+		return users, errors.New("error searching group members in LDAP directory")
+	}
+
+	// The result.Entry query response here is for the 'group' type of LDAP resource. The result should be a single entry, containing
+	// a single Attribute named 'uniqueMember' containing a list of string Values. These Values are strings that should be returned in
+	// the format "uid=test.user@example.com,ou=users,dc=example,dc=com". The 'uid' is then manually parsed here as the library does
+	// not expose the functionality
+	if len(result.Entries) != 1 {
+		lggr.Errorf("unexpected length of query results for group user members, expected one got %d", len(result.Entries))
+		return users, errors.New("error searching group members in LDAP directory")
+	}
+
+	// Get string list of members from 'uniqueMember' attribute
+	uniqueMemberValues := result.Entries[0].GetAttributeValues(UniqueMemberAttribute)
+	for _, uniqueMemberEntry := range uniqueMemberValues {
+		parts := strings.Split(uniqueMemberEntry, ",") // Split attribute value on comma (uid, ou, dc parts)
+		uidComponent := ""
+		for _, part := range parts { // Iterate parts for "uid="
+			if strings.HasPrefix(part, "uid=") {
+				uidComponent = part
+				break
+			}
+		}
+		if uidComponent == "" {
+			// Malformed member string: log and skip rather than abort the whole listing
+			lggr.Errorf("unexpected LDAP group query response for unique members - expected list of LDAP Values for uniqueMember containing LDAP strings in format uid=test.user@example.com,ou=users,dc=example,dc=com. Got %s", uniqueMemberEntry)
+			continue
+		}
+		// Map each user email to the sessions.User struct
+		userEmail := strings.TrimPrefix(uidComponent, "uid=")
+		users = append(users, sessions.User{
+			Email: userEmail,
+			Role:  roleToAssign,
+		})
+	}
+	return users, nil
+}
+
+// groupSearchResultsToUserRole takes a list of LDAP group search result entries and returns the associated
+// internal user role based on the group name mappings defined in the configuration.
+// Thin wrapper around the exported GroupSearchResultsToUserRole using the configured group CNs.
+func (l *ldapAuthenticator) groupSearchResultsToUserRole(ldapGroups []*ldap.Entry) (sessions.UserRole, error) {
+	return GroupSearchResultsToUserRole(
+		ldapGroups,
+		l.config.AdminUserGroupCN(),
+		l.config.EditUserGroupCN(),
+		l.config.RunUserGroupCN(),
+		l.config.ReadUserGroupCN(),
+	)
+}
+
+// GroupSearchResultsToUserRole maps LDAP group entries to the highest-priority internal
+// user role. Precedence order is Admin > Edit > Run > View; if no configured group CN
+// matches any entry's "cn" attribute, UserRoleView plus ErrUserNoLDAPGroups is returned.
+func GroupSearchResultsToUserRole(ldapGroups []*ldap.Entry, adminCN string, editCN string, runCN string, readCN string) (sessions.UserRole, error) {
+	// Ordered precedence table: the first role whose group CN appears in the results wins
+	rolesByPrecedence := []struct {
+		groupCN string
+		role    sessions.UserRole
+	}{
+		{adminCN, sessions.UserRoleAdmin},
+		{editCN, sessions.UserRoleEdit},
+		{runCN, sessions.UserRoleRun},
+		{readCN, sessions.UserRoleView},
+	}
+	for _, mapping := range rolesByPrecedence {
+		for _, group := range ldapGroups {
+			if group.GetAttributeValue("cn") == mapping.groupCN {
+				return mapping.role, nil
+			}
+		}
+	}
+	// No role group found, error
+	return sessions.UserRoleView, ErrUserNoLDAPGroups
+}
+
+// Minimum padded length for constant-time email comparison; both inputs are padded
+// to at least this many bytes so comparison time does not reveal typical email lengths.
+const constantTimeEmailLength = 256
+
+// constantTimeEmailCompare reports whether left and right are equal, comparing in
+// constant time to avoid timing side channels. Both strings are copied into
+// zero-padded buffers of identical size (the max of 256 and either input's length),
+// so inputs longer than 256 bytes are still compared in full.
+func constantTimeEmailCompare(left, right string) bool {
+	length := mathutil.Max(constantTimeEmailLength, len(left), len(right))
+	leftBytes := make([]byte, length)
+	rightBytes := make([]byte, length)
+	copy(leftBytes, left)
+	copy(rightBytes, right)
+	return subtle.ConstantTimeCompare(leftBytes, rightBytes) == 1
+}
diff --git a/core/sessions/ldapauth/ldap_test.go b/core/sessions/ldapauth/ldap_test.go
new file mode 100644
index 00000000000..c85e0db831e
--- /dev/null
+++ b/core/sessions/ldapauth/ldap_test.go
@@ -0,0 +1,639 @@
+package ldapauth_test
+
+import (
+ "errors"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/go-ldap/ldap/v3"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+
+ "github.com/jmoiron/sqlx"
+
+ "github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
+ "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/logger/audit"
+ "github.com/smartcontractkit/chainlink/v2/core/sessions"
+ "github.com/smartcontractkit/chainlink/v2/core/sessions/ldapauth"
+ "github.com/smartcontractkit/chainlink/v2/core/sessions/ldapauth/mocks"
+)
+
+// setupAuthenticationProvider constructs an LDAP AuthenticationProvider backed by a
+// test database and config, with the LDAP client replaced by the supplied mock so
+// no real directory connection is made. Returns the DB handle and the provider.
+func setupAuthenticationProvider(t *testing.T, ldapClient ldapauth.LDAPClient) (*sqlx.DB, sessions.AuthenticationProvider) {
+	t.Helper()
+
+	cfg := ldapauth.TestConfig{}
+	db := pgtest.NewSqlxDB(t)
+	ldapAuthProvider, err := ldapauth.NewTestLDAPAuthenticator(db, pgtest.NewQConfig(true), &cfg, true, logger.TestLogger(t), &audit.AuditLoggerService{})
+	if err != nil {
+		t.Fatalf("Error constructing NewTestLDAPAuthenticator: %v\n", err)
+	}
+
+	// Override the LDAPClient responsible for returning the *ldap.Conn struct with Mock
+	ldapAuthProvider.SetLDAPClient(ldapClient)
+	return db, ldapAuthProvider
+}
+
+// TestORM_FindUser_Empty asserts FindUser errors when the user exists neither in the
+// upstream LDAP directory nor as a local admin.
+func TestORM_FindUser_Empty(t *testing.T) {
+	t.Parallel()
+
+	mockLdapClient := mocks.NewLDAPClient(t)
+	mockLdapConnProvider := mocks.NewLDAPConn(t)
+	mockLdapClient.On("CreateEphemeralConnection").Return(mockLdapConnProvider, nil)
+	mockLdapConnProvider.On("Close").Return(nil)
+
+	// Initialize LDAP Authentication Provider with mock client
+	_, ldapAuthProvider := setupAuthenticationProvider(t, mockLdapClient)
+
+	// User not in upstream, return no entry
+	expectedResults := ldap.SearchResult{}
+
+	// On search performed for validateUsersActive
+	mockLdapConnProvider.On("Search", mock.AnythingOfType("*ldap.SearchRequest")).Return(&expectedResults, nil)
+
+	// Not in upstream, no local admin users, expect error
+	_, err := ldapAuthProvider.FindUser("unknown-user")
+	require.ErrorContains(t, err, "LDAP query returned no matching users")
+}
+
+// TestORM_FindUser_NoGroups asserts FindUser errors for an upstream user that is
+// active but belongs to no role groups.
+func TestORM_FindUser_NoGroups(t *testing.T) {
+	t.Parallel()
+
+	mockLdapClient := mocks.NewLDAPClient(t)
+	mockLdapConnProvider := mocks.NewLDAPConn(t)
+	mockLdapClient.On("CreateEphemeralConnection").Return(mockLdapConnProvider, nil)
+	mockLdapConnProvider.On("Close").Return(nil)
+
+	// Initialize LDAP Authentication Provider with mock client
+	_, ldapAuthProvider := setupAuthenticationProvider(t, mockLdapClient)
+
+	// User present in Upstream but no groups assigned
+	user1 := cltest.MustRandomUser(t)
+	expectedResults := ldap.SearchResult{
+		Entries: []*ldap.Entry{
+			{
+				DN: "cn=User One,ou=Users,dc=example,dc=com",
+				Attributes: []*ldap.EntryAttribute{
+					{
+						Name:   "organizationalStatus",
+						Values: []string{"ACTIVE"},
+					},
+					{
+						Name:   "uid",
+						Values: []string{user1.Email},
+					},
+				},
+			},
+		},
+	}
+
+	// On search performed for validateUsersActive
+	mockLdapConnProvider.On("Search", mock.AnythingOfType("*ldap.SearchRequest")).Return(&expectedResults, nil)
+
+	// No Groups, expect error
+	_, err := ldapAuthProvider.FindUser(user1.Email)
+	require.ErrorContains(t, err, "user present in directory, but matching no role groups assigned")
+}
+
+// TestORM_FindUser_NotActive asserts FindUser errors when the upstream user's
+// active attribute does not match the allowed value.
+func TestORM_FindUser_NotActive(t *testing.T) {
+	t.Parallel()
+
+	mockLdapClient := mocks.NewLDAPClient(t)
+	mockLdapConnProvider := mocks.NewLDAPConn(t)
+	mockLdapClient.On("CreateEphemeralConnection").Return(mockLdapConnProvider, nil)
+	mockLdapConnProvider.On("Close").Return(nil)
+
+	// Initialize LDAP Authentication Provider with mock client
+	_, ldapAuthProvider := setupAuthenticationProvider(t, mockLdapClient)
+
+	// User present in Upstream but not active
+	user1 := cltest.MustRandomUser(t)
+	expectedResults := ldap.SearchResult{
+		Entries: []*ldap.Entry{
+			{
+				DN: "cn=User One,ou=Users,dc=example,dc=com",
+				Attributes: []*ldap.EntryAttribute{
+					{
+						Name:   "organizationalStatus",
+						Values: []string{"INACTIVE"},
+					},
+					{
+						Name:   "uid",
+						Values: []string{user1.Email},
+					},
+				},
+			},
+		},
+	}
+
+	// On search performed for validateUsersActive
+	mockLdapConnProvider.On("Search", mock.AnythingOfType("*ldap.SearchRequest")).Return(&expectedResults, nil)
+
+	// User not active, expect error
+	_, err := ldapAuthProvider.FindUser(user1.Email)
+	require.ErrorContains(t, err, "user not active")
+}
+
+// TestORM_FindUser_Single asserts the happy path: an active upstream user in the
+// NodeEditors group resolves to UserRoleEdit.
+func TestORM_FindUser_Single(t *testing.T) {
+	t.Parallel()
+
+	mockLdapClient := mocks.NewLDAPClient(t)
+	mockLdapConnProvider := mocks.NewLDAPConn(t)
+	mockLdapClient.On("CreateEphemeralConnection").Return(mockLdapConnProvider, nil)
+	mockLdapConnProvider.On("Close").Return(nil)
+
+	// Initialize LDAP Authentication Provider with mock client
+	_, ldapAuthProvider := setupAuthenticationProvider(t, mockLdapClient)
+
+	// User present and valid
+	user1 := cltest.MustRandomUser(t)
+	expectedResults := ldap.SearchResult{ // Users query
+		Entries: []*ldap.Entry{
+			{
+				DN: "cn=User One,ou=Users,dc=example,dc=com",
+				Attributes: []*ldap.EntryAttribute{
+					{
+						Name:   "organizationalStatus",
+						Values: []string{"ACTIVE"},
+					},
+					{
+						Name:   "uid",
+						Values: []string{user1.Email},
+					},
+				},
+			},
+		},
+	}
+	expectedGroupResults := ldap.SearchResult{ // Groups query
+		Entries: []*ldap.Entry{
+			{
+				DN: "cn=NodeEditors,ou=Users,dc=example,dc=com",
+				Attributes: []*ldap.EntryAttribute{
+					{
+						Name:   "cn",
+						Values: []string{"NodeEditors"},
+					},
+				},
+			},
+		},
+	}
+
+	// On search performed for validateUsersActive
+	mockLdapConnProvider.On("Search", mock.AnythingOfType("*ldap.SearchRequest")).Return(&expectedResults, nil).Once()
+
+	// Second call on user groups search
+	mockLdapConnProvider.On("Search", mock.AnythingOfType("*ldap.SearchRequest")).Return(&expectedGroupResults, nil).Once()
+
+	// User active, and has editor group. Expect success
+	user, err := ldapAuthProvider.FindUser(user1.Email)
+	require.NoError(t, err)
+	require.Equal(t, user1.Email, user.Email)
+	require.Equal(t, sessions.UserRoleEdit, user.Role)
+}
+
+// TestORM_FindUser_FallbackMatchLocalAdmin asserts FindUser falls back to the local
+// users table when the user is absent upstream.
+func TestORM_FindUser_FallbackMatchLocalAdmin(t *testing.T) {
+	t.Parallel()
+
+	// Initialize LDAP Authentication Provider with mock client
+	mockLdapClient := mocks.NewLDAPClient(t)
+	_, ldapAuthProvider := setupAuthenticationProvider(t, mockLdapClient)
+
+	// Not in upstream, but utilize test fixture admin user presence in test DB. Succeed
+	user, err := ldapAuthProvider.FindUser(cltest.APIEmailAdmin)
+	require.NoError(t, err)
+	require.Equal(t, cltest.APIEmailAdmin, user.Email)
+	require.Equal(t, sessions.UserRoleAdmin, user.Role)
+}
+
+// TestORM_FindUserByAPIToken_Success asserts a valid, unexpired API token row
+// resolves to its cached user email and role.
+func TestORM_FindUserByAPIToken_Success(t *testing.T) {
+	// Initialize LDAP Authentication Provider with mock client
+	mockLdapClient := mocks.NewLDAPClient(t)
+	db, ldapAuthProvider := setupAuthenticationProvider(t, mockLdapClient)
+
+	// Ensure valid tokens return a user with role
+	testEmail := "test@test.com"
+	apiToken := "example"
+	_, err := db.Exec("INSERT INTO ldap_user_api_tokens values ($1, 'edit', false, $2, '', '', now())", testEmail, apiToken)
+	require.NoError(t, err)
+
+	// Found user by API token in specific ldap_user_api_tokens table
+	user, err := ldapAuthProvider.FindUserByAPIToken(apiToken)
+	require.NoError(t, err)
+	require.Equal(t, testEmail, user.Email)
+	require.Equal(t, sessions.UserRoleEdit, user.Role)
+}
+
+// TestORM_FindUserByAPIToken_Expired asserts a token older than the configured
+// UserAPITokenDuration is rejected with ErrUserSessionExpired.
+func TestORM_FindUserByAPIToken_Expired(t *testing.T) {
+	// Config only used to read the default token duration for computing expiry
+	cfg := ldapauth.TestConfig{}
+
+	// Initialize LDAP Authentication Provider with mock client
+	mockLdapClient := mocks.NewLDAPClient(t)
+	db, ldapAuthProvider := setupAuthenticationProvider(t, mockLdapClient)
+
+	// Ensure valid tokens return a user with role
+	testEmail := "test@test.com"
+	apiToken := "example"
+	expiredTime := time.Now().Add(-cfg.UserAPITokenDuration().Duration())
+	_, err := db.Exec("INSERT INTO ldap_user_api_tokens values ($1, 'edit', false, $2, '', '', $3)", testEmail, apiToken, expiredTime)
+	require.NoError(t, err)
+
+	// Token found, but expired. Expect error
+	_, err = ldapAuthProvider.FindUserByAPIToken(apiToken)
+	require.Equal(t, sessions.ErrUserSessionExpired, err)
+}
+
+// TestORM_ListUsers_Full asserts ListUsers aggregates the four role-group queries plus
+// the active-status lookup: inactive users and duplicate members are excluded, and the
+// local test-fixture admin is appended at the end.
+func TestORM_ListUsers_Full(t *testing.T) {
+	t.Parallel()
+
+	mockLdapClient := mocks.NewLDAPClient(t)
+	mockLdapConnProvider := mocks.NewLDAPConn(t)
+	mockLdapClient.On("CreateEphemeralConnection").Return(mockLdapConnProvider, nil)
+	mockLdapConnProvider.On("Close").Return(nil)
+
+	// Initialize LDAP Authentication Provider with mock client
+	_, ldapAuthProvider := setupAuthenticationProvider(t, mockLdapClient)
+
+	user1 := cltest.MustRandomUser(t)
+	user2 := cltest.MustRandomUser(t)
+	user3 := cltest.MustRandomUser(t)
+	user4 := cltest.MustRandomUser(t)
+	user5 := cltest.MustRandomUser(t)
+	user6 := cltest.MustRandomUser(t)
+
+	// LDAP Group queries per role - admin
+	mockLdapConnProvider.On("Search", mock.AnythingOfType("*ldap.SearchRequest")).Return(&ldap.SearchResult{
+		Entries: []*ldap.Entry{
+			{
+				DN: fmt.Sprintf("cn=%s,ou=Groups,dc=example,dc=com", ldapauth.NodeAdminsGroupCN),
+				Attributes: []*ldap.EntryAttribute{
+					{
+						Name: ldapauth.UniqueMemberAttribute,
+						Values: []string{
+							fmt.Sprintf("uid=%s,ou=users,dc=example,dc=com", user1.Email),
+							fmt.Sprintf("uid=%s,ou=users,dc=example,dc=com", user2.Email),
+						},
+					},
+				},
+			},
+		},
+	}, nil).Once()
+	// LDAP Group queries per role - edit
+	mockLdapConnProvider.On("Search", mock.AnythingOfType("*ldap.SearchRequest")).Return(&ldap.SearchResult{
+		Entries: []*ldap.Entry{
+			{
+				DN: fmt.Sprintf("cn=%s,ou=Groups,dc=example,dc=com", ldapauth.NodeEditorsGroupCN),
+				Attributes: []*ldap.EntryAttribute{
+					{
+						Name: ldapauth.UniqueMemberAttribute,
+						Values: []string{
+							fmt.Sprintf("uid=%s,ou=users,dc=example,dc=com", user3.Email),
+						},
+					},
+				},
+			},
+		},
+	}, nil).Once()
+	// LDAP Group queries per role - run
+	mockLdapConnProvider.On("Search", mock.AnythingOfType("*ldap.SearchRequest")).Return(&ldap.SearchResult{
+		Entries: []*ldap.Entry{
+			{
+				DN: "cn=NodeRunners,ou=Groups,dc=example,dc=com",
+				Attributes: []*ldap.EntryAttribute{
+					{
+						Name: ldapauth.UniqueMemberAttribute,
+						Values: []string{
+							fmt.Sprintf("uid=%s,ou=users,dc=example,dc=com", user4.Email),
+							fmt.Sprintf("uid=%s,ou=users,dc=example,dc=com", user4.Email), // Test deduped
+							fmt.Sprintf("uid=%s,ou=users,dc=example,dc=com", user5.Email),
+						},
+					},
+				},
+			},
+		},
+	}, nil).Once()
+	// LDAP Group queries per role - view
+	mockLdapConnProvider.On("Search", mock.AnythingOfType("*ldap.SearchRequest")).Return(&ldap.SearchResult{
+		Entries: []*ldap.Entry{
+			{
+				DN: "cn=NodeReadOnly,ou=Groups,dc=example,dc=com",
+				Attributes: []*ldap.EntryAttribute{
+					{
+						Name: ldapauth.UniqueMemberAttribute,
+						Values: []string{
+							fmt.Sprintf("uid=%s,ou=users,dc=example,dc=com", user6.Email),
+						},
+					},
+				},
+			},
+		},
+	}, nil).Once()
+	// Lastly followed by IsActive lookup
+	type userActivePair struct {
+		email  string
+		active string
+	}
+	emailsActive := []userActivePair{
+		{user1.Email, "ACTIVE"},
+		{user2.Email, "INACTIVE"},
+		{user3.Email, "ACTIVE"},
+		{user4.Email, "ACTIVE"},
+		{user5.Email, "INACTIVE"},
+		{user6.Email, "ACTIVE"},
+	}
+	listUpstreamUsersQuery := ldap.SearchResult{}
+	for _, upstreamUser := range emailsActive {
+		listUpstreamUsersQuery.Entries = append(listUpstreamUsersQuery.Entries, &ldap.Entry{
+			DN: "cn=User,ou=Users,dc=example,dc=com",
+			Attributes: []*ldap.EntryAttribute{
+				{
+					Name:   "organizationalStatus",
+					Values: []string{upstreamUser.active},
+				},
+				{
+					Name:   "uid",
+					Values: []string{upstreamUser.email},
+				},
+			},
+		},
+		)
+	}
+	mockLdapConnProvider.On("Search", mock.AnythingOfType("*ldap.SearchRequest")).Return(&listUpstreamUsersQuery, nil).Once()
+
+	// Asserts 'uid=' parsing log in ldapGroupMembersListToUser
+	// Expected full list of users above, including local admin user, excluding 'inactive' and duplicate users
+	users, err := ldapAuthProvider.ListUsers()
+	require.NoError(t, err)
+	require.Equal(t, users[0].Email, user1.Email)
+	require.Equal(t, users[0].Role, sessions.UserRoleAdmin)
+	require.Equal(t, users[1].Email, user3.Email) // User 2 inactive
+	require.Equal(t, users[1].Role, sessions.UserRoleEdit)
+	require.Equal(t, users[2].Email, user4.Email)
+	require.Equal(t, users[2].Role, sessions.UserRoleRun)
+	require.Equal(t, users[3].Email, user6.Email) // User 5 inactive
+	require.Equal(t, users[3].Role, sessions.UserRoleView)
+	require.Equal(t, users[4].Email, cltest.APIEmailAdmin) // Test fixture user is local admin included as well
+	require.Equal(t, users[4].Role, sessions.UserRoleAdmin)
+}
+
+// TestORM_CreateSession_UpstreamBind asserts CreateSession succeeds when the upstream
+// Bind works and the user is active with an editor role group.
+func TestORM_CreateSession_UpstreamBind(t *testing.T) {
+	t.Parallel()
+
+	mockLdapClient := mocks.NewLDAPClient(t)
+	mockLdapConnProvider := mocks.NewLDAPConn(t)
+	mockLdapClient.On("CreateEphemeralConnection").Return(mockLdapConnProvider, nil)
+	mockLdapConnProvider.On("Close").Return(nil)
+
+	// Initialize LDAP Authentication Provider with mock client
+	_, ldapAuthProvider := setupAuthenticationProvider(t, mockLdapClient)
+
+	// Upstream user present
+	user1 := cltest.MustRandomUser(t)
+	expectedResults := ldap.SearchResult{ // Users query
+		Entries: []*ldap.Entry{
+			{
+				DN: "cn=User One,ou=Users,dc=example,dc=com",
+				Attributes: []*ldap.EntryAttribute{
+					{
+						Name:   "organizationalStatus",
+						Values: []string{"ACTIVE"},
+					},
+					{
+						Name:   "uid",
+						Values: []string{user1.Email},
+					},
+				},
+			},
+		},
+	}
+	expectedGroupResults := ldap.SearchResult{ // Groups query
+		Entries: []*ldap.Entry{
+			{
+				DN: "cn=NodeEditors,ou=Users,dc=example,dc=com",
+				Attributes: []*ldap.EntryAttribute{
+					{
+						Name:   "cn",
+						Values: []string{"NodeEditors"},
+					},
+				},
+			},
+		},
+	}
+
+	// On search performed for validateUsersActive
+	mockLdapConnProvider.On("Search", mock.AnythingOfType("*ldap.SearchRequest")).Return(&expectedResults, nil).Once()
+
+	// Second call on user groups search
+	mockLdapConnProvider.On("Search", mock.AnythingOfType("*ldap.SearchRequest")).Return(&expectedGroupResults, nil).Once()
+
+	// User active, and has editor group. Expect success
+	mockLdapConnProvider.On("Bind", mock.Anything, cltest.Password).Return(nil)
+	sessionRequest := sessions.SessionRequest{
+		Email:    user1.Email,
+		Password: cltest.Password,
+	}
+
+	_, err := ldapAuthProvider.CreateSession(sessionRequest)
+	require.NoError(t, err)
+}
+
+func TestORM_CreateSession_LocalAdminFallbackLogin(t *testing.T) {
+ t.Parallel()
+
+ mockLdapClient := mocks.NewLDAPClient(t)
+ mockLdapConnProvider := mocks.NewLDAPConn(t)
+ mockLdapClient.On("CreateEphemeralConnection").Return(mockLdapConnProvider, nil)
+ mockLdapConnProvider.On("Close").Return(nil)
+
+ // Initialize LDAP Authentication Provider with mock client
+ _, ldapAuthProvider := setupAuthenticationProvider(t, mockLdapClient)
+
+ // Fail the bind to trigger 'localLoginFallback' - local admin users should still be able to login
+ // regardless of whether the authentication provider is remote or not
+ mockLdapConnProvider.On("Bind", mock.Anything, cltest.Password).Return(errors.New("unable to login via LDAP server")).Once()
+
+ // Local admin test fixture user should be able to login via the fallback. Expect success
+ sessionRequest := sessions.SessionRequest{
+ Email: cltest.APIEmailAdmin,
+ Password: cltest.Password,
+ }
+
+ _, err := ldapAuthProvider.CreateSession(sessionRequest)
+ require.NoError(t, err)
+
+ // Finally, assert login failing altogether
+ // Incorrect password for the local admin user. Expect an error
+ mockLdapConnProvider.On("Bind", mock.Anything, "incorrect-password").Return(errors.New("unable to login via LDAP server")).Once()
+ sessionRequest = sessions.SessionRequest{
+ Email: cltest.APIEmailAdmin,
+ Password: "incorrect-password",
+ }
+
+ _, err = ldapAuthProvider.CreateSession(sessionRequest)
+ require.ErrorContains(t, err, "invalid password")
+}
+
+func TestORM_SetPassword_LocalAdminFallbackLogin(t *testing.T) {
+ t.Parallel()
+
+ mockLdapClient := mocks.NewLDAPClient(t)
+ mockLdapConnProvider := mocks.NewLDAPConn(t)
+ mockLdapClient.On("CreateEphemeralConnection").Return(mockLdapConnProvider, nil)
+ mockLdapConnProvider.On("Close").Return(nil)
+
+ // Initialize LDAP Authentication Provider with mock client
+ _, ldapAuthProvider := setupAuthenticationProvider(t, mockLdapClient)
+
+ // Fail the bind to trigger 'localLoginFallback' - local admin users should still be able to login
+ // regardless of whether the authentication provider is remote or not
+ mockLdapConnProvider.On("Bind", mock.Anything, cltest.Password).Return(errors.New("unable to login via LDAP server")).Once()
+
+ // Local admin test fixture user should be able to login via the fallback. Expect success
+ sessionRequest := sessions.SessionRequest{
+ Email: cltest.APIEmailAdmin,
+ Password: cltest.Password,
+ }
+
+ _, err := ldapAuthProvider.CreateSession(sessionRequest)
+ require.NoError(t, err)
+
+ // Finally, assert login failing altogether
+ // Incorrect password for the local admin user. Expect an error
+ mockLdapConnProvider.On("Bind", mock.Anything, "incorrect-password").Return(errors.New("unable to login via LDAP server")).Once()
+ sessionRequest = sessions.SessionRequest{
+ Email: cltest.APIEmailAdmin,
+ Password: "incorrect-password",
+ }
+
+ _, err = ldapAuthProvider.CreateSession(sessionRequest)
+ require.ErrorContains(t, err, "invalid password")
+}
+
+func TestORM_MapSearchGroups(t *testing.T) {
+ t.Parallel()
+
+ cfg := ldapauth.TestConfig{}
+
+ tests := []struct {
+ name string
+ groupsQuerySearchResult []*ldap.Entry
+ wantMappedRole sessions.UserRole
+ wantErr error
+ }{
+ {
+ "user in admin group only",
+ []*ldap.Entry{
+ {
+ DN: fmt.Sprintf("cn=%s,ou=Groups,dc=example,dc=com", ldapauth.NodeAdminsGroupCN),
+ Attributes: []*ldap.EntryAttribute{
+ {
+ Name: "cn",
+ Values: []string{ldapauth.NodeAdminsGroupCN},
+ },
+ },
+ },
+ },
+ sessions.UserRoleAdmin,
+ nil,
+ },
+ {
+ "user in edit group",
+ []*ldap.Entry{
+ {
+ DN: fmt.Sprintf("cn=%s,ou=Groups,dc=example,dc=com", ldapauth.NodeEditorsGroupCN),
+ Attributes: []*ldap.EntryAttribute{
+ {
+ Name: "cn",
+ Values: []string{ldapauth.NodeEditorsGroupCN},
+ },
+ },
+ },
+ },
+ sessions.UserRoleEdit,
+ nil,
+ },
+ {
+ "user in run group",
+ []*ldap.Entry{
+ {
+ DN: fmt.Sprintf("cn=%s,ou=Groups,dc=example,dc=com", ldapauth.NodeRunnersGroupCN),
+ Attributes: []*ldap.EntryAttribute{
+ {
+ Name: "cn",
+ Values: []string{ldapauth.NodeRunnersGroupCN},
+ },
+ },
+ },
+ },
+ sessions.UserRoleRun,
+ nil,
+ },
+ {
+ "user in view role",
+ []*ldap.Entry{
+ {
+ DN: fmt.Sprintf("cn=%s,ou=Groups,dc=example,dc=com", ldapauth.NodeReadOnlyGroupCN),
+ Attributes: []*ldap.EntryAttribute{
+ {
+ Name: "cn",
+ Values: []string{ldapauth.NodeReadOnlyGroupCN},
+ },
+ },
+ },
+ },
+ sessions.UserRoleView,
+ nil,
+ },
+ {
+ "user in none",
+ []*ldap.Entry{},
+ sessions.UserRole(""), // ignored, error case
+ ldapauth.ErrUserNoLDAPGroups,
+ },
+ {
+ "user in run and view",
+ []*ldap.Entry{
+ {
+ DN: fmt.Sprintf("cn=%s,ou=Groups,dc=example,dc=com", ldapauth.NodeRunnersGroupCN),
+ Attributes: []*ldap.EntryAttribute{
+ {
+ Name: "cn",
+ Values: []string{ldapauth.NodeRunnersGroupCN},
+ },
+ },
+ },
+ {
+ DN: fmt.Sprintf("cn=%s,ou=Groups,dc=example,dc=com", ldapauth.NodeReadOnlyGroupCN),
+ Attributes: []*ldap.EntryAttribute{
+ {
+ Name: "cn",
+ Values: []string{ldapauth.NodeReadOnlyGroupCN},
+ },
+ },
+ },
+ },
+ sessions.UserRoleRun, // Take highest role
+ nil,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ role, err := ldapauth.GroupSearchResultsToUserRole(
+ test.groupsQuerySearchResult,
+ cfg.AdminUserGroupCN(),
+ cfg.EditUserGroupCN(),
+ cfg.RunUserGroupCN(),
+ cfg.ReadUserGroupCN(),
+ )
+ if test.wantErr != nil {
+ assert.Equal(t, test.wantErr, err)
+ } else {
+ assert.Equal(t, test.wantMappedRole, role)
+ }
+ })
+ }
+}
diff --git a/core/sessions/ldapauth/mocks/ldap_client.go b/core/sessions/ldapauth/mocks/ldap_client.go
new file mode 100644
index 00000000000..7a44778dcaa
--- /dev/null
+++ b/core/sessions/ldapauth/mocks/ldap_client.go
@@ -0,0 +1,53 @@
+// Code generated by mockery v2.35.4. DO NOT EDIT.
+
+package mocks
+
+import (
+ ldapauth "github.com/smartcontractkit/chainlink/v2/core/sessions/ldapauth"
+ mock "github.com/stretchr/testify/mock"
+)
+
+// LDAPClient is an autogenerated mock type for the LDAPClient type
+type LDAPClient struct {
+ mock.Mock
+}
+
+// CreateEphemeralConnection provides a mock function with given fields:
+func (_m *LDAPClient) CreateEphemeralConnection() (ldapauth.LDAPConn, error) {
+ ret := _m.Called()
+
+ var r0 ldapauth.LDAPConn
+ var r1 error
+ if rf, ok := ret.Get(0).(func() (ldapauth.LDAPConn, error)); ok {
+ return rf()
+ }
+ if rf, ok := ret.Get(0).(func() ldapauth.LDAPConn); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(ldapauth.LDAPConn)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func() error); ok {
+ r1 = rf()
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// NewLDAPClient creates a new instance of LDAPClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewLDAPClient(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *LDAPClient {
+ mock := &LDAPClient{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/core/sessions/ldapauth/mocks/ldap_conn.go b/core/sessions/ldapauth/mocks/ldap_conn.go
new file mode 100644
index 00000000000..c05fb6c4fa6
--- /dev/null
+++ b/core/sessions/ldapauth/mocks/ldap_conn.go
@@ -0,0 +1,82 @@
+// Code generated by mockery v2.35.4. DO NOT EDIT.
+
+package mocks
+
+import (
+ ldap "github.com/go-ldap/ldap/v3"
+
+ mock "github.com/stretchr/testify/mock"
+)
+
+// LDAPConn is an autogenerated mock type for the LDAPConn type
+type LDAPConn struct {
+ mock.Mock
+}
+
+// Bind provides a mock function with given fields: username, password
+func (_m *LDAPConn) Bind(username string, password string) error {
+ ret := _m.Called(username, password)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(string, string) error); ok {
+ r0 = rf(username, password)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Close provides a mock function with given fields:
+func (_m *LDAPConn) Close() error {
+ ret := _m.Called()
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func() error); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Search provides a mock function with given fields: searchRequest
+func (_m *LDAPConn) Search(searchRequest *ldap.SearchRequest) (*ldap.SearchResult, error) {
+ ret := _m.Called(searchRequest)
+
+ var r0 *ldap.SearchResult
+ var r1 error
+ if rf, ok := ret.Get(0).(func(*ldap.SearchRequest) (*ldap.SearchResult, error)); ok {
+ return rf(searchRequest)
+ }
+ if rf, ok := ret.Get(0).(func(*ldap.SearchRequest) *ldap.SearchResult); ok {
+ r0 = rf(searchRequest)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*ldap.SearchResult)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(*ldap.SearchRequest) error); ok {
+ r1 = rf(searchRequest)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// NewLDAPConn creates a new instance of LDAPConn. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewLDAPConn(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *LDAPConn {
+ mock := &LDAPConn{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/core/sessions/ldapauth/sync.go b/core/sessions/ldapauth/sync.go
new file mode 100644
index 00000000000..67f101b62a4
--- /dev/null
+++ b/core/sessions/ldapauth/sync.go
@@ -0,0 +1,343 @@
+package ldapauth
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/go-ldap/ldap/v3"
+ "github.com/jmoiron/sqlx"
+ "github.com/lib/pq"
+
+ "github.com/smartcontractkit/chainlink/v2/core/config"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/pg"
+ "github.com/smartcontractkit/chainlink/v2/core/sessions"
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
+)
+
+type LDAPServerStateSyncer struct {
+ q pg.Q
+ ldapClient LDAPClient
+ config config.LDAP
+ lggr logger.Logger
+ nextSyncTime time.Time
+}
+
+// NewLDAPServerStateSync creates a background task that syncs local session/API-token state with the upstream LDAP server and purges stale records.
+func NewLDAPServerStateSync(
+ db *sqlx.DB,
+ pgCfg pg.QConfig,
+ config config.LDAP,
+ lggr logger.Logger,
+) utils.SleeperTask {
+ namedLogger := lggr.Named("LDAPServerStateSync")
+ serverSync := LDAPServerStateSyncer{
+ q: pg.NewQ(db, namedLogger, pgCfg),
+ ldapClient: newLDAPClient(config),
+ config: config,
+ lggr: namedLogger,
+ nextSyncTime: time.Time{},
+ }
+ // If enabled, start a background task that calls the Sync/Work function on an
+ // interval without needing an auth event to trigger it
+ // Use IsInstant to check 0 value to omit functionality.
+ if !config.UpstreamSyncInterval().IsInstant() {
+ lggr.Info("LDAP Config UpstreamSyncInterval is non-zero, sync functionality will be called on a timer, respecting the UpstreamSyncRateLimit value")
+ serverSync.StartWorkOnTimer()
+ } else {
+ // Ensure upstream server state is synced on startup manually if interval check not set
+ serverSync.Work()
+ }
+
+ // Start background Sync call task reactive to auth related events
+ serverSyncSleeperTask := utils.NewSleeperTask(&serverSync)
+ return serverSyncSleeperTask
+}
+
+func (ldSync *LDAPServerStateSyncer) Name() string {
+ return "LDAPServerStateSync"
+}
+
+func (ldSync *LDAPServerStateSyncer) StartWorkOnTimer() {
+ time.AfterFunc(ldSync.config.UpstreamSyncInterval().Duration(), ldSync.StartWorkOnTimer)
+ ldSync.Work()
+}
+
+func (ldSync *LDAPServerStateSyncer) Work() {
+ // Purge expired ldap_sessions and ldap_user_api_tokens
+ recordCreationStaleThreshold := ldSync.config.SessionTimeout().Before(time.Now())
+ err := ldSync.deleteStaleSessions(recordCreationStaleThreshold)
+ if err != nil {
+ ldSync.lggr.Error("unable to expire local LDAP sessions: ", err)
+ }
+ recordCreationStaleThreshold = ldSync.config.UserAPITokenDuration().Before(time.Now())
+ err = ldSync.deleteStaleAPITokens(recordCreationStaleThreshold)
+ if err != nil {
+ ldSync.lggr.Error("unable to expire user API tokens: ", err)
+ }
+
+ // Optional rate limiting check to limit the amount of upstream LDAP server queries performed
+ if !ldSync.config.UpstreamSyncRateLimit().IsInstant() {
+ if !time.Now().After(ldSync.nextSyncTime) {
+ return
+ }
+
+ // Enough time has elapsed to sync again, store the time for when next sync is allowed and begin sync
+ ldSync.nextSyncTime = time.Now().Add(ldSync.config.UpstreamSyncRateLimit().Duration())
+ }
+
+ ldSync.lggr.Info("Begin Upstream LDAP provider state sync after checking time against config UpstreamSyncInterval and UpstreamSyncRateLimit")
+
+ // For each defined role/group, query for the list of group members to gather the full list of possible users
+ users := []sessions.User{}
+
+ conn, err := ldSync.ldapClient.CreateEphemeralConnection()
+ if err != nil {
+ ldSync.lggr.Errorf("Failed to Dial LDAP Server", err)
+ return
+ }
+ // Root level root user auth with credentials provided from config
+ bindStr := ldSync.config.BaseUserAttr() + "=" + ldSync.config.ReadOnlyUserLogin() + "," + ldSync.config.BaseDN()
+ if err = conn.Bind(bindStr, ldSync.config.ReadOnlyUserPass()); err != nil {
+ ldSync.lggr.Errorf("Unable to login as initial root LDAP user", err)
+ }
+ defer conn.Close()
+
+ // Query for list of uniqueMember IDs present in Admin group
+ adminUsers, err := ldSync.ldapGroupMembersListToUser(conn, ldSync.config.AdminUserGroupCN(), sessions.UserRoleAdmin)
+ if err != nil {
+ ldSync.lggr.Errorf("Error in ldapGroupMembersListToUser: ", err)
+ return
+ }
+ // Query for list of uniqueMember IDs present in Edit group
+ editUsers, err := ldSync.ldapGroupMembersListToUser(conn, ldSync.config.EditUserGroupCN(), sessions.UserRoleEdit)
+ if err != nil {
+ ldSync.lggr.Errorf("Error in ldapGroupMembersListToUser: ", err)
+ return
+ }
+ // Query for list of uniqueMember IDs present in Run group
+ runUsers, err := ldSync.ldapGroupMembersListToUser(conn, ldSync.config.RunUserGroupCN(), sessions.UserRoleRun)
+ if err != nil {
+ ldSync.lggr.Errorf("Error in ldapGroupMembersListToUser: ", err)
+ return
+ }
+ // Query for list of uniqueMember IDs present in Read group
+ readUsers, err := ldSync.ldapGroupMembersListToUser(conn, ldSync.config.ReadUserGroupCN(), sessions.UserRoleView)
+ if err != nil {
+ ldSync.lggr.Errorf("Error in ldapGroupMembersListToUser: ", err)
+ return
+ }
+
+ users = append(users, adminUsers...)
+ users = append(users, editUsers...)
+ users = append(users, runUsers...)
+ users = append(users, readUsers...)
+
+ // Dedupe preserving order of highest role (sorted)
+ // Preserve members as a map for future lookup
+ upstreamUserStateMap := make(map[string]sessions.User)
+ dedupedEmails := []string{}
+ for _, user := range users {
+ if _, ok := upstreamUserStateMap[user.Email]; !ok {
+ upstreamUserStateMap[user.Email] = user
+ dedupedEmails = append(dedupedEmails, user.Email)
+ }
+ }
+
+ // For each unique user in list of active sessions, check for 'Is Active' property if defined in the config. Some LDAP providers
+ // list group members that are no longer marked as active
+ usersActiveFlags, err := ldSync.validateUsersActive(dedupedEmails, conn)
+ if err != nil {
+ ldSync.lggr.Errorf("Error validating supplied user list: ", err)
+ }
+ // Remove users in the upstreamUserStateMap source of truth who are part of groups but marked as deactivated/no-active
+ for i, active := range usersActiveFlags {
+ if !active {
+ delete(upstreamUserStateMap, dedupedEmails[i])
+ }
+ }
+
+ // upstreamUserStateMap is now the most up to date source of truth
+ // Now sync database sessions and roles with new data
+ err = ldSync.q.Transaction(func(tx pg.Queryer) error {
+ // First, purge users present in the local ldap_sessions table but not in the upstream server
+ type LDAPSession struct {
+ UserEmail string
+ UserRole sessions.UserRole
+ }
+ var existingSessions []LDAPSession
+ if err = tx.Select(&existingSessions, "SELECT user_email, user_role FROM ldap_sessions WHERE localauth_user = false"); err != nil {
+ return fmt.Errorf("unable to query ldap_sessions table: %w", err)
+ }
+ var existingAPITokens []LDAPSession
+ if err = tx.Select(&existingAPITokens, "SELECT user_email, user_role FROM ldap_user_api_tokens WHERE localauth_user = false"); err != nil {
+ return fmt.Errorf("unable to query ldap_user_api_tokens table: %w", err)
+ }
+
+ // Create existing sessions and API tokens lookup map for later
+ existingSessionsMap := make(map[string]LDAPSession)
+ for _, sess := range existingSessions {
+ existingSessionsMap[sess.UserEmail] = sess
+ }
+ existingAPITokensMap := make(map[string]LDAPSession)
+ for _, sess := range existingAPITokens {
+ existingAPITokensMap[sess.UserEmail] = sess
+ }
+
+ // Populate list of session emails present in the local session table but not in the upstream state
+ emailsToPurge := []interface{}{}
+ for _, ldapSession := range existingSessions {
+ if _, ok := upstreamUserStateMap[ldapSession.UserEmail]; !ok {
+ emailsToPurge = append(emailsToPurge, ldapSession.UserEmail)
+ }
+ }
+ // Likewise for API Tokens table
+ apiTokenEmailsToPurge := []interface{}{}
+ for _, ldapSession := range existingAPITokens {
+ if _, ok := upstreamUserStateMap[ldapSession.UserEmail]; !ok {
+ apiTokenEmailsToPurge = append(apiTokenEmailsToPurge, ldapSession.UserEmail)
+ }
+ }
+
+ // Remove any active sessions this user may have
+ if len(emailsToPurge) > 0 {
+ _, err = ldSync.q.Exec("DELETE FROM ldap_sessions WHERE user_email = ANY($1)", pq.Array(emailsToPurge))
+ if err != nil {
+ return err
+ }
+ }
+
+ // Remove any active API tokens this user may have
+ if len(apiTokenEmailsToPurge) > 0 {
+ _, err = ldSync.q.Exec("DELETE FROM ldap_user_api_tokens WHERE user_email = ANY($1)", pq.Array(apiTokenEmailsToPurge))
+ if err != nil {
+ return err
+ }
+ }
+
+ // For each user session row, update role to match state of user map from upstream source
+ queryWhenClause := ""
+ emailValues := []interface{}{}
+ // Prepare CASE WHEN query statement with parameterized argument $n placeholders and matching role based on index
+ for email, user := range upstreamUserStateMap {
+ // Only build on SET CASE statement per local session and API token role, not for each upstream user value
+ _, sessionOk := existingSessionsMap[email]
+ _, tokenOk := existingAPITokensMap[email]
+ if !sessionOk && !tokenOk {
+ continue
+ }
+ emailValues = append(emailValues, email)
+ queryWhenClause += fmt.Sprintf("WHEN user_email = $%d THEN '%s' ", len(emailValues), user.Role)
+ }
+
+ // If there are remaining user entries to update
+ if len(emailValues) != 0 {
+ // Set new role state for all rows in single Exec
+ query := fmt.Sprintf("UPDATE ldap_sessions SET user_role = CASE %s ELSE user_role END", queryWhenClause)
+ _, err = ldSync.q.Exec(query, emailValues...)
+ if err != nil {
+ return err
+ }
+
+ // Update role of API tokens as well
+ query = fmt.Sprintf("UPDATE ldap_user_api_tokens SET user_role = CASE %s ELSE user_role END", queryWhenClause)
+ _, err = ldSync.q.Exec(query, emailValues...)
+ if err != nil {
+ return err
+ }
+ }
+
+ ldSync.lggr.Info("local ldap_sessions and ldap_user_api_tokens table successfully synced with upstream LDAP state")
+ return nil
+ })
+ if err != nil {
+ ldSync.lggr.Errorf("Error syncing local database state: ", err)
+ }
+ ldSync.lggr.Info("Upstream LDAP sync complete")
+}
+
+// deleteStaleSessions deletes all ldap_sessions before the passed time.
+func (ldSync *LDAPServerStateSyncer) deleteStaleSessions(before time.Time) error {
+ _, err := ldSync.q.Exec("DELETE FROM ldap_sessions WHERE created_at < $1", before)
+ return err
+}
+
+// deleteStaleAPITokens deletes all ldap_user_api_tokens before the passed time.
+func (ldSync *LDAPServerStateSyncer) deleteStaleAPITokens(before time.Time) error {
+ _, err := ldSync.q.Exec("DELETE FROM ldap_user_api_tokens WHERE created_at < $1", before)
+ return err
+}
+
+// ldapGroupMembersListToUser queries the LDAP server given a conn for a list of uniqueMember who are part of the parameterized group
+func (ldSync *LDAPServerStateSyncer) ldapGroupMembersListToUser(conn LDAPConn, groupNameCN string, roleToAssign sessions.UserRole) ([]sessions.User, error) {
+ users, err := ldapGroupMembersListToUser(
+ conn, groupNameCN, roleToAssign, ldSync.config.GroupsDN(),
+ ldSync.config.BaseDN(), ldSync.config.QueryTimeout(),
+ ldSync.lggr,
+ )
+ if err != nil {
+ ldSync.lggr.Errorf("Error listing members of group (%s): %v", groupNameCN, err)
+ return users, errors.New("error searching group members in LDAP directory")
+ }
+ return users, nil
+}
+
+// validateUsersActive performs an additional LDAP server query for the supplied emails, checking the
+// returned user data for an 'active' property defined optionally in the config.
+// Returns same length bool 'valid' array, order preserved
+func (ldSync *LDAPServerStateSyncer) validateUsersActive(emails []string, conn LDAPConn) ([]bool, error) {
+ validUsers := make([]bool, len(emails))
+ // If active attribute to check is not defined in config, skip
+ if ldSync.config.ActiveAttribute() == "" {
+ // pre-fill with valid entries
+ for i := range emails {
+ validUsers[i] = true
+ }
+ return validUsers, nil
+ }
+
+ // Build the full email list query to pull all 'isActive' information for each user specified in one query
+ filterQuery := "(|"
+ for _, email := range emails {
+ escapedEmail := ldap.EscapeFilter(email)
+ filterQuery = fmt.Sprintf("%s(%s=%s)", filterQuery, ldSync.config.BaseUserAttr(), escapedEmail)
+ }
+ filterQuery = fmt.Sprintf("(&%s))", filterQuery)
+ searchBaseDN := fmt.Sprintf("%s,%s", ldSync.config.UsersDN(), ldSync.config.BaseDN())
+ searchRequest := ldap.NewSearchRequest(
+ searchBaseDN,
+ ldap.ScopeWholeSubtree, ldap.NeverDerefAliases,
+ 0, int(ldSync.config.QueryTimeout().Seconds()), false,
+ filterQuery,
+ []string{ldSync.config.BaseUserAttr(), ldSync.config.ActiveAttribute()},
+ nil,
+ )
+ // Query LDAP server for the ActiveAttribute property of each specified user
+ results, err := conn.Search(searchRequest)
+ if err != nil {
+ ldSync.lggr.Errorf("Error searching user in LDAP query: %v", err)
+ return validUsers, errors.New("error searching users in LDAP directory")
+ }
+ // Ensure user response entries
+ if len(results.Entries) == 0 {
+ return validUsers, errors.New("no users matching email query")
+ }
+
+ // Pull expected ActiveAttribute value from list of string possible values
+ // keyed on email for final step to return flag bool list where order is preserved
+ emailToActiveMap := make(map[string]bool)
+ for _, result := range results.Entries {
+ isActiveAttribute := result.GetAttributeValue(ldSync.config.ActiveAttribute())
+ uidAttribute := result.GetAttributeValue(ldSync.config.BaseUserAttr())
+ emailToActiveMap[uidAttribute] = isActiveAttribute == ldSync.config.ActiveAttributeAllowedValue()
+ }
+ for i, email := range emails {
+ active, ok := emailToActiveMap[email]
+ if ok && active {
+ validUsers[i] = true
+ }
+ }
+
+ return validUsers, nil
+}
diff --git a/core/sessions/orm.go b/core/sessions/localauth/orm.go
similarity index 79%
rename from core/sessions/orm.go
rename to core/sessions/localauth/orm.go
index eaac211f242..090dc468a62 100644
--- a/core/sessions/orm.go
+++ b/core/sessions/localauth/orm.go
@@ -1,4 +1,4 @@
-package sessions
+package localauth
import (
"crypto/subtle"
@@ -6,42 +6,19 @@ import (
"strings"
"time"
+ "github.com/jmoiron/sqlx"
"github.com/pkg/errors"
- "github.com/smartcontractkit/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/auth"
"github.com/smartcontractkit/chainlink/v2/core/bridges"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/logger/audit"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
+ "github.com/smartcontractkit/chainlink/v2/core/sessions"
"github.com/smartcontractkit/chainlink/v2/core/utils"
"github.com/smartcontractkit/chainlink/v2/core/utils/mathutil"
)
-//go:generate mockery --quiet --name ORM --output ./mocks/ --case=underscore
-
-type ORM interface {
- FindUser(email string) (User, error)
- FindUserByAPIToken(apiToken string) (User, error)
- ListUsers() ([]User, error)
- AuthorizedUserWithSession(sessionID string) (User, error)
- DeleteUser(email string) error
- DeleteUserSession(sessionID string) error
- CreateSession(sr SessionRequest) (string, error)
- ClearNonCurrentSessions(sessionID string) error
- CreateUser(user *User) error
- UpdateRole(email, newRole string) (User, error)
- SetAuthToken(user *User, token *auth.Token) error
- CreateAndSetAuthToken(user *User) (*auth.Token, error)
- DeleteAuthToken(user *User) error
- SetPassword(user *User, newPassword string) error
- Sessions(offset, limit int) ([]Session, error)
- GetUserWebAuthn(email string) ([]WebAuthn, error)
- SaveWebAuthn(token *WebAuthn) error
-
- FindExternalInitiator(eia *auth.Token) (initiator *bridges.ExternalInitiator, err error)
-}
-
type orm struct {
q pg.Q
sessionDuration time.Duration
@@ -49,38 +26,40 @@ type orm struct {
auditLogger audit.AuditLogger
}
-var _ ORM = (*orm)(nil)
+// orm implements sessions.AuthenticationProvider and sessions.BasicAdminUsersORM interfaces
+var _ sessions.AuthenticationProvider = (*orm)(nil)
+var _ sessions.BasicAdminUsersORM = (*orm)(nil)
-func NewORM(db *sqlx.DB, sd time.Duration, lggr logger.Logger, cfg pg.QConfig, auditLogger audit.AuditLogger) ORM {
- lggr = lggr.Named("SessionsORM")
+func NewORM(db *sqlx.DB, sd time.Duration, lggr logger.Logger, cfg pg.QConfig, auditLogger audit.AuditLogger) sessions.AuthenticationProvider {
+ namedLogger := lggr.Named("LocalAuthAuthenticationProviderORM")
return &orm{
- q: pg.NewQ(db, lggr, cfg),
+ q: pg.NewQ(db, namedLogger, cfg),
sessionDuration: sd,
- lggr: lggr,
+ lggr: lggr.Named("LocalAuthAuthenticationProviderORM"),
auditLogger: auditLogger,
}
}
// FindUser will attempt to return an API user by email.
-func (o *orm) FindUser(email string) (User, error) {
+func (o *orm) FindUser(email string) (sessions.User, error) {
return o.findUser(email)
}
// FindUserByAPIToken will attempt to return an API user via the user's table token_key column.
-func (o *orm) FindUserByAPIToken(apiToken string) (user User, err error) {
+func (o *orm) FindUserByAPIToken(apiToken string) (user sessions.User, err error) {
sql := "SELECT * FROM users WHERE token_key = $1"
err = o.q.Get(&user, sql, apiToken)
return
}
-func (o *orm) findUser(email string) (user User, err error) {
+func (o *orm) findUser(email string) (user sessions.User, err error) {
sql := "SELECT * FROM users WHERE lower(email) = lower($1)"
err = o.q.Get(&user, sql, email)
return
}
// ListUsers will load and return all user rows from the db.
-func (o *orm) ListUsers() (users []User, err error) {
+func (o *orm) ListUsers() (users []sessions.User, err error) {
sql := "SELECT * FROM users ORDER BY email ASC;"
err = o.q.Select(&users, sql)
return
@@ -100,31 +79,27 @@ func (o *orm) updateSessionLastUsed(sessionID string) error {
return o.q.ExecQ("UPDATE sessions SET last_used = now() WHERE id = $1", sessionID)
}
-// ErrUserSessionExpired defines the error triggered when the user session has expired
-var (
- ErrUserSessionExpired = errors.New("user session missing or expired, please login again")
- ErrEmptySessionID = errors.New("session ID cannot be empty")
-)
-
// AuthorizedUserWithSession will return the API user associated with the Session ID if it
// exists and hasn't expired, and update session's LastUsed field.
-func (o *orm) AuthorizedUserWithSession(sessionID string) (user User, err error) {
+// AuthorizedUserWithSession will return the API user associated with the Session ID if it
+// exists and hasn't expired, and update session's LastUsed field.
+func (o *orm) AuthorizedUserWithSession(sessionID string) (user sessions.User, err error) {
if len(sessionID) == 0 {
- return User{}, ErrEmptySessionID
+ return sessions.User{}, sessions.ErrEmptySessionID
}
email, err := o.findValidSession(sessionID)
if err != nil {
- return User{}, ErrUserSessionExpired
+ return sessions.User{}, sessions.ErrUserSessionExpired
}
user, err = o.findUser(email)
if err != nil {
- return User{}, ErrUserSessionExpired
+ return sessions.User{}, sessions.ErrUserSessionExpired
}
if err := o.updateSessionLastUsed(sessionID); err != nil {
- return User{}, err
+ return sessions.User{}, err
}
return user, nil
@@ -151,8 +126,8 @@ func (o *orm) DeleteUserSession(sessionID string) error {
// tokens for the user. This list must be used when logging in (for obvious reasons) but
// must also be used for registration to prevent the user from enrolling the same hardware
// token multiple times.
-func (o *orm) GetUserWebAuthn(email string) ([]WebAuthn, error) {
- var uwas []WebAuthn
+func (o *orm) GetUserWebAuthn(email string) ([]sessions.WebAuthn, error) {
+ var uwas []sessions.WebAuthn
err := o.q.Select(&uwas, "SELECT email, public_key_data FROM web_authns WHERE LOWER(email) = $1", strings.ToLower(email))
if err != nil {
return uwas, err
@@ -165,7 +140,7 @@ func (o *orm) GetUserWebAuthn(email string) ([]WebAuthn, error) {
// CreateSession will check the password in the SessionRequest against
// the hashed API User password in the db. Also will check WebAuthn if it's
// enabled for that user.
-func (o *orm) CreateSession(sr SessionRequest) (string, error) {
+func (o *orm) CreateSession(sr sessions.SessionRequest) (string, error) {
user, err := o.FindUser(sr.Email)
if err != nil {
return "", err
@@ -196,7 +171,7 @@ func (o *orm) CreateSession(sr SessionRequest) (string, error) {
// No webauthn tokens registered for the current user, so normal authentication is now complete
if len(uwas) == 0 {
lggr.Infof("No MFA for user. Creating Session")
- session := NewSession()
+ session := sessions.NewSession()
_, err = o.q.Exec("INSERT INTO sessions (id, email, last_used, created_at) VALUES ($1, $2, now(), now())", session.ID, user.Email)
o.auditLogger.Audit(audit.AuthLoginSuccessNo2FA, map[string]interface{}{"email": sr.Email})
return session.ID, err
@@ -207,7 +182,7 @@ func (o *orm) CreateSession(sr SessionRequest) (string, error) {
// data in the next round trip request (tap key to include webauthn data on the login page)
if sr.WebAuthnData == "" {
lggr.Warnf("Attempted login to MFA user. Generating challenge for user.")
- options, webauthnError := BeginWebAuthnLogin(user, uwas, sr)
+ options, webauthnError := sessions.BeginWebAuthnLogin(user, uwas, sr)
if webauthnError != nil {
lggr.Errorf("Could not begin WebAuthn verification: %v", webauthnError)
return "", errors.New("MFA Error")
@@ -225,7 +200,7 @@ func (o *orm) CreateSession(sr SessionRequest) (string, error) {
// The user is at the final stage of logging in with MFA. We have an
// attestation back from the user, we now need to verify that it is
// correct.
- err = FinishWebAuthnLogin(user, uwas, sr)
+ err = sessions.FinishWebAuthnLogin(user, uwas, sr)
if err != nil {
// The user does have WebAuthn enabled but failed the check
@@ -236,7 +211,7 @@ func (o *orm) CreateSession(sr SessionRequest) (string, error) {
lggr.Infof("User passed MFA authentication and login will proceed")
// This is a success so we can create the sessions
- session := NewSession()
+ session := sessions.NewSession()
_, err = o.q.Exec("INSERT INTO sessions (id, email, last_used, created_at) VALUES ($1, $2, now(), now())", session.ID, user.Email)
if err != nil {
return "", err
@@ -271,14 +246,14 @@ func (o *orm) ClearNonCurrentSessions(sessionID string) error {
}
// CreateUser creates a new API user
-func (o *orm) CreateUser(user *User) error {
+func (o *orm) CreateUser(user *sessions.User) error {
sql := "INSERT INTO users (email, hashed_password, role, created_at, updated_at) VALUES ($1, $2, $3, now(), now()) RETURNING *"
return o.q.Get(user, sql, strings.ToLower(user.Email), user.HashedPassword, user.Role)
}
// UpdateRole overwrites role field of the user specified by email.
-func (o *orm) UpdateRole(email, newRole string) (User, error) {
- var userToEdit User
+func (o *orm) UpdateRole(email, newRole string) (sessions.User, error) {
+ var userToEdit sessions.User
if newRole == "" {
return userToEdit, errors.New("user role must be specified")
@@ -291,7 +266,7 @@ func (o *orm) UpdateRole(email, newRole string) (User, error) {
}
// Patch validated role
- userRole, err := GetUserRole(newRole)
+ userRole, err := sessions.GetUserRole(newRole)
if err != nil {
return err
}
@@ -316,7 +291,7 @@ func (o *orm) UpdateRole(email, newRole string) (User, error) {
}
// SetAuthToken updates the user to use the given Authentication Token.
-func (o *orm) SetPassword(user *User, newPassword string) error {
+func (o *orm) SetPassword(user *sessions.User, newPassword string) error {
hashedPassword, err := utils.HashPassword(newPassword)
if err != nil {
return err
@@ -325,7 +300,19 @@ func (o *orm) SetPassword(user *User, newPassword string) error {
return o.q.Get(user, sql, hashedPassword, user.Email)
}
-func (o *orm) CreateAndSetAuthToken(user *User) (*auth.Token, error) {
+// TestPassword checks the plaintext password against the user's stored hashed password, returning nil on a match
+func (o *orm) TestPassword(email string, password string) error {
+ var hashedPassword string
+ if err := o.q.Get(&hashedPassword, "SELECT hashed_password FROM users WHERE lower(email) = lower($1)", email); err != nil {
+ return errors.New("no matching user for provided email")
+ }
+ if !utils.CheckPasswordHash(password, hashedPassword) {
+ return errors.New("passwords don't match")
+ }
+ return nil
+}
+
+func (o *orm) CreateAndSetAuthToken(user *sessions.User) (*auth.Token, error) {
newToken := auth.NewToken()
err := o.SetAuthToken(user, newToken)
@@ -337,7 +324,7 @@ func (o *orm) CreateAndSetAuthToken(user *User) (*auth.Token, error) {
}
// SetAuthToken updates the user to use the given Authentication Token.
-func (o *orm) SetAuthToken(user *User, token *auth.Token) error {
+func (o *orm) SetAuthToken(user *sessions.User, token *auth.Token) error {
salt := utils.NewSecret(utils.DefaultSecretSize)
hashedSecret, err := auth.HashedSecret(token, salt)
if err != nil {
@@ -348,20 +335,20 @@ func (o *orm) SetAuthToken(user *User, token *auth.Token) error {
}
// DeleteAuthToken clears and disables the users Authentication Token.
-func (o *orm) DeleteAuthToken(user *User) error {
+func (o *orm) DeleteAuthToken(user *sessions.User) error {
sql := "UPDATE users SET token_salt = '', token_key = '', token_hashed_secret = '', updated_at = now() WHERE email = $1 RETURNING *"
return o.q.Get(user, sql, user.Email)
}
// SaveWebAuthn saves new WebAuthn token information.
-func (o *orm) SaveWebAuthn(token *WebAuthn) error {
+func (o *orm) SaveWebAuthn(token *sessions.WebAuthn) error {
sql := "INSERT INTO web_authns (email, public_key_data) VALUES ($1, $2)"
_, err := o.q.Exec(sql, token.Email, token.PublicKeyData)
return err
}
// Sessions returns all sessions limited by the parameters.
-func (o *orm) Sessions(offset, limit int) (sessions []Session, err error) {
+func (o *orm) Sessions(offset, limit int) (sessions []sessions.Session, err error) {
sql := `SELECT * FROM sessions ORDER BY created_at, id LIMIT $1 OFFSET $2;`
if err = o.q.Select(&sessions, sql, limit, offset); err != nil {
return
diff --git a/core/sessions/orm_test.go b/core/sessions/localauth/orm_test.go
similarity index 94%
rename from core/sessions/orm_test.go
rename to core/sessions/localauth/orm_test.go
index 5decb823086..c2e155de282 100644
--- a/core/sessions/orm_test.go
+++ b/core/sessions/localauth/orm_test.go
@@ -1,4 +1,4 @@
-package sessions_test
+package localauth_test
import (
"encoding/json"
@@ -7,10 +7,11 @@ import (
"github.com/go-webauthn/webauthn/protocol"
"github.com/go-webauthn/webauthn/webauthn"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
"github.com/smartcontractkit/chainlink/v2/core/auth"
"github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
@@ -18,14 +19,15 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/logger/audit"
"github.com/smartcontractkit/chainlink/v2/core/sessions"
+ "github.com/smartcontractkit/chainlink/v2/core/sessions/localauth"
"github.com/smartcontractkit/chainlink/v2/core/utils"
)
-func setupORM(t *testing.T) (*sqlx.DB, sessions.ORM) {
+func setupORM(t *testing.T) (*sqlx.DB, sessions.AuthenticationProvider) {
t.Helper()
db := pgtest.NewSqlxDB(t)
- orm := sessions.NewORM(db, time.Minute, logger.TestLogger(t), pgtest.NewQConfig(true), &audit.AuditLoggerService{})
+ orm := localauth.NewORM(db, time.Minute, logger.TestLogger(t), pgtest.NewQConfig(true), &audit.AuditLoggerService{})
return db, orm
}
@@ -66,7 +68,7 @@ func TestORM_AuthorizedUserWithSession(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
db := pgtest.NewSqlxDB(t)
- orm := sessions.NewORM(db, test.sessionDuration, logger.TestLogger(t), pgtest.NewQConfig(true), &audit.AuditLoggerService{})
+ orm := localauth.NewORM(db, test.sessionDuration, logger.TestLogger(t), pgtest.NewQConfig(true), &audit.AuditLoggerService{})
user := cltest.MustRandomUser(t)
require.NoError(t, orm.CreateUser(&user))
diff --git a/core/sessions/reaper.go b/core/sessions/localauth/reaper.go
similarity index 98%
rename from core/sessions/reaper.go
rename to core/sessions/localauth/reaper.go
index c4f0ed6796c..77d1b1abef2 100644
--- a/core/sessions/reaper.go
+++ b/core/sessions/localauth/reaper.go
@@ -1,4 +1,4 @@
-package sessions
+package localauth
import (
"database/sql"
diff --git a/core/sessions/reaper_test.go b/core/sessions/localauth/reaper_test.go
similarity index 69%
rename from core/sessions/reaper_test.go
rename to core/sessions/localauth/reaper_test.go
index a96c3822ef5..43a263d0321 100644
--- a/core/sessions/reaper_test.go
+++ b/core/sessions/localauth/reaper_test.go
@@ -1,4 +1,4 @@
-package sessions_test
+package localauth_test
import (
"testing"
@@ -9,8 +9,10 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/logger/audit"
"github.com/smartcontractkit/chainlink/v2/core/sessions"
+ "github.com/smartcontractkit/chainlink/v2/core/sessions/localauth"
"github.com/smartcontractkit/chainlink/v2/core/store/models"
+ "github.com/onsi/gomega"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -31,10 +33,9 @@ func TestSessionReaper_ReapSessions(t *testing.T) {
db := pgtest.NewSqlxDB(t)
config := sessionReaperConfig{}
lggr := logger.TestLogger(t)
- orm := sessions.NewORM(db, config.SessionTimeout().Duration(), lggr, pgtest.NewQConfig(true), audit.NoopLogger)
-
- r := sessions.NewSessionReaper(db.DB, config, lggr)
+ orm := localauth.NewORM(db, config.SessionTimeout().Duration(), lggr, pgtest.NewQConfig(true), audit.NoopLogger)
+ r := localauth.NewSessionReaper(db.DB, config, lggr)
t.Cleanup(func() {
assert.NoError(t, r.Stop())
})
@@ -53,31 +54,28 @@ func TestSessionReaper_ReapSessions(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
- user := cltest.MustRandomUser(t)
- require.NoError(t, orm.CreateUser(&user))
-
- session := sessions.NewSession()
- session.Email = user.Email
-
- _, err := db.Exec("INSERT INTO sessions (last_used, email, id, created_at) VALUES ($1, $2, $3, now())", test.lastUsed, user.Email, test.name)
- require.NoError(t, err)
-
t.Cleanup(func() {
- _, err2 := db.Exec("DELETE FROM sessions where email = $1", user.Email)
+ _, err2 := db.Exec("DELETE FROM sessions where email = $1", cltest.APIEmailAdmin)
require.NoError(t, err2)
})
+ _, err := db.Exec("INSERT INTO sessions (last_used, email, id, created_at) VALUES ($1, $2, $3, now())", test.lastUsed, cltest.APIEmailAdmin, test.name)
+ require.NoError(t, err)
+
r.WakeUp()
- <-r.(interface {
- WorkDone() <-chan struct{}
- }).WorkDone()
- sessions, err := orm.Sessions(0, 10)
- assert.NoError(t, err)
if test.wantReap {
- assert.Len(t, sessions, 0)
+ gomega.NewWithT(t).Eventually(func() []sessions.Session {
+ sessions, err := orm.Sessions(0, 10)
+ assert.NoError(t, err)
+ return sessions
+ }).Should(gomega.HaveLen(0))
} else {
- assert.Len(t, sessions, 1)
+ gomega.NewWithT(t).Consistently(func() []sessions.Session {
+ sessions, err := orm.Sessions(0, 10)
+ assert.NoError(t, err)
+ return sessions
+ }).Should(gomega.HaveLen(1))
}
})
}
diff --git a/core/sessions/mocks/orm.go b/core/sessions/mocks/authentication_provider.go
similarity index 75%
rename from core/sessions/mocks/orm.go
rename to core/sessions/mocks/authentication_provider.go
index 5699b9f8892..d6e33d11e45 100644
--- a/core/sessions/mocks/orm.go
+++ b/core/sessions/mocks/authentication_provider.go
@@ -11,13 +11,13 @@ import (
sessions "github.com/smartcontractkit/chainlink/v2/core/sessions"
)
-// ORM is an autogenerated mock type for the ORM type
-type ORM struct {
+// AuthenticationProvider is an autogenerated mock type for the AuthenticationProvider type
+type AuthenticationProvider struct {
mock.Mock
}
// AuthorizedUserWithSession provides a mock function with given fields: sessionID
-func (_m *ORM) AuthorizedUserWithSession(sessionID string) (sessions.User, error) {
+func (_m *AuthenticationProvider) AuthorizedUserWithSession(sessionID string) (sessions.User, error) {
ret := _m.Called(sessionID)
var r0 sessions.User
@@ -41,7 +41,7 @@ func (_m *ORM) AuthorizedUserWithSession(sessionID string) (sessions.User, error
}
// ClearNonCurrentSessions provides a mock function with given fields: sessionID
-func (_m *ORM) ClearNonCurrentSessions(sessionID string) error {
+func (_m *AuthenticationProvider) ClearNonCurrentSessions(sessionID string) error {
ret := _m.Called(sessionID)
var r0 error
@@ -55,7 +55,7 @@ func (_m *ORM) ClearNonCurrentSessions(sessionID string) error {
}
// CreateAndSetAuthToken provides a mock function with given fields: user
-func (_m *ORM) CreateAndSetAuthToken(user *sessions.User) (*auth.Token, error) {
+func (_m *AuthenticationProvider) CreateAndSetAuthToken(user *sessions.User) (*auth.Token, error) {
ret := _m.Called(user)
var r0 *auth.Token
@@ -81,7 +81,7 @@ func (_m *ORM) CreateAndSetAuthToken(user *sessions.User) (*auth.Token, error) {
}
// CreateSession provides a mock function with given fields: sr
-func (_m *ORM) CreateSession(sr sessions.SessionRequest) (string, error) {
+func (_m *AuthenticationProvider) CreateSession(sr sessions.SessionRequest) (string, error) {
ret := _m.Called(sr)
var r0 string
@@ -105,7 +105,7 @@ func (_m *ORM) CreateSession(sr sessions.SessionRequest) (string, error) {
}
// CreateUser provides a mock function with given fields: user
-func (_m *ORM) CreateUser(user *sessions.User) error {
+func (_m *AuthenticationProvider) CreateUser(user *sessions.User) error {
ret := _m.Called(user)
var r0 error
@@ -119,7 +119,7 @@ func (_m *ORM) CreateUser(user *sessions.User) error {
}
// DeleteAuthToken provides a mock function with given fields: user
-func (_m *ORM) DeleteAuthToken(user *sessions.User) error {
+func (_m *AuthenticationProvider) DeleteAuthToken(user *sessions.User) error {
ret := _m.Called(user)
var r0 error
@@ -133,7 +133,7 @@ func (_m *ORM) DeleteAuthToken(user *sessions.User) error {
}
// DeleteUser provides a mock function with given fields: email
-func (_m *ORM) DeleteUser(email string) error {
+func (_m *AuthenticationProvider) DeleteUser(email string) error {
ret := _m.Called(email)
var r0 error
@@ -147,7 +147,7 @@ func (_m *ORM) DeleteUser(email string) error {
}
// DeleteUserSession provides a mock function with given fields: sessionID
-func (_m *ORM) DeleteUserSession(sessionID string) error {
+func (_m *AuthenticationProvider) DeleteUserSession(sessionID string) error {
ret := _m.Called(sessionID)
var r0 error
@@ -161,7 +161,7 @@ func (_m *ORM) DeleteUserSession(sessionID string) error {
}
// FindExternalInitiator provides a mock function with given fields: eia
-func (_m *ORM) FindExternalInitiator(eia *auth.Token) (*bridges.ExternalInitiator, error) {
+func (_m *AuthenticationProvider) FindExternalInitiator(eia *auth.Token) (*bridges.ExternalInitiator, error) {
ret := _m.Called(eia)
var r0 *bridges.ExternalInitiator
@@ -187,7 +187,7 @@ func (_m *ORM) FindExternalInitiator(eia *auth.Token) (*bridges.ExternalInitiato
}
// FindUser provides a mock function with given fields: email
-func (_m *ORM) FindUser(email string) (sessions.User, error) {
+func (_m *AuthenticationProvider) FindUser(email string) (sessions.User, error) {
ret := _m.Called(email)
var r0 sessions.User
@@ -211,7 +211,7 @@ func (_m *ORM) FindUser(email string) (sessions.User, error) {
}
// FindUserByAPIToken provides a mock function with given fields: apiToken
-func (_m *ORM) FindUserByAPIToken(apiToken string) (sessions.User, error) {
+func (_m *AuthenticationProvider) FindUserByAPIToken(apiToken string) (sessions.User, error) {
ret := _m.Called(apiToken)
var r0 sessions.User
@@ -235,7 +235,7 @@ func (_m *ORM) FindUserByAPIToken(apiToken string) (sessions.User, error) {
}
// GetUserWebAuthn provides a mock function with given fields: email
-func (_m *ORM) GetUserWebAuthn(email string) ([]sessions.WebAuthn, error) {
+func (_m *AuthenticationProvider) GetUserWebAuthn(email string) ([]sessions.WebAuthn, error) {
ret := _m.Called(email)
var r0 []sessions.WebAuthn
@@ -261,7 +261,7 @@ func (_m *ORM) GetUserWebAuthn(email string) ([]sessions.WebAuthn, error) {
}
// ListUsers provides a mock function with given fields:
-func (_m *ORM) ListUsers() ([]sessions.User, error) {
+func (_m *AuthenticationProvider) ListUsers() ([]sessions.User, error) {
ret := _m.Called()
var r0 []sessions.User
@@ -287,7 +287,7 @@ func (_m *ORM) ListUsers() ([]sessions.User, error) {
}
// SaveWebAuthn provides a mock function with given fields: token
-func (_m *ORM) SaveWebAuthn(token *sessions.WebAuthn) error {
+func (_m *AuthenticationProvider) SaveWebAuthn(token *sessions.WebAuthn) error {
ret := _m.Called(token)
var r0 error
@@ -301,7 +301,7 @@ func (_m *ORM) SaveWebAuthn(token *sessions.WebAuthn) error {
}
// Sessions provides a mock function with given fields: offset, limit
-func (_m *ORM) Sessions(offset int, limit int) ([]sessions.Session, error) {
+func (_m *AuthenticationProvider) Sessions(offset int, limit int) ([]sessions.Session, error) {
ret := _m.Called(offset, limit)
var r0 []sessions.Session
@@ -327,7 +327,7 @@ func (_m *ORM) Sessions(offset int, limit int) ([]sessions.Session, error) {
}
// SetAuthToken provides a mock function with given fields: user, token
-func (_m *ORM) SetAuthToken(user *sessions.User, token *auth.Token) error {
+func (_m *AuthenticationProvider) SetAuthToken(user *sessions.User, token *auth.Token) error {
ret := _m.Called(user, token)
var r0 error
@@ -341,7 +341,7 @@ func (_m *ORM) SetAuthToken(user *sessions.User, token *auth.Token) error {
}
// SetPassword provides a mock function with given fields: user, newPassword
-func (_m *ORM) SetPassword(user *sessions.User, newPassword string) error {
+func (_m *AuthenticationProvider) SetPassword(user *sessions.User, newPassword string) error {
ret := _m.Called(user, newPassword)
var r0 error
@@ -354,8 +354,22 @@ func (_m *ORM) SetPassword(user *sessions.User, newPassword string) error {
return r0
}
+// TestPassword provides a mock function with given fields: email, password
+func (_m *AuthenticationProvider) TestPassword(email string, password string) error {
+ ret := _m.Called(email, password)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(string, string) error); ok {
+ r0 = rf(email, password)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
// UpdateRole provides a mock function with given fields: email, newRole
-func (_m *ORM) UpdateRole(email string, newRole string) (sessions.User, error) {
+func (_m *AuthenticationProvider) UpdateRole(email string, newRole string) (sessions.User, error) {
ret := _m.Called(email, newRole)
var r0 sessions.User
@@ -378,13 +392,13 @@ func (_m *ORM) UpdateRole(email string, newRole string) (sessions.User, error) {
return r0, r1
}
-// NewORM creates a new instance of ORM. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// NewAuthenticationProvider creates a new instance of AuthenticationProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
-func NewORM(t interface {
+func NewAuthenticationProvider(t interface {
mock.TestingT
Cleanup(func())
-}) *ORM {
- mock := &ORM{}
+}) *AuthenticationProvider {
+ mock := &AuthenticationProvider{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
diff --git a/core/sessions/mocks/basic_admin_users_orm.go b/core/sessions/mocks/basic_admin_users_orm.go
new file mode 100644
index 00000000000..845e2d8880e
--- /dev/null
+++ b/core/sessions/mocks/basic_admin_users_orm.go
@@ -0,0 +1,91 @@
+// Code generated by mockery v2.35.4. DO NOT EDIT.
+
+package mocks
+
+import (
+ sessions "github.com/smartcontractkit/chainlink/v2/core/sessions"
+ mock "github.com/stretchr/testify/mock"
+)
+
+// BasicAdminUsersORM is an autogenerated mock type for the BasicAdminUsersORM type
+type BasicAdminUsersORM struct {
+ mock.Mock
+}
+
+// CreateUser provides a mock function with given fields: user
+func (_m *BasicAdminUsersORM) CreateUser(user *sessions.User) error {
+ ret := _m.Called(user)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(*sessions.User) error); ok {
+ r0 = rf(user)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// FindUser provides a mock function with given fields: email
+func (_m *BasicAdminUsersORM) FindUser(email string) (sessions.User, error) {
+ ret := _m.Called(email)
+
+ var r0 sessions.User
+ var r1 error
+ if rf, ok := ret.Get(0).(func(string) (sessions.User, error)); ok {
+ return rf(email)
+ }
+ if rf, ok := ret.Get(0).(func(string) sessions.User); ok {
+ r0 = rf(email)
+ } else {
+ r0 = ret.Get(0).(sessions.User)
+ }
+
+ if rf, ok := ret.Get(1).(func(string) error); ok {
+ r1 = rf(email)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// ListUsers provides a mock function with given fields:
+func (_m *BasicAdminUsersORM) ListUsers() ([]sessions.User, error) {
+ ret := _m.Called()
+
+ var r0 []sessions.User
+ var r1 error
+ if rf, ok := ret.Get(0).(func() ([]sessions.User, error)); ok {
+ return rf()
+ }
+ if rf, ok := ret.Get(0).(func() []sessions.User); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]sessions.User)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func() error); ok {
+ r1 = rf()
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// NewBasicAdminUsersORM creates a new instance of BasicAdminUsersORM. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewBasicAdminUsersORM(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *BasicAdminUsersORM {
+ mock := &BasicAdminUsersORM{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/core/sessions/session.go b/core/sessions/session.go
new file mode 100644
index 00000000000..90964596e9a
--- /dev/null
+++ b/core/sessions/session.go
@@ -0,0 +1,74 @@
+package sessions
+
+import (
+ "crypto/subtle"
+ "time"
+
+ "github.com/pkg/errors"
+ "gopkg.in/guregu/null.v4"
+
+ "github.com/smartcontractkit/chainlink/v2/core/auth"
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
+)
+
+// SessionRequest encapsulates the fields needed to generate a new SessionID,
+// including the hashed password.
+type SessionRequest struct {
+ Email string `json:"email"`
+ Password string `json:"password"`
+ WebAuthnData string `json:"webauthndata"`
+ WebAuthnConfig WebAuthnConfiguration
+ SessionStore *WebAuthnSessionStore
+}
+
+// Session holds the unique id for the authenticated session.
+type Session struct {
+ ID string `json:"id"`
+ Email string `json:"email"`
+ LastUsed time.Time `json:"lastUsed"`
+ CreatedAt time.Time `json:"createdAt"`
+}
+
+// NewSession returns a session instance with ID set to a random ID and
+// LastUsed to now.
+func NewSession() Session {
+ return Session{
+ ID: utils.NewBytes32ID(),
+ LastUsed: time.Now(),
+ }
+}
+
+// ChangeAuthTokenRequest is sent when updating a User's authentication token.
+type ChangeAuthTokenRequest struct {
+ Password string `json:"password"`
+}
+
+// GenerateAuthToken randomly generates and sets the users Authentication
+// Token.
+func (u *User) GenerateAuthToken() (*auth.Token, error) {
+ token := auth.NewToken()
+ return token, u.SetAuthToken(token)
+}
+
+// SetAuthToken updates the user to use the given Authentication Token.
+func (u *User) SetAuthToken(token *auth.Token) error {
+ salt := utils.NewSecret(utils.DefaultSecretSize)
+ hashedSecret, err := auth.HashedSecret(token, salt)
+ if err != nil {
+ return errors.Wrap(err, "user")
+ }
+ u.TokenSalt = null.StringFrom(salt)
+ u.TokenKey = null.StringFrom(token.AccessKey)
+ u.TokenHashedSecret = null.StringFrom(hashedSecret)
+ return nil
+}
+
+// AuthenticateUserByToken returns true on successful authentication of the
+// user against the given Authentication Token.
+func AuthenticateUserByToken(token *auth.Token, user *User) (bool, error) {
+ hashedSecret, err := auth.HashedSecret(token, user.TokenSalt.ValueOrZero())
+ if err != nil {
+ return false, err
+ }
+ return subtle.ConstantTimeCompare([]byte(hashedSecret), []byte(user.TokenHashedSecret.ValueOrZero())) == 1, nil
+}
diff --git a/core/sessions/user.go b/core/sessions/user.go
index a1208744323..f2e4827b922 100644
--- a/core/sessions/user.go
+++ b/core/sessions/user.go
@@ -1,7 +1,6 @@
package sessions
import (
- "crypto/subtle"
"fmt"
"net/mail"
"time"
@@ -9,7 +8,6 @@ import (
"github.com/pkg/errors"
"gopkg.in/guregu/null.v4"
- "github.com/smartcontractkit/chainlink/v2/core/auth"
"github.com/smartcontractkit/chainlink/v2/core/utils"
)
@@ -108,65 +106,3 @@ func GetUserRole(role string) (UserRole, error) {
)
return UserRole(""), errors.New(errStr)
}
-
-// SessionRequest encapsulates the fields needed to generate a new SessionID,
-// including the hashed password.
-type SessionRequest struct {
- Email string `json:"email"`
- Password string `json:"password"`
- WebAuthnData string `json:"webauthndata"`
- WebAuthnConfig WebAuthnConfiguration
- SessionStore *WebAuthnSessionStore
-}
-
-// Session holds the unique id for the authenticated session.
-type Session struct {
- ID string `json:"id"`
- Email string `json:"email"`
- LastUsed time.Time `json:"lastUsed"`
- CreatedAt time.Time `json:"createdAt"`
-}
-
-// NewSession returns a session instance with ID set to a random ID and
-// LastUsed to now.
-func NewSession() Session {
- return Session{
- ID: utils.NewBytes32ID(),
- LastUsed: time.Now(),
- }
-}
-
-// Changeauth.TokenRequest is sent when updating a User's authentication token.
-type ChangeAuthTokenRequest struct {
- Password string `json:"password"`
-}
-
-// GenerateAuthToken randomly generates and sets the users Authentication
-// Token.
-func (u *User) GenerateAuthToken() (*auth.Token, error) {
- token := auth.NewToken()
- return token, u.SetAuthToken(token)
-}
-
-// SetAuthToken updates the user to use the given Authentication Token.
-func (u *User) SetAuthToken(token *auth.Token) error {
- salt := utils.NewSecret(utils.DefaultSecretSize)
- hashedSecret, err := auth.HashedSecret(token, salt)
- if err != nil {
- return errors.Wrap(err, "user")
- }
- u.TokenSalt = null.StringFrom(salt)
- u.TokenKey = null.StringFrom(token.AccessKey)
- u.TokenHashedSecret = null.StringFrom(hashedSecret)
- return nil
-}
-
-// AuthenticateUserByToken returns true on successful authentication of the
-// user against the given Authentication Token.
-func AuthenticateUserByToken(token *auth.Token, user *User) (bool, error) {
- hashedSecret, err := auth.HashedSecret(token, user.TokenSalt.ValueOrZero())
- if err != nil {
- return false, err
- }
- return subtle.ConstantTimeCompare([]byte(hashedSecret), []byte(user.TokenHashedSecret.ValueOrZero())) == 1, nil
-}
diff --git a/core/sessions/webauthn.go b/core/sessions/webauthn.go
index 0dd8242dc8a..89e7758bc5b 100644
--- a/core/sessions/webauthn.go
+++ b/core/sessions/webauthn.go
@@ -9,8 +9,8 @@ import (
"github.com/go-webauthn/webauthn/protocol"
"github.com/go-webauthn/webauthn/webauthn"
+ sqlxTypes "github.com/jmoiron/sqlx/types"
"github.com/pkg/errors"
- sqlxTypes "github.com/smartcontractkit/sqlx/types"
)
// WebAuthn holds the credentials for API user.
@@ -279,7 +279,7 @@ func (store *WebAuthnSessionStore) GetWebauthnSession(key string) (data webauthn
return
}
-func AddCredentialToUser(o ORM, email string, credential *webauthn.Credential) error {
+func AddCredentialToUser(ap AuthenticationProvider, email string, credential *webauthn.Credential) error {
credj, err := json.Marshal(credential)
if err != nil {
return err
@@ -289,5 +289,5 @@ func AddCredentialToUser(o ORM, email string, credential *webauthn.Credential) e
Email: email,
PublicKeyData: sqlxTypes.JSONText(credj),
}
- return o.SaveWebAuthn(&token)
+ return ap.SaveWebAuthn(&token)
}
diff --git a/core/sessions/webauthn_test.go b/core/sessions/webauthn_test.go
index b3c1ecf7897..9c055d9c794 100644
--- a/core/sessions/webauthn_test.go
+++ b/core/sessions/webauthn_test.go
@@ -7,7 +7,7 @@ import (
"github.com/go-webauthn/webauthn/protocol"
"github.com/go-webauthn/webauthn/webauthn"
- sqlxTypes "github.com/smartcontractkit/sqlx/types"
+ sqlxTypes "github.com/jmoiron/sqlx/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
diff --git a/core/store/migrate/migrate.go b/core/store/migrate/migrate.go
index 69cf9a78247..1e58d7a0b05 100644
--- a/core/store/migrate/migrate.go
+++ b/core/store/migrate/migrate.go
@@ -9,9 +9,9 @@ import (
"strconv"
"strings"
+ "github.com/jmoiron/sqlx"
"github.com/pkg/errors"
"github.com/pressly/goose/v3"
- "github.com/smartcontractkit/sqlx"
"gopkg.in/guregu/null.v4"
"github.com/smartcontractkit/chainlink/v2/core/config/env"
diff --git a/core/store/migrate/migrate_test.go b/core/store/migrate/migrate_test.go
index fe218589d2d..ef105c75ff6 100644
--- a/core/store/migrate/migrate_test.go
+++ b/core/store/migrate/migrate_test.go
@@ -73,7 +73,7 @@ func getOCR2Spec100() OffchainReporting2OracleSpec100 {
}
func TestMigrate_0100_BootstrapConfigs(t *testing.T) {
- cfg, db := heavyweight.FullTestDBEmptyV2(t, migrationDir, nil)
+ cfg, db := heavyweight.FullTestDBEmptyV2(t, nil)
lggr := logger.TestLogger(t)
err := goose.UpTo(db.DB, migrationDir, 99)
require.NoError(t, err)
@@ -342,7 +342,7 @@ ON jobs.offchainreporting2_oracle_spec_id = ocr2.id`
}
func TestMigrate_101_GenericOCR2(t *testing.T) {
- _, db := heavyweight.FullTestDBEmptyV2(t, migrationDir, nil)
+ _, db := heavyweight.FullTestDBEmptyV2(t, nil)
err := goose.UpTo(db.DB, migrationDir, 100)
require.NoError(t, err)
@@ -392,7 +392,7 @@ func TestMigrate_101_GenericOCR2(t *testing.T) {
func TestMigrate(t *testing.T) {
lggr := logger.TestLogger(t)
- _, db := heavyweight.FullTestDBEmptyV2(t, migrationDir, nil)
+ _, db := heavyweight.FullTestDBEmptyV2(t, nil)
err := goose.UpTo(db.DB, migrationDir, 100)
require.NoError(t, err)
@@ -443,7 +443,7 @@ func TestSetMigrationENVVars(t *testing.T) {
}
func TestDatabaseBackFillWithMigration202(t *testing.T) {
- _, db := heavyweight.FullTestDBEmptyV2(t, migrationDir, nil)
+ _, db := heavyweight.FullTestDBEmptyV2(t, nil)
err := goose.UpTo(db.DB, migrationDir, 201)
require.NoError(t, err)
@@ -523,7 +523,7 @@ func BenchmarkBackfillingRecordsWithMigration202(b *testing.B) {
maxLogsSize := 100_000
// Disable Goose logging for benchmarking
goose.SetLogger(goose.NopLogger())
- _, db := heavyweight.FullTestDBEmptyV2(b, migrationDir, nil)
+ _, db := heavyweight.FullTestDBEmptyV2(b, nil)
err := goose.UpTo(db.DB, migrationDir, previousMigration)
require.NoError(b, err)
diff --git a/core/store/migrate/migrations/0036_external_job_id.go b/core/store/migrate/migrations/0036_external_job_id.go
index 82f26206d88..fc9ec08ec60 100644
--- a/core/store/migrate/migrations/0036_external_job_id.go
+++ b/core/store/migrate/migrations/0036_external_job_id.go
@@ -6,8 +6,8 @@ import (
"fmt"
"github.com/google/uuid"
+ "github.com/jmoiron/sqlx"
"github.com/pressly/goose/v3"
- "github.com/smartcontractkit/sqlx"
)
func init() {
@@ -45,7 +45,7 @@ func Up36(ctx context.Context, tx *sql.Tx) error {
// Update all jobs to have an external_job_id.
// We do this to avoid using the uuid postgres extension.
var jobIDs []int32
- txx := sqlx.NewTx(tx, "postgres")
+ txx := sqlx.Tx{Tx: tx}
if err := txx.SelectContext(ctx, &jobIDs, "SELECT id FROM jobs"); err != nil {
return err
}
diff --git a/core/store/migrate/migrations/0206_remove_tx_insert_trigger.sql b/core/store/migrate/migrations/0206_remove_tx_insert_trigger.sql
new file mode 100644
index 00000000000..94b2e4aa8a6
--- /dev/null
+++ b/core/store/migrate/migrations/0206_remove_tx_insert_trigger.sql
@@ -0,0 +1,18 @@
+-- +goose Up
+DROP TRIGGER IF EXISTS notify_tx_insertion on evm.txes;
+DROP FUNCTION IF EXISTS evm.notifytxinsertion();
+
+
+-- +goose Down
+-- +goose StatementBegin
+CREATE OR REPLACE FUNCTION evm.notifytxinsertion() RETURNS trigger
+ LANGUAGE plpgsql
+ AS $$
+ BEGIN
+ PERFORM pg_notify('evm.insert_on_txes'::text, encode(NEW.from_address, 'hex'));
+ RETURN NULL;
+ END
+ $$;
+
+CREATE TRIGGER notify_tx_insertion AFTER INSERT ON evm.txes FOR EACH ROW EXECUTE PROCEDURE evm.notifytxinsertion();
+-- +goose StatementEnd
\ No newline at end of file
diff --git a/core/store/migrate/migrations/0207_drop_insert_on_terra_msg.sql b/core/store/migrate/migrations/0207_drop_insert_on_terra_msg.sql
new file mode 100644
index 00000000000..f4ae4b98e2c
--- /dev/null
+++ b/core/store/migrate/migrations/0207_drop_insert_on_terra_msg.sql
@@ -0,0 +1,20 @@
+-- +goose Up
+
+-- +goose StatementBegin
+DROP TRIGGER IF EXISTS insert_on_terra_msg ON PUBLIC.cosmos_msgs;
+DROP FUNCTION IF EXISTS PUBLIC.notify_terra_msg_insert;
+-- +goose StatementEnd
+
+-- +goose Down
+
+-- +goose StatementBegin
+CREATE FUNCTION notify_terra_msg_insert() RETURNS trigger
+ LANGUAGE plpgsql
+AS $$
+BEGIN
+ PERFORM pg_notify('insert_on_terra_msg'::text, NOW()::text);
+ RETURN NULL;
+END
+$$;
+CREATE TRIGGER insert_on_terra_msg AFTER INSERT ON cosmos_msgs FOR EACH STATEMENT EXECUTE PROCEDURE notify_terra_msg_insert();
+-- +goose StatementEnd
diff --git a/core/store/migrate/migrations/0208_create_ldap_sessions_table.sql b/core/store/migrate/migrations/0208_create_ldap_sessions_table.sql
new file mode 100644
index 00000000000..f788cdab076
--- /dev/null
+++ b/core/store/migrate/migrations/0208_create_ldap_sessions_table.sql
@@ -0,0 +1,22 @@
+-- +goose Up
+CREATE TABLE IF NOT EXISTS ldap_sessions (
+ id text PRIMARY KEY,
+ user_email text NOT NULL,
+ user_role user_roles,
+ localauth_user BOOLEAN,
+ created_at timestamp with time zone NOT NULL
+);
+
+CREATE TABLE IF NOT EXISTS ldap_user_api_tokens (
+ user_email text PRIMARY KEY,
+ user_role user_roles,
+ localauth_user BOOLEAN,
+ token_key text UNIQUE NOT NULL,
+ token_salt text NOT NULL,
+ token_hashed_secret text NOT NULL,
+ created_at timestamp with time zone NOT NULL
+);
+
+-- +goose Down
+DROP TABLE ldap_sessions;
+DROP TABLE ldap_user_api_tokens;
diff --git a/core/store/migrate/migrations/0209_add_resume_pipeline_task_flags_to_evm_txes.sql b/core/store/migrate/migrations/0209_add_resume_pipeline_task_flags_to_evm_txes.sql
new file mode 100644
index 00000000000..dbe7e91b9f6
--- /dev/null
+++ b/core/store/migrate/migrations/0209_add_resume_pipeline_task_flags_to_evm_txes.sql
@@ -0,0 +1,15 @@
+-- +goose Up
+ALTER TABLE evm.txes ADD COLUMN "signal_callback" BOOL DEFAULT FALSE;
+ALTER TABLE evm.txes ADD COLUMN "callback_completed" BOOL DEFAULT FALSE;
+
+UPDATE evm.txes
+SET signal_callback = TRUE, callback_completed = FALSE
+WHERE evm.txes.pipeline_task_run_id IN (
+ SELECT pipeline_task_runs.id FROM pipeline_task_runs
+ INNER JOIN pipeline_runs ON pipeline_runs.id = pipeline_task_runs.pipeline_run_id
+ WHERE pipeline_runs.state = 'suspended'
+);
+
+-- +goose Down
+ALTER TABLE evm.txes DROP COLUMN "signal_callback";
+ALTER TABLE evm.txes DROP COLUMN "callback_completed";
diff --git a/core/web/assets/index.html b/core/web/assets/index.html
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/core/web/auth/auth.go b/core/web/auth/auth.go
index a0a9df58c79..c2458f52627 100644
--- a/core/web/auth/auth.go
+++ b/core/web/auth/auth.go
@@ -78,6 +78,9 @@ func AuthenticateByToken(c *gin.Context, authr Authenticator) error {
AccessKey: c.GetHeader(APIKey),
Secret: c.GetHeader(APISecret),
}
+ if token.AccessKey == "" {
+ return auth.ErrorAuthFailed
+ }
@@ -86,7 +89,7 @@ func AuthenticateByToken(c *gin.Context, authr Authenticator) error {
// We need to first load the user row so we can compare tokens using the stored salt
user, err := authr.FindUserByAPIToken(token.AccessKey)
if err != nil {
- if errors.Is(err, sql.ErrNoRows) {
+ if errors.Is(err, sql.ErrNoRows) || errors.Is(err, clsessions.ErrUserSessionExpired) {
return auth.ErrorAuthFailed
}
return err
diff --git a/core/web/auth/auth_test.go b/core/web/auth/auth_test.go
index 896542915ae..f0b4e5068fb 100644
--- a/core/web/auth/auth_test.go
+++ b/core/web/auth/auth_test.go
@@ -33,7 +33,7 @@ func authSuccess(*gin.Context, webauth.Authenticator) error {
}
type userFindFailer struct {
- sessions.ORM
+ sessions.AuthenticationProvider
err error
}
@@ -46,7 +46,7 @@ func (u userFindFailer) FindUserByAPIToken(token string) (sessions.User, error)
}
type userFindSuccesser struct {
- sessions.ORM
+ sessions.AuthenticationProvider
user sessions.User
}
diff --git a/core/web/auth/gql_test.go b/core/web/auth/gql_test.go
index 4688f62a336..4f3f8e27baf 100644
--- a/core/web/auth/gql_test.go
+++ b/core/web/auth/gql_test.go
@@ -21,7 +21,7 @@ import (
func Test_AuthenticateGQL_Unauthenticated(t *testing.T) {
t.Parallel()
- sessionORM := mocks.NewORM(t)
+ sessionORM := mocks.NewAuthenticationProvider(t)
sessionStore := cookie.NewStore([]byte("secret"))
r := gin.Default()
@@ -44,7 +44,7 @@ func Test_AuthenticateGQL_Unauthenticated(t *testing.T) {
func Test_AuthenticateGQL_Authenticated(t *testing.T) {
t.Parallel()
- sessionORM := mocks.NewORM(t)
+ sessionORM := mocks.NewAuthenticationProvider(t)
sessionStore := cookie.NewStore([]byte(cltest.SessionSecret))
sessionID := "sessionID"
diff --git a/core/web/jobs_controller_test.go b/core/web/jobs_controller_test.go
index fc2e8d7a30e..2e3f3a83693 100644
--- a/core/web/jobs_controller_test.go
+++ b/core/web/jobs_controller_test.go
@@ -7,6 +7,7 @@ import (
"encoding/json"
"fmt"
"io"
+ "math/big"
"net/http"
"net/url"
"strconv"
@@ -19,10 +20,12 @@ import (
p2ppeer "github.com/libp2p/go-libp2p-core/peer"
"github.com/pelletier/go-toml"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
- "github.com/smartcontractkit/sqlx"
+ "github.com/jmoiron/sqlx"
+ evmclimocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks"
"github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest"
@@ -658,7 +661,8 @@ func setupJobsControllerTests(t *testing.T) (ta *cltest.TestApplication, cc clte
c.P2P.V1.Enabled = ptr(true)
c.P2P.PeerID = &cltest.DefaultP2PPeerID
})
- app := cltest.NewApplicationWithConfigAndKey(t, cfg, cltest.DefaultP2PKey)
+ ec := setupEthClientForControllerTests(t)
+ app := cltest.NewApplicationWithConfigAndKey(t, cfg, cltest.DefaultP2PKey, ec)
require.NoError(t, app.Start(testutils.Context(t)))
client := app.NewHTTPClient(nil)
@@ -668,6 +672,14 @@ func setupJobsControllerTests(t *testing.T) (ta *cltest.TestApplication, cc clte
return app, client
}
+func setupEthClientForControllerTests(t *testing.T) *evmclimocks.Client {
+ ec := cltest.NewEthMocksWithStartupAssertions(t)
+ ec.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil).Maybe()
+ ec.On("LatestBlockHeight", mock.Anything).Return(big.NewInt(100), nil).Maybe()
+ ec.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything).Once().Return(big.NewInt(0), nil).Maybe()
+ return ec
+}
+
func setupJobSpecsControllerTestsWithJobs(t *testing.T) (*cltest.TestApplication, cltest.HTTPClientCleaner, job.Job, int32, job.Job, int32) {
cfg := configtest.NewGeneralConfig(t, func(c *chainlink.Config, s *chainlink.Secrets) {
c.OCR.Enabled = ptr(true)
diff --git a/core/web/loader/loader_test.go b/core/web/loader/loader_test.go
index 0dd45a1735d..984aa9f6189 100644
--- a/core/web/loader/loader_test.go
+++ b/core/web/loader/loader_test.go
@@ -26,7 +26,6 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/job"
jobORMMocks "github.com/smartcontractkit/chainlink/v2/core/services/job/mocks"
"github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay"
"github.com/smartcontractkit/chainlink/v2/core/utils"
)
@@ -69,9 +68,6 @@ func TestLoader_Nodes(t *testing.T) {
ctx := InjectDataloader(testutils.Context(t), app)
chainID1, chainID2, notAnID := big.NewInt(1), big.NewInt(2), big.NewInt(3)
- relayID1 := relay.ID{Network: relay.EVM, ChainID: relay.ChainID(chainID1.String())}
- relayID2 := relay.ID{Network: relay.EVM, ChainID: relay.ChainID(chainID2.String())}
- notARelayID := relay.ID{Network: relay.EVM, ChainID: relay.ChainID(notAnID.String())}
genNodeStat := func(id string) relaytypes.NodeStatus {
return relaytypes.NodeStatus{
@@ -79,11 +75,9 @@ func TestLoader_Nodes(t *testing.T) {
ChainID: id,
}
}
- rcInterops := chainlinkmocks.NewRelayerChainInteroperators(t)
- rcInterops.On("NodeStatuses", mock.Anything, 0, -1,
- relayID2, relayID1, notARelayID).Return([]relaytypes.NodeStatus{
+ rcInterops := &chainlinkmocks.FakeRelayerChainInteroperators{Nodes: []relaytypes.NodeStatus{
genNodeStat(chainID2.String()), genNodeStat(chainID1.String()),
- }, 2, nil)
+ }}
app.On("GetRelayers").Return(rcInterops)
batcher := nodeBatcher{app}
diff --git a/core/web/presenters/job.go b/core/web/presenters/job.go
index 2aa97730881..06b9950755f 100644
--- a/core/web/presenters/job.go
+++ b/core/web/presenters/job.go
@@ -41,26 +41,24 @@ const (
// DirectRequestSpec defines the spec details of a DirectRequest Job
type DirectRequestSpec struct {
- ContractAddress ethkey.EIP55Address `json:"contractAddress"`
- MinIncomingConfirmations clnull.Uint32 `json:"minIncomingConfirmations"`
- MinIncomingConfirmationsEnv bool `json:"minIncomingConfirmationsEnv,omitempty"`
- MinContractPayment *assets.Link `json:"minContractPaymentLinkJuels"`
- Requesters models.AddressCollection `json:"requesters"`
- Initiator string `json:"initiator"`
- CreatedAt time.Time `json:"createdAt"`
- UpdatedAt time.Time `json:"updatedAt"`
- EVMChainID *utils.Big `json:"evmChainID"`
+ ContractAddress ethkey.EIP55Address `json:"contractAddress"`
+ MinIncomingConfirmations clnull.Uint32 `json:"minIncomingConfirmations"`
+ MinContractPayment *assets.Link `json:"minContractPaymentLinkJuels"`
+ Requesters models.AddressCollection `json:"requesters"`
+ Initiator string `json:"initiator"`
+ CreatedAt time.Time `json:"createdAt"`
+ UpdatedAt time.Time `json:"updatedAt"`
+ EVMChainID *utils.Big `json:"evmChainID"`
}
// NewDirectRequestSpec initializes a new DirectRequestSpec from a
// job.DirectRequestSpec
func NewDirectRequestSpec(spec *job.DirectRequestSpec) *DirectRequestSpec {
return &DirectRequestSpec{
- ContractAddress: spec.ContractAddress,
- MinIncomingConfirmations: spec.MinIncomingConfirmations,
- MinIncomingConfirmationsEnv: spec.MinIncomingConfirmationsEnv,
- MinContractPayment: spec.MinContractPayment,
- Requesters: spec.Requesters,
+ ContractAddress: spec.ContractAddress,
+ MinIncomingConfirmations: spec.MinIncomingConfirmations,
+ MinContractPayment: spec.MinContractPayment,
+ Requesters: spec.Requesters,
// This is hardcoded to runlog. When we support other initiators, we need
// to change this
Initiator: "runlog",
@@ -120,64 +118,48 @@ func NewFluxMonitorSpec(spec *job.FluxMonitorSpec) *FluxMonitorSpec {
// OffChainReportingSpec defines the spec details of a OffChainReporting Job
type OffChainReportingSpec struct {
- ContractAddress ethkey.EIP55Address `json:"contractAddress"`
- P2PBootstrapPeers pq.StringArray `json:"p2pBootstrapPeers"`
- P2PV2Bootstrappers pq.StringArray `json:"p2pv2Bootstrappers"`
- IsBootstrapPeer bool `json:"isBootstrapPeer"`
- EncryptedOCRKeyBundleID *models.Sha256Hash `json:"keyBundleID"`
- TransmitterAddress *ethkey.EIP55Address `json:"transmitterAddress"`
- ObservationTimeout models.Interval `json:"observationTimeout"`
- ObservationTimeoutEnv bool `json:"observationTimeoutEnv,omitempty"`
- BlockchainTimeout models.Interval `json:"blockchainTimeout"`
- BlockchainTimeoutEnv bool `json:"blockchainTimeoutEnv,omitempty"`
- ContractConfigTrackerSubscribeInterval models.Interval `json:"contractConfigTrackerSubscribeInterval"`
- ContractConfigTrackerSubscribeIntervalEnv bool `json:"contractConfigTrackerSubscribeIntervalEnv,omitempty"`
- ContractConfigTrackerPollInterval models.Interval `json:"contractConfigTrackerPollInterval"`
- ContractConfigTrackerPollIntervalEnv bool `json:"contractConfigTrackerPollIntervalEnv,omitempty"`
- ContractConfigConfirmations uint16 `json:"contractConfigConfirmations"`
- ContractConfigConfirmationsEnv bool `json:"contractConfigConfirmationsEnv,omitempty"`
- CreatedAt time.Time `json:"createdAt"`
- UpdatedAt time.Time `json:"updatedAt"`
- EVMChainID *utils.Big `json:"evmChainID"`
- DatabaseTimeout *models.Interval `json:"databaseTimeout"`
- DatabaseTimeoutEnv bool `json:"databaseTimeoutEnv,omitempty"`
- ObservationGracePeriod *models.Interval `json:"observationGracePeriod"`
- ObservationGracePeriodEnv bool `json:"observationGracePeriodEnv,omitempty"`
- ContractTransmitterTransmitTimeout *models.Interval `json:"contractTransmitterTransmitTimeout"`
- ContractTransmitterTransmitTimeoutEnv bool `json:"contractTransmitterTransmitTimeoutEnv,omitempty"`
- CollectTelemetry bool `json:"collectTelemetry,omitempty"`
+ ContractAddress ethkey.EIP55Address `json:"contractAddress"`
+ P2PBootstrapPeers pq.StringArray `json:"p2pBootstrapPeers"`
+ P2PV2Bootstrappers pq.StringArray `json:"p2pv2Bootstrappers"`
+ IsBootstrapPeer bool `json:"isBootstrapPeer"`
+ EncryptedOCRKeyBundleID *models.Sha256Hash `json:"keyBundleID"`
+ TransmitterAddress *ethkey.EIP55Address `json:"transmitterAddress"`
+ ObservationTimeout models.Interval `json:"observationTimeout"`
+ BlockchainTimeout models.Interval `json:"blockchainTimeout"`
+ ContractConfigTrackerSubscribeInterval models.Interval `json:"contractConfigTrackerSubscribeInterval"`
+ ContractConfigTrackerPollInterval models.Interval `json:"contractConfigTrackerPollInterval"`
+ ContractConfigConfirmations uint16 `json:"contractConfigConfirmations"`
+ CreatedAt time.Time `json:"createdAt"`
+ UpdatedAt time.Time `json:"updatedAt"`
+ EVMChainID *utils.Big `json:"evmChainID"`
+ DatabaseTimeout *models.Interval `json:"databaseTimeout"`
+ ObservationGracePeriod *models.Interval `json:"observationGracePeriod"`
+ ContractTransmitterTransmitTimeout *models.Interval `json:"contractTransmitterTransmitTimeout"`
+ CollectTelemetry bool `json:"collectTelemetry,omitempty"`
}
// NewOffChainReportingSpec initializes a new OffChainReportingSpec from a
// job.OCROracleSpec
func NewOffChainReportingSpec(spec *job.OCROracleSpec) *OffChainReportingSpec {
return &OffChainReportingSpec{
- ContractAddress: spec.ContractAddress,
- P2PBootstrapPeers: spec.P2PBootstrapPeers,
- P2PV2Bootstrappers: spec.P2PV2Bootstrappers,
- IsBootstrapPeer: spec.IsBootstrapPeer,
- EncryptedOCRKeyBundleID: spec.EncryptedOCRKeyBundleID,
- TransmitterAddress: spec.TransmitterAddress,
- ObservationTimeout: spec.ObservationTimeout,
- ObservationTimeoutEnv: spec.ObservationTimeoutEnv,
- BlockchainTimeout: spec.BlockchainTimeout,
- BlockchainTimeoutEnv: spec.BlockchainTimeoutEnv,
- ContractConfigTrackerSubscribeInterval: spec.ContractConfigTrackerSubscribeInterval,
- ContractConfigTrackerSubscribeIntervalEnv: spec.ContractConfigTrackerSubscribeIntervalEnv,
- ContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval,
- ContractConfigTrackerPollIntervalEnv: spec.ContractConfigTrackerPollIntervalEnv,
- ContractConfigConfirmations: spec.ContractConfigConfirmations,
- ContractConfigConfirmationsEnv: spec.ContractConfigConfirmationsEnv,
- CreatedAt: spec.CreatedAt,
- UpdatedAt: spec.UpdatedAt,
- EVMChainID: spec.EVMChainID,
- DatabaseTimeout: spec.DatabaseTimeout,
- DatabaseTimeoutEnv: spec.DatabaseTimeoutEnv,
- ObservationGracePeriod: spec.ObservationGracePeriod,
- ObservationGracePeriodEnv: spec.ObservationGracePeriodEnv,
- ContractTransmitterTransmitTimeout: spec.ContractTransmitterTransmitTimeout,
- ContractTransmitterTransmitTimeoutEnv: spec.ContractTransmitterTransmitTimeoutEnv,
- CollectTelemetry: spec.CaptureEATelemetry,
+ ContractAddress: spec.ContractAddress,
+ P2PBootstrapPeers: spec.P2PBootstrapPeers,
+ P2PV2Bootstrappers: spec.P2PV2Bootstrappers,
+ IsBootstrapPeer: spec.IsBootstrapPeer,
+ EncryptedOCRKeyBundleID: spec.EncryptedOCRKeyBundleID,
+ TransmitterAddress: spec.TransmitterAddress,
+ ObservationTimeout: spec.ObservationTimeout,
+ BlockchainTimeout: spec.BlockchainTimeout,
+ ContractConfigTrackerSubscribeInterval: spec.ContractConfigTrackerSubscribeInterval,
+ ContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval,
+ ContractConfigConfirmations: spec.ContractConfigConfirmations,
+ CreatedAt: spec.CreatedAt,
+ UpdatedAt: spec.UpdatedAt,
+ EVMChainID: spec.EVMChainID,
+ DatabaseTimeout: spec.DatabaseTimeout,
+ ObservationGracePeriod: spec.ObservationGracePeriod,
+ ContractTransmitterTransmitTimeout: spec.ContractTransmitterTransmitTimeout,
+ CollectTelemetry: spec.CaptureEATelemetry,
}
}
diff --git a/core/web/resolver/api_token_test.go b/core/web/resolver/api_token_test.go
index b5ed52be3c5..fae0204caf5 100644
--- a/core/web/resolver/api_token_test.go
+++ b/core/web/resolver/api_token_test.go
@@ -39,6 +39,11 @@ func TestResolver_CreateAPIToken(t *testing.T) {
"password": defaultPassword,
},
}
+ variablesIncorrect := map[string]interface{}{
+ "input": map[string]interface{}{
+ "password": "wrong-password",
+ },
+ }
gError := errors.New("error")
testCases := []GQLTestCase{
@@ -56,12 +61,13 @@ func TestResolver_CreateAPIToken(t *testing.T) {
session.User.HashedPassword = pwd
- f.Mocks.sessionsORM.On("FindUser", session.User.Email).Return(*session.User, nil)
- f.Mocks.sessionsORM.On("CreateAndSetAuthToken", session.User).Return(&auth.Token{
+ f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, nil)
+ f.Mocks.authProvider.On("TestPassword", session.User.Email, defaultPassword).Return(nil)
+ f.Mocks.authProvider.On("CreateAndSetAuthToken", session.User).Return(&auth.Token{
Secret: "new-secret",
AccessKey: "new-access-key",
}, nil)
- f.App.On("SessionORM").Return(f.Mocks.sessionsORM)
+ f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider)
},
query: mutation,
variables: variables,
@@ -83,13 +89,12 @@ func TestResolver_CreateAPIToken(t *testing.T) {
require.True(t, ok)
require.NotNil(t, session)
- session.User.HashedPassword = "wrong-password"
-
- f.Mocks.sessionsORM.On("FindUser", session.User.Email).Return(*session.User, nil)
- f.App.On("SessionORM").Return(f.Mocks.sessionsORM)
+ f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, nil)
+ f.Mocks.authProvider.On("TestPassword", session.User.Email, "wrong-password").Return(gError)
+ f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider)
},
query: mutation,
- variables: variables,
+ variables: variablesIncorrect,
result: `
{
"createAPIToken": {
@@ -114,8 +119,8 @@ func TestResolver_CreateAPIToken(t *testing.T) {
session.User.HashedPassword = pwd
- f.Mocks.sessionsORM.On("FindUser", session.User.Email).Return(*session.User, gError)
- f.App.On("SessionORM").Return(f.Mocks.sessionsORM)
+ f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, gError)
+ f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider)
},
query: mutation,
variables: variables,
@@ -142,9 +147,10 @@ func TestResolver_CreateAPIToken(t *testing.T) {
session.User.HashedPassword = pwd
- f.Mocks.sessionsORM.On("FindUser", session.User.Email).Return(*session.User, nil)
- f.Mocks.sessionsORM.On("CreateAndSetAuthToken", session.User).Return(nil, gError)
- f.App.On("SessionORM").Return(f.Mocks.sessionsORM)
+ f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, nil)
+ f.Mocks.authProvider.On("TestPassword", session.User.Email, defaultPassword).Return(nil)
+ f.Mocks.authProvider.On("CreateAndSetAuthToken", session.User).Return(nil, gError)
+ f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider)
},
query: mutation,
variables: variables,
@@ -189,6 +195,11 @@ func TestResolver_DeleteAPIToken(t *testing.T) {
"password": defaultPassword,
},
}
+ variablesIncorrect := map[string]interface{}{
+ "input": map[string]interface{}{
+ "password": "wrong-password",
+ },
+ }
gError := errors.New("error")
testCases := []GQLTestCase{
@@ -208,9 +219,10 @@ func TestResolver_DeleteAPIToken(t *testing.T) {
err = session.User.TokenKey.UnmarshalText([]byte("new-access-key"))
require.NoError(t, err)
- f.Mocks.sessionsORM.On("FindUser", session.User.Email).Return(*session.User, nil)
- f.Mocks.sessionsORM.On("DeleteAuthToken", session.User).Return(nil)
- f.App.On("SessionORM").Return(f.Mocks.sessionsORM)
+ f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, nil)
+ f.Mocks.authProvider.On("TestPassword", session.User.Email, defaultPassword).Return(nil)
+ f.Mocks.authProvider.On("DeleteAuthToken", session.User).Return(nil)
+ f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider)
},
query: mutation,
variables: variables,
@@ -231,13 +243,12 @@ func TestResolver_DeleteAPIToken(t *testing.T) {
require.True(t, ok)
require.NotNil(t, session)
- session.User.HashedPassword = "wrong-password"
-
- f.Mocks.sessionsORM.On("FindUser", session.User.Email).Return(*session.User, nil)
- f.App.On("SessionORM").Return(f.Mocks.sessionsORM)
+ f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, nil)
+ f.Mocks.authProvider.On("TestPassword", session.User.Email, "wrong-password").Return(gError)
+ f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider)
},
query: mutation,
- variables: variables,
+ variables: variablesIncorrect,
result: `
{
"deleteAPIToken": {
@@ -262,8 +273,8 @@ func TestResolver_DeleteAPIToken(t *testing.T) {
session.User.HashedPassword = pwd
- f.Mocks.sessionsORM.On("FindUser", session.User.Email).Return(*session.User, gError)
- f.App.On("SessionORM").Return(f.Mocks.sessionsORM)
+ f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, gError)
+ f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider)
},
query: mutation,
variables: variables,
@@ -290,9 +301,10 @@ func TestResolver_DeleteAPIToken(t *testing.T) {
session.User.HashedPassword = pwd
- f.Mocks.sessionsORM.On("FindUser", session.User.Email).Return(*session.User, nil)
- f.Mocks.sessionsORM.On("DeleteAuthToken", session.User).Return(gError)
- f.App.On("SessionORM").Return(f.Mocks.sessionsORM)
+ f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, nil)
+ f.Mocks.authProvider.On("TestPassword", session.User.Email, defaultPassword).Return(nil)
+ f.Mocks.authProvider.On("DeleteAuthToken", session.User).Return(gError)
+ f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider)
},
query: mutation,
variables: variables,
diff --git a/core/web/resolver/eth_key_test.go b/core/web/resolver/eth_key_test.go
index 6cac2f4ac4f..a7f8ce56d9f 100644
--- a/core/web/resolver/eth_key_test.go
+++ b/core/web/resolver/eth_key_test.go
@@ -100,7 +100,7 @@ func TestResolver_ETHKeys(t *testing.T) {
f.Mocks.balM.On("GetEthBalance", address).Return(assets.NewEth(1))
f.Mocks.chain.On("BalanceMonitor").Return(f.Mocks.balM)
f.Mocks.chain.On("Config").Return(f.Mocks.scfg)
- f.Mocks.relayerChainInterops.On("LegacyEVMChains").Return(legacyEVMChains)
+ f.Mocks.relayerChainInterops.EVMChains = legacyEVMChains
f.Mocks.evmORM.PutChains(toml.EVMConfig{ChainID: &chainID})
f.Mocks.keystore.On("Eth").Return(f.Mocks.ethKs)
f.App.On("GetKeyStore").Return(f.Mocks.keystore)
@@ -149,7 +149,7 @@ func TestResolver_ETHKeys(t *testing.T) {
f.Mocks.ethKs.On("GetStatesForKeys", keys).Return(states, nil)
f.Mocks.ethKs.On("Get", keys[0].Address.Hex()).Return(keys[0], nil)
f.Mocks.ethKs.On("GetAll").Return(keys, nil)
- f.Mocks.relayerChainInterops.On("LegacyEVMChains").Return(f.Mocks.legacyEVMChains)
+ f.Mocks.relayerChainInterops.EVMChains = f.Mocks.legacyEVMChains
f.Mocks.evmORM.PutChains(toml.EVMConfig{ChainID: &chainID})
f.Mocks.keystore.On("Eth").Return(f.Mocks.ethKs)
f.App.On("GetKeyStore").Return(f.Mocks.keystore)
@@ -268,7 +268,7 @@ func TestResolver_ETHKeys(t *testing.T) {
f.Mocks.ethKs.On("GetAll").Return(keys, nil)
f.Mocks.keystore.On("Eth").Return(f.Mocks.ethKs)
f.Mocks.legacyEVMChains.On("Get", states[0].EVMChainID.String()).Return(f.Mocks.chain, gError)
- f.Mocks.relayerChainInterops.On("LegacyEVMChains").Return(f.Mocks.legacyEVMChains)
+ f.Mocks.relayerChainInterops.EVMChains = f.Mocks.legacyEVMChains
f.App.On("GetRelayers").Return(f.Mocks.relayerChainInterops)
f.App.On("GetKeyStore").Return(f.Mocks.keystore)
},
@@ -302,7 +302,7 @@ func TestResolver_ETHKeys(t *testing.T) {
f.Mocks.keystore.On("Eth").Return(f.Mocks.ethKs)
f.Mocks.ethClient.On("LINKBalance", mock.Anything, address, linkAddr).Return(assets.NewLinkFromJuels(12), gError)
f.Mocks.legacyEVMChains.On("Get", states[0].EVMChainID.String()).Return(f.Mocks.chain, nil)
- f.Mocks.relayerChainInterops.On("LegacyEVMChains").Return(f.Mocks.legacyEVMChains)
+ f.Mocks.relayerChainInterops.EVMChains = f.Mocks.legacyEVMChains
f.Mocks.chain.On("Client").Return(f.Mocks.ethClient)
f.Mocks.balM.On("GetEthBalance", address).Return(assets.NewEth(1))
f.Mocks.chain.On("BalanceMonitor").Return(f.Mocks.balM)
@@ -358,7 +358,7 @@ func TestResolver_ETHKeys(t *testing.T) {
f.Mocks.chain.On("BalanceMonitor").Return(nil)
f.Mocks.chain.On("Config").Return(f.Mocks.scfg)
f.Mocks.legacyEVMChains.On("Get", states[0].EVMChainID.String()).Return(f.Mocks.chain, nil)
- f.Mocks.relayerChainInterops.On("LegacyEVMChains").Return(f.Mocks.legacyEVMChains)
+ f.Mocks.relayerChainInterops.EVMChains = f.Mocks.legacyEVMChains
f.Mocks.evmORM.PutChains(toml.EVMConfig{ChainID: &chainID})
f.Mocks.keystore.On("Eth").Return(f.Mocks.ethKs)
f.App.On("GetKeyStore").Return(f.Mocks.keystore)
diff --git a/core/web/resolver/mutation.go b/core/web/resolver/mutation.go
index 68cbb0b7896..f9eee0734a3 100644
--- a/core/web/resolver/mutation.go
+++ b/core/web/resolver/mutation.go
@@ -882,7 +882,7 @@ func (r *Resolver) UpdateUserPassword(ctx context.Context, args struct {
return nil, errors.New("couldn't retrieve user session")
}
- dbUser, err := r.App.SessionORM().FindUser(session.User.Email)
+ dbUser, err := r.App.AuthenticationProvider().FindUser(session.User.Email)
if err != nil {
return nil, err
}
@@ -895,11 +895,11 @@ func (r *Resolver) UpdateUserPassword(ctx context.Context, args struct {
}), nil
}
- if err = r.App.SessionORM().ClearNonCurrentSessions(session.SessionID); err != nil {
+ if err = r.App.AuthenticationProvider().ClearNonCurrentSessions(session.SessionID); err != nil {
return nil, clearSessionsError{}
}
- err = r.App.SessionORM().SetPassword(&dbUser, args.Input.NewPassword)
+ err = r.App.AuthenticationProvider().SetPassword(&dbUser, args.Input.NewPassword)
if err != nil {
return nil, failedPasswordUpdateError{}
}
@@ -937,12 +937,13 @@ func (r *Resolver) CreateAPIToken(ctx context.Context, args struct {
if !ok {
return nil, errors.New("Failed to obtain current user from context")
}
- dbUser, err := r.App.SessionORM().FindUser(session.User.Email)
+ dbUser, err := r.App.AuthenticationProvider().FindUser(session.User.Email)
if err != nil {
return nil, err
}
- if !utils.CheckPasswordHash(args.Input.Password, dbUser.HashedPassword) {
+ err = r.App.AuthenticationProvider().TestPassword(dbUser.Email, args.Input.Password)
+ if err != nil {
r.App.GetAuditLogger().Audit(audit.APITokenCreateAttemptPasswordMismatch, map[string]interface{}{"user": dbUser.Email})
return NewCreateAPITokenPayload(nil, map[string]string{
@@ -950,7 +951,7 @@ func (r *Resolver) CreateAPIToken(ctx context.Context, args struct {
}), nil
}
- newToken, err := r.App.SessionORM().CreateAndSetAuthToken(&dbUser)
+ newToken, err := r.App.AuthenticationProvider().CreateAndSetAuthToken(&dbUser)
if err != nil {
return nil, err
}
@@ -970,12 +971,13 @@ func (r *Resolver) DeleteAPIToken(ctx context.Context, args struct {
if !ok {
return nil, errors.New("Failed to obtain current user from context")
}
- dbUser, err := r.App.SessionORM().FindUser(session.User.Email)
+ dbUser, err := r.App.AuthenticationProvider().FindUser(session.User.Email)
if err != nil {
return nil, err
}
- if !utils.CheckPasswordHash(args.Input.Password, dbUser.HashedPassword) {
+ err = r.App.AuthenticationProvider().TestPassword(dbUser.Email, args.Input.Password)
+ if err != nil {
r.App.GetAuditLogger().Audit(audit.APITokenDeleteAttemptPasswordMismatch, map[string]interface{}{"user": dbUser.Email})
return NewDeleteAPITokenPayload(nil, map[string]string{
@@ -983,7 +985,7 @@ func (r *Resolver) DeleteAPIToken(ctx context.Context, args struct {
}), nil
}
- err = r.App.SessionORM().DeleteAuthToken(&dbUser)
+ err = r.App.AuthenticationProvider().DeleteAuthToken(&dbUser)
if err != nil {
return nil, err
}
diff --git a/core/web/resolver/node_test.go b/core/web/resolver/node_test.go
index e949a67a85b..9f34b274201 100644
--- a/core/web/resolver/node_test.go
+++ b/core/web/resolver/node_test.go
@@ -5,10 +5,10 @@ import (
gqlerrors "github.com/graph-gophers/graphql-go/errors"
"github.com/pkg/errors"
- "github.com/stretchr/testify/mock"
"github.com/smartcontractkit/chainlink-relay/pkg/types"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml"
+ "github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
"github.com/smartcontractkit/chainlink/v2/core/store/models"
"github.com/smartcontractkit/chainlink/v2/core/utils"
)
@@ -43,17 +43,16 @@ func TestResolver_Nodes(t *testing.T) {
name: "success",
authenticated: true,
before: func(f *gqlTestFramework) {
- f.App.On("GetRelayers").Return(f.Mocks.relayerChainInterops)
- f.Mocks.relayerChainInterops.On("NodeStatuses", mock.Anything, PageDefaultOffset, PageDefaultLimit).Return([]types.NodeStatus{
+ f.App.On("GetRelayers").Return(chainlink.RelayerChainInteroperators(f.Mocks.relayerChainInterops))
+ f.Mocks.relayerChainInterops.Nodes = []types.NodeStatus{
{
Name: "node-name",
ChainID: chainID.String(),
Config: `Name = 'node-name'`,
},
- }, 1, nil)
+ }
f.App.On("EVMORM").Return(f.Mocks.evmORM)
f.Mocks.evmORM.PutChains(toml.EVMConfig{ChainID: &chainID})
-
},
query: query,
result: `
@@ -76,7 +75,7 @@ func TestResolver_Nodes(t *testing.T) {
name: "generic error",
authenticated: true,
before: func(f *gqlTestFramework) {
- f.Mocks.relayerChainInterops.On("NodeStatuses", mock.Anything, PageDefaultOffset, PageDefaultLimit).Return([]types.NodeStatus{}, 0, gError)
+ f.Mocks.relayerChainInterops.NodesErr = gError
f.App.On("GetRelayers").Return(f.Mocks.relayerChainInterops)
},
query: query,
diff --git a/core/web/resolver/resolver_test.go b/core/web/resolver/resolver_test.go
index d0523d6b968..85c495faaae 100644
--- a/core/web/resolver/resolver_test.go
+++ b/core/web/resolver/resolver_test.go
@@ -27,7 +27,7 @@ import (
pipelineMocks "github.com/smartcontractkit/chainlink/v2/core/services/pipeline/mocks"
webhookmocks "github.com/smartcontractkit/chainlink/v2/core/services/webhook/mocks"
clsessions "github.com/smartcontractkit/chainlink/v2/core/sessions"
- sessionsMocks "github.com/smartcontractkit/chainlink/v2/core/sessions/mocks"
+ authProviderMocks "github.com/smartcontractkit/chainlink/v2/core/sessions/mocks"
"github.com/smartcontractkit/chainlink/v2/core/web/auth"
"github.com/smartcontractkit/chainlink/v2/core/web/loader"
"github.com/smartcontractkit/chainlink/v2/core/web/schema"
@@ -37,7 +37,7 @@ type mocks struct {
bridgeORM *bridgeORMMocks.ORM
evmORM *evmtest.TestConfigs
jobORM *jobORMMocks.ORM
- sessionsORM *sessionsMocks.ORM
+ authProvider *authProviderMocks.AuthenticationProvider
pipelineORM *pipelineMocks.ORM
feedsSvc *feedsMocks.Service
cfg *chainlinkMocks.GeneralConfig
@@ -52,7 +52,7 @@ type mocks struct {
solana *keystoreMocks.Solana
chain *evmORMMocks.Chain
legacyEVMChains *evmORMMocks.LegacyChainContainer
- relayerChainInterops *chainlinkMocks.RelayerChainInteroperators
+ relayerChainInterops *chainlinkMocks.FakeRelayerChainInteroperators
ethClient *evmClientMocks.Client
eIMgr *webhookmocks.ExternalInitiatorManager
balM *evmORMMocks.BalanceMonitor
@@ -97,7 +97,7 @@ func setupFramework(t *testing.T) *gqlTestFramework {
evmORM: evmtest.NewTestConfigs(),
jobORM: jobORMMocks.NewORM(t),
feedsSvc: feedsMocks.NewService(t),
- sessionsORM: sessionsMocks.NewORM(t),
+ authProvider: authProviderMocks.NewAuthenticationProvider(t),
pipelineORM: pipelineMocks.NewORM(t),
cfg: chainlinkMocks.NewGeneralConfig(t),
scfg: evmConfigMocks.NewChainScopedConfig(t),
@@ -111,7 +111,7 @@ func setupFramework(t *testing.T) *gqlTestFramework {
solana: keystoreMocks.NewSolana(t),
chain: evmORMMocks.NewChain(t),
legacyEVMChains: evmORMMocks.NewLegacyChainContainer(t),
- relayerChainInterops: chainlinkMocks.NewRelayerChainInteroperators(t),
+ relayerChainInterops: &chainlinkMocks.FakeRelayerChainInteroperators{},
ethClient: evmClientMocks.NewClient(t),
eIMgr: webhookmocks.NewExternalInitiatorManager(t),
balM: evmORMMocks.NewBalanceMonitor(t),
diff --git a/core/web/resolver/spec.go b/core/web/resolver/spec.go
index 48040d118a7..c9ee5199229 100644
--- a/core/web/resolver/spec.go
+++ b/core/web/resolver/spec.go
@@ -164,11 +164,6 @@ func (r *DirectRequestSpecResolver) MinIncomingConfirmations() int32 {
return 0
}
-// EVMChainID resolves the spec's evm chain id.
-func (r *DirectRequestSpecResolver) MinIncomingConfirmationsEnv() bool {
- return r.spec.MinIncomingConfirmationsEnv
-}
-
// MinContractPaymentLinkJuels resolves the spec's min contract payment link.
func (r *DirectRequestSpecResolver) MinContractPaymentLinkJuels() string {
return r.spec.MinContractPayment.String()
@@ -328,12 +323,6 @@ func (r *OCRSpecResolver) BlockchainTimeout() *string {
return &timeout
}
-// BlockchainTimeoutEnv resolves whether the spec's blockchain timeout comes
-// from an env var.
-func (r *OCRSpecResolver) BlockchainTimeoutEnv() bool {
- return r.spec.BlockchainTimeoutEnv
-}
-
// ContractAddress resolves the spec's contract address.
func (r *OCRSpecResolver) ContractAddress() string {
return r.spec.ContractAddress.String()
@@ -350,12 +339,6 @@ func (r *OCRSpecResolver) ContractConfigConfirmations() *int32 {
return &confirmations
}
-// ContractConfigConfirmationsEnv resolves whether spec's confirmations
-// config comes from an env var.
-func (r *OCRSpecResolver) ContractConfigConfirmationsEnv() bool {
- return r.spec.ContractConfigConfirmationsEnv
-}
-
// ContractConfigTrackerPollInterval resolves the spec's contract tracker poll
// interval config.
func (r *OCRSpecResolver) ContractConfigTrackerPollInterval() *string {
@@ -368,12 +351,6 @@ func (r *OCRSpecResolver) ContractConfigTrackerPollInterval() *string {
return &interval
}
-// ContractConfigTrackerPollIntervalEnv resolves the whether spec's tracker poll
-// config comes from an env var.
-func (r *OCRSpecResolver) ContractConfigTrackerPollIntervalEnv() bool {
- return r.spec.ContractConfigTrackerPollIntervalEnv
-}
-
// ContractConfigTrackerSubscribeInterval resolves the spec's tracker subscribe
// interval config.
func (r *OCRSpecResolver) ContractConfigTrackerSubscribeInterval() *string {
@@ -386,12 +363,6 @@ func (r *OCRSpecResolver) ContractConfigTrackerSubscribeInterval() *string {
return &interval
}
-// ContractConfigTrackerSubscribeIntervalEnv resolves whether spec's tracker
-// subscribe interval config comes from an env var.
-func (r *OCRSpecResolver) ContractConfigTrackerSubscribeIntervalEnv() bool {
- return r.spec.ContractConfigTrackerSubscribeIntervalEnv
-}
-
// CreatedAt resolves the spec's created at timestamp.
func (r *OCRSpecResolver) CreatedAt() graphql.Time {
return graphql.Time{Time: r.spec.CreatedAt}
@@ -413,34 +384,16 @@ func (r *OCRSpecResolver) DatabaseTimeout() string {
return r.spec.DatabaseTimeout.Duration().String()
}
-// DatabaseTimeoutEnv resolves the whether spec's database timeout
-// config comes from an env var.
-func (r *OCRSpecResolver) DatabaseTimeoutEnv() bool {
- return r.spec.DatabaseTimeoutEnv
-}
-
// ObservationGracePeriod resolves the spec's observation grace period.
func (r *OCRSpecResolver) ObservationGracePeriod() string {
return r.spec.ObservationGracePeriod.Duration().String()
}
-// ObservationGracePeriodEnv resolves the whether spec's observation grace period
-// config comes from an env var.
-func (r *OCRSpecResolver) ObservationGracePeriodEnv() bool {
- return r.spec.ObservationGracePeriodEnv
-}
-
// ContractTransmitterTransmitTimeout resolves the spec's contract transmitter transmit timeout.
func (r *OCRSpecResolver) ContractTransmitterTransmitTimeout() string {
return r.spec.ContractTransmitterTransmitTimeout.Duration().String()
}
-// ContractTransmitterTransmitTimeoutEnv resolves the whether spec's
-// contract transmitter transmit timeout config comes from an env var.
-func (r *OCRSpecResolver) ContractTransmitterTransmitTimeoutEnv() bool {
- return r.spec.ContractTransmitterTransmitTimeoutEnv
-}
-
// IsBootstrapPeer resolves whether spec is a bootstrap peer.
func (r *OCRSpecResolver) IsBootstrapPeer() bool {
return r.spec.IsBootstrapPeer
@@ -468,12 +421,6 @@ func (r *OCRSpecResolver) ObservationTimeout() *string {
return &timeout
}
-// ObservationTimeoutEnv resolves whether spec's observation timeout comes
-// from an env var.
-func (r *OCRSpecResolver) ObservationTimeoutEnv() bool {
- return r.spec.ObservationTimeoutEnv
-}
-
// P2PBootstrapPeers resolves the spec's p2p bootstrap peers
func (r *OCRSpecResolver) P2PBootstrapPeers() *[]string {
if len(r.spec.P2PBootstrapPeers) == 0 {
@@ -631,11 +578,6 @@ func (r *VRFSpecResolver) MinIncomingConfirmations() int32 {
return int32(r.spec.MinIncomingConfirmations)
}
-// MinIncomingConfirmations resolves the spec's min incoming confirmations.
-func (r *VRFSpecResolver) MinIncomingConfirmationsEnv() bool {
- return r.spec.ConfirmationsEnv
-}
-
// CoordinatorAddress resolves the spec's coordinator address.
func (r *VRFSpecResolver) CoordinatorAddress() string {
return r.spec.CoordinatorAddress.String()
diff --git a/core/web/resolver/spec_test.go b/core/web/resolver/spec_test.go
index 04bfffbe05e..8e4095e171e 100644
--- a/core/web/resolver/spec_test.go
+++ b/core/web/resolver/spec_test.go
@@ -10,6 +10,7 @@ import (
"gopkg.in/guregu/null.v4"
"github.com/smartcontractkit/chainlink-relay/pkg/types"
+
"github.com/smartcontractkit/chainlink/v2/core/assets"
clnull "github.com/smartcontractkit/chainlink/v2/core/null"
"github.com/smartcontractkit/chainlink/v2/core/services/job"
@@ -91,13 +92,12 @@ func TestResolver_DirectRequestSpec(t *testing.T) {
f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{
Type: job.DirectRequest,
DirectRequestSpec: &job.DirectRequestSpec{
- ContractAddress: contractAddress,
- CreatedAt: f.Timestamp(),
- EVMChainID: utils.NewBigI(42),
- MinIncomingConfirmations: clnull.NewUint32(1, true),
- MinIncomingConfirmationsEnv: true,
- MinContractPayment: assets.NewLinkFromJuels(1000),
- Requesters: models.AddressCollection{requesterAddress},
+ ContractAddress: contractAddress,
+ CreatedAt: f.Timestamp(),
+ EVMChainID: utils.NewBigI(42),
+ MinIncomingConfirmations: clnull.NewUint32(1, true),
+ MinContractPayment: assets.NewLinkFromJuels(1000),
+ Requesters: models.AddressCollection{requesterAddress},
},
}, nil)
},
@@ -112,7 +112,6 @@ func TestResolver_DirectRequestSpec(t *testing.T) {
createdAt
evmChainID
minIncomingConfirmations
- minIncomingConfirmationsEnv
minContractPaymentLinkJuels
requesters
}
@@ -130,7 +129,6 @@ func TestResolver_DirectRequestSpec(t *testing.T) {
"createdAt": "2021-01-01T00:00:00Z",
"evmChainID": "42",
"minIncomingConfirmations": 1,
- "minIncomingConfirmationsEnv": true,
"minContractPaymentLinkJuels": "1000",
"requesters": ["0x3cCad4715152693fE3BC4460591e3D3Fbd071b42"]
}
@@ -373,30 +371,22 @@ func TestResolver_OCRSpec(t *testing.T) {
f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{
Type: job.OffchainReporting,
OCROracleSpec: &job.OCROracleSpec{
- BlockchainTimeout: models.Interval(1 * time.Minute),
- BlockchainTimeoutEnv: false,
- ContractAddress: contractAddress,
- ContractConfigConfirmations: 1,
- ContractConfigConfirmationsEnv: true,
- ContractConfigTrackerPollInterval: models.Interval(1 * time.Minute),
- ContractConfigTrackerPollIntervalEnv: false,
- ContractConfigTrackerSubscribeInterval: models.Interval(2 * time.Minute),
- ContractConfigTrackerSubscribeIntervalEnv: true,
- DatabaseTimeout: models.NewInterval(3 * time.Second),
- DatabaseTimeoutEnv: true,
- ObservationGracePeriod: models.NewInterval(4 * time.Second),
- ObservationGracePeriodEnv: true,
- ContractTransmitterTransmitTimeout: models.NewInterval(555 * time.Millisecond),
- ContractTransmitterTransmitTimeoutEnv: true,
- CreatedAt: f.Timestamp(),
- EVMChainID: utils.NewBigI(42),
- IsBootstrapPeer: false,
- EncryptedOCRKeyBundleID: &keyBundleID,
- ObservationTimeout: models.Interval(2 * time.Minute),
- ObservationTimeoutEnv: false,
- P2PBootstrapPeers: pq.StringArray{"/dns4/test.com/tcp/2001/p2pkey"},
- P2PV2Bootstrappers: pq.StringArray{"12D3KooWL3XJ9EMCyZvmmGXL2LMiVBtrVa2BuESsJiXkSj7333Jw@localhost:5001"},
- TransmitterAddress: &transmitterAddress,
+ BlockchainTimeout: models.Interval(1 * time.Minute),
+ ContractAddress: contractAddress,
+ ContractConfigConfirmations: 1,
+ ContractConfigTrackerPollInterval: models.Interval(1 * time.Minute),
+ ContractConfigTrackerSubscribeInterval: models.Interval(2 * time.Minute),
+ DatabaseTimeout: models.NewInterval(3 * time.Second),
+ ObservationGracePeriod: models.NewInterval(4 * time.Second),
+ ContractTransmitterTransmitTimeout: models.NewInterval(555 * time.Millisecond),
+ CreatedAt: f.Timestamp(),
+ EVMChainID: utils.NewBigI(42),
+ IsBootstrapPeer: false,
+ EncryptedOCRKeyBundleID: &keyBundleID,
+ ObservationTimeout: models.Interval(2 * time.Minute),
+ P2PBootstrapPeers: pq.StringArray{"/dns4/test.com/tcp/2001/p2pkey"},
+ P2PV2Bootstrappers: pq.StringArray{"12D3KooWL3XJ9EMCyZvmmGXL2LMiVBtrVa2BuESsJiXkSj7333Jw@localhost:5001"},
+ TransmitterAddress: &transmitterAddress,
},
}, nil)
},
@@ -408,26 +398,18 @@ func TestResolver_OCRSpec(t *testing.T) {
__typename
... on OCRSpec {
blockchainTimeout
- blockchainTimeoutEnv
contractAddress
contractConfigConfirmations
- contractConfigConfirmationsEnv
contractConfigTrackerPollInterval
- contractConfigTrackerPollIntervalEnv
contractConfigTrackerSubscribeInterval
- contractConfigTrackerSubscribeIntervalEnv
databaseTimeout
- databaseTimeoutEnv
observationGracePeriod
- observationGracePeriodEnv
contractTransmitterTransmitTimeout
- contractTransmitterTransmitTimeoutEnv
createdAt
evmChainID
isBootstrapPeer
keyBundleID
observationTimeout
- observationTimeoutEnv
p2pBootstrapPeers
p2pv2Bootstrappers
transmitterAddress
@@ -443,26 +425,18 @@ func TestResolver_OCRSpec(t *testing.T) {
"spec": {
"__typename": "OCRSpec",
"blockchainTimeout": "1m0s",
- "blockchainTimeoutEnv": false,
"contractAddress": "0x613a38AC1659769640aaE063C651F48E0250454C",
"contractConfigConfirmations": 1,
- "contractConfigConfirmationsEnv": true,
"contractConfigTrackerPollInterval": "1m0s",
- "contractConfigTrackerPollIntervalEnv": false,
"contractConfigTrackerSubscribeInterval": "2m0s",
- "contractConfigTrackerSubscribeIntervalEnv": true,
"databaseTimeout": "3s",
- "databaseTimeoutEnv": true,
"observationGracePeriod": "4s",
- "observationGracePeriodEnv": true,
"contractTransmitterTransmitTimeout": "555ms",
- "contractTransmitterTransmitTimeoutEnv": true,
"createdAt": "2021-01-01T00:00:00Z",
"evmChainID": "42",
"isBootstrapPeer": false,
"keyBundleID": "f5bf259689b26f1374efb3c9a9868796953a0f814bb2d39b968d0e61b58620a5",
"observationTimeout": "2m0s",
- "observationTimeoutEnv": false,
"p2pBootstrapPeers": ["/dns4/test.com/tcp/2001/p2pkey"],
"p2pv2Bootstrappers": ["12D3KooWL3XJ9EMCyZvmmGXL2LMiVBtrVa2BuESsJiXkSj7333Jw@localhost:5001"],
"transmitterAddress": "0x3cCad4715152693fE3BC4460591e3D3Fbd071b42"
diff --git a/core/web/resolver/testdata/config-empty-effective.toml b/core/web/resolver/testdata/config-empty-effective.toml
index 48d432138a8..f5d775fe744 100644
--- a/core/web/resolver/testdata/config-empty-effective.toml
+++ b/core/web/resolver/testdata/config-empty-effective.toml
@@ -61,6 +61,7 @@ MaxAgeDays = 0
MaxBackups = 1
[WebServer]
+AuthenticationMethod = 'local'
AllowOrigins = 'http://localhost:3000,http://localhost:6688'
BridgeResponseURL = ''
BridgeCacheTTL = '0s'
@@ -73,6 +74,25 @@ HTTPMaxSize = '32.77kb'
StartTimeout = '15s'
ListenIP = '0.0.0.0'
+[WebServer.LDAP]
+ServerTLS = true
+SessionTimeout = '15m0s'
+QueryTimeout = '2m0s'
+BaseUserAttr = 'uid'
+BaseDN = ''
+UsersDN = 'ou=users'
+GroupsDN = 'ou=groups'
+ActiveAttribute = ''
+ActiveAttributeAllowedValue = ''
+AdminUserGroupCN = 'NodeAdmins'
+EditUserGroupCN = 'NodeEditors'
+RunUserGroupCN = 'NodeRunners'
+ReadUserGroupCN = 'NodeReadOnly'
+UserApiTokenEnabled = false
+UserAPITokenDuration = '240h0m0s'
+UpstreamSyncInterval = '0s'
+UpstreamSyncRateLimit = '2m0s'
+
[WebServer.MFA]
RPID = ''
RPOrigin = ''
diff --git a/core/web/resolver/testdata/config-full.toml b/core/web/resolver/testdata/config-full.toml
index 4b53396b94c..95d898c353b 100644
--- a/core/web/resolver/testdata/config-full.toml
+++ b/core/web/resolver/testdata/config-full.toml
@@ -67,6 +67,7 @@ MaxAgeDays = 17
MaxBackups = 9
[WebServer]
+AuthenticationMethod = 'local'
AllowOrigins = '*'
BridgeResponseURL = 'https://bridge.response'
BridgeCacheTTL = '10s'
@@ -79,6 +80,25 @@ HTTPMaxSize = '32.77kb'
StartTimeout = '15s'
ListenIP = '192.158.1.37'
+[WebServer.LDAP]
+ServerTLS = true
+SessionTimeout = '15m0s'
+QueryTimeout = '2m0s'
+BaseUserAttr = 'uid'
+BaseDN = ''
+UsersDN = 'ou=users'
+GroupsDN = 'ou=groups'
+ActiveAttribute = ''
+ActiveAttributeAllowedValue = ''
+AdminUserGroupCN = 'NodeAdmins'
+EditUserGroupCN = 'NodeEditors'
+RunUserGroupCN = 'NodeRunners'
+ReadUserGroupCN = 'NodeReadOnly'
+UserApiTokenEnabled = false
+UserAPITokenDuration = '240h0m0s'
+UpstreamSyncInterval = '0s'
+UpstreamSyncRateLimit = '2m0s'
+
[WebServer.MFA]
RPID = 'test-rpid'
RPOrigin = 'test-rp-origin'
@@ -257,7 +277,7 @@ ResendAfterThreshold = '1h0m0s'
Enabled = true
[EVM.GasEstimator]
-Mode = 'L2Suggested'
+Mode = 'SuggestedPrice'
PriceDefault = '9.223372036854775807 ether'
PriceMax = '281.474976710655 micro'
PriceMin = '13 wei'
diff --git a/core/web/resolver/testdata/config-multi-chain-effective.toml b/core/web/resolver/testdata/config-multi-chain-effective.toml
index 1dcbfe3a830..9dd0be8f5d2 100644
--- a/core/web/resolver/testdata/config-multi-chain-effective.toml
+++ b/core/web/resolver/testdata/config-multi-chain-effective.toml
@@ -61,6 +61,7 @@ MaxAgeDays = 0
MaxBackups = 1
[WebServer]
+AuthenticationMethod = 'local'
AllowOrigins = 'http://localhost:3000,http://localhost:6688'
BridgeResponseURL = ''
BridgeCacheTTL = '0s'
@@ -73,6 +74,25 @@ HTTPMaxSize = '32.77kb'
StartTimeout = '15s'
ListenIP = '0.0.0.0'
+[WebServer.LDAP]
+ServerTLS = true
+SessionTimeout = '15m0s'
+QueryTimeout = '2m0s'
+BaseUserAttr = 'uid'
+BaseDN = ''
+UsersDN = 'ou=users'
+GroupsDN = 'ou=groups'
+ActiveAttribute = ''
+ActiveAttributeAllowedValue = ''
+AdminUserGroupCN = 'NodeAdmins'
+EditUserGroupCN = 'NodeEditors'
+RunUserGroupCN = 'NodeRunners'
+ReadUserGroupCN = 'NodeReadOnly'
+UserApiTokenEnabled = false
+UserAPITokenDuration = '240h0m0s'
+UpstreamSyncInterval = '0s'
+UpstreamSyncRateLimit = '2m0s'
+
[WebServer.MFA]
RPID = ''
RPOrigin = ''
diff --git a/core/web/resolver/user_test.go b/core/web/resolver/user_test.go
index e3808eebcbb..bc64beeb459 100644
--- a/core/web/resolver/user_test.go
+++ b/core/web/resolver/user_test.go
@@ -53,10 +53,10 @@ func TestResolver_UpdateUserPassword(t *testing.T) {
session.User.HashedPassword = pwd
- f.Mocks.sessionsORM.On("FindUser", session.User.Email).Return(*session.User, nil)
- f.Mocks.sessionsORM.On("SetPassword", session.User, "new").Return(nil)
- f.Mocks.sessionsORM.On("ClearNonCurrentSessions", session.SessionID).Return(nil)
- f.App.On("SessionORM").Return(f.Mocks.sessionsORM)
+ f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, nil)
+ f.Mocks.authProvider.On("SetPassword", session.User, "new").Return(nil)
+ f.Mocks.authProvider.On("ClearNonCurrentSessions", session.SessionID).Return(nil)
+ f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider)
},
query: mutation,
variables: variables,
@@ -79,8 +79,8 @@ func TestResolver_UpdateUserPassword(t *testing.T) {
session.User.HashedPassword = "random-string"
- f.Mocks.sessionsORM.On("FindUser", session.User.Email).Return(*session.User, nil)
- f.App.On("SessionORM").Return(f.Mocks.sessionsORM)
+ f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, nil)
+ f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider)
},
query: mutation,
variables: variables,
@@ -108,11 +108,11 @@ func TestResolver_UpdateUserPassword(t *testing.T) {
session.User.HashedPassword = pwd
- f.Mocks.sessionsORM.On("FindUser", session.User.Email).Return(*session.User, nil)
- f.Mocks.sessionsORM.On("ClearNonCurrentSessions", session.SessionID).Return(
+ f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, nil)
+ f.Mocks.authProvider.On("ClearNonCurrentSessions", session.SessionID).Return(
clearSessionsError{},
)
- f.App.On("SessionORM").Return(f.Mocks.sessionsORM)
+ f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider)
},
query: mutation,
variables: variables,
@@ -139,10 +139,10 @@ func TestResolver_UpdateUserPassword(t *testing.T) {
session.User.HashedPassword = pwd
- f.Mocks.sessionsORM.On("FindUser", session.User.Email).Return(*session.User, nil)
- f.Mocks.sessionsORM.On("ClearNonCurrentSessions", session.SessionID).Return(nil)
- f.Mocks.sessionsORM.On("SetPassword", session.User, "new").Return(failedPasswordUpdateError{})
- f.App.On("SessionORM").Return(f.Mocks.sessionsORM)
+ f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, nil)
+ f.Mocks.authProvider.On("ClearNonCurrentSessions", session.SessionID).Return(nil)
+ f.Mocks.authProvider.On("SetPassword", session.User, "new").Return(failedPasswordUpdateError{})
+ f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider)
},
query: mutation,
variables: variables,
diff --git a/core/web/router.go b/core/web/router.go
index a873f14b708..28bd4f2170c 100644
--- a/core/web/router.go
+++ b/core/web/router.go
@@ -90,7 +90,7 @@ func NewRouter(app chainlink.Application, prometheus *ginprom.Prometheus) (*gin.
guiAssetRoutes(engine, config.Insecure().DisableRateLimiting(), app.GetLogger())
api.POST("/query",
- auth.AuthenticateGQL(app.SessionORM(), app.GetLogger().Named("GQLHandler")),
+ auth.AuthenticateGQL(app.AuthenticationProvider(), app.GetLogger().Named("GQLHandler")),
loader.Middleware(app),
graphqlHandler(app),
)
@@ -170,7 +170,7 @@ func secureMiddleware(tlsRedirect bool, tlsHost string, devWebServer bool) gin.H
}
func debugRoutes(app chainlink.Application, r *gin.RouterGroup) {
- group := r.Group("/debug", auth.Authenticate(app.SessionORM(), auth.AuthenticateBySession))
+ group := r.Group("/debug", auth.Authenticate(app.AuthenticationProvider(), auth.AuthenticateBySession))
group.GET("/vars", expvar.Handler())
}
@@ -207,7 +207,7 @@ func sessionRoutes(app chainlink.Application, r *gin.RouterGroup) {
))
sc := NewSessionsController(app)
unauth.POST("/sessions", sc.Create)
- auth := r.Group("/", auth.Authenticate(app.SessionORM(), auth.AuthenticateBySession))
+ auth := r.Group("/", auth.Authenticate(app.AuthenticationProvider(), auth.AuthenticateBySession))
auth.DELETE("/sessions", sc.Destroy)
}
@@ -231,7 +231,7 @@ func v2Routes(app chainlink.Application, r *gin.RouterGroup) {
psec := PipelineJobSpecErrorsController{app}
unauthedv2.PATCH("/resume/:runID", prc.Resume)
- authv2 := r.Group("/v2", auth.Authenticate(app.SessionORM(),
+ authv2 := r.Group("/v2", auth.Authenticate(app.AuthenticationProvider(),
auth.AuthenticateByToken,
auth.AuthenticateBySession,
))
@@ -301,7 +301,7 @@ func v2Routes(app chainlink.Application, r *gin.RouterGroup) {
// duplicated from above, with `evm` instead of `eth`
// legacy ones remain for backwards compatibility
- ethKeysGroup := authv2.Group("", auth.Authenticate(app.SessionORM(),
+ ethKeysGroup := authv2.Group("", auth.Authenticate(app.AuthenticationProvider(),
auth.AuthenticateByToken,
auth.AuthenticateBySession,
))
@@ -427,7 +427,7 @@ func v2Routes(app chainlink.Application, r *gin.RouterGroup) {
}
ping := PingController{app}
- userOrEI := r.Group("/v2", auth.Authenticate(app.SessionORM(),
+ userOrEI := r.Group("/v2", auth.Authenticate(app.AuthenticationProvider(),
auth.AuthenticateExternalInitiator,
auth.AuthenticateByToken,
auth.AuthenticateBySession,
diff --git a/core/web/schema/type/spec.graphql b/core/web/schema/type/spec.graphql
index cdcbabf9ef0..98203a1870e 100644
--- a/core/web/schema/type/spec.graphql
+++ b/core/web/schema/type/spec.graphql
@@ -22,7 +22,6 @@ type DirectRequestSpec {
createdAt: Time!
evmChainID: String
minIncomingConfirmations: Int!
- minIncomingConfirmationsEnv: Boolean!
minContractPaymentLinkJuels: String!
requesters: [String!]
}
@@ -52,29 +51,21 @@ type KeeperSpec {
type OCRSpec {
blockchainTimeout: String
- blockchainTimeoutEnv: Boolean!
contractAddress: String!
contractConfigConfirmations: Int
- contractConfigConfirmationsEnv: Boolean!
contractConfigTrackerPollInterval: String
- contractConfigTrackerPollIntervalEnv: Boolean!
contractConfigTrackerSubscribeInterval: String
- contractConfigTrackerSubscribeIntervalEnv: Boolean!
createdAt: Time!
evmChainID: String
isBootstrapPeer: Boolean!
keyBundleID: String
observationTimeout: String
- observationTimeoutEnv: Boolean!
p2pBootstrapPeers: [String!]
p2pv2Bootstrappers: [String!]
transmitterAddress: String
databaseTimeout: String!
- databaseTimeoutEnv: Boolean!
observationGracePeriod: String!
- observationGracePeriodEnv: Boolean!
contractTransmitterTransmitTimeout: String!
- contractTransmitterTransmitTimeoutEnv: Boolean!
}
type OCR2Spec {
@@ -100,7 +91,6 @@ type VRFSpec {
evmChainID: String
fromAddresses: [String!]
minIncomingConfirmations: Int!
- minIncomingConfirmationsEnv: Boolean!
pollPeriod: String!
publicKey: String!
requestedConfsDelay: Int!
diff --git a/core/web/sessions_controller.go b/core/web/sessions_controller.go
index 6f029456bd1..23ecfd3b798 100644
--- a/core/web/sessions_controller.go
+++ b/core/web/sessions_controller.go
@@ -39,7 +39,7 @@ func (sc *SessionsController) Create(c *gin.Context) {
}
// Does this user have 2FA enabled?
- userWebAuthnTokens, err := sc.App.SessionORM().GetUserWebAuthn(sr.Email)
+ userWebAuthnTokens, err := sc.App.AuthenticationProvider().GetUserWebAuthn(sr.Email)
if err != nil {
sc.App.GetLogger().Errorf("Error loading user WebAuthn data: %s", err)
jsonAPIError(c, http.StatusInternalServerError, errors.New("internal Server Error"))
@@ -53,7 +53,7 @@ func (sc *SessionsController) Create(c *gin.Context) {
sr.WebAuthnConfig = sc.App.GetWebAuthnConfiguration()
}
- sid, err := sc.App.SessionORM().CreateSession(sr)
+ sid, err := sc.App.AuthenticationProvider().CreateSession(sr)
if err != nil {
jsonAPIError(c, http.StatusUnauthorized, err)
return
@@ -78,7 +78,7 @@ func (sc *SessionsController) Destroy(c *gin.Context) {
jsonAPIResponse(c, Session{Authenticated: false}, "session")
return
}
- if err := sc.App.SessionORM().DeleteUserSession(sessionID); err != nil {
+ if err := sc.App.AuthenticationProvider().DeleteUserSession(sessionID); err != nil {
jsonAPIError(c, http.StatusInternalServerError, err)
return
}
diff --git a/core/web/sessions_controller_test.go b/core/web/sessions_controller_test.go
index 7184e3f95b4..c2950caf3d1 100644
--- a/core/web/sessions_controller_test.go
+++ b/core/web/sessions_controller_test.go
@@ -27,7 +27,7 @@ func TestSessionsController_Create(t *testing.T) {
require.NoError(t, app.Start(testutils.Context(t)))
user := cltest.MustRandomUser(t)
- require.NoError(t, app.SessionORM().CreateUser(&user))
+ require.NoError(t, app.AuthenticationProvider().CreateUser(&user))
client := clhttptest.NewTestLocalOnlyHTTPClient()
tests := []struct {
@@ -59,7 +59,7 @@ func TestSessionsController_Create(t *testing.T) {
decrypted, err := cltest.DecodeSessionCookie(sessionCookie.Value)
require.NoError(t, err)
- user, err := app.SessionORM().AuthorizedUserWithSession(decrypted)
+ user, err := app.AuthenticationProvider().AuthorizedUserWithSession(decrypted)
assert.NoError(t, err)
assert.Equal(t, test.email, user.Email)
@@ -69,7 +69,7 @@ func TestSessionsController_Create(t *testing.T) {
} else {
require.True(t, resp.StatusCode >= 400, "Should not be able to create session")
// Ignore fixture session
- sessions, err := app.SessionORM().Sessions(1, 2)
+ sessions, err := app.AuthenticationProvider().Sessions(1, 2)
assert.NoError(t, err)
assert.Empty(t, sessions)
}
@@ -90,7 +90,7 @@ func TestSessionsController_Create_ReapSessions(t *testing.T) {
require.NoError(t, app.Start(testutils.Context(t)))
user := cltest.MustRandomUser(t)
- require.NoError(t, app.SessionORM().CreateUser(&user))
+ require.NoError(t, app.AuthenticationProvider().CreateUser(&user))
staleSession := cltest.NewSession()
staleSession.LastUsed = time.Now().Add(-cltest.MustParseDuration(t, "241h"))
@@ -107,7 +107,7 @@ func TestSessionsController_Create_ReapSessions(t *testing.T) {
var s []sessions.Session
gomega.NewWithT(t).Eventually(func() []sessions.Session {
- s, err = app.SessionORM().Sessions(0, 10)
+ s, err = app.AuthenticationProvider().Sessions(0, 10)
assert.NoError(t, err)
return s
}).Should(gomega.HaveLen(1))
@@ -124,7 +124,7 @@ func TestSessionsController_Destroy(t *testing.T) {
require.NoError(t, app.Start(testutils.Context(t)))
user := cltest.MustRandomUser(t)
- require.NoError(t, app.SessionORM().CreateUser(&user))
+ require.NoError(t, app.AuthenticationProvider().CreateUser(&user))
correctSession := sessions.NewSession()
correctSession.Email = user.Email
@@ -150,7 +150,7 @@ func TestSessionsController_Destroy(t *testing.T) {
resp, err := client.Do(request)
assert.NoError(t, err)
- _, err = app.SessionORM().AuthorizedUserWithSession(test.sessionID)
+ _, err = app.AuthenticationProvider().AuthorizedUserWithSession(test.sessionID)
assert.Error(t, err)
if test.success {
assert.Equal(t, http.StatusOK, resp.StatusCode)
@@ -170,7 +170,7 @@ func TestSessionsController_Destroy_ReapSessions(t *testing.T) {
require.NoError(t, app.Start(testutils.Context(t)))
user := cltest.MustRandomUser(t)
- require.NoError(t, app.SessionORM().CreateUser(&user))
+ require.NoError(t, app.AuthenticationProvider().CreateUser(&user))
correctSession := sessions.NewSession()
correctSession.Email = user.Email
@@ -192,7 +192,7 @@ func TestSessionsController_Destroy_ReapSessions(t *testing.T) {
assert.Equal(t, http.StatusOK, resp.StatusCode)
gomega.NewWithT(t).Eventually(func() []sessions.Session {
- sessions, err := app.SessionORM().Sessions(0, 10)
+ sessions, err := app.AuthenticationProvider().Sessions(0, 10)
assert.NoError(t, err)
return sessions
}).Should(gomega.HaveLen(0))
diff --git a/core/web/user_controller.go b/core/web/user_controller.go
index 115971eafc7..857fff7b37f 100644
--- a/core/web/user_controller.go
+++ b/core/web/user_controller.go
@@ -30,10 +30,16 @@ type UpdatePasswordRequest struct {
NewPassword string `json:"newPassword"`
}
+var errUnsupportedForAuth = errors.New("action is unsupported with configured authentication provider")
+
// Index lists all API users
func (c *UserController) Index(ctx *gin.Context) {
- users, err := c.App.SessionORM().ListUsers()
+ users, err := c.App.AuthenticationProvider().ListUsers()
if err != nil {
+ if errors.Is(err, clsession.ErrNotSupported) {
+ jsonAPIError(ctx, http.StatusBadRequest, errUnsupportedForAuth)
+ return
+ }
c.App.GetLogger().Errorf("Unable to list users", "err", err)
jsonAPIError(ctx, http.StatusInternalServerError, err)
return
@@ -76,7 +82,7 @@ func (c *UserController) Create(ctx *gin.Context) {
jsonAPIError(ctx, http.StatusBadRequest, errors.Errorf("error creating API user: %s", err))
return
}
- if err = c.App.SessionORM().CreateUser(&user); err != nil {
+ if err = c.App.AuthenticationProvider().CreateUser(&user); err != nil {
// If this is a duplicate key error (code 23505), return a nicer error message
var pgErr *pgconn.PgError
if ok := errors.As(err, &pgErr); ok {
@@ -85,6 +91,10 @@ func (c *UserController) Create(ctx *gin.Context) {
return
}
}
+ if errors.Is(err, clsession.ErrNotSupported) {
+ jsonAPIError(ctx, http.StatusBadRequest, errUnsupportedForAuth)
+ return
+ }
c.App.GetLogger().Errorf("Error creating new API user", "err", err)
jsonAPIError(ctx, http.StatusInternalServerError, errors.New("error creating API user"))
return
@@ -132,8 +142,12 @@ func (c *UserController) UpdateRole(ctx *gin.Context) {
return
}
- user, err := c.App.SessionORM().UpdateRole(request.Email, request.NewRole)
+ user, err := c.App.AuthenticationProvider().UpdateRole(request.Email, request.NewRole)
if err != nil {
+ if errors.Is(err, clsession.ErrNotSupported) {
+ jsonAPIError(ctx, http.StatusBadRequest, errUnsupportedForAuth)
+ return
+ }
jsonAPIError(ctx, http.StatusInternalServerError, errors.Wrap(err, "error updating API user"))
return
}
@@ -146,8 +160,12 @@ func (c *UserController) Delete(ctx *gin.Context) {
email := ctx.Param("email")
// Attempt find user by email
- user, err := c.App.SessionORM().FindUser(email)
+ user, err := c.App.AuthenticationProvider().FindUser(email)
if err != nil {
+ if errors.Is(err, clsession.ErrNotSupported) {
+ jsonAPIError(ctx, http.StatusBadRequest, errUnsupportedForAuth)
+ return
+ }
jsonAPIError(ctx, http.StatusBadRequest, errors.Errorf("specified user not found: %s", email))
return
}
@@ -163,7 +181,11 @@ func (c *UserController) Delete(ctx *gin.Context) {
return
}
- if err = c.App.SessionORM().DeleteUser(email); err != nil {
+ if err = c.App.AuthenticationProvider().DeleteUser(email); err != nil {
+ if errors.Is(err, clsession.ErrNotSupported) {
+ jsonAPIError(ctx, http.StatusBadRequest, errUnsupportedForAuth)
+ return
+ }
c.App.GetLogger().Errorf("Error deleting API user", "err", err)
jsonAPIError(ctx, http.StatusInternalServerError, errors.New("error deleting API user"))
return
@@ -185,8 +207,12 @@ func (c *UserController) UpdatePassword(ctx *gin.Context) {
jsonAPIError(ctx, http.StatusInternalServerError, errors.New("failed to obtain current user from context"))
return
}
- user, err := c.App.SessionORM().FindUser(sessionUser.Email)
+ user, err := c.App.AuthenticationProvider().FindUser(sessionUser.Email)
if err != nil {
+ if errors.Is(err, clsession.ErrNotSupported) {
+ jsonAPIError(ctx, http.StatusBadRequest, errUnsupportedForAuth)
+ return
+ }
c.App.GetLogger().Errorf("failed to obtain current user record: %s", err)
jsonAPIError(ctx, http.StatusInternalServerError, errors.New("unable to update password"))
return
@@ -222,19 +248,29 @@ func (c *UserController) NewAPIToken(ctx *gin.Context) {
jsonAPIError(ctx, http.StatusInternalServerError, errors.New("failed to obtain current user from context"))
return
}
- user, err := c.App.SessionORM().FindUser(sessionUser.Email)
+ user, err := c.App.AuthenticationProvider().FindUser(sessionUser.Email)
if err != nil {
+ if errors.Is(err, clsession.ErrNotSupported) {
+ jsonAPIError(ctx, http.StatusBadRequest, errUnsupportedForAuth)
+ return
+ }
c.App.GetLogger().Errorf("failed to obtain current user record: %s", err)
- jsonAPIError(ctx, http.StatusInternalServerError, errors.New("unable to creatae API token"))
+ jsonAPIError(ctx, http.StatusInternalServerError, errors.New("unable to create API token"))
return
}
- if !utils.CheckPasswordHash(request.Password, user.HashedPassword) {
+ // In order to create an API token, login validation with provided password must succeed
+ err = c.App.AuthenticationProvider().TestPassword(sessionUser.Email, request.Password)
+ if err != nil {
c.App.GetAuditLogger().Audit(audit.APITokenCreateAttemptPasswordMismatch, map[string]interface{}{"user": user.Email})
jsonAPIError(ctx, http.StatusUnauthorized, errors.New("incorrect password"))
return
}
newToken := auth.NewToken()
- if err := c.App.SessionORM().SetAuthToken(&user, newToken); err != nil {
+ if err := c.App.AuthenticationProvider().SetAuthToken(&user, newToken); err != nil {
+ if errors.Is(err, clsession.ErrNotSupported) {
+ jsonAPIError(ctx, http.StatusBadRequest, errUnsupportedForAuth)
+ return
+ }
jsonAPIError(ctx, http.StatusInternalServerError, err)
return
}
@@ -256,18 +292,27 @@ func (c *UserController) DeleteAPIToken(ctx *gin.Context) {
jsonAPIError(ctx, http.StatusInternalServerError, errors.New("failed to obtain current user from context"))
return
}
- user, err := c.App.SessionORM().FindUser(sessionUser.Email)
+ user, err := c.App.AuthenticationProvider().FindUser(sessionUser.Email)
if err != nil {
+ if errors.Is(err, clsession.ErrNotSupported) {
+ jsonAPIError(ctx, http.StatusBadRequest, errUnsupportedForAuth)
+ return
+ }
c.App.GetLogger().Errorf("failed to obtain current user record: %s", err)
jsonAPIError(ctx, http.StatusInternalServerError, errors.New("unable to delete API token"))
return
}
- if !utils.CheckPasswordHash(request.Password, user.HashedPassword) {
+ err = c.App.AuthenticationProvider().TestPassword(sessionUser.Email, request.Password)
+ if err != nil {
c.App.GetAuditLogger().Audit(audit.APITokenDeleteAttemptPasswordMismatch, map[string]interface{}{"user": user.Email})
jsonAPIError(ctx, http.StatusUnauthorized, errors.New("incorrect password"))
return
}
- if err := c.App.SessionORM().DeleteAuthToken(&user); err != nil {
+ if err := c.App.AuthenticationProvider().DeleteAuthToken(&user); err != nil {
+ if errors.Is(err, clsession.ErrNotSupported) {
+ jsonAPIError(ctx, http.StatusBadRequest, errUnsupportedForAuth)
+ return
+ }
jsonAPIError(ctx, http.StatusInternalServerError, err)
return
}
@@ -291,12 +336,15 @@ func (c *UserController) updateUserPassword(ctx *gin.Context, user *clsession.Us
if err != nil {
return err
}
- orm := c.App.SessionORM()
+ orm := c.App.AuthenticationProvider()
if err := orm.ClearNonCurrentSessions(sessionID); err != nil {
c.App.GetLogger().Errorf("failed to clear non current user sessions: %s", err)
return errors.New("unable to update password")
}
if err := orm.SetPassword(user, newPassword); err != nil {
+ if errors.Is(err, clsession.ErrNotSupported) {
+ return errUnsupportedForAuth
+ }
c.App.GetLogger().Errorf("failed to update current user password: %s", err)
return errors.New("unable to update password")
}
diff --git a/core/web/user_controller_test.go b/core/web/user_controller_test.go
index a11082ff6a4..6baab1c396a 100644
--- a/core/web/user_controller_test.go
+++ b/core/web/user_controller_test.go
@@ -188,7 +188,7 @@ func TestUserController_UpdateRole(t *testing.T) {
client := app.NewHTTPClient(nil)
user := cltest.MustRandomUser(t)
- err := app.SessionORM().CreateUser(&user)
+ err := app.AuthenticationProvider().CreateUser(&user)
require.NoError(t, err)
testCases := []struct {
@@ -235,7 +235,7 @@ func TestUserController_DeleteUser(t *testing.T) {
client := app.NewHTTPClient(nil)
user := cltest.MustRandomUser(t)
- err := app.SessionORM().CreateUser(&user)
+ err := app.AuthenticationProvider().CreateUser(&user)
require.NoError(t, err)
resp, cleanup := client.Delete(fmt.Sprintf("/v2/users/%s", url.QueryEscape(user.Email)))
diff --git a/core/web/webauthn_controller.go b/core/web/webauthn_controller.go
index 05090013237..41c8f268ad4 100644
--- a/core/web/webauthn_controller.go
+++ b/core/web/webauthn_controller.go
@@ -36,7 +36,7 @@ func (c *WebAuthnController) BeginRegistration(ctx *gin.Context) {
return
}
- orm := c.App.SessionORM()
+ orm := c.App.AuthenticationProvider()
uwas, err := orm.GetUserWebAuthn(user.Email)
if err != nil {
c.App.GetLogger().Errorf("failed to obtain current user MFA tokens: error in GetUserWebAuthn: %+v", err)
@@ -66,7 +66,7 @@ func (c *WebAuthnController) FinishRegistration(ctx *gin.Context) {
return
}
- orm := c.App.SessionORM()
+ orm := c.App.AuthenticationProvider()
uwas, err := orm.GetUserWebAuthn(user.Email)
if err != nil {
c.App.GetLogger().Errorf("failed to obtain current user MFA tokens: error in GetUserWebAuthn: %s", err)
@@ -83,7 +83,7 @@ func (c *WebAuthnController) FinishRegistration(ctx *gin.Context) {
return
}
- if sessions.AddCredentialToUser(c.App.SessionORM(), user.Email, credential) != nil {
+ if sessions.AddCredentialToUser(c.App.AuthenticationProvider(), user.Email, credential) != nil {
c.App.GetLogger().Errorf("Could not save WebAuthn credential to DB for user: %s", user.Email)
jsonAPIError(ctx, http.StatusInternalServerError, errors.New("internal Server Error"))
return
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index daeddf2ce66..a10f9dd1c6d 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -9,6 +9,31 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [dev]
+### Added
+
+- Added a new, optional WebServer authentication option that supports LDAP as a user identity provider. This enables user login access and user roles to be managed and provisioned via a centralized remote server that supports the LDAP protocol, which can be helpful when running multiple nodes. See the documentation for more information and config setup instructions. There is a new `[WebServer].AuthenticationMethod` config option, when set to `ldap` requires the new `[WebServer.LDAP]` config section to be defined, see the reference `docs/core.toml`.
+- New prom metrics for mercury:
+ `mercury_transmit_queue_delete_error_count`
+ `mercury_transmit_queue_insert_error_count`
+ `mercury_transmit_queue_push_error_count`
+ Nops should consider alerting on these.
+
+
+### Changed
+
+- `L2Suggested` mode is now called `SuggestedPrice`
+
+### Removed
+
+- Removed `Optimism2` as a supported gas estimator mode
+
+### Added
+
+- Mercury v0.2 has improved consensus around current block that uses the most recent 5 blocks instead of only the latest one
+- Two new prom metrics for mercury, nops should consider adding alerting on these:
+ - `mercury_insufficient_blocks_count`
+ - `mercury_zero_blocks_count`
+
...
## 2.7.0 - UNRELEASED
diff --git a/docs/CONFIG.md b/docs/CONFIG.md
index da986e0500f..1eb9cd5023d 100644
--- a/docs/CONFIG.md
+++ b/docs/CONFIG.md
@@ -459,6 +459,7 @@ MaxBackups determines the maximum number of old log files to retain. Keeping thi
## WebServer
```toml
[WebServer]
+AuthenticationMethod = 'local' # Default
AllowOrigins = 'http://localhost:3000,http://localhost:6688' # Default
BridgeCacheTTL = '0s' # Default
BridgeResponseURL = 'https://my-chainlink-node.example.com:6688' # Example
@@ -473,6 +474,12 @@ ListenIP = '0.0.0.0' # Default
```
+### AuthenticationMethod
+```toml
+AuthenticationMethod = 'local' # Default
+```
+AuthenticationMethod defines which pluggable auth interface to use for user login and role assumption. Options include 'local' and 'ldap'. See the docs for more details.
+
### AllowOrigins
```toml
AllowOrigins = 'http://localhost:3000,http://localhost:6688' # Default
@@ -546,6 +553,132 @@ ListenIP = '0.0.0.0' # Default
```
ListenIP specifies the IP to bind the HTTP server to
+## WebServer.LDAP
+```toml
+[WebServer.LDAP]
+ServerTLS = true # Default
+SessionTimeout = '15m0s' # Default
+QueryTimeout = '2m0s' # Default
+BaseUserAttr = 'uid' # Default
+BaseDN = 'dc=custom,dc=example,dc=com' # Example
+UsersDN = 'ou=users' # Default
+GroupsDN = 'ou=groups' # Default
+ActiveAttribute = '' # Default
+ActiveAttributeAllowedValue = '' # Default
+AdminUserGroupCN = 'NodeAdmins' # Default
+EditUserGroupCN = 'NodeEditors' # Default
+RunUserGroupCN = 'NodeRunners' # Default
+ReadUserGroupCN = 'NodeReadOnly' # Default
+UserApiTokenEnabled = false # Default
+UserAPITokenDuration = '240h0m0s' # Default
+UpstreamSyncInterval = '0s' # Default
+UpstreamSyncRateLimit = '2m0s' # Default
+```
+Optional LDAP config; required if WebServer.AuthenticationMethod is set to 'ldap'
+LDAP queries are all parameterized to support custom LDAP 'dn', 'cn', and attributes
+
+### ServerTLS
+```toml
+ServerTLS = true # Default
+```
+ServerTLS defines whether to require a secure ldaps connection to the LDAP server
+
+### SessionTimeout
+```toml
+SessionTimeout = '15m0s' # Default
+```
+SessionTimeout determines the amount of idle time to elapse before session cookies expire. This signs out GUI users from their sessions.
+
+### QueryTimeout
+```toml
+QueryTimeout = '2m0s' # Default
+```
+QueryTimeout defines how long LDAP queries should wait before timing out
+
+### BaseUserAttr
+```toml
+BaseUserAttr = 'uid' # Default
+```
+BaseUserAttr defines the base user attribute used to populate LDAP queries such as "uid=$"; the default is 'uid'
+
+### BaseDN
+```toml
+BaseDN = 'dc=custom,dc=example,dc=com' # Example
+```
+BaseDN defines the base LDAP 'dn' search filter to apply to every LDAP query; replace the 'dc=example,dc=com' portion with the appropriate structure for your LDAP server
+
+### UsersDN
+```toml
+UsersDN = 'ou=users' # Default
+```
+UsersDN defines the 'dn' query to use when querying for the 'users' 'ou' group
+
+### GroupsDN
+```toml
+GroupsDN = 'ou=groups' # Default
+```
+GroupsDN defines the 'dn' query to use when querying for the 'groups' 'ou' group
+
+### ActiveAttribute
+```toml
+ActiveAttribute = '' # Default
+```
+ActiveAttribute is an optional user field to check truthiness for if a user is valid/active. This is only required if the LDAP provider lists inactive users as members of groups
+
+### ActiveAttributeAllowedValue
+```toml
+ActiveAttributeAllowedValue = '' # Default
+```
+ActiveAttributeAllowedValue is the value to check against for the above optional user attribute
+
+### AdminUserGroupCN
+```toml
+AdminUserGroupCN = 'NodeAdmins' # Default
+```
+AdminUserGroupCN is the LDAP 'cn' of the LDAP group that maps the core node's 'Admin' role
+
+### EditUserGroupCN
+```toml
+EditUserGroupCN = 'NodeEditors' # Default
+```
+EditUserGroupCN is the LDAP 'cn' of the LDAP group that maps the core node's 'Edit' role
+
+### RunUserGroupCN
+```toml
+RunUserGroupCN = 'NodeRunners' # Default
+```
+RunUserGroupCN is the LDAP 'cn' of the LDAP group that maps the core node's 'Run' role
+
+### ReadUserGroupCN
+```toml
+ReadUserGroupCN = 'NodeReadOnly' # Default
+```
+ReadUserGroupCN is the LDAP 'cn' of the LDAP group that maps the core node's 'Read' role
+
+### UserApiTokenEnabled
+```toml
+UserApiTokenEnabled = false # Default
+```
+UserApiTokenEnabled allows users to issue API tokens with the same access as their role
+
+### UserAPITokenDuration
+```toml
+UserAPITokenDuration = '240h0m0s' # Default
+```
+UserAPITokenDuration is the duration of time an API token is active for before expiring
+
+### UpstreamSyncInterval
+```toml
+UpstreamSyncInterval = '0s' # Default
+```
+UpstreamSyncInterval is the interval at which the background LDAP sync task will be called. A '0s' value disables the background sync being run on an interval. This check is already performed during login/logout actions; all sessions and API tokens stored in the local ldap tables are updated to match the remote server
+
+### UpstreamSyncRateLimit
+```toml
+UpstreamSyncRateLimit = '2m0s' # Default
+```
+UpstreamSyncRateLimit defines a duration to limit the number of query/API calls to the upstream LDAP provider. It prevents the sync functionality from being called multiple times within the defined duration
+
## WebServer.RateLimit
```toml
[WebServer.RateLimit]
@@ -2726,7 +2859,7 @@ ResendAfterThreshold = '1m0s'
Enabled = true
[GasEstimator]
-Mode = 'L2Suggested'
+Mode = 'SuggestedPrice'
PriceDefault = '20 gwei'
PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether'
PriceMin = '1 gwei'
@@ -2774,6 +2907,243 @@ GasLimit = 3800000
+Kroma Mainnet (255)
+
+```toml
+AutoCreateKey = true
+BlockBackfillDepth = 10
+BlockBackfillSkip = false
+ChainType = 'kroma'
+FinalityDepth = 400
+FinalityTagEnabled = false
+LogBackfillBatchSize = 1000
+LogPollInterval = '2s'
+LogKeepBlocksDepth = 100000
+MinIncomingConfirmations = 1
+MinContractPayment = '0.00001 link'
+NonceAutoSync = true
+NoNewHeadsThreshold = '40s'
+RPCDefaultBatchSize = 250
+RPCBlockQueryDelay = 1
+
+[Transactions]
+ForwardersEnabled = false
+MaxInFlight = 16
+MaxQueued = 250
+ReaperInterval = '1h0m0s'
+ReaperThreshold = '168h0m0s'
+ResendAfterThreshold = '30s'
+
+[BalanceMonitor]
+Enabled = true
+
+[GasEstimator]
+Mode = 'BlockHistory'
+PriceDefault = '20 gwei'
+PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether'
+PriceMin = '1 wei'
+LimitDefault = 500000
+LimitMax = 500000
+LimitMultiplier = '1'
+LimitTransfer = 21000
+BumpMin = '100 wei'
+BumpPercent = 20
+BumpThreshold = 3
+EIP1559DynamicFees = true
+FeeCapDefault = '100 gwei'
+TipCapDefault = '1 wei'
+TipCapMin = '1 wei'
+
+[GasEstimator.BlockHistory]
+BatchSize = 25
+BlockHistorySize = 24
+CheckInclusionBlocks = 12
+CheckInclusionPercentile = 90
+TransactionPercentile = 60
+
+[HeadTracker]
+HistoryDepth = 400
+MaxBufferSize = 3
+SamplingInterval = '1s'
+
+[NodePool]
+PollFailureThreshold = 5
+PollInterval = '10s'
+SelectionMode = 'HighestHead'
+SyncThreshold = 10
+LeaseDuration = '0s'
+
+[OCR]
+ContractConfirmations = 1
+ContractTransmitterTransmitTimeout = '10s'
+DatabaseTimeout = '10s'
+ObservationGracePeriod = '1s'
+
+[OCR2]
+[OCR2.Automation]
+GasLimit = 5300000
+```
+
+
+
+zkSync Goerli (280)
+
+```toml
+AutoCreateKey = true
+BlockBackfillDepth = 10
+BlockBackfillSkip = false
+ChainType = 'zksync'
+FinalityDepth = 1
+FinalityTagEnabled = false
+LogBackfillBatchSize = 1000
+LogPollInterval = '5s'
+LogKeepBlocksDepth = 100000
+MinIncomingConfirmations = 1
+MinContractPayment = '0.00001 link'
+NonceAutoSync = true
+NoNewHeadsThreshold = '1m0s'
+RPCDefaultBatchSize = 250
+RPCBlockQueryDelay = 1
+
+[Transactions]
+ForwardersEnabled = false
+MaxInFlight = 16
+MaxQueued = 250
+ReaperInterval = '1h0m0s'
+ReaperThreshold = '168h0m0s'
+ResendAfterThreshold = '1m0s'
+
+[BalanceMonitor]
+Enabled = true
+
+[GasEstimator]
+Mode = 'BlockHistory'
+PriceDefault = '20 gwei'
+PriceMax = '18.446744073709551615 ether'
+PriceMin = '0'
+LimitDefault = 3500000
+LimitMax = 500000
+LimitMultiplier = '1'
+LimitTransfer = 21000
+BumpMin = '5 gwei'
+BumpPercent = 20
+BumpThreshold = 3
+EIP1559DynamicFees = false
+FeeCapDefault = '100 gwei'
+TipCapDefault = '1 wei'
+TipCapMin = '1 wei'
+
+[GasEstimator.BlockHistory]
+BatchSize = 25
+BlockHistorySize = 8
+CheckInclusionBlocks = 12
+CheckInclusionPercentile = 90
+TransactionPercentile = 60
+
+[HeadTracker]
+HistoryDepth = 5
+MaxBufferSize = 3
+SamplingInterval = '1s'
+
+[NodePool]
+PollFailureThreshold = 5
+PollInterval = '10s'
+SelectionMode = 'HighestHead'
+SyncThreshold = 5
+LeaseDuration = '0s'
+
+[OCR]
+ContractConfirmations = 4
+ContractTransmitterTransmitTimeout = '10s'
+DatabaseTimeout = '10s'
+ObservationGracePeriod = '1s'
+
+[OCR2]
+[OCR2.Automation]
+GasLimit = 5300000
+```
+
+
+
+zkSync Mainnet (324)
+
+```toml
+AutoCreateKey = true
+BlockBackfillDepth = 10
+BlockBackfillSkip = false
+ChainType = 'zksync'
+FinalityDepth = 1
+FinalityTagEnabled = false
+LogBackfillBatchSize = 1000
+LogPollInterval = '5s'
+LogKeepBlocksDepth = 100000
+MinIncomingConfirmations = 1
+MinContractPayment = '0.00001 link'
+NonceAutoSync = true
+NoNewHeadsThreshold = '1m0s'
+RPCDefaultBatchSize = 250
+RPCBlockQueryDelay = 1
+
+[Transactions]
+ForwardersEnabled = false
+MaxInFlight = 16
+MaxQueued = 250
+ReaperInterval = '1h0m0s'
+ReaperThreshold = '168h0m0s'
+ResendAfterThreshold = '1m0s'
+
+[BalanceMonitor]
+Enabled = true
+
+[GasEstimator]
+Mode = 'BlockHistory'
+PriceDefault = '20 gwei'
+PriceMax = '18.446744073709551615 ether'
+PriceMin = '0'
+LimitDefault = 3500000
+LimitMax = 500000
+LimitMultiplier = '1'
+LimitTransfer = 21000
+BumpMin = '5 gwei'
+BumpPercent = 20
+BumpThreshold = 3
+EIP1559DynamicFees = false
+FeeCapDefault = '100 gwei'
+TipCapDefault = '1 wei'
+TipCapMin = '1 wei'
+
+[GasEstimator.BlockHistory]
+BatchSize = 25
+BlockHistorySize = 8
+CheckInclusionBlocks = 12
+CheckInclusionPercentile = 90
+TransactionPercentile = 60
+
+[HeadTracker]
+HistoryDepth = 5
+MaxBufferSize = 3
+SamplingInterval = '1s'
+
+[NodePool]
+PollFailureThreshold = 5
+PollInterval = '10s'
+SelectionMode = 'HighestHead'
+SyncThreshold = 5
+LeaseDuration = '0s'
+
+[OCR]
+ContractConfirmations = 4
+ContractTransmitterTransmitTimeout = '10s'
+DatabaseTimeout = '10s'
+ObservationGracePeriod = '1s'
+
+[OCR2]
+[OCR2.Automation]
+GasLimit = 5300000
+```
+
+
+
Optimism Goerli (420)
```toml
@@ -2885,7 +3255,7 @@ ResendAfterThreshold = '1m0s'
Enabled = true
[GasEstimator]
-Mode = 'L2Suggested'
+Mode = 'SuggestedPrice'
PriceDefault = '20 gwei'
PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether'
PriceMin = '0'
@@ -2963,7 +3333,7 @@ ResendAfterThreshold = '1m0s'
Enabled = true
[GasEstimator]
-Mode = 'L2Suggested'
+Mode = 'SuggestedPrice'
PriceDefault = '750 gwei'
PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether'
PriceMin = '1 gwei'
@@ -3042,7 +3412,7 @@ ResendAfterThreshold = '1m0s'
Enabled = true
[GasEstimator]
-Mode = 'L2Suggested'
+Mode = 'SuggestedPrice'
PriceDefault = '20 gwei'
PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether'
PriceMin = '0'
@@ -3090,6 +3460,164 @@ GasLimit = 5300000
+WeMix Mainnet (1111)
+
+```toml
+AutoCreateKey = true
+BlockBackfillDepth = 10
+BlockBackfillSkip = false
+ChainType = 'wemix'
+FinalityDepth = 1
+FinalityTagEnabled = false
+LogBackfillBatchSize = 1000
+LogPollInterval = '3s'
+LogKeepBlocksDepth = 100000
+MinIncomingConfirmations = 1
+MinContractPayment = '0.00001 link'
+NonceAutoSync = true
+NoNewHeadsThreshold = '30s'
+RPCDefaultBatchSize = 250
+RPCBlockQueryDelay = 1
+
+[Transactions]
+ForwardersEnabled = false
+MaxInFlight = 16
+MaxQueued = 250
+ReaperInterval = '1h0m0s'
+ReaperThreshold = '168h0m0s'
+ResendAfterThreshold = '1m0s'
+
+[BalanceMonitor]
+Enabled = true
+
+[GasEstimator]
+Mode = 'BlockHistory'
+PriceDefault = '20 gwei'
+PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether'
+PriceMin = '1 gwei'
+LimitDefault = 500000
+LimitMax = 500000
+LimitMultiplier = '1'
+LimitTransfer = 21000
+BumpMin = '5 gwei'
+BumpPercent = 20
+BumpThreshold = 3
+EIP1559DynamicFees = true
+FeeCapDefault = '100 gwei'
+TipCapDefault = '100 gwei'
+TipCapMin = '1 wei'
+
+[GasEstimator.BlockHistory]
+BatchSize = 25
+BlockHistorySize = 8
+CheckInclusionBlocks = 12
+CheckInclusionPercentile = 90
+TransactionPercentile = 60
+
+[HeadTracker]
+HistoryDepth = 100
+MaxBufferSize = 3
+SamplingInterval = '1s'
+
+[NodePool]
+PollFailureThreshold = 5
+PollInterval = '10s'
+SelectionMode = 'HighestHead'
+SyncThreshold = 5
+LeaseDuration = '0s'
+
+[OCR]
+ContractConfirmations = 1
+ContractTransmitterTransmitTimeout = '10s'
+DatabaseTimeout = '10s'
+ObservationGracePeriod = '1s'
+
+[OCR2]
+[OCR2.Automation]
+GasLimit = 5300000
+```
+
+
+
+WeMix Testnet (1112)
+
+```toml
+AutoCreateKey = true
+BlockBackfillDepth = 10
+BlockBackfillSkip = false
+ChainType = 'wemix'
+FinalityDepth = 1
+FinalityTagEnabled = false
+LogBackfillBatchSize = 1000
+LogPollInterval = '3s'
+LogKeepBlocksDepth = 100000
+MinIncomingConfirmations = 1
+MinContractPayment = '0.00001 link'
+NonceAutoSync = true
+NoNewHeadsThreshold = '30s'
+RPCDefaultBatchSize = 250
+RPCBlockQueryDelay = 1
+
+[Transactions]
+ForwardersEnabled = false
+MaxInFlight = 16
+MaxQueued = 250
+ReaperInterval = '1h0m0s'
+ReaperThreshold = '168h0m0s'
+ResendAfterThreshold = '1m0s'
+
+[BalanceMonitor]
+Enabled = true
+
+[GasEstimator]
+Mode = 'BlockHistory'
+PriceDefault = '20 gwei'
+PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether'
+PriceMin = '1 gwei'
+LimitDefault = 500000
+LimitMax = 500000
+LimitMultiplier = '1'
+LimitTransfer = 21000
+BumpMin = '5 gwei'
+BumpPercent = 20
+BumpThreshold = 3
+EIP1559DynamicFees = true
+FeeCapDefault = '100 gwei'
+TipCapDefault = '100 gwei'
+TipCapMin = '1 wei'
+
+[GasEstimator.BlockHistory]
+BatchSize = 25
+BlockHistorySize = 8
+CheckInclusionBlocks = 12
+CheckInclusionPercentile = 90
+TransactionPercentile = 60
+
+[HeadTracker]
+HistoryDepth = 100
+MaxBufferSize = 3
+SamplingInterval = '1s'
+
+[NodePool]
+PollFailureThreshold = 5
+PollInterval = '10s'
+SelectionMode = 'HighestHead'
+SyncThreshold = 5
+LeaseDuration = '0s'
+
+[OCR]
+ContractConfirmations = 1
+ContractTransmitterTransmitTimeout = '10s'
+DatabaseTimeout = '10s'
+ObservationGracePeriod = '1s'
+
+[OCR2]
+[OCR2.Automation]
+GasLimit = 5300000
+```
+
+
+
Simulated (1337)
```toml
@@ -3168,6 +3696,85 @@ GasLimit = 5300000
+Kroma Sepolia (2358)
+
+```toml
+AutoCreateKey = true
+BlockBackfillDepth = 10
+BlockBackfillSkip = false
+ChainType = 'kroma'
+FinalityDepth = 400
+FinalityTagEnabled = false
+LogBackfillBatchSize = 1000
+LogPollInterval = '2s'
+LogKeepBlocksDepth = 100000
+MinIncomingConfirmations = 1
+MinContractPayment = '0.00001 link'
+NonceAutoSync = true
+NoNewHeadsThreshold = '40s'
+RPCDefaultBatchSize = 250
+RPCBlockQueryDelay = 1
+
+[Transactions]
+ForwardersEnabled = false
+MaxInFlight = 16
+MaxQueued = 250
+ReaperInterval = '1h0m0s'
+ReaperThreshold = '168h0m0s'
+ResendAfterThreshold = '30s'
+
+[BalanceMonitor]
+Enabled = true
+
+[GasEstimator]
+Mode = 'BlockHistory'
+PriceDefault = '20 gwei'
+PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether'
+PriceMin = '1 wei'
+LimitDefault = 500000
+LimitMax = 500000
+LimitMultiplier = '1'
+LimitTransfer = 21000
+BumpMin = '100 wei'
+BumpPercent = 20
+BumpThreshold = 3
+EIP1559DynamicFees = true
+FeeCapDefault = '100 gwei'
+TipCapDefault = '1 wei'
+TipCapMin = '1 wei'
+
+[GasEstimator.BlockHistory]
+BatchSize = 25
+BlockHistorySize = 24
+CheckInclusionBlocks = 12
+CheckInclusionPercentile = 90
+TransactionPercentile = 60
+
+[HeadTracker]
+HistoryDepth = 400
+MaxBufferSize = 3
+SamplingInterval = '1s'
+
+[NodePool]
+PollFailureThreshold = 5
+PollInterval = '10s'
+SelectionMode = 'HighestHead'
+SyncThreshold = 10
+LeaseDuration = '0s'
+
+[OCR]
+ContractConfirmations = 1
+ContractTransmitterTransmitTimeout = '10s'
+DatabaseTimeout = '10s'
+ObservationGracePeriod = '1s'
+
+[OCR2]
+[OCR2.Automation]
+GasLimit = 5300000
+```
+
+
+
Fantom Testnet (4002)
```toml
@@ -3199,7 +3806,7 @@ ResendAfterThreshold = '1m0s'
Enabled = true
[GasEstimator]
-Mode = 'L2Suggested'
+Mode = 'SuggestedPrice'
PriceDefault = '20 gwei'
PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether'
PriceMin = '1 gwei'
@@ -3277,7 +3884,7 @@ ResendAfterThreshold = '1m0s'
Enabled = true
[GasEstimator]
-Mode = 'L2Suggested'
+Mode = 'SuggestedPrice'
PriceDefault = '750 gwei'
PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether'
PriceMin = '1 gwei'
@@ -4383,7 +4990,7 @@ ResendAfterThreshold = '1m0s'
Enabled = true
[GasEstimator]
-Mode = 'L2Suggested'
+Mode = 'SuggestedPrice'
PriceDefault = '20 gwei'
PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether'
PriceMin = '0'
@@ -4461,7 +5068,7 @@ ResendAfterThreshold = '1m0s'
Enabled = true
[GasEstimator]
-Mode = 'L2Suggested'
+Mode = 'SuggestedPrice'
PriceDefault = '20 gwei'
PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether'
PriceMin = '0'
@@ -4783,7 +5390,7 @@ BlockBackfillSkip enables skipping of very long backfills.
ChainType = 'arbitrum' # Example
```
ChainType is automatically detected from chain ID. Set this to force a certain chain type regardless of chain ID.
-Available types: arbitrum, metis, optimismBedrock, xdai
+Available types: arbitrum, metis, optimismBedrock, xdai, celo, kroma, wemix, zksync
### FinalityDepth
```toml
@@ -5004,7 +5611,8 @@ Mode controls what type of gas estimator is used.
- `FixedPrice` uses static configured values for gas price (can be set via API call).
- `BlockHistory` dynamically adjusts default gas price based on heuristics from mined blocks.
-- `L2Suggested` is a special mode only for use with L2 blockchains. This mode will use the gas price suggested by the rpc endpoint via `eth_gasPrice`.
+- `L2Suggested` mode is deprecated and replaced with `SuggestedPrice`.
+- `SuggestedPrice` is a mode which uses the gas price suggested by the rpc endpoint via `eth_gasPrice`.
- `Arbitrum` is a special mode only for use with Arbitrum blockchains. It uses the suggested gas price (up to `ETH_MAX_GAS_PRICE_WEI`, with `1000 gwei` default) as well as an estimated gas limit (up to `ETH_GAS_LIMIT_MAX`, with `1,000,000,000` default).
Chainlink nodes decide what gas price to use using an `Estimator`. It ships with several simple and battle-hardened built-in estimators that should work well for almost all use-cases. Note that estimators will change their behaviour slightly depending on if you are in EIP-1559 mode or not.
diff --git a/docs/SECRETS.md b/docs/SECRETS.md
index af316cab14b..fa7ba76df42 100644
--- a/docs/SECRETS.md
+++ b/docs/SECRETS.md
@@ -51,6 +51,33 @@ AllowSimplePasswords skips the password complexity check normally enforced on UR
Environment variable: `CL_DATABASE_ALLOW_SIMPLE_PASSWORDS`
+## WebServer.LDAP
+```toml
+[WebServer.LDAP]
+ServerAddress = 'ldaps://127.0.0.1' # Example
+ReadOnlyUserLogin = 'viewer@example.com' # Example
+ReadOnlyUserPass = 'password' # Example
+```
+Optional LDAP config
+
+### ServerAddress
+```toml
+ServerAddress = 'ldaps://127.0.0.1' # Example
+```
+ServerAddress is the full ldaps:// address of the ldap server to authenticate with and query
+
+### ReadOnlyUserLogin
+```toml
+ReadOnlyUserLogin = 'viewer@example.com' # Example
+```
+ReadOnlyUserLogin is the username of the read only root user used to authenticate the requested LDAP queries
+
+### ReadOnlyUserPass
+```toml
+ReadOnlyUserPass = 'password' # Example
+```
+ReadOnlyUserPass is the password for the above account
+
## Password
```toml
[Password]
diff --git a/go.mod b/go.mod
index ad3cb5f78ed..0a85fe7f488 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,6 @@ module github.com/smartcontractkit/chainlink/v2
go 1.21
require (
- github.com/CosmWasm/wasmd v0.40.1
github.com/Depado/ginprom v1.7.11
github.com/Masterminds/semver/v3 v3.2.1
github.com/Masterminds/sprig/v3 v3.2.3
@@ -15,7 +14,7 @@ require (
github.com/esote/minmaxheap v1.0.0
github.com/ethereum/go-ethereum v1.12.0
github.com/fatih/color v1.15.0
- github.com/fxamacker/cbor/v2 v2.4.0
+ github.com/fxamacker/cbor/v2 v2.5.0
github.com/gagliardetto/solana-go v1.4.1-0.20220428092759-5250b4abbb27
github.com/getsentry/sentry-go v0.19.0
github.com/gin-contrib/cors v1.4.0
@@ -23,8 +22,7 @@ require (
github.com/gin-contrib/sessions v0.0.5
github.com/gin-contrib/size v0.0.0-20230212012657-e14a14094dc4
github.com/gin-gonic/gin v1.9.1
- github.com/go-webauthn/webauthn v0.8.2
- github.com/gogo/protobuf v1.3.3
+ github.com/go-webauthn/webauthn v0.8.6
github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8
github.com/google/uuid v1.3.1
github.com/gorilla/securecookie v1.1.1
@@ -59,29 +57,28 @@ require (
github.com/pressly/goose/v3 v3.15.1
github.com/prometheus/client_golang v1.17.0
github.com/prometheus/client_model v0.5.0
- github.com/prometheus/common v0.44.0
- github.com/prometheus/prometheus v0.46.0
+ github.com/prometheus/common v0.45.0
+ github.com/prometheus/prometheus v0.47.2
github.com/robfig/cron/v3 v3.0.1
github.com/rogpeppe/go-internal v1.11.0
github.com/scylladb/go-reflectx v1.0.1
github.com/shirou/gopsutil/v3 v3.23.9
github.com/shopspring/decimal v1.3.1
github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704
- github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231030134738-81a5a89699a0
- github.com/smartcontractkit/chainlink-relay v0.1.7-0.20231031114820-e9826d481111
+ github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231109141932-cb1ea9020255
+ github.com/smartcontractkit/chainlink-relay v0.1.7-0.20231113174149-046d4ddaca1a
github.com/smartcontractkit/chainlink-solana v1.0.3-0.20231023133638-72f4e799ab05
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20231024133459-1ef3a11319eb
- github.com/smartcontractkit/libocr v0.0.0-20231020123319-d255366a6545
- github.com/smartcontractkit/ocr2keepers v0.7.27
+ github.com/smartcontractkit/libocr v0.0.0-20231107151413-13e0202ae8d7
+ github.com/smartcontractkit/ocr2keepers v0.7.28
github.com/smartcontractkit/ocr2vrf v0.0.0-20230804151440-2f1eb1e20687
- github.com/smartcontractkit/sqlx v1.3.5-0.20210805004948-4be295aacbeb
github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1
github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1
github.com/smartcontractkit/wsrpc v0.7.2
github.com/spf13/cast v1.5.1
github.com/stretchr/testify v1.8.4
github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a
- github.com/tidwall/gjson v1.16.0
+ github.com/tidwall/gjson v1.17.0
github.com/ugorji/go/codec v1.2.11
github.com/ulule/limiter/v3 v3.11.2
github.com/umbracle/ethgo v0.1.3
@@ -98,7 +95,8 @@ require (
golang.org/x/text v0.13.0
golang.org/x/time v0.3.0
golang.org/x/tools v0.14.0
- gonum.org/v1/gonum v0.13.0
+ gonum.org/v1/gonum v0.14.0
+ google.golang.org/grpc v1.58.3
google.golang.org/protobuf v1.31.0
gopkg.in/guregu/null.v2 v2.1.2
gopkg.in/guregu/null.v4 v4.0.0
@@ -115,7 +113,9 @@ require (
filippo.io/edwards25519 v1.0.0 // indirect
github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect
github.com/99designs/keyring v1.2.1 // indirect
+ github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d // indirect
+ github.com/CosmWasm/wasmd v0.40.1 // indirect
github.com/CosmWasm/wasmvm v1.2.4 // indirect
github.com/DataDog/zstd v1.5.2 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
@@ -169,8 +169,10 @@ require (
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect
github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
+ github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect
github.com/go-kit/kit v0.12.0 // indirect
github.com/go-kit/log v0.2.1 // indirect
+ github.com/go-ldap/ldap/v3 v3.4.5
github.com/go-logfmt/logfmt v0.6.0 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@@ -179,12 +181,13 @@ require (
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.14.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect
- github.com/go-webauthn/revoke v0.1.9 // indirect
+ github.com/go-webauthn/x v0.1.4 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gofrs/uuid v4.3.1+incompatible // indirect
- github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
+ github.com/gogo/protobuf v1.3.3 // indirect
+ github.com/golang-jwt/jwt/v5 v5.0.0 // indirect
github.com/golang/glog v1.1.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
@@ -192,7 +195,7 @@ require (
github.com/google/btree v1.1.2 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-querystring v1.1.0 // indirect
- github.com/google/go-tpm v0.3.3 // indirect
+ github.com/google/go-tpm v0.9.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/gorilla/context v1.1.1 // indirect
@@ -234,7 +237,7 @@ require (
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/jbenet/goprocess v0.1.4 // indirect
github.com/jmhodges/levigo v1.0.0 // indirect
- github.com/jmoiron/sqlx v1.3.5 // indirect
+ github.com/jmoiron/sqlx v1.3.5
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
@@ -283,7 +286,7 @@ require (
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
- github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
+ github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect
github.com/minio/sha256-simd v0.1.1 // indirect
@@ -364,7 +367,6 @@ require (
google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753 // indirect
- google.golang.org/grpc v1.58.3 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
diff --git a/go.sum b/go.sum
index f879f16272b..7fe91a6b123 100644
--- a/go.sum
+++ b/go.sum
@@ -79,6 +79,8 @@ github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOv
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
+github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
@@ -129,6 +131,8 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA=
+github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc=
github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
@@ -382,8 +386,8 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
-github.com/fxamacker/cbor/v2 v2.4.0 h1:ri0ArlOR+5XunOP8CRUowT0pSJOwhW098ZCUyskZD88=
-github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo=
+github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE=
+github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gagliardetto/binary v0.6.1/go.mod h1:aOfYkc20U0deHaHn/LVZXiqlkDbFAX0FpTlDhsXa0S0=
@@ -419,6 +423,8 @@ github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/
github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
+github.com/go-asn1-ber/asn1-ber v1.5.4 h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A=
+github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
@@ -433,6 +439,8 @@ github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEai
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-ldap/ldap/v3 v3.4.5 h1:ekEKmaDrpvR2yf5Nc/DClsGG9lAmdDixe44mLzlW5r8=
+github.com/go-ldap/ldap/v3 v3.4.5/go.mod h1:bMGIq3AGbytbaMwf8wdv5Phdxz0FWHTIYMSzyrYgnQs=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
@@ -467,10 +475,10 @@ github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
-github.com/go-webauthn/revoke v0.1.9 h1:gSJ1ckA9VaKA2GN4Ukp+kiGTk1/EXtaDb1YE8RknbS0=
-github.com/go-webauthn/revoke v0.1.9/go.mod h1:j6WKPnv0HovtEs++paan9g3ar46gm1NarktkXBaPR+w=
-github.com/go-webauthn/webauthn v0.8.2 h1:8KLIbpldjz9KVGHfqEgJNbkhd7bbRXhNw4QWFJE15oA=
-github.com/go-webauthn/webauthn v0.8.2/go.mod h1:d+ezx/jMCNDiqSMzOchuynKb9CVU1NM9BumOnokfcVQ=
+github.com/go-webauthn/webauthn v0.8.6 h1:bKMtL1qzd2WTFkf1mFTVbreYrwn7dsYmEPjTq6QN90E=
+github.com/go-webauthn/webauthn v0.8.6/go.mod h1:emwVLMCI5yx9evTTvr0r+aOZCdWJqMfbRhF0MufyUog=
+github.com/go-webauthn/x v0.1.4 h1:sGmIFhcY70l6k7JIDfnjVBiAAFEssga5lXIUXe0GtAs=
+github.com/go-webauthn/x v0.1.4/go.mod h1:75Ug0oK6KYpANh5hDOanfDI+dvPWHk788naJVG/37H8=
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
@@ -488,9 +496,12 @@ github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q8
github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0=
github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM=
+github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE=
+github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
@@ -556,12 +567,8 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
-github.com/google/go-tpm v0.1.2-0.20190725015402-ae6dd98980d4/go.mod h1:H9HbmUG2YgV/PHITkO7p6wxEEj/v5nlsVWIwumwH2NI=
-github.com/google/go-tpm v0.3.0/go.mod h1:iVLWvrPp/bHeEkxTFi9WG6K9w0iy2yIszHwZGHPbzAw=
-github.com/google/go-tpm v0.3.3 h1:P/ZFNBZYXRxc+z7i5uyd8VP7MaDteuLZInzrH2idRGo=
-github.com/google/go-tpm v0.3.3/go.mod h1:9Hyn3rgnzWF9XBWVk6ml6A6hNkbWjNFlDQL51BeghL4=
-github.com/google/go-tpm-tools v0.0.0-20190906225433-1614c142f845/go.mod h1:AVfHadzbdzHo54inR2x1v640jdi1YSi3NauM2DUsxk0=
-github.com/google/go-tpm-tools v0.2.0/go.mod h1:npUd03rQ60lxN7tzeBJreG38RvWwme2N1reF/eeiBk4=
+github.com/google/go-tpm v0.9.0 h1:sQF6YqWMi+SCXpsmS3fd21oPy/vSddwZry4JnmltHVk=
+github.com/google/go-tpm v0.9.0/go.mod h1:FkNVkc6C+IsvDI9Jw1OveJmxGZUUaKxtrpOS47QWKfU=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
@@ -615,7 +622,6 @@ github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyC
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
@@ -1173,8 +1179,8 @@ github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJK
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
-github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
+github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
@@ -1383,16 +1389,16 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
-github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
+github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
+github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
-github.com/prometheus/prometheus v0.46.0 h1:9JSdXnsuT6YsbODEhSQMwxNkGwPExfmzqG73vCMk/Kw=
-github.com/prometheus/prometheus v0.46.0/go.mod h1:10L5IJE5CEsjee1FnOcVswYXlPIscDWWt3IJ2UDYrz4=
+github.com/prometheus/prometheus v0.47.2 h1:jWcnuQHz1o1Wu3MZ6nMJDuTI0kU5yJp9pkxh8XEkNvI=
+github.com/prometheus/prometheus v0.47.2/go.mod h1:J/bmOSjgH7lFxz2gZhrWEZs2i64vMS+HIuZfmYNhJ/M=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ=
github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc=
@@ -1457,10 +1463,10 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 h1:T3lFWumvbfM1u/etVq42Afwq/jtNSBSOA8n5jntnNPo=
github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704/go.mod h1:2QuJdEouTWjh5BDy5o/vgGXQtR4Gz8yH1IYB5eT7u4M=
-github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231030134738-81a5a89699a0 h1:YrJ3moRDu2kgdv4o3Hym/FWVF4MS5cIZ7o7wk+43pvk=
-github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231030134738-81a5a89699a0/go.mod h1:fxtwgVZzTgoU1CpdSxNvFXecIY2r8DhH2JCzPO4e9G0=
-github.com/smartcontractkit/chainlink-relay v0.1.7-0.20231031114820-e9826d481111 h1:CElKhWq0WIa9Rmg5Ssajs5Hp3m3u/nYIQdXtpj2gbcc=
-github.com/smartcontractkit/chainlink-relay v0.1.7-0.20231031114820-e9826d481111/go.mod h1:M9U1JV7IQi8Sfj4JR1qSi1tIh6omgW78W/8SHN/8BUQ=
+github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231109141932-cb1ea9020255 h1:Pt6c7bJU9wIN6PQQnmN8UmYYH6lpfiQ6U/B8yEC2s5s=
+github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231109141932-cb1ea9020255/go.mod h1:EHppaccd/LTlTMI2o4dmBHe4BknEgEFFDjDGMNuGb3k=
+github.com/smartcontractkit/chainlink-relay v0.1.7-0.20231113174149-046d4ddaca1a h1:G/pD8uI1PULRJU8Y3eLLzjqQBp9ruG9hj+wWxtyrgTo=
+github.com/smartcontractkit/chainlink-relay v0.1.7-0.20231113174149-046d4ddaca1a/go.mod h1:M9U1JV7IQi8Sfj4JR1qSi1tIh6omgW78W/8SHN/8BUQ=
github.com/smartcontractkit/chainlink-solana v1.0.3-0.20231023133638-72f4e799ab05 h1:DaPSVnxe7oz1QJ+AVIhQWs1W3ubQvwvGo9NbHpMs1OQ=
github.com/smartcontractkit/chainlink-solana v1.0.3-0.20231023133638-72f4e799ab05/go.mod h1:o0Pn1pbaUluboaK6/yhf8xf7TiFCkyFl6WUOdwqamuU=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20231024133459-1ef3a11319eb h1:HiluOfEVGOQTM6BTDImOqYdMZZ7qq7fkZ3TJdmItNr8=
@@ -1469,14 +1475,12 @@ github.com/smartcontractkit/go-plugin v0.0.0-20231003134350-e49dad63b306 h1:ko88
github.com/smartcontractkit/go-plugin v0.0.0-20231003134350-e49dad63b306/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4=
github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f h1:hgJif132UCdjo8u43i7iPN1/MFnu49hv7lFGFftCHKU=
github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f/go.mod h1:MvMXoufZAtqExNexqi4cjrNYE9MefKddKylxjS+//n0=
-github.com/smartcontractkit/libocr v0.0.0-20231020123319-d255366a6545 h1:qOsw2ETQD/Sb/W2xuYn2KPWjvvsWA0C+l19rWFq8iNg=
-github.com/smartcontractkit/libocr v0.0.0-20231020123319-d255366a6545/go.mod h1:2lyRkw/qLQgUWlrWWmq5nj0y90rWeO6Y+v+fCakRgb0=
-github.com/smartcontractkit/ocr2keepers v0.7.27 h1:kwqMrzmEdq6gH4yqNuLQCbdlED0KaIjwZzu3FF+Gves=
-github.com/smartcontractkit/ocr2keepers v0.7.27/go.mod h1:1QGzJURnoWpysguPowOe2bshV0hNp1YX10HHlhDEsas=
+github.com/smartcontractkit/libocr v0.0.0-20231107151413-13e0202ae8d7 h1:21V61XOYSxpFmFqlhr5IaEh1uQ1F6CewJ30D/U/P34c=
+github.com/smartcontractkit/libocr v0.0.0-20231107151413-13e0202ae8d7/go.mod h1:2lyRkw/qLQgUWlrWWmq5nj0y90rWeO6Y+v+fCakRgb0=
+github.com/smartcontractkit/ocr2keepers v0.7.28 h1:dufAiYl4+uly9aH0+6GkS2jYzHGujq7tg0LYQE+x6JU=
+github.com/smartcontractkit/ocr2keepers v0.7.28/go.mod h1:1QGzJURnoWpysguPowOe2bshV0hNp1YX10HHlhDEsas=
github.com/smartcontractkit/ocr2vrf v0.0.0-20230804151440-2f1eb1e20687 h1:NwC3SOc25noBTe1KUQjt45fyTIuInhoE2UfgcHAdihM=
github.com/smartcontractkit/ocr2vrf v0.0.0-20230804151440-2f1eb1e20687/go.mod h1:YYZq52t4wcHoMQeITksYsorD+tZcOyuVU5+lvot3VFM=
-github.com/smartcontractkit/sqlx v1.3.5-0.20210805004948-4be295aacbeb h1:OMaBUb4X9IFPLbGbCHsMU+kw/BPCrewaVwWGIBc0I4A=
-github.com/smartcontractkit/sqlx v1.3.5-0.20210805004948-4be295aacbeb/go.mod h1:HNUu4cJekUdsJbwRBCiOybtkPJEfGRELQPe2tkoDEyk=
github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 h1:yiKnypAqP8l0OX0P3klzZ7SCcBUxy5KqTAKZmQOvSQE=
github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1/go.mod h1:q6f4fe39oZPdsh1i57WznEZgxd8siidMaSFq3wdPmVg=
github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 h1:Dai1bn+Q5cpeGMQwRdjOdVjG8mmFFROVkSKuUgBErRQ=
@@ -1501,7 +1505,6 @@ github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU
github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
-github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
@@ -1512,7 +1515,6 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
-github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU=
@@ -1559,8 +1561,8 @@ github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a/go.mod h1:/sfW47
github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg=
github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
github.com/tidwall/gjson v1.9.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
-github.com/tidwall/gjson v1.16.0 h1:SyXa+dsSPpUlcwEDuKuEBJEz5vzTvOea+9rjyYodQFg=
-github.com/tidwall/gjson v1.16.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
+github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM=
+github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
@@ -1748,6 +1750,7 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
+golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1788,6 +1791,7 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1807,7 +1811,6 @@ golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -1850,6 +1853,7 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1862,8 +1866,8 @@ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8=
-golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
+golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4=
+golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1876,6 +1880,7 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1948,7 +1953,6 @@ golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210629170331-7dc0b73dc9fb/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1980,6 +1984,7 @@ golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9sn
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1993,6 +1998,7 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -2068,6 +2074,7 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -2079,8 +2086,8 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM=
-gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU=
+gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0=
+gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -2166,7 +2173,6 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753/go.
google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
diff --git a/integration-tests/.golangci.yml b/integration-tests/.golangci.yml
new file mode 100644
index 00000000000..d22b26b8260
--- /dev/null
+++ b/integration-tests/.golangci.yml
@@ -0,0 +1,78 @@
+run:
+ timeout: 15m
+linters:
+ enable:
+ - exhaustive
+ - exportloopref
+ - revive
+ - goimports
+ - gosec
+ - misspell
+ - rowserrcheck
+ - errorlint
+linters-settings:
+ exhaustive:
+ default-signifies-exhaustive: true
+ goimports:
+ local-prefixes: github.com/smartcontractkit/chainlink
+ golint:
+ min-confidence: 0.999
+ gosec:
+ excludes:
+ - G101
+ govet:
+ # report about shadowed variables
+ check-shadowing: true
+ revive:
+ confidence: 0.8
+ rules:
+ - name: blank-imports
+ - name: context-as-argument
+ - name: context-keys-type
+ - name: dot-imports
+ - name: error-return
+ - name: error-strings
+ - name: error-naming
+ - name: if-return
+ - name: increment-decrement
+ # - name: var-naming // doesn't work with some generated names
+ - name: var-declaration
+ - name: package-comments
+ - name: range
+ - name: receiver-naming
+ - name: time-naming
+ - name: unexported-return
+ - name: indent-error-flow
+ - name: errorf
+ - name: empty-block
+ - name: superfluous-else
+ - name: unused-parameter
+ - name: unreachable-code
+ - name: redefines-builtin-id
+ - name: waitgroup-by-value
+ - name: unconditional-recursion
+ - name: struct-tag
+ - name: string-format
+ - name: string-of-int
+ - name: range-val-address
+ - name: range-val-in-closure
+ - name: modifies-value-receiver
+ - name: modifies-parameter
+ - name: identical-branches
+ - name: get-return
+ # - name: flag-parameter // probably one we should work on doing better at in the future
+ # - name: early-return // probably one we should work on doing better at in the future
+ - name: defer
+ - name: constant-logical-expr
+ - name: confusing-naming
+ - name: confusing-results
+ - name: bool-literal-in-expr
+ - name: atomic
+issues:
+ exclude-rules:
+ - text: "^G404: Use of weak random number generator"
+ linters:
+ - gosec
+ - linters:
+ - govet
+ text: "declaration of \"err\" shadows"
diff --git a/integration-tests/.tool-versions b/integration-tests/.tool-versions
index 68b6d994197..47b73e9de11 100644
--- a/integration-tests/.tool-versions
+++ b/integration-tests/.tool-versions
@@ -1,4 +1,5 @@
-golang 1.21.1
+golang 1.21.4
k3d 5.4.6
kubectl 1.25.5
nodejs 18.13.0
+golangci-lint 1.55.2
diff --git a/integration-tests/LOG_POLLER.md b/integration-tests/LOG_POLLER.md
new file mode 100644
index 00000000000..6e98fba5525
--- /dev/null
+++ b/integration-tests/LOG_POLLER.md
@@ -0,0 +1,163 @@
+# How to run Log Poller's tests
+
+## Limitations
+* currently they can only be run in Docker, not in Kubernetes
+* when using `looped` runner it's not possible to directly control execution time
+* WASP's `gun` implementation is imperfect in terms of generated load
+
+## Configuration
+Due to the unfinished migration to TOML config, tests use a mixed configuration approach:
+* network, RPC endpoints, funding keys, etc need to be provided by env vars
+* test-specific configuration can be provided by TOML file or via a `Config` struct (to which TOML is parsed anyway) additionally some of it can be overridden by env vars (for ease of use in CI)
+  * smoke tests use the programmatic approach
+  * load test uses the TOML approach
+
+## Approximated test scenario
+Different tests might have slightly modified scenarios, but generally they follow this pattern:
+* start CL nodes
+* setup OCR
+* upload Automation Registry 2.1
+* deploy UpKeep Consumers
+* deploy test contracts
+* register filters for test contracts
+* make sure all CL nodes have filters registered
+* emit test logs
+* wait for log poller to finalise last block in which logs were emitted
+  * the block number is determined either by the finality tag or a fixed finality depth, depending on network configuration
+* wait for all CL nodes to have expected log count
+* compare logs that are present in the EVM node with logs in CL nodes
+
+All of the checks use fluent waits.
+
+### Required env vars
+* `CHAINLINK_IMAGE`
+* `CHAINLINK_VERSION`
+* `SELECTED_NETWORKS`
+
+### Env vars required for live testnet tests
+* `EVM_WS_URL` -- RPC websocket
+* `EVM_HTTP_URL` -- RPC HTTP
+* `EVM_KEYS` -- private keys used for funding
+
+Since on live testnets we are using existing and canonical LINK contracts, funding keys need to contain enough LINK to pay for the test. There's an automated check that fails during setup if there's not enough LINK. Approximately `9 LINK` is required for each UpKeep contract the test uses to register a `LogTrigger`. The test contract emits 3 types of events and unless configured otherwise (programmatically!) all of them will be used, which means that due to Automation's limitation we need to register a separate `LogTrigger` for each event type for each contract. So if you want to test with 100 contracts, then you'd need to register 300 UpKeep contracts and thus your funding address needs to have at least 2700 LINK.
+
+### Programmatic config
+There are two load generators available:
+* `looped` -- it's a simple generator that just loops over all contracts and emits events at random intervals
+* `wasp` -- based on WASP load testing tool, it's more sophisticated and allows to control execution time
+
+#### Looped config
+```
+ cfg := logpoller.Config{
+ General: &logpoller.General{
+ Generator: logpoller.GeneratorType_Looped,
+ Contracts: 2, // number of test contracts to deploy
+ EventsPerTx: 4, // number of events to emit in a single transaction
+ UseFinalityTag: false, // if set to true then Log Poller will use the finality tag returned by the chain when determining the last finalised block (won't work on a simulated network, as it requires eth2)
+ },
+ LoopedConfig: &logpoller.LoopedConfig{
+ ContractConfig: logpoller.ContractConfig{
+ ExecutionCount: 100, // number of times each contract will be called
+ },
+ FuzzConfig: logpoller.FuzzConfig{
+ MinEmitWaitTimeMs: 200, // minimum number of milliseconds to wait before emitting events
+ MaxEmitWaitTimeMs: 500, // maximum number of milliseconds to wait before emitting events
+ },
+ },
+ }
+
+ eventsToEmit := []abi.Event{}
+ for _, event := range logpoller.EmitterABI.Events { // modify that function to emit only the logs you want
+ eventsToEmit = append(eventsToEmit, event)
+ }
+
+ cfg.General.EventsToEmit = eventsToEmit
+```
+
+Remember that the final number of events emitted will be `Contracts * EventsPerTx * ExecutionCount * len(eventsToEmit)`. And that the last number by default is equal to `3` (that's because we want to emit different event types, not just one). You can change that by overriding the `EventsToEmit` field.
+
+#### WASP config
+```
+ cfg := logpoller.Config{
+ General: &logpoller.General{
+ Generator: logpoller.GeneratorType_Looped,
+ Contracts: 2,
+ EventsPerTx: 4,
+ UseFinalityTag: false,
+ },
+ Wasp: &logpoller.WaspConfig{
+ Load: &logpoller.Load{
+ RPS: 10, // requests per second
+ LPS: 0, // logs per second
+ RateLimitUnitDuration: models.MustNewDuration(5 * time.Minute), // for how long the load should be limited (ramp-up period)
+ Duration: models.MustNewDuration(5 * time.Minute), // how long to generate the load for
+ CallTimeout: models.MustNewDuration(5 * time.Minute), // how long to wait for a single call to finish
+ },
+ },
+ }
+
+ eventsToEmit := []abi.Event{}
+ for _, event := range logpoller.EmitterABI.Events {
+ eventsToEmit = append(eventsToEmit, event)
+ }
+
+ cfg.General.EventsToEmit = eventsToEmit
+```
+
+Remember that you cannot specify both `RPS` and `LPS`. If you want to use `LPS` then omit `RPS` field. Also remember that depending on the events you decide to emit RPS might mean 1 request or might mean 3 requests (if you go with the default `EventsToEmit`).
+
+For other nuances do check [gun.go](integration-tests/universal/log_poller/gun.go).
+
+### TOML config
+That config follows the same structure as the programmatic config shown above.
+
+Sample config: [config.toml](integration-tests/load/log_poller/config.toml)
+
+Use this snippet instead of creating the `Config` struct programmatically:
+```
+ cfg, err := lp_helpers.ReadConfig(lp_helpers.DefaultConfigFilename)
+ require.NoError(t, err)
+```
+
+And remember to add the events you want to emit:
+```
+ eventsToEmit := []abi.Event{}
+ for _, event := range lp_helpers.EmitterABI.Events {
+ eventsToEmit = append(eventsToEmit, event)
+ }
+
+ cfg.General.EventsToEmit = eventsToEmit
+```
+
+### Timeouts
+Various checks inside the tests have hardcoded timeouts, which might not be suitable for your execution parameters, for example if you decided to emit 1M logs, then waiting for all of them to be indexed for `1m` might not be enough. Remember to adjust them accordingly.
+
+Sample snippet:
+```
+ gom.Eventually(func(g gomega.Gomega) {
+ logCountMatches, err := clNodesHaveExpectedLogCount(startBlock, endBlock, testEnv.EVMClient.GetChainID(), totalLogsEmitted, expectedFilters, l, coreLogger, testEnv.ClCluster)
+ if err != nil {
+ l.Warn().Err(err).Msg("Error checking if CL nodes have expected log count. Retrying...")
+ }
+ g.Expect(logCountMatches).To(gomega.BeTrue(), "Not all CL nodes have expected log count")
+ }, "1m", "30s").Should(gomega.Succeed()) // "1m" is the timeout for all nodes to have the expected log count
+```
+
+## Tests
+* [Load](integration-tests/load/log_poller/log_poller_test.go)
+* [Smoke](integration-tests/smoke/log_poller/log_poller_test.go)
+
+## Running tests
+After setting all the environment variables you can run the test with:
+```
+# run in the root folder of chainlink repo
+go test -v -test.timeout=2700s -run TestLogPollerReplay integration-tests/smoke/log_poller_test.go
+```
+
+Remember to adjust test timeout accordingly to match expected duration.
+
+
+## Github Actions
+If all of that seems too complicated use this [on-demand workflow](https://github.com/smartcontractkit/chainlink/actions/workflows/on-demand-log-poller.yml).
+
+Execution time here is an approximation, so depending on network conditions it might be slightly longer or shorter.
\ No newline at end of file
diff --git a/integration-tests/Makefile b/integration-tests/Makefile
index f26518c0076..fb4bfa74f3e 100644
--- a/integration-tests/Makefile
+++ b/integration-tests/Makefile
@@ -56,6 +56,12 @@ install_gotestfmt:
go install github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
set -euo pipefail
+lint:
+ golangci-lint --color=always run ./... --fix -v
+
+build:
+ @go build ./... && SELECTED_NETWORKS=SIMULATED go test -run=^# ./...
+
# Builds the test image
# tag: the tag for the test image being built, example: tag=tate
# base_tag: the tag for the base-test-image to use, example: base_tag=latest
@@ -118,7 +124,7 @@ test_chaos_verbose: ## Run all smoke tests with verbose logging
# Performance
.PHONY: test_perf
-test_perf: test_need_operator_assets ## Run core node performance tests.
+test_perf: ## Run core node performance tests.
TEST_LOG_LEVEL="disabled" \
SELECTED_NETWORKS="SIMULATED,SIMULATED_1,SIMULATED_2" \
go test -timeout 1h -count=1 -json $(args) ./performance 2>&1 | tee /tmp/gotest.log | gotestfmt
diff --git a/integration-tests/actions/actions.go b/integration-tests/actions/actions.go
index 010b431b56f..02a25234774 100644
--- a/integration-tests/actions/actions.go
+++ b/integration-tests/actions/actions.go
@@ -2,17 +2,19 @@
package actions
import (
+ "crypto/ecdsa"
"encoding/json"
"fmt"
"math/big"
"strings"
"testing"
+ "github.com/ethereum/go-ethereum/crypto"
+
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
"github.com/google/uuid"
- "github.com/pkg/errors"
"github.com/rs/zerolog/log"
"go.uber.org/zap/zapcore"
@@ -252,7 +254,6 @@ func GetMockserverInitializerDataForOTPE(
func TeardownSuite(
t *testing.T,
env *environment.Environment,
- logsFolderPath string,
chainlinkNodes []*client.ChainlinkK8sClient,
optionalTestReporter testreporters.TestReporter, // Optionally pass in a test reporter to log further metrics
failingLogLevel zapcore.Level, // Examines logs after the test, and fails the test if any Chainlink logs are found at or above provided level
@@ -260,7 +261,7 @@ func TeardownSuite(
) error {
l := logging.GetTestLogger(t)
if err := testreporters.WriteTeardownLogs(t, env, optionalTestReporter, failingLogLevel); err != nil {
- return errors.Wrap(err, "Error dumping environment logs, leaving environment running for manual retrieval")
+ return fmt.Errorf("Error dumping environment logs, leaving environment running for manual retrieval, err: %w", err)
}
// Delete all jobs to stop depleting the funds
err := DeleteAllJobs(chainlinkNodes)
@@ -328,16 +329,16 @@ func DeleteAllJobs(chainlinkNodes []*client.ChainlinkK8sClient) error {
}
jobs, _, err := node.ReadJobs()
if err != nil {
- return errors.Wrap(err, "error reading jobs from chainlink node")
+ return fmt.Errorf("error reading jobs from chainlink node, err: %w", err)
}
for _, maps := range jobs.Data {
if _, ok := maps["id"]; !ok {
- return errors.Errorf("error reading job id from chainlink node's jobs %+v", jobs.Data)
+ return fmt.Errorf("error reading job id from chainlink node's jobs %+v", jobs.Data)
}
id := maps["id"].(string)
_, err := node.DeleteJob(id)
if err != nil {
- return errors.Wrap(err, "error deleting job from chainlink node")
+ return fmt.Errorf("error deleting job from chainlink node, err: %w", err)
}
}
}
@@ -348,7 +349,7 @@ func DeleteAllJobs(chainlinkNodes []*client.ChainlinkK8sClient) error {
// all from a remote, k8s style environment
func ReturnFunds(chainlinkNodes []*client.ChainlinkK8sClient, blockchainClient blockchain.EVMClient) error {
if blockchainClient == nil {
- return errors.New("blockchain client is nil, unable to return funds from chainlink nodes")
+ return fmt.Errorf("blockchain client is nil, unable to return funds from chainlink nodes")
}
log.Info().Msg("Attempting to return Chainlink node funds to default network wallets")
if blockchainClient.NetworkSimulated() {
@@ -414,7 +415,7 @@ func UpgradeChainlinkNodeVersions(
nodes ...*client.ChainlinkK8sClient,
) error {
if newImage == "" && newVersion == "" {
- return errors.New("unable to upgrade node version, found empty image and version, must provide either a new image or a new version")
+ return fmt.Errorf("unable to upgrade node version, found empty image and version, must provide either a new image or a new version")
}
for _, node := range nodes {
if err := node.UpgradeVersion(testEnvironment, newImage, newVersion); err != nil {
@@ -443,3 +444,17 @@ func DeployMockETHLinkFeed(cd contracts.ContractDeployer, answer *big.Int) (cont
}
return mockETHLINKFeed, err
}
+
+// todo - move to CTF
+func GenerateWallet() (common.Address, error) {
+ privateKey, err := crypto.GenerateKey()
+ if err != nil {
+ return common.Address{}, err
+ }
+ publicKey := privateKey.Public()
+ publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey)
+ if !ok {
+ return common.Address{}, fmt.Errorf("cannot assert type: publicKey is not of type *ecdsa.PublicKey")
+ }
+ return crypto.PubkeyToAddress(*publicKeyECDSA), nil
+}
diff --git a/integration-tests/actions/actions_local.go b/integration-tests/actions/actions_local.go
index b65bac43bb1..d4913cabd8a 100644
--- a/integration-tests/actions/actions_local.go
+++ b/integration-tests/actions/actions_local.go
@@ -2,7 +2,8 @@
package actions
import (
- "github.com/pkg/errors"
+ "fmt"
+
"github.com/smartcontractkit/chainlink/integration-tests/docker/test_env"
)
@@ -13,10 +14,10 @@ func UpgradeChainlinkNodeVersionsLocal(
nodes ...*test_env.ClNode,
) error {
if newImage == "" && newVersion == "" {
- return errors.New("unable to upgrade node version, found empty image and version, must provide either a new image or a new version")
+ return fmt.Errorf("unable to upgrade node version, found empty image and version, must provide either a new image or a new version")
}
for _, node := range nodes {
- if err := node.UpgradeVersion(node.NodeConfig, newImage, newVersion); err != nil {
+ if err := node.UpgradeVersion(newImage, newVersion); err != nil {
return err
}
}
diff --git a/integration-tests/actions/automation_ocr_helpers.go b/integration-tests/actions/automation_ocr_helpers.go
index 998b1ee89cf..e1635902db5 100644
--- a/integration-tests/actions/automation_ocr_helpers.go
+++ b/integration-tests/actions/automation_ocr_helpers.go
@@ -14,14 +14,15 @@ import (
"github.com/stretchr/testify/require"
"gopkg.in/guregu/null.v4"
- "github.com/smartcontractkit/chainlink-testing-framework/blockchain"
- "github.com/smartcontractkit/chainlink-testing-framework/logging"
ocr2 "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper"
ocr3 "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3confighelper"
"github.com/smartcontractkit/libocr/offchainreporting2plus/types"
ocr2keepers20config "github.com/smartcontractkit/ocr2keepers/pkg/v2/config"
ocr2keepers30config "github.com/smartcontractkit/ocr2keepers/pkg/v3/config"
+ "github.com/smartcontractkit/chainlink-testing-framework/blockchain"
+ "github.com/smartcontractkit/chainlink-testing-framework/logging"
+
"github.com/smartcontractkit/chainlink/v2/core/services/job"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype"
"github.com/smartcontractkit/chainlink/v2/core/store/models"
diff --git a/integration-tests/actions/automation_ocr_helpers_local.go b/integration-tests/actions/automation_ocr_helpers_local.go
index ccc2eea99d8..f541594c4d2 100644
--- a/integration-tests/actions/automation_ocr_helpers_local.go
+++ b/integration-tests/actions/automation_ocr_helpers_local.go
@@ -8,7 +8,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/lib/pq"
- "github.com/pkg/errors"
"github.com/rs/zerolog"
ocr2 "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper"
ocr3 "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3confighelper"
@@ -187,7 +186,7 @@ func CreateOCRKeeperJobsLocal(
} else if registryVersion == ethereum.RegistryVersion_2_0 {
contractVersion = "v2.0"
} else {
- return errors.New("v2.0 and v2.1 are the only supported versions")
+ return fmt.Errorf("v2.0 and v2.1 are the only supported versions")
}
bootstrapSpec := &client.OCR2TaskJobSpec{
diff --git a/integration-tests/actions/ocr2_helpers.go b/integration-tests/actions/ocr2_helpers.go
index aead74f2bdd..02ce73e813e 100644
--- a/integration-tests/actions/ocr2_helpers.go
+++ b/integration-tests/actions/ocr2_helpers.go
@@ -15,14 +15,15 @@ import (
"golang.org/x/sync/errgroup"
"gopkg.in/guregu/null.v4"
+ "github.com/smartcontractkit/libocr/offchainreporting2/reportingplugin/median"
+ "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper"
+ "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+
"github.com/smartcontractkit/chainlink-testing-framework/blockchain"
ctfClient "github.com/smartcontractkit/chainlink-testing-framework/client"
"github.com/smartcontractkit/chainlink/v2/core/services/job"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype"
"github.com/smartcontractkit/chainlink/v2/core/store/models"
- "github.com/smartcontractkit/libocr/offchainreporting2/reportingplugin/median"
- "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper"
- "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
diff --git a/integration-tests/actions/ocr2_helpers_local.go b/integration-tests/actions/ocr2_helpers_local.go
index b3fe6eb041f..65e0a466bee 100644
--- a/integration-tests/actions/ocr2_helpers_local.go
+++ b/integration-tests/actions/ocr2_helpers_local.go
@@ -12,6 +12,12 @@ import (
"github.com/google/uuid"
"github.com/lib/pq"
"github.com/rs/zerolog/log"
+ "github.com/smartcontractkit/libocr/offchainreporting2/reportingplugin/median"
+ "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper"
+ "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+ "golang.org/x/sync/errgroup"
+ "gopkg.in/guregu/null.v4"
+
"github.com/smartcontractkit/chainlink-testing-framework/docker/test_env"
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
@@ -19,11 +25,6 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/testhelpers"
"github.com/smartcontractkit/chainlink/v2/core/store/models"
- "github.com/smartcontractkit/libocr/offchainreporting2/reportingplugin/median"
- "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper"
- "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- "golang.org/x/sync/errgroup"
- "gopkg.in/guregu/null.v4"
)
func CreateOCRv2JobsLocal(
diff --git a/integration-tests/actions/ocr2vrf_actions/ocr2vrf_config_helpers.go b/integration-tests/actions/ocr2vrf_actions/ocr2vrf_config_helpers.go
index ce693964323..e424aaa11b3 100644
--- a/integration-tests/actions/ocr2vrf_actions/ocr2vrf_config_helpers.go
+++ b/integration-tests/actions/ocr2vrf_actions/ocr2vrf_config_helpers.go
@@ -16,9 +16,6 @@ import (
"go.dedis.ch/kyber/v3/group/edwards25519"
"gopkg.in/guregu/null.v4"
- "github.com/smartcontractkit/chainlink-testing-framework/logging"
- "github.com/smartcontractkit/chainlink/v2/core/services/job"
- "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype"
"github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper"
"github.com/smartcontractkit/libocr/offchainreporting2plus/types"
"github.com/smartcontractkit/ocr2vrf/altbn_128"
@@ -26,6 +23,10 @@ import (
"github.com/smartcontractkit/ocr2vrf/ocr2vrf"
ocr2vrftypes "github.com/smartcontractkit/ocr2vrf/types"
+ "github.com/smartcontractkit/chainlink-testing-framework/logging"
+ "github.com/smartcontractkit/chainlink/v2/core/services/job"
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype"
+
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
)
diff --git a/integration-tests/actions/ocr2vrf_actions/ocr2vrf_steps.go b/integration-tests/actions/ocr2vrf_actions/ocr2vrf_steps.go
index c123aaff6a2..72d668076e9 100644
--- a/integration-tests/actions/ocr2vrf_actions/ocr2vrf_steps.go
+++ b/integration-tests/actions/ocr2vrf_actions/ocr2vrf_steps.go
@@ -22,6 +22,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/actions/ocr2vrf_actions/ocr2vrf_constants"
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
func SetAndWaitForVRFBeaconProcessToFinish(t *testing.T, ocr2VRFPluginConfig *OCR2VRFPluginConfig, vrfBeacon contracts.VRFBeacon) {
@@ -172,7 +173,7 @@ func FundVRFCoordinatorV3Subscription(t *testing.T, linkToken contracts.LinkToke
require.NoError(t, err, "Error waiting for TXs to complete")
}
-func DeployOCR2VRFContracts(t *testing.T, contractDeployer contracts.ContractDeployer, chainClient blockchain.EVMClient, linkToken contracts.LinkToken, mockETHLinkFeed contracts.MockETHLINKFeed, beaconPeriodBlocksCount *big.Int, keyID string) (contracts.DKG, contracts.VRFCoordinatorV3, contracts.VRFBeacon, contracts.VRFBeaconConsumer) {
+func DeployOCR2VRFContracts(t *testing.T, contractDeployer contracts.ContractDeployer, chainClient blockchain.EVMClient, linkToken contracts.LinkToken, beaconPeriodBlocksCount *big.Int, keyID string) (contracts.DKG, contracts.VRFCoordinatorV3, contracts.VRFBeacon, contracts.VRFBeaconConsumer) {
dkg, err := contractDeployer.DeployDKG()
require.NoError(t, err, "Error deploying DKG Contract")
@@ -272,14 +273,14 @@ func RequestRandomnessFulfillmentAndWaitForFulfilment(
}
func getRequestId(t *testing.T, consumer contracts.VRFBeaconConsumer, receipt *types.Receipt, confirmationDelay *big.Int) *big.Int {
- periodBlocks, err := consumer.IBeaconPeriodBlocks(nil)
+ periodBlocks, err := consumer.IBeaconPeriodBlocks(utils.TestContext(t))
require.NoError(t, err, "Error getting Beacon Period block count")
blockNumber := receipt.BlockNumber
periodOffset := new(big.Int).Mod(blockNumber, periodBlocks)
nextBeaconOutputHeight := new(big.Int).Sub(new(big.Int).Add(blockNumber, periodBlocks), periodOffset)
- requestID, err := consumer.GetRequestIdsBy(nil, nextBeaconOutputHeight, confirmationDelay)
+ requestID, err := consumer.GetRequestIdsBy(utils.TestContext(t), nextBeaconOutputHeight, confirmationDelay)
require.NoError(t, err, "Error getting requestID from consumer contract")
return requestID
@@ -305,7 +306,6 @@ func SetupOCR2VRFUniverse(
contractDeployer,
chainClient,
linkToken,
- mockETHLinkFeed,
ocr2vrf_constants.BeaconPeriodBlocksCount,
ocr2vrf_constants.KeyID,
)
diff --git a/integration-tests/actions/ocr_helpers.go b/integration-tests/actions/ocr_helpers.go
index cfc8cfe589b..4f713dcdd6d 100644
--- a/integration-tests/actions/ocr_helpers.go
+++ b/integration-tests/actions/ocr_helpers.go
@@ -27,7 +27,6 @@ func DeployOCRContracts(
numberOfContracts int,
linkTokenContract contracts.LinkToken,
contractDeployer contracts.ContractDeployer,
- bootstrapNode *client.ChainlinkK8sClient,
workerNodes []*client.ChainlinkK8sClient,
client blockchain.EVMClient,
) ([]contracts.OffchainAggregator, error) {
diff --git a/integration-tests/actions/ocr_helpers_local.go b/integration-tests/actions/ocr_helpers_local.go
index 8bb4e834794..e6dd5ae77f6 100644
--- a/integration-tests/actions/ocr_helpers_local.go
+++ b/integration-tests/actions/ocr_helpers_local.go
@@ -9,11 +9,11 @@ import (
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/google/uuid"
- "github.com/pkg/errors"
"github.com/rs/zerolog"
+ "golang.org/x/sync/errgroup"
+
"github.com/smartcontractkit/chainlink-testing-framework/blockchain"
"github.com/smartcontractkit/chainlink-testing-framework/docker/test_env"
- "golang.org/x/sync/errgroup"
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
@@ -280,7 +280,7 @@ func TrackForwarderLocal(
chainID := chainClient.GetChainID()
_, _, err := node.TrackForwarder(chainID, authorizedForwarder)
if err != nil {
- return errors.Wrap(err, "failed to track forwarder")
+ return fmt.Errorf("failed to track forwarder, err: %w", err)
}
logger.Info().Str("NodeURL", node.Config.URL).
Str("ForwarderAddress", authorizedForwarder.Hex()).
@@ -305,7 +305,7 @@ func DeployOCRContractsForwarderFlowLocal(
contracts.DefaultOffChainAggregatorOptions(),
)
if err != nil {
- return nil, errors.Wrap(err, "failed to deploy offchain aggregator")
+ return nil, fmt.Errorf("failed to deploy offchain aggregator, err: %w", err)
}
ocrInstances = append(ocrInstances, ocrInstance)
err = client.WaitForEvents()
@@ -329,7 +329,7 @@ func DeployOCRContractsForwarderFlowLocal(
for _, ocrInstance := range ocrInstances {
err := ocrInstance.SetPayees(transmitters, payees)
if err != nil {
- return nil, errors.Wrap(err, "failed to set OCR payees")
+ return nil, fmt.Errorf("failed to set OCR payees, err: %w", err)
}
if err := client.WaitForEvents(); err != nil {
return nil, err
@@ -348,7 +348,7 @@ func DeployOCRContractsForwarderFlowLocal(
forwarderAddresses,
)
if err != nil {
- return nil, errors.Wrap(err, "failed to set on-chain config")
+ return nil, fmt.Errorf("failed to set on-chain config, err: %w", err)
}
if err = client.WaitForEvents(); err != nil {
return nil, err
diff --git a/integration-tests/actions/operator_forwarder_helpers.go b/integration-tests/actions/operator_forwarder_helpers.go
index 37b50c4fa9a..a1d7135416c 100644
--- a/integration-tests/actions/operator_forwarder_helpers.go
+++ b/integration-tests/actions/operator_forwarder_helpers.go
@@ -1,7 +1,6 @@
package actions
import (
- "context"
"math/big"
"testing"
@@ -17,6 +16,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
func DeployForwarderContracts(
@@ -67,7 +67,7 @@ func AcceptAuthorizedReceiversOperator(
err = chainClient.WaitForEvents()
require.NoError(t, err, "Waiting for events in nodes shouldn't fail")
- senders, err := forwarderInstance.GetAuthorizedSenders(context.Background())
+ senders, err := forwarderInstance.GetAuthorizedSenders(utils.TestContext(t))
require.NoError(t, err, "Getting authorized senders shouldn't fail")
var nodesAddrs []string
for _, o := range nodeAddresses {
@@ -75,20 +75,18 @@ func AcceptAuthorizedReceiversOperator(
}
require.Equal(t, nodesAddrs, senders, "Senders addresses should match node addresses")
- owner, err := forwarderInstance.Owner(context.Background())
+ owner, err := forwarderInstance.Owner(utils.TestContext(t))
require.NoError(t, err, "Getting authorized forwarder owner shouldn't fail")
require.Equal(t, operator.Hex(), owner, "Forwarder owner should match operator")
}
func ProcessNewEvent(
t *testing.T,
- eventSub geth.Subscription,
operatorCreated chan *operator_factory.OperatorFactoryOperatorCreated,
authorizedForwarderCreated chan *operator_factory.OperatorFactoryAuthorizedForwarderCreated,
event *types.Log,
eventDetails *abi.Event,
operatorFactoryInstance contracts.OperatorFactory,
- contractABI *abi.ABI,
chainClient blockchain.EVMClient,
) {
l := logging.GetTestLogger(t)
@@ -141,7 +139,7 @@ func SubscribeOperatorFactoryEvents(
l := logging.GetTestLogger(t)
contractABI, err := operator_factory.OperatorFactoryMetaData.GetAbi()
require.NoError(t, err, "Getting contract abi for OperatorFactory shouldn't fail")
- latestBlockNum, err := chainClient.LatestBlockNumber(context.Background())
+ latestBlockNum, err := chainClient.LatestBlockNumber(utils.TestContext(t))
require.NoError(t, err, "Subscribing to contract event log for OperatorFactory instance shouldn't fail")
query := geth.FilterQuery{
FromBlock: big.NewInt(0).SetUint64(latestBlockNum),
@@ -149,7 +147,7 @@ func SubscribeOperatorFactoryEvents(
}
eventLogs := make(chan types.Log)
- sub, err := chainClient.SubscribeFilterLogs(context.Background(), query, eventLogs)
+ sub, err := chainClient.SubscribeFilterLogs(utils.TestContext(t), query, eventLogs)
require.NoError(t, err, "Subscribing to contract event log for OperatorFactory instance shouldn't fail")
go func() {
defer sub.Unsubscribe()
@@ -160,14 +158,14 @@ func SubscribeOperatorFactoryEvents(
l.Error().Err(err).Msg("Error while watching for new contract events. Retrying Subscription")
sub.Unsubscribe()
- sub, err = chainClient.SubscribeFilterLogs(context.Background(), query, eventLogs)
+ sub, err = chainClient.SubscribeFilterLogs(utils.TestContext(t), query, eventLogs)
require.NoError(t, err, "Subscribing to contract event log for OperatorFactory instance shouldn't fail")
case vLog := <-eventLogs:
eventDetails, err := contractABI.EventByID(vLog.Topics[0])
require.NoError(t, err, "Getting event details for OperatorFactory instance shouldn't fail")
go ProcessNewEvent(
- t, sub, operatorCreated, authorizedForwarderCreated, &vLog,
- eventDetails, operatorFactoryInstance, contractABI, chainClient,
+ t, operatorCreated, authorizedForwarderCreated, &vLog,
+ eventDetails, operatorFactoryInstance, chainClient,
)
if eventDetails.Name == "AuthorizedForwarderCreated" || eventDetails.Name == "OperatorCreated" {
remainingExpectedEvents--
diff --git a/integration-tests/actions/vrfv1/actions.go b/integration-tests/actions/vrfv1/actions.go
index 68d3e584cee..f8d7190709f 100644
--- a/integration-tests/actions/vrfv1/actions.go
+++ b/integration-tests/actions/vrfv1/actions.go
@@ -1,7 +1,8 @@
package vrfv1
import (
- "github.com/pkg/errors"
+ "fmt"
+
"github.com/smartcontractkit/chainlink-testing-framework/blockchain"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
)
@@ -21,15 +22,15 @@ type Contracts struct {
func DeployVRFContracts(cd contracts.ContractDeployer, bc blockchain.EVMClient, lt contracts.LinkToken) (*Contracts, error) {
bhs, err := cd.DeployBlockhashStore()
if err != nil {
- return nil, errors.Wrap(err, ErrDeployBHSV1)
+ return nil, fmt.Errorf("%s, err %w", ErrDeployBHSV1, err)
}
coordinator, err := cd.DeployVRFCoordinator(lt.Address(), bhs.Address())
if err != nil {
- return nil, errors.Wrap(err, ErrDeployVRFCootrinatorV1)
+ return nil, fmt.Errorf("%s, err %w", ErrDeployVRFCootrinatorV1, err)
}
consumer, err := cd.DeployVRFConsumer(lt.Address(), coordinator.Address())
if err != nil {
- return nil, errors.Wrap(err, ErrDeployVRFConsumerV1)
+ return nil, fmt.Errorf("%s, err %w", ErrDeployVRFConsumerV1, err)
}
if err := bc.WaitForEvents(); err != nil {
return nil, err
diff --git a/integration-tests/actions/vrfv2_actions/vrfv2_steps.go b/integration-tests/actions/vrfv2_actions/vrfv2_steps.go
index 24ac217a334..a832d020b0f 100644
--- a/integration-tests/actions/vrfv2_actions/vrfv2_steps.go
+++ b/integration-tests/actions/vrfv2_actions/vrfv2_steps.go
@@ -6,7 +6,6 @@ import (
"math/big"
"github.com/google/uuid"
- "github.com/pkg/errors"
"github.com/smartcontractkit/chainlink-testing-framework/blockchain"
chainlinkutils "github.com/smartcontractkit/chainlink/v2/core/utils"
@@ -43,15 +42,15 @@ func DeployVRFV2Contracts(
) (*VRFV2Contracts, error) {
bhs, err := contractDeployer.DeployBlockhashStore()
if err != nil {
- return nil, errors.Wrap(err, ErrDeployBlockHashStore)
+ return nil, fmt.Errorf("%s, err %w", ErrDeployBlockHashStore, err)
}
coordinator, err := contractDeployer.DeployVRFCoordinatorV2(linkTokenContract.Address(), bhs.Address(), linkEthFeedContract.Address())
if err != nil {
- return nil, errors.Wrap(err, ErrDeployCoordinator)
+ return nil, fmt.Errorf("%s, err %w", ErrDeployCoordinator, err)
}
loadTestConsumer, err := contractDeployer.DeployVRFv2LoadTestConsumer(coordinator.Address())
if err != nil {
- return nil, errors.Wrap(err, ErrAdvancedConsumer)
+ return nil, fmt.Errorf("%s, err %w", ErrAdvancedConsumer, err)
}
err = chainClient.WaitForEvents()
if err != nil {
@@ -70,7 +69,7 @@ func CreateVRFV2Jobs(
for _, chainlinkNode := range chainlinkNodes {
vrfKey, err := chainlinkNode.MustCreateVRFKey()
if err != nil {
- return nil, errors.Wrap(err, ErrCreatingVRFv2Key)
+ return nil, fmt.Errorf("%s, err %w", ErrCreatingVRFv2Key, err)
}
pubKeyCompressed := vrfKey.Data.ID
jobUUID := uuid.New()
@@ -79,11 +78,11 @@ func CreateVRFV2Jobs(
}
ost, err := os.String()
if err != nil {
- return nil, errors.Wrap(err, ErrParseJob)
+ return nil, fmt.Errorf("%s, err %w", ErrParseJob, err)
}
nativeTokenPrimaryKeyAddress, err := chainlinkNode.PrimaryEthAddress()
if err != nil {
- return nil, errors.Wrap(err, ErrNodePrimaryKey)
+ return nil, fmt.Errorf("%s, err %w", ErrNodePrimaryKey, err)
}
job, err := chainlinkNode.MustCreateJob(&client.VRFV2JobSpec{
Name: fmt.Sprintf("vrf-%s", jobUUID),
@@ -97,15 +96,15 @@ func CreateVRFV2Jobs(
BatchFulfillmentEnabled: false,
})
if err != nil {
- return nil, errors.Wrap(err, ErrCreatingVRFv2Job)
+ return nil, fmt.Errorf("%s, err %w", ErrCreatingVRFv2Job, err)
}
provingKey, err := VRFV2RegisterProvingKey(vrfKey, nativeTokenPrimaryKeyAddress, coordinator)
if err != nil {
- return nil, errors.Wrap(err, ErrCreatingProvingKey)
+ return nil, fmt.Errorf("%s, err %w", ErrCreatingProvingKey, err)
}
keyHash, err := coordinator.HashOfKey(context.Background(), provingKey)
if err != nil {
- return nil, errors.Wrap(err, ErrCreatingProvingKeyHash)
+ return nil, fmt.Errorf("%s, err %w", ErrCreatingProvingKeyHash, err)
}
ji := VRFV2JobInfo{
Job: job,
@@ -125,14 +124,14 @@ func VRFV2RegisterProvingKey(
) (VRFV2EncodedProvingKey, error) {
provingKey, err := actions.EncodeOnChainVRFProvingKey(*vrfKey)
if err != nil {
- return VRFV2EncodedProvingKey{}, errors.Wrap(err, ErrEncodingProvingKey)
+ return VRFV2EncodedProvingKey{}, fmt.Errorf("%s, err %w", ErrEncodingProvingKey, err)
}
err = coordinator.RegisterProvingKey(
oracleAddress,
provingKey,
)
if err != nil {
- return VRFV2EncodedProvingKey{}, errors.Wrap(err, ErrRegisterProvingKey)
+ return VRFV2EncodedProvingKey{}, fmt.Errorf("%s, err %w", ErrRegisterProvingKey, err)
}
return provingKey, nil
}
@@ -140,11 +139,11 @@ func VRFV2RegisterProvingKey(
func FundVRFCoordinatorV2Subscription(linkToken contracts.LinkToken, coordinator contracts.VRFCoordinatorV2, chainClient blockchain.EVMClient, subscriptionID uint64, linkFundingAmount *big.Int) error {
encodedSubId, err := chainlinkutils.ABIEncode(`[{"type":"uint64"}]`, subscriptionID)
if err != nil {
- return errors.Wrap(err, ErrABIEncodingFunding)
+ return fmt.Errorf("%s, err %w", ErrABIEncodingFunding, err)
}
_, err = linkToken.TransferAndCall(coordinator.Address(), big.NewInt(0).Mul(linkFundingAmount, big.NewInt(1e18)), encodedSubId)
if err != nil {
- return errors.Wrap(err, ErrSendingLinkToken)
+ return fmt.Errorf("%s, err %w", ErrSendingLinkToken, err)
}
return chainClient.WaitForEvents()
}
diff --git a/integration-tests/actions/vrfv2plus/vrfv2plus_config/config.go b/integration-tests/actions/vrfv2plus/vrfv2plus_config/config.go
index 10d4f19c244..a47103a8a18 100644
--- a/integration-tests/actions/vrfv2plus/vrfv2plus_config/config.go
+++ b/integration-tests/actions/vrfv2plus/vrfv2plus_config/config.go
@@ -7,8 +7,8 @@ type VRFV2PlusConfig struct {
IsNativePayment bool `envconfig:"IS_NATIVE_PAYMENT" default:"false"` // Whether to use native payment or LINK token
LinkNativeFeedResponse int64 `envconfig:"LINK_NATIVE_FEED_RESPONSE" default:"1000000000000000000"` // Response of the LINK/ETH feed
MinimumConfirmations uint16 `envconfig:"MINIMUM_CONFIRMATIONS" default:"3"` // Minimum number of confirmations for the VRF Coordinator
- SubscriptionFundingAmountLink int64 `envconfig:"SUBSCRIPTION_FUNDING_AMOUNT_LINK" default:"10"` // Amount of LINK to fund the subscription with
- SubscriptionFundingAmountNative int64 `envconfig:"SUBSCRIPTION_FUNDING_AMOUNT_NATIVE" default:"1"` // Amount of native currency to fund the subscription with
+ SubscriptionFundingAmountLink float64 `envconfig:"SUBSCRIPTION_FUNDING_AMOUNT_LINK" default:"5"` // Amount of LINK to fund the subscription with
+ SubscriptionFundingAmountNative float64 `envconfig:"SUBSCRIPTION_FUNDING_AMOUNT_NATIVE" default:"1"` // Amount of native currency to fund the subscription with
NumberOfWords uint32 `envconfig:"NUMBER_OF_WORDS" default:"3"` // Number of words to request
CallbackGasLimit uint32 `envconfig:"CALLBACK_GAS_LIMIT" default:"1000000"` // Gas limit for the callback
MaxGasLimitCoordinatorConfig uint32 `envconfig:"MAX_GAS_LIMIT_COORDINATOR_CONFIG" default:"2500000"` // Max gas limit for the VRF Coordinator config
@@ -23,6 +23,8 @@ type VRFV2PlusConfig struct {
RandomnessRequestCountPerRequest uint16 `envconfig:"RANDOMNESS_REQUEST_COUNT_PER_REQUEST" default:"1"` // How many randomness requests to send per request
RandomnessRequestCountPerRequestDeviation uint16 `envconfig:"RANDOMNESS_REQUEST_COUNT_PER_REQUEST_DEVIATION" default:"0"` // How many randomness requests to send per request
+ RandomWordsFulfilledEventTimeout time.Duration `envconfig:"RANDOM_WORDS_FULFILLED_EVENT_TIMEOUT" default:"2m"` // How long to wait for the RandomWordsFulfilled event to be emitted
+
//Wrapper Config
WrapperGasOverhead uint32 `envconfig:"WRAPPER_GAS_OVERHEAD" default:"50000"`
CoordinatorGasOverhead uint32 `envconfig:"COORDINATOR_GAS_OVERHEAD" default:"52000"`
diff --git a/integration-tests/actions/vrfv2plus/vrfv2plus_steps.go b/integration-tests/actions/vrfv2plus/vrfv2plus_steps.go
index 46f0ca58e69..28fb2635ff3 100644
--- a/integration-tests/actions/vrfv2plus/vrfv2plus_steps.go
+++ b/integration-tests/actions/vrfv2plus/vrfv2plus_steps.go
@@ -7,13 +7,15 @@ import (
"sync"
"time"
+ "github.com/smartcontractkit/chainlink-testing-framework/utils"
+
"github.com/smartcontractkit/chainlink/v2/core/assets"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrfv2plus_wrapper_load_test_consumer"
"github.com/ethereum/go-ethereum/common"
"github.com/google/uuid"
- "github.com/pkg/errors"
"github.com/rs/zerolog"
+
"github.com/smartcontractkit/chainlink-testing-framework/blockchain"
"github.com/smartcontractkit/chainlink/integration-tests/actions"
"github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2plus/vrfv2plus_config"
@@ -70,19 +72,19 @@ func DeployVRFV2_5Contracts(
) (*VRFV2_5Contracts, error) {
bhs, err := contractDeployer.DeployBlockhashStore()
if err != nil {
- return nil, errors.Wrap(err, ErrDeployBlockHashStore)
+ return nil, fmt.Errorf("%s, err %w", ErrDeployBlockHashStore, err)
}
err = chainClient.WaitForEvents()
if err != nil {
- return nil, errors.Wrap(err, ErrWaitTXsComplete)
+ return nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err)
}
coordinator, err := contractDeployer.DeployVRFCoordinatorV2_5(bhs.Address())
if err != nil {
- return nil, errors.Wrap(err, ErrDeployCoordinator)
+ return nil, fmt.Errorf("%s, err %w", ErrDeployCoordinator, err)
}
err = chainClient.WaitForEvents()
if err != nil {
- return nil, errors.Wrap(err, ErrWaitTXsComplete)
+ return nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err)
}
consumers, err := DeployVRFV2PlusConsumers(contractDeployer, coordinator, consumerContractsAmount)
if err != nil {
@@ -90,7 +92,7 @@ func DeployVRFV2_5Contracts(
}
err = chainClient.WaitForEvents()
if err != nil {
- return nil, errors.Wrap(err, ErrWaitTXsComplete)
+ return nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err)
}
return &VRFV2_5Contracts{coordinator, bhs, consumers}, nil
}
@@ -106,11 +108,11 @@ func DeployVRFV2PlusDirectFundingContracts(
vrfv2PlusWrapper, err := contractDeployer.DeployVRFV2PlusWrapper(linkTokenAddress, linkEthFeedAddress, coordinator.Address())
if err != nil {
- return nil, errors.Wrap(err, ErrDeployWrapper)
+ return nil, fmt.Errorf("%s, err %w", ErrDeployWrapper, err)
}
err = chainClient.WaitForEvents()
if err != nil {
- return nil, errors.Wrap(err, ErrWaitTXsComplete)
+ return nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err)
}
consumers, err := DeployVRFV2PlusWrapperConsumers(contractDeployer, linkTokenAddress, vrfv2PlusWrapper, consumerContractsAmount)
@@ -119,7 +121,7 @@ func DeployVRFV2PlusDirectFundingContracts(
}
err = chainClient.WaitForEvents()
if err != nil {
- return nil, errors.Wrap(err, ErrWaitTXsComplete)
+ return nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err)
}
return &VRFV2PlusWrapperContracts{vrfv2PlusWrapper, consumers}, nil
}
@@ -129,7 +131,7 @@ func DeployVRFV2PlusConsumers(contractDeployer contracts.ContractDeployer, coord
for i := 1; i <= consumerContractsAmount; i++ {
loadTestConsumer, err := contractDeployer.DeployVRFv2PlusLoadTestConsumer(coordinator.Address())
if err != nil {
- return nil, errors.Wrap(err, ErrAdvancedConsumer)
+ return nil, fmt.Errorf("%s, err %w", ErrAdvancedConsumer, err)
}
consumers = append(consumers, loadTestConsumer)
}
@@ -141,7 +143,7 @@ func DeployVRFV2PlusWrapperConsumers(contractDeployer contracts.ContractDeployer
for i := 1; i <= consumerContractsAmount; i++ {
loadTestConsumer, err := contractDeployer.DeployVRFV2PlusWrapperLoadTestConsumer(linkTokenAddress, vrfV2PlusWrapper.Address())
if err != nil {
- return nil, errors.Wrap(err, ErrAdvancedConsumer)
+ return nil, fmt.Errorf("%s, err %w", ErrAdvancedConsumer, err)
}
consumers = append(consumers, loadTestConsumer)
}
@@ -162,7 +164,7 @@ func CreateVRFV2PlusJob(
}
ost, err := os.String()
if err != nil {
- return nil, errors.Wrap(err, ErrParseJob)
+ return nil, fmt.Errorf("%s, err %w", ErrParseJob, err)
}
job, err := chainlinkNode.MustCreateJob(&client.VRFV2PlusJobSpec{
@@ -177,7 +179,7 @@ func CreateVRFV2PlusJob(
BatchFulfillmentEnabled: false,
})
if err != nil {
- return nil, errors.Wrap(err, ErrCreatingVRFv2PlusJob)
+ return nil, fmt.Errorf("%s, err %w", ErrCreatingVRFv2PlusJob, err)
}
return job, nil
@@ -190,14 +192,14 @@ func VRFV2_5RegisterProvingKey(
) (VRFV2PlusEncodedProvingKey, error) {
provingKey, err := actions.EncodeOnChainVRFProvingKey(*vrfKey)
if err != nil {
- return VRFV2PlusEncodedProvingKey{}, errors.Wrap(err, ErrEncodingProvingKey)
+ return VRFV2PlusEncodedProvingKey{}, fmt.Errorf("%s, err %w", ErrEncodingProvingKey, err)
}
err = coordinator.RegisterProvingKey(
oracleAddress,
provingKey,
)
if err != nil {
- return VRFV2PlusEncodedProvingKey{}, errors.Wrap(err, ErrRegisterProvingKey)
+ return VRFV2PlusEncodedProvingKey{}, fmt.Errorf("%s, err %w", ErrRegisterProvingKey, err)
}
return provingKey, nil
}
@@ -209,26 +211,32 @@ func VRFV2PlusUpgradedVersionRegisterProvingKey(
) (VRFV2PlusEncodedProvingKey, error) {
provingKey, err := actions.EncodeOnChainVRFProvingKey(*vrfKey)
if err != nil {
- return VRFV2PlusEncodedProvingKey{}, errors.Wrap(err, ErrEncodingProvingKey)
+ return VRFV2PlusEncodedProvingKey{}, fmt.Errorf("%s, err %w", ErrEncodingProvingKey, err)
}
err = coordinator.RegisterProvingKey(
oracleAddress,
provingKey,
)
if err != nil {
- return VRFV2PlusEncodedProvingKey{}, errors.Wrap(err, ErrRegisterProvingKey)
+ return VRFV2PlusEncodedProvingKey{}, fmt.Errorf("%s, err %w", ErrRegisterProvingKey, err)
}
return provingKey, nil
}
-func FundVRFCoordinatorV2_5Subscription(linkToken contracts.LinkToken, coordinator contracts.VRFCoordinatorV2_5, chainClient blockchain.EVMClient, subscriptionID *big.Int, linkFundingAmount *big.Int) error {
+func FundVRFCoordinatorV2_5Subscription(
+ linkToken contracts.LinkToken,
+ coordinator contracts.VRFCoordinatorV2_5,
+ chainClient blockchain.EVMClient,
+ subscriptionID *big.Int,
+ linkFundingAmountJuels *big.Int,
+) error {
encodedSubId, err := chainlinkutils.ABIEncode(`[{"type":"uint256"}]`, subscriptionID)
if err != nil {
- return errors.Wrap(err, ErrABIEncodingFunding)
+ return fmt.Errorf("%s, err %w", ErrABIEncodingFunding, err)
}
- _, err = linkToken.TransferAndCall(coordinator.Address(), big.NewInt(0).Mul(linkFundingAmount, big.NewInt(1e18)), encodedSubId)
+ _, err = linkToken.TransferAndCall(coordinator.Address(), linkFundingAmountJuels, encodedSubId)
if err != nil {
- return errors.Wrap(err, ErrSendingLinkToken)
+ return fmt.Errorf("%s, err %w", ErrSendingLinkToken, err)
}
return chainClient.WaitForEvents()
}
@@ -236,9 +244,10 @@ func FundVRFCoordinatorV2_5Subscription(linkToken contracts.LinkToken, coordinat
// SetupVRFV2_5Environment will create specified number of subscriptions and add the same conumer/s to each of them
func SetupVRFV2_5Environment(
env *test_env.CLClusterTestEnv,
- vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig,
+ vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig,
linkToken contracts.LinkToken,
mockNativeLINKFeed contracts.MockETHLINKFeed,
+ registerProvingKeyAgainstAddress string,
numberOfConsumers int,
numberOfSubToCreate int,
l zerolog.Logger,
@@ -247,7 +256,7 @@ func SetupVRFV2_5Environment(
l.Info().Msg("Deploying VRFV2 Plus contracts")
vrfv2_5Contracts, err := DeployVRFV2_5Contracts(env.ContractDeployer, env.EVMClient, numberOfConsumers)
if err != nil {
- return nil, nil, nil, errors.Wrap(err, ErrDeployVRFV2_5Contracts)
+ return nil, nil, nil, fmt.Errorf("%s, err %w", ErrDeployVRFV2_5Contracts, err)
}
l.Info().Str("Coordinator", vrfv2_5Contracts.Coordinator.Address()).Msg("Setting Coordinator Config")
@@ -263,46 +272,51 @@ func SetupVRFV2_5Environment(
},
)
if err != nil {
- return nil, nil, nil, errors.Wrap(err, ErrSetVRFCoordinatorConfig)
+ return nil, nil, nil, fmt.Errorf("%s, err %w", ErrSetVRFCoordinatorConfig, err)
}
l.Info().Str("Coordinator", vrfv2_5Contracts.Coordinator.Address()).Msg("Setting Link and ETH/LINK feed")
err = vrfv2_5Contracts.Coordinator.SetLINKAndLINKNativeFeed(linkToken.Address(), mockNativeLINKFeed.Address())
if err != nil {
- return nil, nil, nil, errors.Wrap(err, ErrSetLinkNativeLinkFeed)
+ return nil, nil, nil, fmt.Errorf("%s, err %w", ErrSetLinkNativeLinkFeed, err)
}
err = env.EVMClient.WaitForEvents()
if err != nil {
- return nil, nil, nil, errors.Wrap(err, ErrWaitTXsComplete)
- }
- l.Info().Str("Coordinator", vrfv2_5Contracts.Coordinator.Address()).Msg("Creating and funding subscriptions, adding consumers")
- subIDs, err := CreateFundSubsAndAddConsumers(env, vrfv2PlusConfig, linkToken, vrfv2_5Contracts.Coordinator, vrfv2_5Contracts.LoadTestConsumers, numberOfSubToCreate)
+ return nil, nil, nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err)
+ }
+ l.Info().Str("Coordinator", vrfv2_5Contracts.Coordinator.Address()).Int("Number of Subs to create", numberOfSubToCreate).Msg("Creating and funding subscriptions, adding consumers")
+ subIDs, err := CreateFundSubsAndAddConsumers(
+ env,
+ vrfv2PlusConfig,
+ linkToken,
+ vrfv2_5Contracts.Coordinator, vrfv2_5Contracts.LoadTestConsumers, numberOfSubToCreate)
if err != nil {
return nil, nil, nil, err
}
l.Info().Str("Node URL", env.ClCluster.NodeAPIs()[0].URL()).Msg("Creating VRF Key on the Node")
vrfKey, err := env.ClCluster.NodeAPIs()[0].MustCreateVRFKey()
if err != nil {
- return nil, nil, nil, errors.Wrap(err, ErrCreatingVRFv2PlusKey)
+ return nil, nil, nil, fmt.Errorf("%s, err %w", ErrCreatingVRFv2PlusKey, err)
}
pubKeyCompressed := vrfKey.Data.ID
- nativeTokenPrimaryKeyAddress, err := env.ClCluster.NodeAPIs()[0].PrimaryEthAddress()
- if err != nil {
- return nil, nil, nil, errors.Wrap(err, ErrNodePrimaryKey)
- }
l.Info().Str("Coordinator", vrfv2_5Contracts.Coordinator.Address()).Msg("Registering Proving Key")
- provingKey, err := VRFV2_5RegisterProvingKey(vrfKey, nativeTokenPrimaryKeyAddress, vrfv2_5Contracts.Coordinator)
+ provingKey, err := VRFV2_5RegisterProvingKey(vrfKey, registerProvingKeyAgainstAddress, vrfv2_5Contracts.Coordinator)
if err != nil {
- return nil, nil, nil, errors.Wrap(err, ErrRegisteringProvingKey)
+ return nil, nil, nil, fmt.Errorf("%s, err %w", ErrRegisteringProvingKey, err)
}
keyHash, err := vrfv2_5Contracts.Coordinator.HashOfKey(context.Background(), provingKey)
if err != nil {
- return nil, nil, nil, errors.Wrap(err, ErrCreatingProvingKeyHash)
+ return nil, nil, nil, fmt.Errorf("%s, err %w", ErrCreatingProvingKeyHash, err)
}
chainID := env.EVMClient.GetChainID()
+ nativeTokenPrimaryKeyAddress, err := env.ClCluster.NodeAPIs()[0].PrimaryEthAddress()
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("%s, err %w", ErrNodePrimaryKey, err)
+ }
+
l.Info().Msg("Creating VRFV2 Plus Job")
job, err := CreateVRFV2PlusJob(
env.ClCluster.NodeAPIs()[0],
@@ -313,7 +327,7 @@ func SetupVRFV2_5Environment(
vrfv2PlusConfig.MinimumConfirmations,
)
if err != nil {
- return nil, nil, nil, errors.Wrap(err, ErrCreateVRFV2PlusJobs)
+ return nil, nil, nil, fmt.Errorf("%s, err %w", ErrCreateVRFV2PlusJobs, err)
}
// this part is here because VRFv2 can work with only a specific key
@@ -321,7 +335,7 @@ func SetupVRFV2_5Environment(
// Key = '...'
addr, err := env.ClCluster.Nodes[0].API.PrimaryEthAddress()
if err != nil {
- return nil, nil, nil, errors.Wrap(err, ErrGetPrimaryKey)
+ return nil, nil, nil, fmt.Errorf("%s, err %w", ErrGetPrimaryKey, err)
}
nodeConfig := node.NewConfig(env.ClCluster.Nodes[0].NodeConfig,
node.WithVRFv2EVMEstimator(addr),
@@ -329,7 +343,7 @@ func SetupVRFV2_5Environment(
l.Info().Msg("Restarting Node with new sending key PriceMax configuration")
err = env.ClCluster.Nodes[0].Restart(nodeConfig)
if err != nil {
- return nil, nil, nil, errors.Wrap(err, ErrRestartCLNode)
+ return nil, nil, nil, fmt.Errorf("%s, err %w", ErrRestartCLNode, err)
}
vrfv2PlusKeyData := VRFV2PlusKeyData{
@@ -351,7 +365,7 @@ func SetupVRFV2_5Environment(
func CreateFundSubsAndAddConsumers(
env *test_env.CLClusterTestEnv,
- vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig,
+ vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig,
linkToken contracts.LinkToken,
coordinator contracts.VRFCoordinatorV2_5,
consumers []contracts.VRFv2PlusLoadTestConsumer,
@@ -378,14 +392,14 @@ func CreateFundSubsAndAddConsumers(
err = env.EVMClient.WaitForEvents()
if err != nil {
- return nil, errors.Wrap(err, ErrWaitTXsComplete)
+ return nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err)
}
return subIDs, nil
}
func CreateSubsAndFund(
env *test_env.CLClusterTestEnv,
- vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig,
+ vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig,
linkToken contracts.LinkToken,
coordinator contracts.VRFCoordinatorV2_5,
subAmountToCreate int,
@@ -396,7 +410,7 @@ func CreateSubsAndFund(
}
err = env.EVMClient.WaitForEvents()
if err != nil {
- return nil, errors.Wrap(err, ErrWaitTXsComplete)
+ return nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err)
}
err = FundSubscriptions(env, vrfv2PlusConfig, linkToken, coordinator, subs)
if err != nil {
@@ -430,7 +444,7 @@ func AddConsumersToSubs(
for _, consumer := range consumers {
err := coordinator.AddConsumer(subID, consumer.Address())
if err != nil {
- return errors.Wrap(err, ErrAddConsumerToSub)
+ return fmt.Errorf("%s, err %w", ErrAddConsumerToSub, err)
}
}
}
@@ -439,7 +453,7 @@ func AddConsumersToSubs(
func SetupVRFV2PlusWrapperEnvironment(
env *test_env.CLClusterTestEnv,
- vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig,
+ vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig,
linkToken contracts.LinkToken,
mockNativeLINKFeed contracts.MockETHLINKFeed,
coordinator contracts.VRFCoordinatorV2_5,
@@ -462,7 +476,7 @@ func SetupVRFV2PlusWrapperEnvironment(
err = env.EVMClient.WaitForEvents()
if err != nil {
- return nil, nil, errors.Wrap(err, ErrWaitTXsComplete)
+ return nil, nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err)
}
err = wrapperContracts.VRFV2PlusWrapper.SetConfig(
vrfv2PlusConfig.WrapperGasOverhead,
@@ -481,7 +495,7 @@ func SetupVRFV2PlusWrapperEnvironment(
err = env.EVMClient.WaitForEvents()
if err != nil {
- return nil, nil, errors.Wrap(err, ErrWaitTXsComplete)
+ return nil, nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err)
}
//fund sub
@@ -492,7 +506,7 @@ func SetupVRFV2PlusWrapperEnvironment(
err = env.EVMClient.WaitForEvents()
if err != nil {
- return nil, nil, errors.Wrap(err, ErrWaitTXsComplete)
+ return nil, nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err)
}
err = FundSubscriptions(env, vrfv2PlusConfig, linkToken, coordinator, []*big.Int{wrapperSubID})
@@ -510,7 +524,7 @@ func SetupVRFV2PlusWrapperEnvironment(
}
err = env.EVMClient.WaitForEvents()
if err != nil {
- return nil, nil, errors.Wrap(err, ErrWaitTXsComplete)
+ return nil, nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err)
}
//fund consumer with Eth
@@ -520,21 +534,24 @@ func SetupVRFV2PlusWrapperEnvironment(
}
err = env.EVMClient.WaitForEvents()
if err != nil {
- return nil, nil, errors.Wrap(err, ErrWaitTXsComplete)
+ return nil, nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err)
}
return wrapperContracts, wrapperSubID, nil
}
func CreateSubAndFindSubID(env *test_env.CLClusterTestEnv, coordinator contracts.VRFCoordinatorV2_5) (*big.Int, error) {
tx, err := coordinator.CreateSubscription()
if err != nil {
- return nil, errors.Wrap(err, ErrCreateVRFSubscription)
+ return nil, fmt.Errorf("%s, err %w", ErrCreateVRFSubscription, err)
}
err = env.EVMClient.WaitForEvents()
if err != nil {
- return nil, errors.Wrap(err, ErrWaitTXsComplete)
+ return nil, fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err)
}
receipt, err := env.EVMClient.GetTxReceipt(tx.Hash())
+ if err != nil {
+ return nil, fmt.Errorf("error getting tx receipt, err: %w", err)
+ }
//SubscriptionsCreated Log should be emitted with the subscription ID
subID := receipt.Logs[0].Topics[1].Big()
@@ -542,7 +559,7 @@ func CreateSubAndFindSubID(env *test_env.CLClusterTestEnv, coordinator contracts
//verify that the subscription was created
_, err = coordinator.FindSubscriptionID(subID)
if err != nil {
- return nil, errors.Wrap(err, ErrFindSubID)
+ return nil, fmt.Errorf("%s, err %w", ErrFindSubID, err)
}
return subID, nil
@@ -551,11 +568,11 @@ func CreateSubAndFindSubID(env *test_env.CLClusterTestEnv, coordinator contracts
func GetUpgradedCoordinatorTotalBalance(coordinator contracts.VRFCoordinatorV2PlusUpgradedVersion) (linkTotalBalance *big.Int, nativeTokenTotalBalance *big.Int, err error) {
linkTotalBalance, err = coordinator.GetLinkTotalBalance(context.Background())
if err != nil {
- return nil, nil, errors.Wrap(err, ErrLinkTotalBalance)
+ return nil, nil, fmt.Errorf("%s, err %w", ErrLinkTotalBalance, err)
}
nativeTokenTotalBalance, err = coordinator.GetNativeTokenTotalBalance(context.Background())
if err != nil {
- return nil, nil, errors.Wrap(err, ErrNativeTokenBalance)
+ return nil, nil, fmt.Errorf("%s, err %w", ErrNativeTokenBalance, err)
}
return
}
@@ -563,37 +580,42 @@ func GetUpgradedCoordinatorTotalBalance(coordinator contracts.VRFCoordinatorV2Pl
func GetCoordinatorTotalBalance(coordinator contracts.VRFCoordinatorV2_5) (linkTotalBalance *big.Int, nativeTokenTotalBalance *big.Int, err error) {
linkTotalBalance, err = coordinator.GetLinkTotalBalance(context.Background())
if err != nil {
- return nil, nil, errors.Wrap(err, ErrLinkTotalBalance)
+ return nil, nil, fmt.Errorf("%s, err %w", ErrLinkTotalBalance, err)
}
nativeTokenTotalBalance, err = coordinator.GetNativeTokenTotalBalance(context.Background())
if err != nil {
- return nil, nil, errors.Wrap(err, ErrNativeTokenBalance)
+ return nil, nil, fmt.Errorf("%s, err %w", ErrNativeTokenBalance, err)
}
return
}
func FundSubscriptions(
env *test_env.CLClusterTestEnv,
- vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig,
+ vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig,
linkAddress contracts.LinkToken,
coordinator contracts.VRFCoordinatorV2_5,
subIDs []*big.Int,
) error {
for _, subID := range subIDs {
//Native Billing
- err := coordinator.FundSubscriptionWithNative(subID, big.NewInt(0).Mul(big.NewInt(vrfv2PlusConfig.SubscriptionFundingAmountNative), big.NewInt(1e18)))
+ amountWei := utils.EtherToWei(big.NewFloat(vrfv2PlusConfig.SubscriptionFundingAmountNative))
+ err := coordinator.FundSubscriptionWithNative(
+ subID,
+ amountWei,
+ )
if err != nil {
- return errors.Wrap(err, ErrFundSubWithNativeToken)
+ return fmt.Errorf("%s, err %w", ErrFundSubWithNativeToken, err)
}
//Link Billing
- err = FundVRFCoordinatorV2_5Subscription(linkAddress, coordinator, env.EVMClient, subID, big.NewInt(vrfv2PlusConfig.SubscriptionFundingAmountLink))
+ amountJuels := utils.EtherToWei(big.NewFloat(vrfv2PlusConfig.SubscriptionFundingAmountLink))
+ err = FundVRFCoordinatorV2_5Subscription(linkAddress, coordinator, env.EVMClient, subID, amountJuels)
if err != nil {
- return errors.Wrap(err, ErrFundSubWithLinkToken)
+ return fmt.Errorf("%s, err %w", ErrFundSubWithLinkToken, err)
}
}
err := env.EVMClient.WaitForEvents()
if err != nil {
- return errors.Wrap(err, ErrWaitTXsComplete)
+ return fmt.Errorf("%s, err %w", ErrWaitTXsComplete, err)
}
return nil
}
@@ -605,7 +627,8 @@ func RequestRandomnessAndWaitForFulfillment(
subID *big.Int,
isNativeBilling bool,
randomnessRequestCountPerRequest uint16,
- vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig,
+ vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig,
+ randomWordsFulfilledEventTimeout time.Duration,
l zerolog.Logger,
) (*vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled, error) {
logRandRequest(consumer.Address(), coordinator.Address(), subID, isNativeBilling, vrfv2PlusConfig, l)
@@ -619,10 +642,18 @@ func RequestRandomnessAndWaitForFulfillment(
randomnessRequestCountPerRequest,
)
if err != nil {
- return nil, errors.Wrap(err, ErrRequestRandomness)
+ return nil, fmt.Errorf("%s, err %w", ErrRequestRandomness, err)
}
- return WaitForRequestAndFulfillmentEvents(consumer.Address(), coordinator, vrfv2PlusData, subID, isNativeBilling, l)
+ return WaitForRequestAndFulfillmentEvents(
+ consumer.Address(),
+ coordinator,
+ vrfv2PlusData,
+ subID,
+ isNativeBilling,
+ randomWordsFulfilledEventTimeout,
+ l,
+ )
}
func RequestRandomnessAndWaitForFulfillmentUpgraded(
@@ -631,7 +662,7 @@ func RequestRandomnessAndWaitForFulfillmentUpgraded(
vrfv2PlusData *VRFV2PlusData,
subID *big.Int,
isNativeBilling bool,
- vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig,
+ vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig,
l zerolog.Logger,
) (*vrf_v2plus_upgraded_version.VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilled, error) {
logRandRequest(consumer.Address(), coordinator.Address(), subID, isNativeBilling, vrfv2PlusConfig, l)
@@ -645,7 +676,7 @@ func RequestRandomnessAndWaitForFulfillmentUpgraded(
vrfv2PlusConfig.RandomnessRequestCountPerRequest,
)
if err != nil {
- return nil, errors.Wrap(err, ErrRequestRandomness)
+ return nil, fmt.Errorf("%s, err %w", ErrRequestRandomness, err)
}
randomWordsRequestedEvent, err := coordinator.WaitForRandomWordsRequestedEvent(
@@ -655,7 +686,7 @@ func RequestRandomnessAndWaitForFulfillmentUpgraded(
time.Minute*1,
)
if err != nil {
- return nil, errors.Wrap(err, ErrWaitRandomWordsRequestedEvent)
+ return nil, fmt.Errorf("%s, err %w", ErrWaitRandomWordsRequestedEvent, err)
}
LogRandomnessRequestedEventUpgraded(l, coordinator, randomWordsRequestedEvent)
@@ -666,7 +697,7 @@ func RequestRandomnessAndWaitForFulfillmentUpgraded(
time.Minute*2,
)
if err != nil {
- return nil, errors.Wrap(err, ErrWaitRandomWordsFulfilledEvent)
+ return nil, fmt.Errorf("%s, err %w", ErrWaitRandomWordsFulfilledEvent, err)
}
LogRandomWordsFulfilledEventUpgraded(l, coordinator, randomWordsFulfilledEvent)
@@ -679,7 +710,8 @@ func DirectFundingRequestRandomnessAndWaitForFulfillment(
vrfv2PlusData *VRFV2PlusData,
subID *big.Int,
isNativeBilling bool,
- vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig,
+ vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig,
+ randomWordsFulfilledEventTimeout time.Duration,
l zerolog.Logger,
) (*vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled, error) {
logRandRequest(consumer.Address(), coordinator.Address(), subID, isNativeBilling, vrfv2PlusConfig, l)
@@ -691,7 +723,7 @@ func DirectFundingRequestRandomnessAndWaitForFulfillment(
vrfv2PlusConfig.RandomnessRequestCountPerRequest,
)
if err != nil {
- return nil, errors.Wrap(err, ErrRequestRandomnessDirectFundingNativePayment)
+ return nil, fmt.Errorf("%s, err %w", ErrRequestRandomnessDirectFundingNativePayment, err)
}
} else {
_, err := consumer.RequestRandomness(
@@ -701,14 +733,22 @@ func DirectFundingRequestRandomnessAndWaitForFulfillment(
vrfv2PlusConfig.RandomnessRequestCountPerRequest,
)
if err != nil {
- return nil, errors.Wrap(err, ErrRequestRandomnessDirectFundingLinkPayment)
+ return nil, fmt.Errorf("%s, err %w", ErrRequestRandomnessDirectFundingLinkPayment, err)
}
}
wrapperAddress, err := consumer.GetWrapper(context.Background())
if err != nil {
- return nil, errors.Wrap(err, "error getting wrapper address")
+ return nil, fmt.Errorf("error getting wrapper address, err: %w", err)
}
- return WaitForRequestAndFulfillmentEvents(wrapperAddress.String(), coordinator, vrfv2PlusData, subID, isNativeBilling, l)
+ return WaitForRequestAndFulfillmentEvents(
+ wrapperAddress.String(),
+ coordinator,
+ vrfv2PlusData,
+ subID,
+ isNativeBilling,
+ randomWordsFulfilledEventTimeout,
+ l,
+ )
}
func WaitForRequestAndFulfillmentEvents(
@@ -717,6 +757,7 @@ func WaitForRequestAndFulfillmentEvents(
vrfv2PlusData *VRFV2PlusData,
subID *big.Int,
isNativeBilling bool,
+ randomWordsFulfilledEventTimeout time.Duration,
l zerolog.Logger,
) (*vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled, error) {
randomWordsRequestedEvent, err := coordinator.WaitForRandomWordsRequestedEvent(
@@ -726,7 +767,7 @@ func WaitForRequestAndFulfillmentEvents(
time.Minute*1,
)
if err != nil {
- return nil, errors.Wrap(err, ErrWaitRandomWordsRequestedEvent)
+ return nil, fmt.Errorf("%s, err %w", ErrWaitRandomWordsRequestedEvent, err)
}
LogRandomnessRequestedEvent(l, coordinator, randomWordsRequestedEvent, isNativeBilling)
@@ -734,10 +775,10 @@ func WaitForRequestAndFulfillmentEvents(
randomWordsFulfilledEvent, err := coordinator.WaitForRandomWordsFulfilledEvent(
[]*big.Int{subID},
[]*big.Int{randomWordsRequestedEvent.RequestId},
- time.Minute*2,
+ randomWordsFulfilledEventTimeout,
)
if err != nil {
- return nil, errors.Wrap(err, ErrWaitRandomWordsFulfilledEvent)
+ return nil, fmt.Errorf("%s, err %w", ErrWaitRandomWordsFulfilledEvent, err)
}
LogRandomWordsFulfilledEvent(l, coordinator, randomWordsFulfilledEvent, isNativeBilling)
@@ -762,7 +803,7 @@ func WaitForRequestCountEqualToFulfilmentCount(consumer contracts.VRFv2PlusLoadT
fmt.Errorf("timeout waiting for rand request and fulfilments to be equal AFTER performance test was executed. Request Count: %d, Fulfilment Count: %d",
metrics.RequestCount.Uint64(), metrics.FulfilmentCount.Uint64())
case <-ticker.C:
- go getLoadTestMetrics(consumer, metricsChannel, metricsErrorChannel)
+ go retreiveLoadTestMetrics(consumer, metricsChannel, metricsErrorChannel)
case metrics = <-metricsChannel:
if metrics.RequestCount.Cmp(metrics.FulfilmentCount) == 0 {
ticker.Stop()
@@ -777,7 +818,42 @@ func WaitForRequestCountEqualToFulfilmentCount(consumer contracts.VRFv2PlusLoadT
}
}
-func getLoadTestMetrics(
+func ReturnFundsForFulfilledRequests(client blockchain.EVMClient, coordinator contracts.VRFCoordinatorV2_5, l zerolog.Logger) error {
+ linkTotalBalance, err := coordinator.GetLinkTotalBalance(context.Background())
+ if err != nil {
+ return fmt.Errorf("error getting LINK total balance, err: %w", err)
+ }
+ defaultWallet := client.GetDefaultWallet().Address()
+ l.Info().
+ Str("LINK amount", linkTotalBalance.String()).
+ Str("Returning to", defaultWallet).
+ Msg("Returning LINK for fulfilled requests")
+ err = coordinator.OracleWithdraw(
+ common.HexToAddress(defaultWallet),
+ linkTotalBalance,
+ )
+ if err != nil {
+ return fmt.Errorf("error withdrawing LINK from coordinator to default wallet, err: %w", err)
+ }
+ nativeTotalBalance, err := coordinator.GetNativeTokenTotalBalance(context.Background())
+ if err != nil {
+ return fmt.Errorf("error getting NATIVE total balance, err: %w", err)
+ }
+ l.Info().
+ Str("Native Token amount", nativeTotalBalance.String()).
+ Str("Returning to", defaultWallet).
+ Msg("Returning Native Token for fulfilled requests")
+ err = coordinator.OracleWithdrawNative(
+ common.HexToAddress(defaultWallet),
+ nativeTotalBalance,
+ )
+ if err != nil {
+ return fmt.Errorf("error withdrawing NATIVE from coordinator to default wallet, err: %w", err)
+ }
+ return nil
+}
+
+func retreiveLoadTestMetrics(
consumer contracts.VRFv2PlusLoadTestConsumer,
metricsChannel chan *contracts.VRFLoadTestMetrics,
metricsErrorChannel chan error,
@@ -934,7 +1010,7 @@ func logRandRequest(
coordinator string,
subID *big.Int,
isNativeBilling bool,
- vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig,
+ vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig,
l zerolog.Logger) {
l.Debug().
Str("Consumer", consumer).
diff --git a/integration-tests/benchmark/keeper_test.go b/integration-tests/benchmark/keeper_test.go
index 6fbf929e47d..9342f3629b9 100644
--- a/integration-tests/benchmark/keeper_test.go
+++ b/integration-tests/benchmark/keeper_test.go
@@ -37,6 +37,7 @@ Enabled = true
[P2P]
[P2P.V2]
+Enabled = true
AnnounceAddresses = ["0.0.0.0:6690"]
ListenAddresses = ["0.0.0.0:6690"]
[Keeper]
@@ -161,6 +162,7 @@ func TestAutomationBenchmark(t *testing.T) {
RegistryVersions: registryVersions,
KeeperRegistrySettings: &contracts.KeeperRegistrySettings{
PaymentPremiumPPB: uint32(0),
+ FlatFeeMicroLINK: uint32(40000),
BlockCountPerTurn: big.NewInt(100),
CheckGasLimit: uint32(45_000_000), //45M
StalenessSeconds: big.NewInt(90_000),
@@ -225,7 +227,7 @@ func addRegistry(registryToTest string) []eth_contracts.KeeperRegistryVersion {
case "2_0-Multiple":
return repeatRegistries(eth_contracts.RegistryVersion_2_0, NumberOfRegistries)
case "2_1-Multiple":
- return repeatRegistries(eth_contracts.RegistryVersion_1_0, NumberOfRegistries)
+ return repeatRegistries(eth_contracts.RegistryVersion_2_1, NumberOfRegistries)
default:
return []eth_contracts.KeeperRegistryVersion{eth_contracts.RegistryVersion_2_0}
}
@@ -241,13 +243,13 @@ func repeatRegistries(registryVersion eth_contracts.KeeperRegistryVersion, numbe
var networkConfig = map[string]NetworkConfig{
"SimulatedGeth": {
- upkeepSLA: int64(20),
+ upkeepSLA: int64(120), //2 minutes
blockTime: time.Second,
deltaStage: 30 * time.Second,
funding: big.NewFloat(100_000),
},
"geth": {
- upkeepSLA: int64(20),
+ upkeepSLA: int64(120), //2 minutes
blockTime: time.Second,
deltaStage: 30 * time.Second,
funding: big.NewFloat(100_000),
@@ -282,6 +284,18 @@ var networkConfig = map[string]NetworkConfig{
deltaStage: time.Duration(0),
funding: big.NewFloat(ChainlinkNodeFunding),
},
+ "BaseGoerli": {
+ upkeepSLA: int64(60),
+ blockTime: 2 * time.Second,
+ deltaStage: 20 * time.Second,
+ funding: big.NewFloat(ChainlinkNodeFunding),
+ },
+ "ArbitrumSepolia": {
+ upkeepSLA: int64(120),
+ blockTime: time.Second,
+ deltaStage: 20 * time.Second,
+ funding: big.NewFloat(ChainlinkNodeFunding),
+ },
}
func getEnv(key, fallback string) string {
diff --git a/integration-tests/chaos/automation_chaos_test.go b/integration-tests/chaos/automation_chaos_test.go
index a3d4e37406d..6ebf14d806e 100644
--- a/integration-tests/chaos/automation_chaos_test.go
+++ b/integration-tests/chaos/automation_chaos_test.go
@@ -1,7 +1,6 @@
package chaos
import (
- "context"
"fmt"
"math/big"
"testing"
@@ -14,7 +13,6 @@ import (
"github.com/smartcontractkit/chainlink-testing-framework/blockchain"
"github.com/smartcontractkit/chainlink-testing-framework/k8s/chaos"
"github.com/smartcontractkit/chainlink-testing-framework/k8s/environment"
- a "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/alias"
"github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/cdk8s/blockscout"
"github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink"
"github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/ethereum"
@@ -26,6 +24,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
eth_contracts "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum"
+ it_utils "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
var (
@@ -117,6 +116,7 @@ func TestAutomationChaos(t *testing.T) {
}
for name, registryVersion := range registryVersions {
+ registryVersion := registryVersion
t.Run(name, func(t *testing.T) {
t.Parallel()
@@ -132,7 +132,7 @@ func TestAutomationChaos(t *testing.T) {
chainlink.New(0, defaultAutomationSettings),
chaos.NewFailPods,
&chaos.Props{
- LabelsSelector: &map[string]*string{ChaosGroupMinority: a.Str("1")},
+ LabelsSelector: &map[string]*string{ChaosGroupMinority: utils.Ptr("1")},
DurationStr: "1m",
},
},
@@ -141,7 +141,7 @@ func TestAutomationChaos(t *testing.T) {
chainlink.New(0, defaultAutomationSettings),
chaos.NewFailPods,
&chaos.Props{
- LabelsSelector: &map[string]*string{ChaosGroupMajority: a.Str("1")},
+ LabelsSelector: &map[string]*string{ChaosGroupMajority: utils.Ptr("1")},
DurationStr: "1m",
},
},
@@ -150,9 +150,9 @@ func TestAutomationChaos(t *testing.T) {
chainlink.New(0, defaultAutomationSettings),
chaos.NewFailPods,
&chaos.Props{
- LabelsSelector: &map[string]*string{ChaosGroupMajority: a.Str("1")},
+ LabelsSelector: &map[string]*string{ChaosGroupMajority: utils.Ptr("1")},
DurationStr: "1m",
- ContainerNames: &[]*string{a.Str("chainlink-db")},
+ ContainerNames: &[]*string{utils.Ptr("chainlink-db")},
},
},
NetworkChaosFailMajorityNetwork: {
@@ -160,8 +160,8 @@ func TestAutomationChaos(t *testing.T) {
chainlink.New(0, defaultAutomationSettings),
chaos.NewNetworkPartition,
&chaos.Props{
- FromLabels: &map[string]*string{ChaosGroupMajority: a.Str("1")},
- ToLabels: &map[string]*string{ChaosGroupMinority: a.Str("1")},
+ FromLabels: &map[string]*string{ChaosGroupMajority: utils.Ptr("1")},
+ ToLabels: &map[string]*string{ChaosGroupMinority: utils.Ptr("1")},
DurationStr: "1m",
},
},
@@ -170,16 +170,16 @@ func TestAutomationChaos(t *testing.T) {
chainlink.New(0, defaultAutomationSettings),
chaos.NewNetworkPartition,
&chaos.Props{
- FromLabels: &map[string]*string{"app": a.Str("geth")},
- ToLabels: &map[string]*string{ChaosGroupMajorityPlus: a.Str("1")},
+ FromLabels: &map[string]*string{"app": utils.Ptr("geth")},
+ ToLabels: &map[string]*string{ChaosGroupMajorityPlus: utils.Ptr("1")},
DurationStr: "1m",
},
},
}
- for n, tst := range testCases {
- name := n
- testCase := tst
+ for name, testCase := range testCases {
+ name := name
+ testCase := testCase
t.Run(fmt.Sprintf("Automation_%s", name), func(t *testing.T) {
t.Parallel()
network := networks.MustGetSelectedNetworksFromEnv()[0] // Need a new copy of the network for each test
@@ -224,7 +224,7 @@ func TestAutomationChaos(t *testing.T) {
if chainClient != nil {
chainClient.GasStats().PrintStats()
}
- err := actions.TeardownSuite(t, testEnvironment, utils.ProjectRoot, chainlinkNodes, nil, zapcore.PanicLevel, chainClient)
+ err := actions.TeardownSuite(t, testEnvironment, chainlinkNodes, nil, zapcore.PanicLevel, chainClient)
require.NoError(t, err, "Error tearing down environment")
})
@@ -269,7 +269,7 @@ func TestAutomationChaos(t *testing.T) {
gom.Eventually(func(g gomega.Gomega) {
// Check if the upkeeps are performing multiple times by analyzing their counters and checking they are greater than 10
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := consumers[i].Counter(context.Background())
+ counter, err := consumers[i].Counter(it_utils.TestContext(t))
require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i)
expect := 5
l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep ID", i).Msg("Number of upkeeps performed")
@@ -284,7 +284,7 @@ func TestAutomationChaos(t *testing.T) {
gom.Eventually(func(g gomega.Gomega) {
// Check if the upkeeps are performing multiple times by analyzing their counters and checking they are greater than 10
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := consumers[i].Counter(context.Background())
+ counter, err := consumers[i].Counter(it_utils.TestContext(t))
require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i)
expect := 10
l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep ID", i).Msg("Number of upkeeps performed")
diff --git a/integration-tests/chaos/ocr2vrf_chaos_test.go b/integration-tests/chaos/ocr2vrf_chaos_test.go
index 0beccadddda..8739a5960af 100644
--- a/integration-tests/chaos/ocr2vrf_chaos_test.go
+++ b/integration-tests/chaos/ocr2vrf_chaos_test.go
@@ -13,7 +13,6 @@ import (
"github.com/smartcontractkit/chainlink-testing-framework/blockchain"
"github.com/smartcontractkit/chainlink-testing-framework/k8s/chaos"
"github.com/smartcontractkit/chainlink-testing-framework/k8s/environment"
- a "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/alias"
"github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink"
"github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/ethereum"
"github.com/smartcontractkit/chainlink-testing-framework/logging"
@@ -26,6 +25,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/config"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
+ it_utils "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
func TestOCR2VRFChaos(t *testing.T) {
@@ -68,7 +68,7 @@ func TestOCR2VRFChaos(t *testing.T) {
chainlink.New(0, defaultOCR2VRFSettings),
chaos.NewFailPods,
&chaos.Props{
- LabelsSelector: &map[string]*string{ChaosGroupMinority: a.Str("1")},
+ LabelsSelector: &map[string]*string{ChaosGroupMinority: utils.Ptr("1")},
DurationStr: "1m",
},
},
@@ -78,7 +78,7 @@ func TestOCR2VRFChaos(t *testing.T) {
// chainlink.New(0, defaultOCR2VRFSettings),
// chaos.NewFailPods,
// &chaos.Props{
- // LabelsSelector: &map[string]*string{ChaosGroupMajority: a.Str("1")},
+ // LabelsSelector: &map[string]*string{ChaosGroupMajority: utils.Ptr("1")},
// DurationStr: "1m",
// },
//},
@@ -88,9 +88,9 @@ func TestOCR2VRFChaos(t *testing.T) {
// chainlink.New(0, defaultOCR2VRFSettings),
// chaos.NewFailPods,
// &chaos.Props{
- // LabelsSelector: &map[string]*string{ChaosGroupMajority: a.Str("1")},
+ // LabelsSelector: &map[string]*string{ChaosGroupMajority: utils.Ptr("1")},
// DurationStr: "1m",
- // ContainerNames: &[]*string{a.Str("chainlink-db")},
+ // ContainerNames: &[]*string{utils.Ptr("chainlink-db")},
// },
//},
//NetworkChaosFailMajorityNetwork: {
@@ -98,8 +98,8 @@ func TestOCR2VRFChaos(t *testing.T) {
// chainlink.New(0, defaultOCR2VRFSettings),
// chaos.NewNetworkPartition,
// &chaos.Props{
- // FromLabels: &map[string]*string{ChaosGroupMajority: a.Str("1")},
- // ToLabels: &map[string]*string{ChaosGroupMinority: a.Str("1")},
+ // FromLabels: &map[string]*string{ChaosGroupMajority: utils.Ptr("1")},
+ // ToLabels: &map[string]*string{ChaosGroupMinority: utils.Ptr("1")},
// DurationStr: "1m",
// },
//},
@@ -108,8 +108,8 @@ func TestOCR2VRFChaos(t *testing.T) {
// chainlink.New(0, defaultOCR2VRFSettings),
// chaos.NewNetworkPartition,
// &chaos.Props{
- // FromLabels: &map[string]*string{"app": a.Str("geth")},
- // ToLabels: &map[string]*string{ChaosGroupMajority: a.Str("1")},
+ // FromLabels: &map[string]*string{"app": utils.Ptr("geth")},
+ // ToLabels: &map[string]*string{ChaosGroupMajority: utils.Ptr("1")},
// DurationStr: "1m",
// },
//},
@@ -150,7 +150,7 @@ func TestOCR2VRFChaos(t *testing.T) {
require.NoError(t, err, "Retrieving on-chain wallet addresses for chainlink nodes shouldn't fail")
t.Cleanup(func() {
- err := actions.TeardownSuite(t, testEnvironment, utils.ProjectRoot, chainlinkNodes, nil, zapcore.PanicLevel, chainClient)
+ err := actions.TeardownSuite(t, testEnvironment, chainlinkNodes, nil, zapcore.PanicLevel, chainClient)
require.NoError(t, err, "Error tearing down environment")
})
@@ -186,7 +186,7 @@ func TestOCR2VRFChaos(t *testing.T) {
)
for i := uint16(0); i < ocr2vrf_constants.NumberOfRandomWordsToRequest; i++ {
- randomness, err := consumerContract.GetRandomnessByRequestId(nil, requestID, big.NewInt(int64(i)))
+ randomness, err := consumerContract.GetRandomnessByRequestId(it_utils.TestContext(t), requestID, big.NewInt(int64(i)))
require.NoError(t, err)
l.Info().Interface("Random Number", randomness).Interface("Randomness Number Index", i).Msg("Randomness retrieved from Consumer contract")
require.NotEqual(t, 0, randomness.Uint64(), "Randomness retrieved from Consumer contract give an answer other than 0")
@@ -213,7 +213,7 @@ func TestOCR2VRFChaos(t *testing.T) {
)
for i := uint16(0); i < ocr2vrf_constants.NumberOfRandomWordsToRequest; i++ {
- randomness, err := consumerContract.GetRandomnessByRequestId(nil, requestID, big.NewInt(int64(i)))
+ randomness, err := consumerContract.GetRandomnessByRequestId(it_utils.TestContext(t), requestID, big.NewInt(int64(i)))
require.NoError(t, err, "Error getting Randomness result from Consumer Contract")
l.Info().Interface("Random Number", randomness).Interface("Randomness Number Index", i).Msg("Randomness retrieved from Consumer contract")
require.NotEqual(t, 0, randomness.Uint64(), "Randomness retrieved from Consumer contract give an answer other than 0")
diff --git a/integration-tests/chaos/ocr_chaos_test.go b/integration-tests/chaos/ocr_chaos_test.go
index b65f8bb74f7..76e25d92000 100644
--- a/integration-tests/chaos/ocr_chaos_test.go
+++ b/integration-tests/chaos/ocr_chaos_test.go
@@ -1,7 +1,6 @@
package chaos
import (
- "context"
"fmt"
"math/big"
"os"
@@ -15,7 +14,6 @@ import (
ctfClient "github.com/smartcontractkit/chainlink-testing-framework/client"
"github.com/smartcontractkit/chainlink-testing-framework/k8s/chaos"
"github.com/smartcontractkit/chainlink-testing-framework/k8s/environment"
- a "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/alias"
"github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink"
"github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/ethereum"
"github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver"
@@ -28,6 +26,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/config"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
+ it_utils "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
var (
@@ -81,8 +80,8 @@ func TestOCRChaos(t *testing.T) {
chainlink.New(0, defaultOCRSettings),
chaos.NewNetworkPartition,
&chaos.Props{
- FromLabels: &map[string]*string{ChaosGroupMajority: a.Str("1")},
- ToLabels: &map[string]*string{ChaosGroupMinority: a.Str("1")},
+ FromLabels: &map[string]*string{ChaosGroupMajority: utils.Ptr("1")},
+ ToLabels: &map[string]*string{ChaosGroupMinority: utils.Ptr("1")},
DurationStr: "1m",
},
},
@@ -91,8 +90,8 @@ func TestOCRChaos(t *testing.T) {
chainlink.New(0, defaultOCRSettings),
chaos.NewNetworkPartition,
&chaos.Props{
- FromLabels: &map[string]*string{"app": a.Str("geth")},
- ToLabels: &map[string]*string{ChaosGroupMajorityPlus: a.Str("1")},
+ FromLabels: &map[string]*string{"app": utils.Ptr("geth")},
+ ToLabels: &map[string]*string{ChaosGroupMajorityPlus: utils.Ptr("1")},
DurationStr: "1m",
},
},
@@ -101,7 +100,7 @@ func TestOCRChaos(t *testing.T) {
chainlink.New(0, defaultOCRSettings),
chaos.NewFailPods,
&chaos.Props{
- LabelsSelector: &map[string]*string{ChaosGroupMinority: a.Str("1")},
+ LabelsSelector: &map[string]*string{ChaosGroupMinority: utils.Ptr("1")},
DurationStr: "1m",
},
},
@@ -110,7 +109,7 @@ func TestOCRChaos(t *testing.T) {
chainlink.New(0, defaultOCRSettings),
chaos.NewFailPods,
&chaos.Props{
- LabelsSelector: &map[string]*string{ChaosGroupMajority: a.Str("1")},
+ LabelsSelector: &map[string]*string{ChaosGroupMajority: utils.Ptr("1")},
DurationStr: "1m",
},
},
@@ -119,9 +118,9 @@ func TestOCRChaos(t *testing.T) {
chainlink.New(0, defaultOCRSettings),
chaos.NewFailPods,
&chaos.Props{
- LabelsSelector: &map[string]*string{ChaosGroupMajority: a.Str("1")},
+ LabelsSelector: &map[string]*string{ChaosGroupMajority: utils.Ptr("1")},
DurationStr: "1m",
- ContainerNames: &[]*string{a.Str("chainlink-db")},
+ ContainerNames: &[]*string{utils.Ptr("chainlink-db")},
},
},
}
@@ -165,7 +164,7 @@ func TestOCRChaos(t *testing.T) {
if chainClient != nil {
chainClient.GasStats().PrintStats()
}
- err := actions.TeardownSuite(t, testEnvironment, utils.ProjectRoot, chainlinkNodes, nil, zapcore.PanicLevel, chainClient)
+ err := actions.TeardownSuite(t, testEnvironment, chainlinkNodes, nil, zapcore.PanicLevel, chainClient)
require.NoError(t, err, "Error tearing down environment")
})
@@ -181,7 +180,7 @@ func TestOCRChaos(t *testing.T) {
err = actions.FundChainlinkNodes(chainlinkNodes, chainClient, big.NewFloat(10))
require.NoError(t, err)
- ocrInstances, err := actions.DeployOCRContracts(1, lt, cd, bootstrapNode, workerNodes, chainClient)
+ ocrInstances, err := actions.DeployOCRContracts(1, lt, cd, workerNodes, chainClient)
require.NoError(t, err)
err = chainClient.WaitForEvents()
require.NoError(t, err)
@@ -196,7 +195,7 @@ func TestOCRChaos(t *testing.T) {
err := ocr.RequestNewRound()
require.NoError(t, err, "Error requesting new round")
}
- round, err := ocrInstances[0].GetLatestRound(context.Background())
+ round, err := ocrInstances[0].GetLatestRound(it_utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred())
l.Info().Int64("RoundID", round.RoundId.Int64()).Msg("Latest OCR Round")
if round.RoundId.Int64() == chaosStartRound && !chaosApplied {
diff --git a/integration-tests/client/chainlink.go b/integration-tests/client/chainlink.go
index 8a79cb3ec95..3638fa11c7f 100644
--- a/integration-tests/client/chainlink.go
+++ b/integration-tests/client/chainlink.go
@@ -1213,3 +1213,23 @@ func (c *ChainlinkClient) GetForwarders() (*Forwarders, *http.Response, error) {
}
return response, resp.RawResponse, err
}
+
+// Replays log poller from block number
+func (c *ChainlinkClient) ReplayLogPollerFromBlock(fromBlock, evmChainID int64) (*ReplayResponse, *http.Response, error) {
+ specObj := &ReplayResponse{}
+ c.l.Info().Str(NodeURL, c.Config.URL).Int64("From block", fromBlock).Int64("EVM chain ID", evmChainID).Msg("Replaying Log Poller from block")
+ resp, err := c.APIClient.R().
+ SetResult(&specObj).
+ SetQueryParams(map[string]string{
+ "evmChainID": fmt.Sprint(evmChainID),
+ }).
+ SetPathParams(map[string]string{
+ "fromBlock": fmt.Sprint(fromBlock),
+ }).
+ Post("/v2/replay_from_block/{fromBlock}")
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return specObj, resp.RawResponse, err
+}
diff --git a/integration-tests/client/chainlink_k8s.go b/integration-tests/client/chainlink_k8s.go
index 3fbf9eaf73c..27fd956103e 100644
--- a/integration-tests/client/chainlink_k8s.go
+++ b/integration-tests/client/chainlink_k8s.go
@@ -63,7 +63,7 @@ func (c *ChainlinkK8sClient) UpgradeVersion(testEnvironment *environment.Environ
},
},
}
- testEnvironment, err := testEnvironment.UpdateHelm(c.ChartName, upgradeVals)
+ _, err := testEnvironment.UpdateHelm(c.ChartName, upgradeVals)
return err
}
diff --git a/integration-tests/client/chainlink_models.go b/integration-tests/client/chainlink_models.go
index 6013e13e0fa..c6d1209d2ea 100644
--- a/integration-tests/client/chainlink_models.go
+++ b/integration-tests/client/chainlink_models.go
@@ -9,6 +9,7 @@ import (
"gopkg.in/guregu/null.v4"
"github.com/smartcontractkit/chainlink/v2/core/services/job"
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
)
// EIServiceConfig represents External Initiator service config
@@ -1407,3 +1408,16 @@ type ForwarderAttributes struct {
CreatedAt time.Time `json:"createdAt"`
UpdatedAt time.Time `json:"updatedAt"`
}
+
+type ReplayResponse struct {
+ Data ReplayResponseData `json:"data"`
+}
+
+type ReplayResponseData struct {
+ Attributes ReplayResponseAttributes `json:"attributes"`
+}
+
+type ReplayResponseAttributes struct {
+ Message string `json:"message"`
+ EVMChainID *utils.Big `json:"evmChainID"`
+}
diff --git a/integration-tests/config/config.go b/integration-tests/config/config.go
index 44c108b0d7f..1da8254e0ed 100644
--- a/integration-tests/config/config.go
+++ b/integration-tests/config/config.go
@@ -8,7 +8,6 @@ Enabled = true
[P2P.V2]
Enabled = false
-[P2P]
[P2P.V1]
Enabled = true
ListenIP = '0.0.0.0'
diff --git a/integration-tests/contracts/contract_deployer.go b/integration-tests/contracts/contract_deployer.go
index 710422891c4..45195d327ee 100644
--- a/integration-tests/contracts/contract_deployer.go
+++ b/integration-tests/contracts/contract_deployer.go
@@ -12,11 +12,12 @@ import (
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
- "github.com/smartcontractkit/chainlink-testing-framework/blockchain"
"github.com/smartcontractkit/libocr/gethwrappers/offchainaggregator"
"github.com/smartcontractkit/libocr/gethwrappers2/ocr2aggregator"
ocrConfigHelper "github.com/smartcontractkit/libocr/offchainreporting/confighelper"
+ "github.com/smartcontractkit/chainlink-testing-framework/blockchain"
+
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/functions/generated/functions_load_test_client"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/functions/generated/functions_v1_events_mock"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/automation_consumer_benchmark"
@@ -45,6 +46,7 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_wrapper2_0"
registry21 "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/keeper_registry_wrapper_2_1"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/link_token_interface"
+ le "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_emitter"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_triggered_streams_lookup_wrapper"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_upkeep_counter_wrapper"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/mock_aggregator_proxy"
@@ -138,6 +140,7 @@ type ContractDeployer interface {
DeployMercuryVerifierProxyContract(accessControllerAddr common.Address) (MercuryVerifierProxy, error)
DeployMercuryFeeManager(linkAddress common.Address, nativeAddress common.Address, proxyAddress common.Address, rewardManagerAddress common.Address) (MercuryFeeManager, error)
DeployMercuryRewardManager(linkAddress common.Address) (MercuryRewardManager, error)
+ DeployLogEmitterContract() (LogEmitter, error)
}
// NewContractDeployer returns an instance of a contract deployer based on the client type
@@ -171,6 +174,10 @@ func NewContractDeployer(bcClient blockchain.EVMClient, logger zerolog.Logger) (
return &LineaContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil
case *blockchain.FantomClient:
return &FantomContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil
+ case *blockchain.KromaClient:
+ return &KromaContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil
+ case *blockchain.WeMixClient:
+ return &WeMixContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil
}
return nil, errors.New("unknown blockchain client implementation for contract deployer, register blockchain client in NewContractDeployer")
}
@@ -238,6 +245,14 @@ type FantomContractDeployer struct {
*EthereumContractDeployer
}
+type KromaContractDeployer struct {
+ *EthereumContractDeployer
+}
+
+type WeMixContractDeployer struct {
+ *EthereumContractDeployer
+}
+
// NewEthereumContractDeployer returns an instantiated instance of the ETH contract deployer
func NewEthereumContractDeployer(ethClient blockchain.EVMClient, logger zerolog.Logger) *EthereumContractDeployer {
return &EthereumContractDeployer{
@@ -854,34 +869,41 @@ func (e *EthereumContractDeployer) LoadKeeperRegistrar(address common.Address, r
client: e.client,
registrar20: instance.(*keeper_registrar_wrapper2_0.KeeperRegistrar),
}, err
- } else {
- instance, err := e.client.LoadContract("AutomationRegistrar", address, func(
- address common.Address,
- backend bind.ContractBackend,
- ) (interface{}, error) {
- return registrar21.NewAutomationRegistrar(address, backend)
- })
- if err != nil {
- return nil, err
- }
- return &EthereumKeeperRegistrar{
- address: &address,
- client: e.client,
- registrar21: instance.(*registrar21.AutomationRegistrar),
- }, err
}
+ instance, err := e.client.LoadContract("AutomationRegistrar", address, func(
+ address common.Address,
+ backend bind.ContractBackend,
+ ) (interface{}, error) {
+ return registrar21.NewAutomationRegistrar(address, backend)
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &EthereumKeeperRegistrar{
+ address: &address,
+ client: e.client,
+ registrar21: instance.(*registrar21.AutomationRegistrar),
+ }, err
}
func (e *EthereumContractDeployer) DeployKeeperRegistry(
opts *KeeperRegistryOpts,
) (KeeperRegistry, error) {
var mode uint8
- switch e.client.GetChainID() {
+ switch e.client.GetChainID().Int64() {
//Arbitrum payment model
- case big.NewInt(421613):
+ //Goerli Arbitrum
+ case 421613:
+ mode = uint8(1)
+ //Sepolia Arbitrum
+ case 421614:
mode = uint8(1)
//Optimism payment model
- case big.NewInt(420):
+ //Goerli Optimism
+ case 420:
+ mode = uint8(2)
+ //Goerli Base
+ case 84531:
mode = uint8(2)
default:
mode = uint8(0)
@@ -1605,3 +1627,21 @@ func (e *EthereumContractDeployer) DeployWERC20Mock() (WERC20Mock, error) {
l: e.l,
}, err
}
+
+func (e *EthereumContractDeployer) DeployLogEmitterContract() (LogEmitter, error) {
+ address, _, instance, err := e.client.DeployContract("Log Emitter", func(
+ auth *bind.TransactOpts,
+ backend bind.ContractBackend,
+ ) (common.Address, *types.Transaction, interface{}, error) {
+ return le.DeployLogEmitter(auth, backend)
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &LogEmitterContract{
+ client: e.client,
+ instance: instance.(*le.LogEmitter),
+ address: *address,
+ l: e.l,
+ }, err
+}
diff --git a/integration-tests/contracts/contract_loader.go b/integration-tests/contracts/contract_loader.go
index 4dda2d3f0c4..9a2f20226d3 100644
--- a/integration-tests/contracts/contract_loader.go
+++ b/integration-tests/contracts/contract_loader.go
@@ -2,6 +2,7 @@ package contracts
import (
"errors"
+
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrf_coordinator_v2_5"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrf_v2plus_load_test_with_metrics"
@@ -64,6 +65,8 @@ func NewContractLoader(bcClient blockchain.EVMClient, logger zerolog.Logger) (Co
return &OptimismContractLoader{NewEthereumContractLoader(clientImpl, logger)}, nil
case *blockchain.PolygonZkEvmClient:
return &PolygonZkEvmContractLoader{NewEthereumContractLoader(clientImpl, logger)}, nil
+ case *blockchain.WeMixClient:
+ return &WeMixContractLoader{NewEthereumContractLoader(clientImpl, logger)}, nil
}
return nil, errors.New("unknown blockchain client implementation for contract Loader, register blockchain client in NewContractLoader")
}
@@ -107,6 +110,11 @@ type PolygonZKEVMContractLoader struct {
*EthereumContractLoader
}
+// WeMixContractLoader wraps for WeMix
+type WeMixContractLoader struct {
+ *EthereumContractLoader
+}
+
// NewEthereumContractLoader returns an instantiated instance of the ETH contract Loader
func NewEthereumContractLoader(ethClient blockchain.EVMClient, logger zerolog.Logger) *EthereumContractLoader {
return &EthereumContractLoader{
diff --git a/integration-tests/contracts/contract_models.go b/integration-tests/contracts/contract_models.go
index 51fce7cb120..4c8d610fa1b 100644
--- a/integration-tests/contracts/contract_models.go
+++ b/integration-tests/contracts/contract_models.go
@@ -400,3 +400,13 @@ type WERC20Mock interface {
Transfer(to string, amount *big.Int) error
Mint(account common.Address, amount *big.Int) (*types.Transaction, error)
}
+
+type LogEmitter interface {
+ Address() common.Address
+ EmitLogInts(ints []int) (*types.Transaction, error)
+ EmitLogIntsIndexed(ints []int) (*types.Transaction, error)
+ EmitLogStrings(strings []string) (*types.Transaction, error)
+ EmitLogInt(payload int) (*types.Transaction, error)
+ EmitLogIntIndexed(payload int) (*types.Transaction, error)
+ EmitLogString(strings string) (*types.Transaction, error)
+}
diff --git a/integration-tests/contracts/contract_vrf_models.go b/integration-tests/contracts/contract_vrf_models.go
index f0f57f58e75..baee2ccd929 100644
--- a/integration-tests/contracts/contract_vrf_models.go
+++ b/integration-tests/contracts/contract_vrf_models.go
@@ -80,11 +80,17 @@ type VRFCoordinatorV2_5 interface {
AddConsumer(subId *big.Int, consumerAddress string) error
FundSubscriptionWithNative(subId *big.Int, nativeTokenAmount *big.Int) error
Address() string
+ PendingRequestsExist(ctx context.Context, subID *big.Int) (bool, error)
GetSubscription(ctx context.Context, subID *big.Int) (vrf_coordinator_v2_5.GetSubscription, error)
+ OwnerCancelSubscription(subID *big.Int) (*types.Transaction, error)
+ CancelSubscription(subID *big.Int, to common.Address) (*types.Transaction, error)
+ OracleWithdraw(recipient common.Address, amount *big.Int) error
+ OracleWithdrawNative(recipient common.Address, amount *big.Int) error
GetNativeTokenTotalBalance(ctx context.Context) (*big.Int, error)
GetLinkTotalBalance(ctx context.Context) (*big.Int, error)
FindSubscriptionID(subID *big.Int) (*big.Int, error)
WaitForSubscriptionCreatedEvent(timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25SubscriptionCreated, error)
+ WaitForSubscriptionCanceledEvent(subID *big.Int, timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25SubscriptionCanceled, error)
WaitForRandomWordsFulfilledEvent(subID []*big.Int, requestID []*big.Int, timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled, error)
WaitForRandomWordsRequestedEvent(keyHash [][32]byte, subID []*big.Int, sender []common.Address, timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsRequested, error)
WaitForMigrationCompletedEvent(timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25MigrationCompleted, error)
@@ -256,12 +262,13 @@ type RequestStatus struct {
}
type LoadTestRequestStatus struct {
- Fulfilled bool
- RandomWords []*big.Int
- requestTimestamp *big.Int
- fulfilmentTimestamp *big.Int
- requestBlockNumber *big.Int
- fulfilmentBlockNumber *big.Int
+ Fulfilled bool
+ RandomWords []*big.Int
+ // Currently Unused November 8, 2023, Might be used in near future, will remove if not.
+ // requestTimestamp *big.Int
+ // fulfilmentTimestamp *big.Int
+ // requestBlockNumber *big.Int
+ // fulfilmentBlockNumber *big.Int
}
type VRFLoadTestMetrics struct {
diff --git a/integration-tests/contracts/ethereum_contracts.go b/integration-tests/contracts/ethereum_contracts.go
index 5b3a93fe0c2..9cb858fe007 100644
--- a/integration-tests/contracts/ethereum_contracts.go
+++ b/integration-tests/contracts/ethereum_contracts.go
@@ -13,10 +13,14 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/pkg/errors"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
+ "github.com/smartcontractkit/libocr/gethwrappers/offchainaggregator"
+ "github.com/smartcontractkit/libocr/gethwrappers2/ocr2aggregator"
+ ocrConfigHelper "github.com/smartcontractkit/libocr/offchainreporting/confighelper"
+ ocrTypes "github.com/smartcontractkit/libocr/offchainreporting/types"
+
"github.com/smartcontractkit/chainlink-testing-framework/blockchain"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/functions/generated/functions_coordinator"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/functions/generated/functions_load_test_client"
@@ -44,10 +48,6 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/verifier"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/verifier_proxy"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/shared/generated/werc20_mock"
- "github.com/smartcontractkit/libocr/gethwrappers/offchainaggregator"
- "github.com/smartcontractkit/libocr/gethwrappers2/ocr2aggregator"
- ocrConfigHelper "github.com/smartcontractkit/libocr/offchainreporting/confighelper"
- ocrTypes "github.com/smartcontractkit/libocr/offchainreporting/types"
"github.com/smartcontractkit/chainlink/integration-tests/client"
eth_contracts "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum"
@@ -940,7 +940,7 @@ func (f *EthereumFluxAggregator) PaymentAmount(ctx context.Context) (*big.Int, e
return payment, nil
}
-func (f *EthereumFluxAggregator) RequestNewRound(ctx context.Context) error {
+func (f *EthereumFluxAggregator) RequestNewRound(_ context.Context) error {
opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet())
if err != nil {
return err
@@ -979,7 +979,7 @@ func (f *EthereumFluxAggregator) WatchSubmissionReceived(ctx context.Context, ev
}
}
-func (f *EthereumFluxAggregator) SetRequesterPermissions(ctx context.Context, addr common.Address, authorized bool, roundsDelay uint32) error {
+func (f *EthereumFluxAggregator) SetRequesterPermissions(_ context.Context, addr common.Address, authorized bool, roundsDelay uint32) error {
opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet())
if err != nil {
return err
@@ -1020,7 +1020,7 @@ func (f *EthereumFluxAggregator) LatestRoundID(ctx context.Context) (*big.Int, e
}
func (f *EthereumFluxAggregator) WithdrawPayment(
- ctx context.Context,
+ _ context.Context,
from common.Address,
to common.Address,
amount *big.Int) error {
@@ -2162,11 +2162,11 @@ func (e *EthereumFunctionsRouter) CreateSubscriptionWithConsumer(consumer string
topicOneInputs := abi.Arguments{fabi.Events["SubscriptionCreated"].Inputs[0]}
topicOneHash := []common.Hash{r.Logs[0].Topics[1:][0]}
if err := abi.ParseTopicsIntoMap(topicsMap, topicOneInputs, topicOneHash); err != nil {
- return 0, errors.Wrap(err, "failed to decode topic value")
+ return 0, fmt.Errorf("failed to decode topic value, err: %w", err)
}
e.l.Info().Interface("NewTopicsDecoded", topicsMap).Send()
if topicsMap["subscriptionId"] == 0 {
- return 0, errors.New("failed to decode subscription ID after creation")
+ return 0, fmt.Errorf("failed to decode subscription ID after creation")
}
return topicsMap["subscriptionId"].(uint64), nil
}
diff --git a/integration-tests/contracts/ethereum_keeper_contracts.go b/integration-tests/contracts/ethereum_keeper_contracts.go
index 135b016ee55..2c0250e7454 100644
--- a/integration-tests/contracts/ethereum_keeper_contracts.go
+++ b/integration-tests/contracts/ethereum_keeper_contracts.go
@@ -250,25 +250,25 @@ func (rcs *KeeperRegistrySettings) EncodeOnChainConfig(registrar string, registr
encodedOnchainConfig, err := utilsABI.Methods["_onChainConfig"].Inputs.Pack(&onchainConfigStruct)
return encodedOnchainConfig, err
- } else {
- configType := goabi.MustNewType("tuple(uint32 paymentPremiumPPB,uint32 flatFeeMicroLink,uint32 checkGasLimit,uint24 stalenessSeconds,uint16 gasCeilingMultiplier,uint96 minUpkeepSpend,uint32 maxPerformGas,uint32 maxCheckDataSize,uint32 maxPerformDataSize,uint256 fallbackGasPrice,uint256 fallbackLinkPrice,address transcoder,address registrar)")
- onchainConfig, err := goabi.Encode(map[string]interface{}{
- "paymentPremiumPPB": rcs.PaymentPremiumPPB,
- "flatFeeMicroLink": rcs.FlatFeeMicroLINK,
- "checkGasLimit": rcs.CheckGasLimit,
- "stalenessSeconds": rcs.StalenessSeconds,
- "gasCeilingMultiplier": rcs.GasCeilingMultiplier,
- "minUpkeepSpend": rcs.MinUpkeepSpend,
- "maxPerformGas": rcs.MaxPerformGas,
- "maxCheckDataSize": rcs.MaxCheckDataSize,
- "maxPerformDataSize": rcs.MaxPerformDataSize,
- "fallbackGasPrice": rcs.FallbackGasPrice,
- "fallbackLinkPrice": rcs.FallbackLinkPrice,
- "transcoder": common.Address{},
- "registrar": registrar,
- }, configType)
- return onchainConfig, err
}
+ configType := goabi.MustNewType("tuple(uint32 paymentPremiumPPB,uint32 flatFeeMicroLink,uint32 checkGasLimit,uint24 stalenessSeconds,uint16 gasCeilingMultiplier,uint96 minUpkeepSpend,uint32 maxPerformGas,uint32 maxCheckDataSize,uint32 maxPerformDataSize,uint256 fallbackGasPrice,uint256 fallbackLinkPrice,address transcoder,address registrar)")
+ onchainConfig, err := goabi.Encode(map[string]interface{}{
+ "paymentPremiumPPB": rcs.PaymentPremiumPPB,
+ "flatFeeMicroLink": rcs.FlatFeeMicroLINK,
+ "checkGasLimit": rcs.CheckGasLimit,
+ "stalenessSeconds": rcs.StalenessSeconds,
+ "gasCeilingMultiplier": rcs.GasCeilingMultiplier,
+ "minUpkeepSpend": rcs.MinUpkeepSpend,
+ "maxPerformGas": rcs.MaxPerformGas,
+ "maxCheckDataSize": rcs.MaxCheckDataSize,
+ "maxPerformDataSize": rcs.MaxPerformDataSize,
+ "fallbackGasPrice": rcs.FallbackGasPrice,
+ "fallbackLinkPrice": rcs.FallbackLinkPrice,
+ "transcoder": common.Address{},
+ "registrar": registrar,
+ }, configType)
+ return onchainConfig, err
+
}
func (v *EthereumKeeperRegistry) RegistryOwnerAddress() common.Address {
@@ -276,6 +276,7 @@ func (v *EthereumKeeperRegistry) RegistryOwnerAddress() common.Address {
Pending: false,
}
+ //nolint: exhaustive
switch v.version {
case ethereum.RegistryVersion_2_1:
ownerAddress, _ := v.registry2_1.Owner(callOpts)
@@ -283,6 +284,8 @@ func (v *EthereumKeeperRegistry) RegistryOwnerAddress() common.Address {
case ethereum.RegistryVersion_2_0:
ownerAddress, _ := v.registry2_0.Owner(callOpts)
return ownerAddress
+ case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1, ethereum.RegistryVersion_1_2, ethereum.RegistryVersion_1_3:
+ return common.HexToAddress(v.client.GetDefaultWallet().Address())
}
return common.HexToAddress(v.client.GetDefaultWallet().Address())
@@ -664,7 +667,7 @@ func (v *EthereumKeeperRegistry) GetKeeperInfo(ctx context.Context, keeperAddr s
info, err = v.registry1_2.GetKeeperInfo(opts, common.HexToAddress(keeperAddr))
case ethereum.RegistryVersion_1_3:
info, err = v.registry1_3.GetKeeperInfo(opts, common.HexToAddress(keeperAddr))
- case ethereum.RegistryVersion_2_0:
+ case ethereum.RegistryVersion_2_0, ethereum.RegistryVersion_2_1:
// this is not used anywhere
return nil, fmt.Errorf("not supported")
}
@@ -710,6 +713,8 @@ func (v *EthereumKeeperRegistry) SetKeepers(keepers []string, payees []string, o
ocrConfig.OffchainConfigVersion,
ocrConfig.OffchainConfig,
)
+ case ethereum.RegistryVersion_2_1:
+ return fmt.Errorf("not supported")
}
if err != nil {
@@ -760,6 +765,8 @@ func (v *EthereumKeeperRegistry) RegisterUpkeep(target string, gasLimit uint32,
checkData,
nil, //offchain config
)
+ case ethereum.RegistryVersion_2_1:
+ return fmt.Errorf("not supported")
}
if err != nil {
@@ -877,6 +884,8 @@ func (v *EthereumKeeperRegistry) GetKeeperList(ctx context.Context) ([]string, e
return []string{}, err
}
list = state.Transmitters
+ case ethereum.RegistryVersion_2_1:
+ return nil, fmt.Errorf("not supported")
}
if err != nil {
@@ -1112,6 +1121,7 @@ func (v *EthereumKeeperRegistry) ParseUpkeepPerformedLog(log *types.Log) (*Upkee
// ParseStaleUpkeepReportLog Parses Stale upkeep report log
func (v *EthereumKeeperRegistry) ParseStaleUpkeepReportLog(log *types.Log) (*StaleUpkeepReportLog, error) {
+ //nolint:exhaustive
switch v.version {
case ethereum.RegistryVersion_2_0:
parsedLog, err := v.registry2_0.ParseStaleUpkeepReport(*log)
@@ -1129,7 +1139,6 @@ func (v *EthereumKeeperRegistry) ParseStaleUpkeepReportLog(log *types.Log) (*Sta
return &StaleUpkeepReportLog{
Id: parsedLog.Id,
}, nil
-
}
return nil, fmt.Errorf("keeper registry version %d is not supported", v.version)
}
@@ -1850,7 +1859,7 @@ func (v *EthereumKeeperConsumerPerformance) GetUpkeepCount(ctx context.Context)
return eligible, err
}
-func (v *EthereumKeeperConsumerPerformance) SetCheckGasToBurn(ctx context.Context, gas *big.Int) error {
+func (v *EthereumKeeperConsumerPerformance) SetCheckGasToBurn(_ context.Context, gas *big.Int) error {
opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet())
if err != nil {
return err
@@ -1862,7 +1871,7 @@ func (v *EthereumKeeperConsumerPerformance) SetCheckGasToBurn(ctx context.Contex
return v.client.ProcessTransaction(tx)
}
-func (v *EthereumKeeperConsumerPerformance) SetPerformGasToBurn(ctx context.Context, gas *big.Int) error {
+func (v *EthereumKeeperConsumerPerformance) SetPerformGasToBurn(_ context.Context, gas *big.Int) error {
opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet())
if err != nil {
return err
@@ -1897,7 +1906,7 @@ func (v *EthereumKeeperPerformDataCheckerConsumer) Counter(ctx context.Context)
return cnt, nil
}
-func (v *EthereumKeeperPerformDataCheckerConsumer) SetExpectedData(ctx context.Context, expectedData []byte) error {
+func (v *EthereumKeeperPerformDataCheckerConsumer) SetExpectedData(_ context.Context, expectedData []byte) error {
opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet())
if err != nil {
return err
@@ -2041,31 +2050,23 @@ func (v *EthereumKeeperRegistrar) EncodeRegisterRequest(name string, email []byt
common.HexToAddress(senderAddr),
)
- if err != nil {
- return nil, err
- }
- return req, nil
- } else {
- req, err := registrarABI.Pack(
- "register",
- name,
- email,
- common.HexToAddress(upkeepAddr),
- gasLimit,
- common.HexToAddress(adminAddr),
- uint8(0), // trigger type
- checkData,
- []byte{}, // triggerConfig
- []byte{}, // offchainConfig
- amount,
- common.HexToAddress(senderAddr),
- )
-
- if err != nil {
- return nil, err
- }
- return req, nil
+ return req, err
}
+ req, err := registrarABI.Pack(
+ "register",
+ name,
+ email,
+ common.HexToAddress(upkeepAddr),
+ gasLimit,
+ common.HexToAddress(adminAddr),
+ uint8(0), // trigger type
+ checkData,
+ []byte{}, // triggerConfig
+ []byte{}, // offchainConfig
+ amount,
+ common.HexToAddress(senderAddr),
+ )
+ return req, err
}
registryABI, err := abi.JSON(strings.NewReader(keeper_registrar_wrapper1_2.KeeperRegistrarMetaData.ABI))
if err != nil {
diff --git a/integration-tests/contracts/ethereum_ocr2vrf_contracts.go b/integration-tests/contracts/ethereum_ocr2vrf_contracts.go
index e8149b21251..cb52d1941a8 100644
--- a/integration-tests/contracts/ethereum_ocr2vrf_contracts.go
+++ b/integration-tests/contracts/ethereum_ocr2vrf_contracts.go
@@ -10,7 +10,6 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/pkg/errors"
"github.com/rs/zerolog/log"
"github.com/smartcontractkit/chainlink-testing-framework/blockchain"
@@ -230,7 +229,7 @@ func (dkgContract *EthereumDKG) WaitForTransmittedEvent(timeout time.Duration) (
case err = <-subscription.Err():
return nil, err
case <-time.After(timeout):
- return nil, errors.New("timeout waiting for DKGTransmitted event")
+ return nil, fmt.Errorf("timeout waiting for DKGTransmitted event")
case transmittedEvent := <-transmittedEventsChannel:
return transmittedEvent, nil
}
@@ -250,7 +249,7 @@ func (dkgContract *EthereumDKG) WaitForConfigSetEvent(timeout time.Duration) (*d
case err = <-subscription.Err():
return nil, err
case <-time.After(timeout):
- return nil, errors.New("timeout waiting for DKGConfigSet event")
+ return nil, fmt.Errorf("timeout waiting for DKGConfigSet event")
case configSetEvent := <-configSetEventsChannel:
return configSetEvent, nil
}
@@ -451,7 +450,7 @@ func (consumer *EthereumVRFBeaconConsumer) RequestRandomness(
) (*types.Receipt, error) {
opts, err := consumer.client.TransactionOpts(consumer.client.GetDefaultWallet())
if err != nil {
- return nil, errors.Wrap(err, "TransactionOpts failed")
+ return nil, fmt.Errorf("TransactionOpts failed, err: %w", err)
}
tx, err := consumer.vrfBeaconConsumer.TestRequestRandomness(
opts,
@@ -460,20 +459,20 @@ func (consumer *EthereumVRFBeaconConsumer) RequestRandomness(
confirmationDelayArg,
)
if err != nil {
- return nil, errors.Wrap(err, "TestRequestRandomness failed")
+ return nil, fmt.Errorf("TestRequestRandomness failed, err: %w", err)
}
err = consumer.client.ProcessTransaction(tx)
if err != nil {
- return nil, errors.Wrap(err, "ProcessTransaction failed")
+ return nil, fmt.Errorf("ProcessTransaction failed, err: %w", err)
}
err = consumer.client.WaitForEvents()
if err != nil {
- return nil, errors.Wrap(err, "WaitForEvents failed")
+ return nil, fmt.Errorf("WaitForEvents failed, err: %w", err)
}
receipt, err := consumer.client.GetTxReceipt(tx.Hash())
if err != nil {
- return nil, errors.Wrap(err, "GetTxReceipt failed")
+ return nil, fmt.Errorf("GetTxReceipt failed, err: %w", err)
}
log.Info().Interface("Sub ID", subID).
Interface("Number of Words", numWords).
@@ -526,20 +525,20 @@ func (consumer *EthereumVRFBeaconConsumer) RequestRandomnessFulfillment(
arguments,
)
if err != nil {
- return nil, errors.Wrap(err, "TestRequestRandomnessFulfillment failed")
+ return nil, fmt.Errorf("TestRequestRandomnessFulfillment failed, err: %w", err)
}
err = consumer.client.ProcessTransaction(tx)
if err != nil {
- return nil, errors.Wrap(err, "ProcessTransaction failed")
+ return nil, fmt.Errorf("ProcessTransaction failed, err: %w", err)
}
err = consumer.client.WaitForEvents()
if err != nil {
- return nil, errors.Wrap(err, "WaitForEvents failed")
+ return nil, fmt.Errorf("WaitForEvents failed, err: %w", err)
}
receipt, err := consumer.client.GetTxReceipt(tx.Hash())
if err != nil {
- return nil, errors.Wrap(err, "GetTxReceipt failed")
+ return nil, fmt.Errorf("GetTxReceipt failed, err: %w", err)
}
log.Info().Interface("Sub ID", subID).
Interface("Number of Words", numWords).
diff --git a/integration-tests/contracts/ethereum_vrfv2plus_contracts.go b/integration-tests/contracts/ethereum_vrfv2plus_contracts.go
index 1488f97131a..330166dc79d 100644
--- a/integration-tests/contracts/ethereum_vrfv2plus_contracts.go
+++ b/integration-tests/contracts/ethereum_vrfv2plus_contracts.go
@@ -96,6 +96,18 @@ func (v *EthereumVRFCoordinatorV2_5) GetActiveSubscriptionIds(ctx context.Contex
return activeSubscriptionIds, nil
}
+func (v *EthereumVRFCoordinatorV2_5) PendingRequestsExist(ctx context.Context, subID *big.Int) (bool, error) {
+ opts := &bind.CallOpts{
+ From: common.HexToAddress(v.client.GetDefaultWallet().Address()),
+ Context: ctx,
+ }
+ pendingRequestExists, err := v.coordinator.PendingRequestExists(opts, subID)
+ if err != nil {
+ return false, err
+ }
+ return pendingRequestExists, nil
+}
+
func (v *EthereumVRFCoordinatorV2_5) GetSubscription(ctx context.Context, subID *big.Int) (vrf_coordinator_v2_5.GetSubscription, error) {
opts := &bind.CallOpts{
From: common.HexToAddress(v.client.GetDefaultWallet().Address()),
@@ -131,6 +143,75 @@ func (v *EthereumVRFCoordinatorV2_5) GetNativeTokenTotalBalance(ctx context.Cont
return totalBalance, nil
}
+// OwnerCancelSubscription cancels subscription by Coordinator owner
+// return funds to sub owner,
+// does not check if pending requests for a sub exist
+func (v *EthereumVRFCoordinatorV2_5) OwnerCancelSubscription(subID *big.Int) (*types.Transaction, error) {
+ opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet())
+ if err != nil {
+ return nil, err
+ }
+ tx, err := v.coordinator.OwnerCancelSubscription(
+ opts,
+ subID,
+ )
+ if err != nil {
+ return nil, err
+ }
+ return tx, v.client.ProcessTransaction(tx)
+}
+
+// CancelSubscription cancels subscription by Sub owner,
+// return funds to specified address,
+// checks if pending requests for a sub exist
+func (v *EthereumVRFCoordinatorV2_5) CancelSubscription(subID *big.Int, to common.Address) (*types.Transaction, error) {
+ opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet())
+ if err != nil {
+ return nil, err
+ }
+ tx, err := v.coordinator.CancelSubscription(
+ opts,
+ subID,
+ to,
+ )
+ if err != nil {
+ return nil, err
+ }
+ return tx, v.client.ProcessTransaction(tx)
+}
+
+func (v *EthereumVRFCoordinatorV2_5) OracleWithdraw(recipient common.Address, amount *big.Int) error {
+ opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet())
+ if err != nil {
+ return err
+ }
+ tx, err := v.coordinator.OracleWithdraw(
+ opts,
+ recipient,
+ amount,
+ )
+ if err != nil {
+ return err
+ }
+ return v.client.ProcessTransaction(tx)
+}
+
+func (v *EthereumVRFCoordinatorV2_5) OracleWithdrawNative(recipient common.Address, amount *big.Int) error {
+ opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet())
+ if err != nil {
+ return err
+ }
+ tx, err := v.coordinator.OracleWithdrawNative(
+ opts,
+ recipient,
+ amount,
+ )
+ if err != nil {
+ return err
+ }
+ return v.client.ProcessTransaction(tx)
+}
+
func (v *EthereumVRFCoordinatorV2_5) SetConfig(minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, feeConfig vrf_coordinator_v2_5.VRFCoordinatorV25FeeConfig) error {
opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet())
if err != nil {
@@ -287,6 +368,26 @@ func (v *EthereumVRFCoordinatorV2_5) WaitForSubscriptionCreatedEvent(timeout tim
}
}
+func (v *EthereumVRFCoordinatorV2_5) WaitForSubscriptionCanceledEvent(subID *big.Int, timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25SubscriptionCanceled, error) {
+ eventsChannel := make(chan *vrf_coordinator_v2_5.VRFCoordinatorV25SubscriptionCanceled)
+ subscription, err := v.coordinator.WatchSubscriptionCanceled(nil, eventsChannel, []*big.Int{subID})
+ if err != nil {
+ return nil, err
+ }
+ defer subscription.Unsubscribe()
+
+ for {
+ select {
+ case err := <-subscription.Err():
+ return nil, err
+ case <-time.After(timeout):
+ return nil, fmt.Errorf("timeout waiting for SubscriptionCanceled event")
+ case sub := <-eventsChannel:
+ return sub, nil
+ }
+ }
+}
+
func (v *EthereumVRFCoordinatorV2_5) WaitForRandomWordsFulfilledEvent(subID []*big.Int, requestID []*big.Int, timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled, error) {
randomWordsFulfilledEventsChannel := make(chan *vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled)
subscription, err := v.coordinator.WatchRandomWordsFulfilled(nil, randomWordsFulfilledEventsChannel, requestID, subID)
diff --git a/integration-tests/contracts/test_contracts.go b/integration-tests/contracts/test_contracts.go
new file mode 100644
index 00000000000..3080668da69
--- /dev/null
+++ b/integration-tests/contracts/test_contracts.go
@@ -0,0 +1,80 @@
+package contracts
+
+import (
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/rs/zerolog"
+
+ "github.com/smartcontractkit/chainlink-testing-framework/blockchain"
+
+ le "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_emitter"
+)
+
+type LogEmitterContract struct {
+ address common.Address
+ client blockchain.EVMClient
+ instance *le.LogEmitter
+ l zerolog.Logger
+}
+
+func (e *LogEmitterContract) Address() common.Address {
+ return e.address
+}
+
+func (e *LogEmitterContract) EmitLogInts(ints []int) (*types.Transaction, error) {
+ opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet())
+ if err != nil {
+ return nil, err
+ }
+ bigInts := make([]*big.Int, len(ints))
+ for i, v := range ints {
+ bigInts[i] = big.NewInt(int64(v))
+ }
+ tx, err := e.instance.EmitLog1(opts, bigInts)
+ if err != nil {
+ return nil, err
+ }
+ return tx, e.client.ProcessTransaction(tx)
+}
+
+func (e *LogEmitterContract) EmitLogIntsIndexed(ints []int) (*types.Transaction, error) {
+ opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet())
+ if err != nil {
+ return nil, err
+ }
+ bigInts := make([]*big.Int, len(ints))
+ for i, v := range ints {
+ bigInts[i] = big.NewInt(int64(v))
+ }
+ tx, err := e.instance.EmitLog2(opts, bigInts)
+ if err != nil {
+ return nil, err
+ }
+ return tx, e.client.ProcessTransaction(tx)
+}
+
+func (e *LogEmitterContract) EmitLogStrings(strings []string) (*types.Transaction, error) {
+ opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet())
+ if err != nil {
+ return nil, err
+ }
+ tx, err := e.instance.EmitLog3(opts, strings)
+ if err != nil {
+ return nil, err
+ }
+ return tx, e.client.ProcessTransaction(tx)
+}
+
+func (e *LogEmitterContract) EmitLogInt(payload int) (*types.Transaction, error) {
+ return e.EmitLogInts([]int{payload})
+}
+
+func (e *LogEmitterContract) EmitLogIntIndexed(payload int) (*types.Transaction, error) {
+ return e.EmitLogIntsIndexed([]int{payload})
+}
+
+func (e *LogEmitterContract) EmitLogString(strings string) (*types.Transaction, error) {
+ return e.EmitLogStrings([]string{strings})
+}
diff --git a/integration-tests/docker/cmd/test_env.go b/integration-tests/docker/cmd/test_env.go
index 31b7de5dcdd..5fe2001350e 100644
--- a/integration-tests/docker/cmd/test_env.go
+++ b/integration-tests/docker/cmd/test_env.go
@@ -9,10 +9,11 @@ import (
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
- "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env"
- "github.com/smartcontractkit/chainlink/integration-tests/utils"
"github.com/spf13/cobra"
"github.com/testcontainers/testcontainers-go"
+
+ "github.com/smartcontractkit/chainlink-testing-framework/logging"
+ "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env"
)
func main() {
@@ -31,7 +32,7 @@ func main() {
Use: "cl-cluster",
Short: "Basic CL cluster",
RunE: func(cmd *cobra.Command, args []string) error {
- utils.SetupCoreDockerEnvLogger()
+ log.Logger = logging.GetLogger(nil, "CORE_DOCKER_ENV_LOG_LEVEL")
log.Info().Msg("Starting CL cluster test environment..")
_, err := test_env.NewCLTestEnvBuilder().
@@ -50,6 +51,7 @@ func main() {
return nil
},
}
+
startEnvCmd.AddCommand(startFullEnvCmd)
// Set default log level for non-testcontainer code
diff --git a/integration-tests/docker/test_env/cl_node.go b/integration-tests/docker/test_env/cl_node.go
index 4c40e641210..3c0a6d3af76 100644
--- a/integration-tests/docker/test_env/cl_node.go
+++ b/integration-tests/docker/test_env/cl_node.go
@@ -1,15 +1,11 @@
package test_env
import (
- "context"
- "crypto/ed25519"
- "encoding/hex"
"fmt"
"math/big"
"net/url"
"os"
"strings"
- "sync"
"testing"
"time"
@@ -17,7 +13,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/google/uuid"
"github.com/pelletier/go-toml/v2"
- "github.com/pkg/errors"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
tc "github.com/testcontainers/testcontainers-go"
@@ -30,8 +25,6 @@ import (
"github.com/smartcontractkit/chainlink-testing-framework/logwatch"
"github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype"
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2/types"
- "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper"
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/utils"
@@ -119,7 +112,7 @@ func (n *ClNode) SetTestLogger(t *testing.T) {
// Restart restarts only CL node, DB container is reused
func (n *ClNode) Restart(cfg *chainlink.Config) error {
- if err := n.Container.Terminate(context.Background()); err != nil {
+ if err := n.Container.Terminate(utils.TestContext(n.t)); err != nil {
return err
}
n.NodeConfig = cfg
@@ -127,7 +120,7 @@ func (n *ClNode) Restart(cfg *chainlink.Config) error {
}
// UpgradeVersion restarts the cl node with new image and version
-func (n *ClNode) UpgradeVersion(cfg *chainlink.Config, newImage, newVersion string) error {
+func (n *ClNode) UpgradeVersion(newImage, newVersion string) error {
if newVersion == "" {
return fmt.Errorf("new version is empty")
}
@@ -143,9 +136,9 @@ func (n *ClNode) PrimaryETHAddress() (string, error) {
return n.API.PrimaryEthAddress()
}
-func (n *ClNode) AddBootstrapJob(verifierAddr common.Address, fromBlock uint64, chainId int64,
+func (n *ClNode) AddBootstrapJob(verifierAddr common.Address, chainId int64,
feedId [32]byte) (*client.Job, error) {
- spec := utils.BuildBootstrapSpec(verifierAddr, chainId, fromBlock, feedId)
+ spec := utils.BuildBootstrapSpec(verifierAddr, chainId, feedId)
return n.API.MustCreateJob(spec)
}
@@ -197,13 +190,17 @@ func (n *ClNode) AddMercuryOCRJob(verifierAddr common.Address, fromBlock uint64,
}
func (n *ClNode) GetContainerName() string {
- name, err := n.Container.Name(context.Background())
+ name, err := n.Container.Name(utils.TestContext(n.t))
if err != nil {
return ""
}
return strings.Replace(name, "/", "", -1)
}
+func (n *ClNode) GetAPIClient() *client.ChainlinkClient {
+ return n.API
+}
+
func (n *ClNode) GetPeerUrl() (string, error) {
p2pKeys, err := n.API.MustReadP2PKeys()
if err != nil {
@@ -282,18 +279,18 @@ func (n *ClNode) StartContainer() error {
Logger: l,
})
if err != nil {
- return errors.Wrap(err, ErrStartCLNodeContainer)
+ return fmt.Errorf("%s err: %w", ErrStartCLNodeContainer, err)
}
if n.lw != nil {
- if err := n.lw.ConnectContainer(context.Background(), container, "cl-node", true); err != nil {
+ if err := n.lw.ConnectContainer(utils.TestContext(n.t), container, "cl-node", true); err != nil {
return err
}
}
- clEndpoint, err := test_env.GetEndpoint(context.Background(), container, "http")
+ clEndpoint, err := test_env.GetEndpoint(utils.TestContext(n.t), container, "http")
if err != nil {
return err
}
- ip, err := container.ContainerIP(context.Background())
+ ip, err := container.ContainerIP(utils.TestContext(n.t))
if err != nil {
return err
}
@@ -314,7 +311,7 @@ func (n *ClNode) StartContainer() error {
},
n.l)
if err != nil {
- return errors.Wrap(err, ErrConnectNodeClient)
+ return fmt.Errorf("%s err: %w", ErrConnectNodeClient, err)
}
clClient.Config.InternalIP = n.ContainerName
n.Container = container
@@ -411,83 +408,3 @@ func (n *ClNode) getContainerRequest(secrets string) (
},
}, nil
}
-
-func GetOracleIdentities(chainlinkNodes []*ClNode) ([]int, []confighelper.OracleIdentityExtra) {
- S := make([]int, len(chainlinkNodes))
- oracleIdentities := make([]confighelper.OracleIdentityExtra, len(chainlinkNodes))
- sharedSecretEncryptionPublicKeys := make([]ocrtypes.ConfigEncryptionPublicKey, len(chainlinkNodes))
- var wg sync.WaitGroup
- for i, cl := range chainlinkNodes {
- wg.Add(1)
- go func(i int, cl *ClNode) error {
- defer wg.Done()
-
- ocr2Keys, err := cl.API.MustReadOCR2Keys()
- if err != nil {
- return err
- }
- var ocr2Config client.OCR2KeyAttributes
- for _, key := range ocr2Keys.Data {
- if key.Attributes.ChainType == string(chaintype.EVM) {
- ocr2Config = key.Attributes
- break
- }
- }
-
- keys, err := cl.API.MustReadP2PKeys()
- if err != nil {
- return err
- }
- p2pKeyID := keys.Data[0].Attributes.PeerID
-
- offchainPkBytes, err := hex.DecodeString(strings.TrimPrefix(ocr2Config.OffChainPublicKey, "ocr2off_evm_"))
- if err != nil {
- return err
- }
-
- offchainPkBytesFixed := [ed25519.PublicKeySize]byte{}
- copy(offchainPkBytesFixed[:], offchainPkBytes)
- if err != nil {
- return err
- }
-
- configPkBytes, err := hex.DecodeString(strings.TrimPrefix(ocr2Config.ConfigPublicKey, "ocr2cfg_evm_"))
- if err != nil {
- return err
- }
-
- configPkBytesFixed := [ed25519.PublicKeySize]byte{}
- copy(configPkBytesFixed[:], configPkBytes)
- if err != nil {
- return err
- }
-
- onchainPkBytes, err := hex.DecodeString(strings.TrimPrefix(ocr2Config.OnChainPublicKey, "ocr2on_evm_"))
- if err != nil {
- return err
- }
-
- csaKeys, _, err := cl.API.ReadCSAKeys()
- if err != nil {
- return err
- }
-
- sharedSecretEncryptionPublicKeys[i] = configPkBytesFixed
- oracleIdentities[i] = confighelper.OracleIdentityExtra{
- OracleIdentity: confighelper.OracleIdentity{
- OnchainPublicKey: onchainPkBytes,
- OffchainPublicKey: offchainPkBytesFixed,
- PeerID: p2pKeyID,
- TransmitAccount: ocrtypes.Account(csaKeys.Data[0].ID),
- },
- ConfigEncryptionPublicKey: configPkBytesFixed,
- }
- S[i] = 1
-
- return nil
- }(i, cl)
- }
- wg.Wait()
-
- return S, oracleIdentities
-}
diff --git a/integration-tests/docker/test_env/cl_node_cluster.go b/integration-tests/docker/test_env/cl_node_cluster.go
index a717a192649..08122b5744d 100644
--- a/integration-tests/docker/test_env/cl_node_cluster.go
+++ b/integration-tests/docker/test_env/cl_node_cluster.go
@@ -1,10 +1,12 @@
package test_env
import (
+ "fmt"
+
"github.com/ethereum/go-ethereum/common"
- "github.com/pkg/errors"
- "github.com/smartcontractkit/chainlink/integration-tests/client"
"golang.org/x/sync/errgroup"
+
+ "github.com/smartcontractkit/chainlink/integration-tests/client"
)
var (
@@ -60,7 +62,7 @@ func (c *ClCluster) NodeCSAKeys() ([]string, error) {
for _, n := range c.Nodes {
csaKeys, err := n.GetNodeCSAKeys()
if err != nil {
- return nil, errors.Wrap(err, ErrGetNodeCSAKeys)
+ return nil, fmt.Errorf("%s, err: %w", ErrGetNodeCSAKeys, err)
}
keys = append(keys, csaKeys.Data[0].ID)
}
diff --git a/integration-tests/docker/test_env/test_env.go b/integration-tests/docker/test_env/test_env.go
index 07b193f102f..9987bab2fe0 100644
--- a/integration-tests/docker/test_env/test_env.go
+++ b/integration-tests/docker/test_env/test_env.go
@@ -1,18 +1,17 @@
package test_env
import (
- "context"
"encoding/json"
"fmt"
"io"
"math/big"
"os"
"path/filepath"
+ "runtime/debug"
"testing"
"time"
"github.com/ethereum/go-ethereum/accounts/keystore"
- "github.com/pkg/errors"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
tc "github.com/testcontainers/testcontainers-go"
@@ -23,11 +22,11 @@ import (
"github.com/smartcontractkit/chainlink-testing-framework/docker/test_env"
"github.com/smartcontractkit/chainlink-testing-framework/logging"
"github.com/smartcontractkit/chainlink-testing-framework/logwatch"
+ "github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
"github.com/smartcontractkit/chainlink/integration-tests/utils"
- "github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
)
var (
@@ -40,26 +39,26 @@ type CLClusterTestEnv struct {
LogWatch *logwatch.LogWatch
/* components */
- ClCluster *ClCluster
- Geth *test_env.Geth // for tests using --dev networks
- PrivateChain []test_env.PrivateChain // for tests using non-dev networks
- MockAdapter *test_env.Killgrave
- EVMClient blockchain.EVMClient
- ContractDeployer contracts.ContractDeployer
- ContractLoader contracts.ContractLoader
- l zerolog.Logger
- t *testing.T
+ ClCluster *ClCluster
+ PrivateChain []test_env.PrivateChain // for tests using non-dev networks -- unify it with new approach
+ MockAdapter *test_env.Killgrave
+ EVMClient blockchain.EVMClient
+ ContractDeployer contracts.ContractDeployer
+ ContractLoader contracts.ContractLoader
+ RpcProvider test_env.RpcProvider
+ PrivateEthereumConfig *test_env.EthereumNetwork // new approach to private chains, supporting eth1 and eth2
+ l zerolog.Logger
+ t *testing.T
}
func NewTestEnv() (*CLClusterTestEnv, error) {
- utils.SetupCoreDockerEnvLogger()
+ log.Logger = logging.GetLogger(nil, "CORE_DOCKER_ENV_LOG_LEVEL")
network, err := docker.CreateNetwork(log.Logger)
if err != nil {
return nil, err
}
n := []string{network.Name}
return &CLClusterTestEnv{
- Geth: test_env.NewGeth(n),
MockAdapter: test_env.NewKillgrave(n, ""),
Network: network,
l: log.Logger,
@@ -67,11 +66,10 @@ func NewTestEnv() (*CLClusterTestEnv, error) {
}
// WithTestEnvConfig sets the test environment cfg.
-// Sets up the Geth and MockAdapter containers with the provided cfg.
+// Sets up private ethereum chain and MockAdapter containers with the provided cfg.
func (te *CLClusterTestEnv) WithTestEnvConfig(cfg *TestEnvConfig) *CLClusterTestEnv {
te.Cfg = cfg
n := []string{te.Network.Name}
- te.Geth = test_env.NewGeth(n, test_env.WithContainerName(te.Cfg.Geth.ContainerName))
te.MockAdapter = test_env.NewKillgrave(n, te.Cfg.MockAdapter.ImpostersPath, test_env.WithContainerName(te.Cfg.MockAdapter.ContainerName))
return te
}
@@ -79,7 +77,6 @@ func (te *CLClusterTestEnv) WithTestEnvConfig(cfg *TestEnvConfig) *CLClusterTest
func (te *CLClusterTestEnv) WithTestLogger(t *testing.T) *CLClusterTestEnv {
te.t = t
te.l = logging.GetTestLogger(t)
- te.Geth.WithTestLogger(t)
te.MockAdapter.WithTestLogger(t)
return te
}
@@ -114,7 +111,7 @@ func (te *CLClusterTestEnv) StartPrivateChain() error {
for _, chain := range te.PrivateChain {
primaryNode := chain.GetPrimaryNode()
if primaryNode == nil {
- return errors.WithStack(fmt.Errorf("primary node is nil in PrivateChain interface"))
+ return fmt.Errorf("primary node is nil in PrivateChain interface, stack: %s", string(debug.Stack()))
}
err := primaryNode.Start()
if err != nil {
@@ -128,8 +125,26 @@ func (te *CLClusterTestEnv) StartPrivateChain() error {
return nil
}
-func (te *CLClusterTestEnv) StartGeth() (blockchain.EVMNetwork, test_env.InternalDockerUrls, error) {
- return te.Geth.StartContainer()
+func (te *CLClusterTestEnv) StartEthereumNetwork(cfg *test_env.EthereumNetwork) (blockchain.EVMNetwork, test_env.RpcProvider, error) {
+ // if environment is being restored from a previous state, use the existing config
+ // this might fail terribly if temporary folders with chain data on the host machine were removed
+ if te.Cfg != nil && te.Cfg.EthereumNetwork != nil {
+ builder := test_env.NewEthereumNetworkBuilder()
+ c, err := builder.WithExistingConfig(*te.Cfg.EthereumNetwork).
+ WithTest(te.t).
+ Build()
+ if err != nil {
+ return blockchain.EVMNetwork{}, test_env.RpcProvider{}, err
+ }
+ cfg = &c
+ }
+ n, rpc, err := cfg.Start()
+
+ if err != nil {
+ return blockchain.EVMNetwork{}, test_env.RpcProvider{}, err
+ }
+
+ return n, rpc, nil
}
func (te *CLClusterTestEnv) StartMockAdapter() error {
@@ -164,8 +179,9 @@ func (te *CLClusterTestEnv) StartClCluster(nodeConfig *chainlink.Config, count i
func (te *CLClusterTestEnv) FundChainlinkNodes(amount *big.Float) error {
for _, cl := range te.ClCluster.Nodes {
if err := cl.Fund(te.EVMClient, amount); err != nil {
- return errors.Wrap(err, ErrFundCLNode)
+ return fmt.Errorf("%s, err: %w", ErrFundCLNode, err)
}
+ time.Sleep(5 * time.Second)
}
return te.EVMClient.WaitForEvents()
}
@@ -180,12 +196,14 @@ func (te *CLClusterTestEnv) Terminate() error {
func (te *CLClusterTestEnv) Cleanup() error {
te.l.Info().Msg("Cleaning up test environment")
if te.t == nil {
- return errors.New("cannot cleanup test environment without a testing.T")
+ return fmt.Errorf("cannot cleanup test environment without a testing.T")
}
if te.ClCluster == nil || len(te.ClCluster.Nodes) == 0 {
- return errors.New("chainlink nodes are nil, unable cleanup chainlink nodes")
+ return fmt.Errorf("chainlink nodes are nil, unable cleanup chainlink nodes")
}
+ te.logWhetherAllContainersAreRunning()
+
// TODO: This is an imperfect and temporary solution, see TT-590 for a more sustainable solution
// Collect logs if the test fails, or if we just want them
if te.t.Failed() || os.Getenv("TEST_LOG_COLLECT") == "true" {
@@ -195,7 +213,7 @@ func (te *CLClusterTestEnv) Cleanup() error {
}
if te.EVMClient == nil {
- return errors.New("evm client is nil, unable to return funds from chainlink nodes during cleanup")
+ return fmt.Errorf("evm client is nil, unable to return funds from chainlink nodes during cleanup")
} else if te.EVMClient.NetworkSimulated() {
te.l.Info().
Str("Network Name", te.EVMClient.GetNetworkName()).
@@ -215,6 +233,21 @@ func (te *CLClusterTestEnv) Cleanup() error {
return nil
}
+func (te *CLClusterTestEnv) logWhetherAllContainersAreRunning() {
+ for _, node := range te.ClCluster.Nodes {
+ isCLRunning := node.Container.IsRunning()
+ isDBRunning := node.PostgresDb.Container.IsRunning()
+
+ if !isCLRunning {
+ te.l.Warn().Str("Node", node.ContainerName).Msg("Chainlink node was not running, when test ended")
+ }
+
+ if !isDBRunning {
+ te.l.Warn().Str("Node", node.ContainerName).Msg("Postgres DB is not running, when test ended")
+ }
+ }
+}
+
// collectTestLogs collects the logs from all the Chainlink nodes in the test environment and writes them to local files
func (te *CLClusterTestEnv) collectTestLogs() error {
te.l.Info().Msg("Collecting test logs")
@@ -233,7 +266,7 @@ func (te *CLClusterTestEnv) collectTestLogs() error {
return err
}
defer logFile.Close()
- logReader, err := node.Container.Logs(context.Background())
+ logReader, err := node.Container.Logs(utils.TestContext(te.t))
if err != nil {
return err
}
diff --git a/integration-tests/docker/test_env/test_env_builder.go b/integration-tests/docker/test_env/test_env_builder.go
index d1550240500..77c56690155 100644
--- a/integration-tests/docker/test_env/test_env_builder.go
+++ b/integration-tests/docker/test_env/test_env_builder.go
@@ -4,9 +4,9 @@ import (
"fmt"
"math/big"
"os"
+ "runtime/debug"
"testing"
- "github.com/pkg/errors"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
@@ -19,6 +19,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
"github.com/smartcontractkit/chainlink/integration-tests/types/config/node"
+ evmcfg "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml"
)
type CleanUpType string
@@ -30,22 +31,25 @@ const (
)
type CLTestEnvBuilder struct {
- hasLogWatch bool
- hasGeth bool
- hasKillgrave bool
- hasForwarders bool
- clNodeConfig *chainlink.Config
- secretsConfig string
- nonDevGethNetworks []blockchain.EVMNetwork
- clNodesCount int
- customNodeCsaKeys []string
- defaultNodeCsaKeys []string
- l zerolog.Logger
- t *testing.T
- te *CLClusterTestEnv
- isNonEVM bool
- cleanUpType CleanUpType
- cleanUpCustomFn func()
+ hasLogWatch bool
+ // hasGeth bool
+ hasKillgrave bool
+ hasForwarders bool
+ clNodeConfig *chainlink.Config
+ secretsConfig string
+ nonDevGethNetworks []blockchain.EVMNetwork
+ clNodesCount int
+ customNodeCsaKeys []string
+ defaultNodeCsaKeys []string
+ l zerolog.Logger
+ t *testing.T
+ te *CLClusterTestEnv
+ isNonEVM bool
+ cleanUpType CleanUpType
+ cleanUpCustomFn func()
+ chainOptionsFn []ChainOption
+ evmClientNetworkOption []EVMClientNetworkOption
+ ethereumNetwork *test_env.EthereumNetwork
/* funding */
ETHFunds *big.Float
@@ -115,8 +119,27 @@ func (b *CLTestEnvBuilder) WithFunding(eth *big.Float) *CLTestEnvBuilder {
return b
}
+// Deprecated: use WithPrivateEthereumNetwork instead.
+// Left only for backward compatibility.
func (b *CLTestEnvBuilder) WithGeth() *CLTestEnvBuilder {
- b.hasGeth = true
+ ethBuilder := test_env.NewEthereumNetworkBuilder()
+ cfg, err := ethBuilder.
+ WithConsensusType(test_env.ConsensusType_PoW).
+ WithExecutionLayer(test_env.ExecutionLayer_Geth).
+ WithTest(b.t).
+ Build()
+
+ if err != nil {
+ panic(err)
+ }
+
+ b.ethereumNetwork = &cfg
+
+ return b
+}
+
+func (b *CLTestEnvBuilder) WithPrivateEthereumNetwork(en test_env.EthereumNetwork) *CLTestEnvBuilder {
+ b.ethereumNetwork = &en
return b
}
@@ -162,6 +185,24 @@ func (b *CLTestEnvBuilder) WithCustomCleanup(customFn func()) *CLTestEnvBuilder
return b
}
+type ChainOption = func(*evmcfg.Chain) *evmcfg.Chain
+
+func (b *CLTestEnvBuilder) WithChainOptions(opts ...ChainOption) *CLTestEnvBuilder {
+ b.chainOptionsFn = make([]ChainOption, 0)
+ b.chainOptionsFn = append(b.chainOptionsFn, opts...)
+
+ return b
+}
+
+type EVMClientNetworkOption = func(*blockchain.EVMNetwork) *blockchain.EVMNetwork
+
+func (b *CLTestEnvBuilder) EVMClientNetworkOptions(opts ...EVMClientNetworkOption) *CLTestEnvBuilder {
+ b.evmClientNetworkOption = make([]EVMClientNetworkOption, 0)
+ b.evmClientNetworkOption = append(b.evmClientNetworkOption, opts...)
+
+ return b
+}
+
func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) {
if b.te == nil {
var err error
@@ -170,13 +211,6 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) {
return nil, err
}
}
- b.l.Info().
- Bool("hasGeth", b.hasGeth).
- Bool("hasKillgrave", b.hasKillgrave).
- Int("clNodesCount", b.clNodesCount).
- Strs("customNodeCsaKeys", b.customNodeCsaKeys).
- Strs("defaultNodeCsaKeys", b.defaultNodeCsaKeys).
- Msg("Building CL cluster test environment..")
var err error
if b.t != nil {
@@ -209,7 +243,7 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) {
case CleanUpTypeNone:
b.l.Warn().Msg("test environment won't be cleaned up")
case "":
- return b.te, errors.WithMessage(errors.New("explicit cleanup type must be set when building test environment"), "test environment builder failed")
+ return b.te, fmt.Errorf("test environment builder failed: %w", fmt.Errorf("explicit cleanup type must be set when building test environment"))
}
if b.nonDevGethNetworks != nil {
@@ -222,14 +256,14 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) {
for i, n := range b.te.PrivateChain {
primaryNode := n.GetPrimaryNode()
if primaryNode == nil {
- return b.te, errors.WithStack(fmt.Errorf("primary node is nil in PrivateChain interface"))
+ return b.te, fmt.Errorf("primary node is nil in PrivateChain interface, stack: %s", string(debug.Stack()))
}
nonDevNetworks = append(nonDevNetworks, *n.GetNetworkConfig())
nonDevNetworks[i].URLs = []string{primaryNode.GetInternalWsUrl()}
nonDevNetworks[i].HTTPURLs = []string{primaryNode.GetInternalHttpUrl()}
}
if nonDevNetworks == nil {
- return nil, errors.New("cannot create nodes with custom config without nonDevNetworks")
+ return nil, fmt.Errorf("cannot create nodes with custom config without nonDevNetworks")
}
err = b.te.StartClCluster(b.clNodeConfig, b.clNodesCount, b.secretsConfig)
@@ -238,17 +272,29 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) {
}
return b.te, nil
}
+
networkConfig := networks.MustGetSelectedNetworksFromEnv()[0]
- var internalDockerUrls test_env.InternalDockerUrls
- if b.hasGeth && networkConfig.Simulated {
- networkConfig, internalDockerUrls, err = b.te.StartGeth()
+ var rpcProvider test_env.RpcProvider
+ if b.ethereumNetwork != nil && networkConfig.Simulated {
+ // TODO: here we should save the ethereum network config to te.Cfg, but it doesn't exist at this point.
+ // In general it seems we have no methods for saving config to a file; we only load it from a file,
+ // but I don't know how that config file is to be created or whether anyone has ever done that.
+ var enCfg test_env.EthereumNetwork
+ b.ethereumNetwork.DockerNetworkNames = []string{b.te.Network.Name}
+ networkConfig, rpcProvider, err = b.te.StartEthereumNetwork(b.ethereumNetwork)
if err != nil {
return nil, err
}
-
+ b.te.RpcProvider = rpcProvider
+ b.te.PrivateEthereumConfig = &enCfg
}
if !b.isNonEVM {
+ if b.evmClientNetworkOption != nil && len(b.evmClientNetworkOption) > 0 {
+ for _, fn := range b.evmClientNetworkOption {
+ fn(&networkConfig)
+ }
+ }
bc, err := blockchain.NewEVMClientFromNetwork(networkConfig, b.l)
if err != nil {
return nil, err
@@ -286,14 +332,22 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) {
var httpUrls []string
var wsUrls []string
if networkConfig.Simulated {
- httpUrls = []string{internalDockerUrls.HttpUrl}
- wsUrls = []string{internalDockerUrls.WsUrl}
+ httpUrls = rpcProvider.PrivateHttpUrls()
+ wsUrls = rpcProvider.PrivateWsUrsl()
} else {
httpUrls = networkConfig.HTTPURLs
wsUrls = networkConfig.URLs
}
node.SetChainConfig(cfg, wsUrls, httpUrls, networkConfig, b.hasForwarders)
+
+ if b.chainOptionsFn != nil && len(b.chainOptionsFn) > 0 {
+ for _, fn := range b.chainOptionsFn {
+ for _, evmCfg := range cfg.EVM {
+ fn(&evmCfg.Chain)
+ }
+ }
+ }
}
err := b.te.StartClCluster(cfg, b.clNodesCount, b.secretsConfig)
@@ -308,7 +362,7 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) {
b.defaultNodeCsaKeys = nodeCsaKeys
}
- if b.hasGeth && b.clNodesCount > 0 && b.ETHFunds != nil {
+ if b.ethereumNetwork != nil && b.clNodesCount > 0 && b.ETHFunds != nil {
b.te.ParallelTransactions(true)
defer b.te.ParallelTransactions(false)
if err := b.te.FundChainlinkNodes(b.ETHFunds); err != nil {
@@ -316,5 +370,20 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) {
}
}
+ var enDesc string
+ if b.te.PrivateEthereumConfig != nil {
+ enDesc = b.te.PrivateEthereumConfig.Describe()
+ } else {
+ enDesc = "none"
+ }
+
+ b.l.Info().
+ Str("privateEthereumNetwork", enDesc).
+ Bool("hasKillgrave", b.hasKillgrave).
+ Int("clNodesCount", b.clNodesCount).
+ Strs("customNodeCsaKeys", b.customNodeCsaKeys).
+ Strs("defaultNodeCsaKeys", b.defaultNodeCsaKeys).
+ Msg("Building CL cluster test environment..")
+
return b.te, nil
}
diff --git a/integration-tests/docker/test_env/test_env_config.go b/integration-tests/docker/test_env/test_env_config.go
index 1a0c8d5c86a..0902deb0c2d 100644
--- a/integration-tests/docker/test_env/test_env_config.go
+++ b/integration-tests/docker/test_env/test_env_config.go
@@ -3,14 +3,16 @@ package test_env
import (
"encoding/json"
+ cte "github.com/smartcontractkit/chainlink-testing-framework/docker/test_env"
env "github.com/smartcontractkit/chainlink/integration-tests/types/envcommon"
)
type TestEnvConfig struct {
- Networks []string `json:"networks"`
- Geth GethConfig `json:"geth"`
- MockAdapter MockAdapterConfig `json:"mock_adapter"`
- ClCluster *ClCluster `json:"clCluster"`
+ Networks []string `json:"networks"`
+ Geth GethConfig `json:"geth"`
+ MockAdapter MockAdapterConfig `json:"mock_adapter"`
+ ClCluster *ClCluster `json:"clCluster"`
+ EthereumNetwork *cte.EthereumNetwork `json:"private_ethereum_config"`
}
type MockAdapterConfig struct {
diff --git a/integration-tests/go.mod b/integration-tests/go.mod
index 3affd799194..a943e1c41a9 100644
--- a/integration-tests/go.mod
+++ b/integration-tests/go.mod
@@ -6,24 +6,26 @@ go 1.21
replace github.com/smartcontractkit/chainlink/v2 => ../
require (
+ cosmossdk.io/errors v1.0.0
github.com/K-Phoen/grabana v0.21.17
github.com/cli/go-gh/v2 v2.0.0
github.com/ethereum/go-ethereum v1.12.0
github.com/go-resty/resty/v2 v2.7.0
github.com/google/uuid v1.3.1
+ github.com/jmoiron/sqlx v1.3.5
github.com/kelseyhightower/envconfig v1.4.0
github.com/lib/pq v1.10.9
github.com/manifoldco/promptui v0.9.0
github.com/onsi/gomega v1.27.8
github.com/pelletier/go-toml/v2 v2.1.0
- github.com/pkg/errors v0.9.1
github.com/rs/zerolog v1.30.0
+ github.com/scylladb/go-reflectx v1.0.1
github.com/segmentio/ksuid v1.0.4
github.com/slack-go/slack v0.12.2
- github.com/smartcontractkit/chainlink-testing-framework v1.18.1
+ github.com/smartcontractkit/chainlink-testing-framework v1.18.6
github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000
- github.com/smartcontractkit/libocr v0.0.0-20231020123319-d255366a6545
- github.com/smartcontractkit/ocr2keepers v0.7.27
+ github.com/smartcontractkit/libocr v0.0.0-20231107151413-13e0202ae8d7
+ github.com/smartcontractkit/ocr2keepers v0.7.28
github.com/smartcontractkit/ocr2vrf v0.0.0-20230804151440-2f1eb1e20687
github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1
github.com/smartcontractkit/wasp v0.3.0
@@ -49,13 +51,13 @@ require (
cosmossdk.io/api v0.3.1 // indirect
cosmossdk.io/core v0.5.1 // indirect
cosmossdk.io/depinject v1.0.0-alpha.3 // indirect
- cosmossdk.io/errors v1.0.0 // indirect
cosmossdk.io/math v1.0.1 // indirect
dario.cat/mergo v1.0.0 // indirect
filippo.io/edwards25519 v1.0.0 // indirect
github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect
github.com/99designs/keyring v1.2.1 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
+ github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d // indirect
github.com/CosmWasm/wasmd v0.40.1 // indirect
github.com/CosmWasm/wasmvm v1.2.4 // indirect
@@ -140,7 +142,7 @@ require (
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/fvbommel/sortorder v1.0.2 // indirect
- github.com/fxamacker/cbor/v2 v2.4.0 // indirect
+ github.com/fxamacker/cbor/v2 v2.5.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gagliardetto/binary v0.7.1 // indirect
github.com/gagliardetto/solana-go v1.4.1-0.20220428092759-5250b4abbb27 // indirect
@@ -150,9 +152,11 @@ require (
github.com/gin-contrib/sessions v0.0.5 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/gin-gonic/gin v1.9.1 // indirect
+ github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-kit/kit v0.12.0 // indirect
github.com/go-kit/log v0.2.1 // indirect
+ github.com/go-ldap/ldap/v3 v3.4.5 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@@ -170,15 +174,15 @@ require (
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.14.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect
- github.com/go-webauthn/revoke v0.1.9 // indirect
- github.com/go-webauthn/webauthn v0.8.2 // indirect
+ github.com/go-webauthn/webauthn v0.8.6 // indirect
+ github.com/go-webauthn/x v0.1.4 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/googleapis v1.4.1 // indirect
github.com/gogo/protobuf v1.3.3 // indirect
github.com/gogo/status v1.1.1 // indirect
- github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
+ github.com/golang-jwt/jwt/v5 v5.0.0 // indirect
github.com/golang/glog v1.1.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
@@ -187,7 +191,7 @@ require (
github.com/google/gnostic v0.6.9 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-querystring v1.1.0 // indirect
- github.com/google/go-tpm v0.3.3 // indirect
+ github.com/google/go-tpm v0.9.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 // indirect
@@ -255,7 +259,6 @@ require (
github.com/jbenet/goprocess v0.1.4 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jmhodges/levigo v1.0.0 // indirect
- github.com/jmoiron/sqlx v1.3.5 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
@@ -312,7 +315,7 @@ require (
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
- github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
+ github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/miekg/dns v1.1.55 // indirect
github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect
@@ -359,23 +362,23 @@ require (
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/prometheus/alertmanager v0.25.1 // indirect
github.com/prometheus/client_golang v1.17.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
- github.com/prometheus/common v0.44.0 // indirect
+ github.com/prometheus/common v0.45.0 // indirect
github.com/prometheus/common/sigv4 v0.1.0 // indirect
github.com/prometheus/exporter-toolkit v0.10.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
- github.com/prometheus/prometheus v0.46.0 // indirect
+ github.com/prometheus/prometheus v0.47.2 // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/rogpeppe/go-internal v1.11.0 // indirect
github.com/russross/blackfriday v1.6.0 // indirect
github.com/sasha-s/go-deadlock v0.3.1 // indirect
- github.com/scylladb/go-reflectx v1.0.1 // indirect
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
github.com/sercand/kuberesolver v2.4.0+incompatible // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
@@ -383,11 +386,10 @@ require (
github.com/shopspring/decimal v1.3.1 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 // indirect
- github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231030134738-81a5a89699a0 // indirect
- github.com/smartcontractkit/chainlink-relay v0.1.7-0.20231031114820-e9826d481111 // indirect
+ github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231109141932-cb1ea9020255 // indirect
+ github.com/smartcontractkit/chainlink-relay v0.1.7-0.20231113174149-046d4ddaca1a // indirect
github.com/smartcontractkit/chainlink-solana v1.0.3-0.20231023133638-72f4e799ab05 // indirect
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20231024133459-1ef3a11319eb // indirect
- github.com/smartcontractkit/sqlx v1.3.5-0.20210805004948-4be295aacbeb // indirect
github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 // indirect
github.com/smartcontractkit/wsrpc v0.7.2 // indirect
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect
@@ -406,7 +408,7 @@ require (
github.com/teris-io/shortid v0.0.0-20201117134242-e59966efd125 // indirect
github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a // indirect
github.com/tidwall/btree v1.6.0 // indirect
- github.com/tidwall/gjson v1.16.0 // indirect
+ github.com/tidwall/gjson v1.17.0 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
@@ -424,7 +426,6 @@ require (
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xlab/treeprint v1.1.0 // indirect
- github.com/yuin/goldmark v1.4.13 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
github.com/zondax/hid v0.9.1 // indirect
github.com/zondax/ledger-go v0.14.1 // indirect
@@ -452,17 +453,16 @@ require (
golang.org/x/arch v0.4.0 // indirect
golang.org/x/crypto v0.14.0 // indirect
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect
- golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
golang.org/x/mod v0.13.0 // indirect
golang.org/x/net v0.17.0 // indirect
- golang.org/x/oauth2 v0.10.0 // indirect
+ golang.org/x/oauth2 v0.12.0 // indirect
golang.org/x/sys v0.13.0 // indirect
golang.org/x/term v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.14.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
- gonum.org/v1/gonum v0.13.0 // indirect
+ gonum.org/v1/gonum v0.14.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230717213848-3f92550aa753 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230717213848-3f92550aa753 // indirect
diff --git a/integration-tests/go.sum b/integration-tests/go.sum
index 98685fdf889..5719c36b5a8 100644
--- a/integration-tests/go.sum
+++ b/integration-tests/go.sum
@@ -575,6 +575,8 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
+github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
@@ -641,6 +643,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
+github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA=
+github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc=
github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
@@ -990,8 +994,8 @@ github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/fvbommel/sortorder v1.0.2 h1:mV4o8B2hKboCdkJm+a7uX/SIpZob4JzUpc5GGnM45eo=
github.com/fvbommel/sortorder v1.0.2/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
-github.com/fxamacker/cbor/v2 v2.4.0 h1:ri0ArlOR+5XunOP8CRUowT0pSJOwhW098ZCUyskZD88=
-github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo=
+github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE=
+github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gagliardetto/binary v0.6.1/go.mod h1:aOfYkc20U0deHaHn/LVZXiqlkDbFAX0FpTlDhsXa0S0=
@@ -1027,6 +1031,8 @@ github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/
github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
+github.com/go-asn1-ber/asn1-ber v1.5.4 h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A=
+github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
@@ -1047,6 +1053,8 @@ github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk=
+github.com/go-ldap/ldap/v3 v3.4.5 h1:ekEKmaDrpvR2yf5Nc/DClsGG9lAmdDixe44mLzlW5r8=
+github.com/go-ldap/ldap/v3 v3.4.5/go.mod h1:bMGIq3AGbytbaMwf8wdv5Phdxz0FWHTIYMSzyrYgnQs=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
@@ -1126,10 +1134,10 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-test/deep v1.0.4 h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho=
github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
-github.com/go-webauthn/revoke v0.1.9 h1:gSJ1ckA9VaKA2GN4Ukp+kiGTk1/EXtaDb1YE8RknbS0=
-github.com/go-webauthn/revoke v0.1.9/go.mod h1:j6WKPnv0HovtEs++paan9g3ar46gm1NarktkXBaPR+w=
-github.com/go-webauthn/webauthn v0.8.2 h1:8KLIbpldjz9KVGHfqEgJNbkhd7bbRXhNw4QWFJE15oA=
-github.com/go-webauthn/webauthn v0.8.2/go.mod h1:d+ezx/jMCNDiqSMzOchuynKb9CVU1NM9BumOnokfcVQ=
+github.com/go-webauthn/webauthn v0.8.6 h1:bKMtL1qzd2WTFkf1mFTVbreYrwn7dsYmEPjTq6QN90E=
+github.com/go-webauthn/webauthn v0.8.6/go.mod h1:emwVLMCI5yx9evTTvr0r+aOZCdWJqMfbRhF0MufyUog=
+github.com/go-webauthn/x v0.1.4 h1:sGmIFhcY70l6k7JIDfnjVBiAAFEssga5lXIUXe0GtAs=
+github.com/go-webauthn/x v0.1.4/go.mod h1:75Ug0oK6KYpANh5hDOanfDI+dvPWHk788naJVG/37H8=
github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
@@ -1181,9 +1189,12 @@ github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRs
github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM=
github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg=
github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU=
+github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE=
+github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
@@ -1258,12 +1269,8 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
-github.com/google/go-tpm v0.1.2-0.20190725015402-ae6dd98980d4/go.mod h1:H9HbmUG2YgV/PHITkO7p6wxEEj/v5nlsVWIwumwH2NI=
-github.com/google/go-tpm v0.3.0/go.mod h1:iVLWvrPp/bHeEkxTFi9WG6K9w0iy2yIszHwZGHPbzAw=
-github.com/google/go-tpm v0.3.3 h1:P/ZFNBZYXRxc+z7i5uyd8VP7MaDteuLZInzrH2idRGo=
-github.com/google/go-tpm v0.3.3/go.mod h1:9Hyn3rgnzWF9XBWVk6ml6A6hNkbWjNFlDQL51BeghL4=
-github.com/google/go-tpm-tools v0.0.0-20190906225433-1614c142f845/go.mod h1:AVfHadzbdzHo54inR2x1v640jdi1YSi3NauM2DUsxk0=
-github.com/google/go-tpm-tools v0.2.0/go.mod h1:npUd03rQ60lxN7tzeBJreG38RvWwme2N1reF/eeiBk4=
+github.com/google/go-tpm v0.9.0 h1:sQF6YqWMi+SCXpsmS3fd21oPy/vSddwZry4JnmltHVk=
+github.com/google/go-tpm v0.9.0/go.mod h1:FkNVkc6C+IsvDI9Jw1OveJmxGZUUaKxtrpOS47QWKfU=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
@@ -1343,7 +1350,6 @@ github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyC
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
@@ -1979,8 +1985,9 @@ github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJK
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
+github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
@@ -2254,8 +2261,8 @@ github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
-github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
-github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
+github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
+github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/exporter-toolkit v0.8.2/go.mod h1:00shzmJL7KxcsabLWcONwpyNEuWhREOnFqZW7vadFS0=
@@ -2360,28 +2367,26 @@ github.com/slack-go/slack v0.12.2 h1:x3OppyMyGIbbiyFhsBmpf9pwkUzMhthJMRNmNlA4LaQ
github.com/slack-go/slack v0.12.2/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw=
github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 h1:T3lFWumvbfM1u/etVq42Afwq/jtNSBSOA8n5jntnNPo=
github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704/go.mod h1:2QuJdEouTWjh5BDy5o/vgGXQtR4Gz8yH1IYB5eT7u4M=
-github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231030134738-81a5a89699a0 h1:YrJ3moRDu2kgdv4o3Hym/FWVF4MS5cIZ7o7wk+43pvk=
-github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231030134738-81a5a89699a0/go.mod h1:fxtwgVZzTgoU1CpdSxNvFXecIY2r8DhH2JCzPO4e9G0=
-github.com/smartcontractkit/chainlink-relay v0.1.7-0.20231031114820-e9826d481111 h1:CElKhWq0WIa9Rmg5Ssajs5Hp3m3u/nYIQdXtpj2gbcc=
-github.com/smartcontractkit/chainlink-relay v0.1.7-0.20231031114820-e9826d481111/go.mod h1:M9U1JV7IQi8Sfj4JR1qSi1tIh6omgW78W/8SHN/8BUQ=
+github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231109141932-cb1ea9020255 h1:Pt6c7bJU9wIN6PQQnmN8UmYYH6lpfiQ6U/B8yEC2s5s=
+github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231109141932-cb1ea9020255/go.mod h1:EHppaccd/LTlTMI2o4dmBHe4BknEgEFFDjDGMNuGb3k=
+github.com/smartcontractkit/chainlink-relay v0.1.7-0.20231113174149-046d4ddaca1a h1:G/pD8uI1PULRJU8Y3eLLzjqQBp9ruG9hj+wWxtyrgTo=
+github.com/smartcontractkit/chainlink-relay v0.1.7-0.20231113174149-046d4ddaca1a/go.mod h1:M9U1JV7IQi8Sfj4JR1qSi1tIh6omgW78W/8SHN/8BUQ=
github.com/smartcontractkit/chainlink-solana v1.0.3-0.20231023133638-72f4e799ab05 h1:DaPSVnxe7oz1QJ+AVIhQWs1W3ubQvwvGo9NbHpMs1OQ=
github.com/smartcontractkit/chainlink-solana v1.0.3-0.20231023133638-72f4e799ab05/go.mod h1:o0Pn1pbaUluboaK6/yhf8xf7TiFCkyFl6WUOdwqamuU=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20231024133459-1ef3a11319eb h1:HiluOfEVGOQTM6BTDImOqYdMZZ7qq7fkZ3TJdmItNr8=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20231024133459-1ef3a11319eb/go.mod h1:/30flFG4L/iCYAFeA3DUzR0xuHSxAMONiWTzyzvsNwo=
-github.com/smartcontractkit/chainlink-testing-framework v1.18.1 h1:YznR7isiPYbywuUma5eVSyuZYwbUHIGJ2lpcJazOZgo=
-github.com/smartcontractkit/chainlink-testing-framework v1.18.1/go.mod h1:lMdEUTdSmzldCwqf+todFEyebE9Vlb23+5rvIHJBPOk=
+github.com/smartcontractkit/chainlink-testing-framework v1.18.6 h1:UL3DxsPflSRALP62rsg5v3NdOsa8RHGhHMUImoWDD6k=
+github.com/smartcontractkit/chainlink-testing-framework v1.18.6/go.mod h1:zScXRqmvbyTFUooyLYrOp4+V/sFPUbFJNRc72YmnuIk=
github.com/smartcontractkit/go-plugin v0.0.0-20231003134350-e49dad63b306 h1:ko88+ZznniNJZbZPWAvHQU8SwKAdHngdDZ+pvVgB5ss=
github.com/smartcontractkit/go-plugin v0.0.0-20231003134350-e49dad63b306/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4=
github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f h1:hgJif132UCdjo8u43i7iPN1/MFnu49hv7lFGFftCHKU=
github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f/go.mod h1:MvMXoufZAtqExNexqi4cjrNYE9MefKddKylxjS+//n0=
-github.com/smartcontractkit/libocr v0.0.0-20231020123319-d255366a6545 h1:qOsw2ETQD/Sb/W2xuYn2KPWjvvsWA0C+l19rWFq8iNg=
-github.com/smartcontractkit/libocr v0.0.0-20231020123319-d255366a6545/go.mod h1:2lyRkw/qLQgUWlrWWmq5nj0y90rWeO6Y+v+fCakRgb0=
-github.com/smartcontractkit/ocr2keepers v0.7.27 h1:kwqMrzmEdq6gH4yqNuLQCbdlED0KaIjwZzu3FF+Gves=
-github.com/smartcontractkit/ocr2keepers v0.7.27/go.mod h1:1QGzJURnoWpysguPowOe2bshV0hNp1YX10HHlhDEsas=
+github.com/smartcontractkit/libocr v0.0.0-20231107151413-13e0202ae8d7 h1:21V61XOYSxpFmFqlhr5IaEh1uQ1F6CewJ30D/U/P34c=
+github.com/smartcontractkit/libocr v0.0.0-20231107151413-13e0202ae8d7/go.mod h1:2lyRkw/qLQgUWlrWWmq5nj0y90rWeO6Y+v+fCakRgb0=
+github.com/smartcontractkit/ocr2keepers v0.7.28 h1:dufAiYl4+uly9aH0+6GkS2jYzHGujq7tg0LYQE+x6JU=
+github.com/smartcontractkit/ocr2keepers v0.7.28/go.mod h1:1QGzJURnoWpysguPowOe2bshV0hNp1YX10HHlhDEsas=
github.com/smartcontractkit/ocr2vrf v0.0.0-20230804151440-2f1eb1e20687 h1:NwC3SOc25noBTe1KUQjt45fyTIuInhoE2UfgcHAdihM=
github.com/smartcontractkit/ocr2vrf v0.0.0-20230804151440-2f1eb1e20687/go.mod h1:YYZq52t4wcHoMQeITksYsorD+tZcOyuVU5+lvot3VFM=
-github.com/smartcontractkit/sqlx v1.3.5-0.20210805004948-4be295aacbeb h1:OMaBUb4X9IFPLbGbCHsMU+kw/BPCrewaVwWGIBc0I4A=
-github.com/smartcontractkit/sqlx v1.3.5-0.20210805004948-4be295aacbeb/go.mod h1:HNUu4cJekUdsJbwRBCiOybtkPJEfGRELQPe2tkoDEyk=
github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 h1:yiKnypAqP8l0OX0P3klzZ7SCcBUxy5KqTAKZmQOvSQE=
github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1/go.mod h1:q6f4fe39oZPdsh1i57WznEZgxd8siidMaSFq3wdPmVg=
github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 h1:Dai1bn+Q5cpeGMQwRdjOdVjG8mmFFROVkSKuUgBErRQ=
@@ -2411,7 +2416,6 @@ github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
-github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
@@ -2422,7 +2426,6 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
-github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU=
@@ -2474,8 +2477,8 @@ github.com/thlib/go-timezone-local v0.0.0-20210907160436-ef149e42d28e/go.mod h1:
github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg=
github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
github.com/tidwall/gjson v1.9.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
-github.com/tidwall/gjson v1.16.0 h1:SyXa+dsSPpUlcwEDuKuEBJEz5vzTvOea+9rjyYodQFg=
-github.com/tidwall/gjson v1.16.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
+github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM=
+github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
@@ -2570,7 +2573,6 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
@@ -2713,6 +2715,7 @@ golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20221012134737-56aed061732a/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
+golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -2756,7 +2759,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
@@ -2794,7 +2796,6 @@ golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -2893,8 +2894,8 @@ golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri
golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I=
golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw=
-golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8=
-golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
+golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4=
+golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -3000,7 +3001,6 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210629170331-7dc0b73dc9fb/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -3185,8 +3185,8 @@ gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJ
gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0=
gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA=
-gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM=
-gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU=
+gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0=
+gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY=
@@ -3400,7 +3400,6 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20230717213848-3f92550aa753/go.
google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
diff --git a/integration-tests/load/functions/config.go b/integration-tests/load/functions/config.go
index 5c622401aba..ad7e7446afb 100644
--- a/integration-tests/load/functions/config.go
+++ b/integration-tests/load/functions/config.go
@@ -1,12 +1,14 @@
package loadfunctions
import (
+ "fmt"
+ "math/big"
+ "os"
+
"github.com/pelletier/go-toml/v2"
- "github.com/pkg/errors"
"github.com/rs/zerolog/log"
+
"github.com/smartcontractkit/chainlink/v2/core/store/models"
- "math/big"
- "os"
)
const (
@@ -103,22 +105,21 @@ func ReadConfig() (*PerformanceConfig, error) {
var cfg *PerformanceConfig
d, err := os.ReadFile(DefaultConfigFilename)
if err != nil {
- return nil, errors.Wrap(err, ErrReadPerfConfig)
+ return nil, fmt.Errorf("%s, err: %w", ErrReadPerfConfig, err)
}
err = toml.Unmarshal(d, &cfg)
if err != nil {
- return nil, errors.Wrap(err, ErrUnmarshalPerfConfig)
+ return nil, fmt.Errorf("%s, err: %w", ErrUnmarshalPerfConfig, err)
}
log.Debug().Interface("PerformanceConfig", cfg).Msg("Parsed performance config")
mpk := os.Getenv("MUMBAI_KEYS")
murls := os.Getenv("MUMBAI_URLS")
snet := os.Getenv("SELECTED_NETWORKS")
if mpk == "" || murls == "" || snet == "" {
- return nil, errors.New(
+ return nil, fmt.Errorf(
"ensure variables are set:\nMUMBAI_KEYS variable, private keys, comma separated\nSELECTED_NETWORKS=MUMBAI\nMUMBAI_URLS variable, websocket urls, comma separated",
)
- } else {
- cfg.MumbaiPrivateKey = mpk
}
+ cfg.MumbaiPrivateKey = mpk
return cfg, nil
}
diff --git a/integration-tests/load/functions/functions_test.go b/integration-tests/load/functions/functions_test.go
index 7822035208e..dc52846d3c9 100644
--- a/integration-tests/load/functions/functions_test.go
+++ b/integration-tests/load/functions/functions_test.go
@@ -1,10 +1,11 @@
package loadfunctions
import (
- "github.com/smartcontractkit/wasp"
- "github.com/stretchr/testify/require"
"testing"
"time"
+
+ "github.com/smartcontractkit/wasp"
+ "github.com/stretchr/testify/require"
)
func TestFunctionsLoad(t *testing.T) {
diff --git a/integration-tests/load/functions/gateway.go b/integration-tests/load/functions/gateway.go
index aefe4fbedc2..ac5f895ac18 100644
--- a/integration-tests/load/functions/gateway.go
+++ b/integration-tests/load/functions/gateway.go
@@ -8,16 +8,17 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
+ "time"
+
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/ecies"
"github.com/go-resty/resty/v2"
- "github.com/pkg/errors"
"github.com/rs/zerolog/log"
+ "github.com/smartcontractkit/tdh2/go/tdh2/tdh2easy"
+
"github.com/smartcontractkit/chainlink/v2/core/services/gateway/api"
"github.com/smartcontractkit/chainlink/v2/core/services/gateway/handlers/functions"
"github.com/smartcontractkit/chainlink/v2/core/services/s4"
- "github.com/smartcontractkit/tdh2/go/tdh2/tdh2easy"
- "time"
)
type RPCResponse struct {
@@ -115,7 +116,7 @@ func UploadS4Secrets(rc *resty.Client, s4Cfg *S4SecretsCfg) (uint8, uint64, erro
log.Debug().Interface("Result", result).Msg("S4 secrets_set response result")
for _, nodeResponse := range result.Result.Body.Payload.NodeResponses {
if !nodeResponse.Body.Payload.Success {
- return 0, 0, fmt.Errorf("node response was not succesful")
+ return 0, 0, fmt.Errorf("node response was not successful")
}
}
return uint8(envelope.SlotID), envelope.Version, nil
@@ -182,12 +183,12 @@ func EncryptS4Secrets(deployerPk *ecdsa.PrivateKey, tdh2Pk *tdh2easy.PublicKey,
donKey = bytes.Join([][]byte{b, donKey}, nil)
donPubKey, err := crypto.UnmarshalPubkey(donKey)
if err != nil {
- return "", errors.Wrap(err, "failed to unmarshal DON key")
+ return "", fmt.Errorf("failed to unmarshal DON key: %w", err)
}
eciesDONPubKey := ecies.ImportECDSAPublic(donPubKey)
signature, err := deployerPk.Sign(rand.Reader, []byte(msgJSON), nil)
if err != nil {
- return "", errors.Wrap(err, "failed to sign the msg with Ethereum key")
+ return "", fmt.Errorf("failed to sign the msg with Ethereum key: %w", err)
}
signedSecrets, err := json.Marshal(struct {
Signature []byte `json:"signature"`
@@ -197,29 +198,29 @@ func EncryptS4Secrets(deployerPk *ecdsa.PrivateKey, tdh2Pk *tdh2easy.PublicKey,
Message: msgJSON,
})
if err != nil {
- return "", errors.Wrap(err, "failed to marshal signed secrets")
+ return "", fmt.Errorf("failed to marshal signed secrets: %w", err)
}
ct, err := ecies.Encrypt(rand.Reader, eciesDONPubKey, signedSecrets, nil, nil)
if err != nil {
- return "", errors.Wrap(err, "failed to encrypt with DON key")
+ return "", fmt.Errorf("failed to encrypt with DON key: %w", err)
}
ct0xFormat, err := json.Marshal(map[string]interface{}{"0x0": base64.StdEncoding.EncodeToString(ct)})
if err != nil {
- return "", errors.Wrap(err, "failed to marshal DON key encrypted format")
+ return "", fmt.Errorf("failed to marshal DON key encrypted format: %w", err)
}
ctTDH2Format, err := tdh2easy.Encrypt(tdh2Pk, ct0xFormat)
if err != nil {
- return "", errors.Wrap(err, "failed to encrypt with TDH2 public key")
+ return "", fmt.Errorf("failed to encrypt with TDH2 public key: %w", err)
}
tdh2Message, err := ctTDH2Format.Marshal()
if err != nil {
- return "", errors.Wrap(err, "failed to marshal TDH2 encrypted msg")
+ return "", fmt.Errorf("failed to marshal TDH2 encrypted msg: %w", err)
}
finalMsg, err := json.Marshal(map[string]interface{}{
"encryptedSecrets": "0x" + hex.EncodeToString(tdh2Message),
})
if err != nil {
- return "", errors.Wrap(err, "failed to marshal secrets msg")
+ return "", fmt.Errorf("failed to marshal secrets msg: %w", err)
}
return string(finalMsg), nil
}
diff --git a/integration-tests/load/functions/gateway_gun.go b/integration-tests/load/functions/gateway_gun.go
index fd13922d0a7..3dafb458a50 100644
--- a/integration-tests/load/functions/gateway_gun.go
+++ b/integration-tests/load/functions/gateway_gun.go
@@ -3,14 +3,15 @@ package loadfunctions
import (
"crypto/ecdsa"
"fmt"
- "github.com/go-resty/resty/v2"
- "github.com/rs/zerolog/log"
- "github.com/smartcontractkit/tdh2/go/tdh2/tdh2easy"
- "github.com/smartcontractkit/wasp"
"math/rand"
"os"
"strconv"
"time"
+
+ "github.com/go-resty/resty/v2"
+ "github.com/rs/zerolog/log"
+ "github.com/smartcontractkit/tdh2/go/tdh2/tdh2easy"
+ "github.com/smartcontractkit/wasp"
)
/* SingleFunctionCallGun is a gun that constantly requests randomness for one feed */
diff --git a/integration-tests/load/functions/onchain_monitoring.go b/integration-tests/load/functions/onchain_monitoring.go
index 0a8b4cef46a..c4b4bdb78c0 100644
--- a/integration-tests/load/functions/onchain_monitoring.go
+++ b/integration-tests/load/functions/onchain_monitoring.go
@@ -1,10 +1,11 @@
package loadfunctions
import (
- "github.com/rs/zerolog/log"
- "github.com/smartcontractkit/wasp"
"testing"
"time"
+
+ "github.com/rs/zerolog/log"
+ "github.com/smartcontractkit/wasp"
)
/* Monitors on-chain stats of LoadConsumer and pushes them to Loki every second */
diff --git a/integration-tests/load/functions/request_gun.go b/integration-tests/load/functions/request_gun.go
index d9987eaa756..bd4cf5f35aa 100644
--- a/integration-tests/load/functions/request_gun.go
+++ b/integration-tests/load/functions/request_gun.go
@@ -13,16 +13,15 @@ const (
)
type SingleFunctionCallGun struct {
- ft *FunctionsTest
- mode TestMode
- times uint32
- source string
- slotID uint8
- slotVersion uint64
- encryptedSecrets []byte
- args []string
- subscriptionId uint64
- jobId [32]byte
+ ft *FunctionsTest
+ mode TestMode
+ times uint32
+ source string
+ slotID uint8
+ slotVersion uint64
+ args []string
+ subscriptionId uint64
+ jobId [32]byte
}
func NewSingleFunctionCallGun(
diff --git a/integration-tests/load/functions/setup.go b/integration-tests/load/functions/setup.go
index 5253c531eee..c0be47ca836 100644
--- a/integration-tests/load/functions/setup.go
+++ b/integration-tests/load/functions/setup.go
@@ -2,6 +2,7 @@ package loadfunctions
import (
"crypto/ecdsa"
+ "fmt"
"math/big"
mrand "math/rand"
"os"
@@ -10,11 +11,11 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/go-resty/resty/v2"
- "github.com/pkg/errors"
"github.com/rs/zerolog/log"
+ "github.com/smartcontractkit/tdh2/go/tdh2/tdh2easy"
+
"github.com/smartcontractkit/chainlink-testing-framework/blockchain"
"github.com/smartcontractkit/chainlink-testing-framework/networks"
- "github.com/smartcontractkit/tdh2/go/tdh2/tdh2easy"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
chainlinkutils "github.com/smartcontractkit/chainlink/v2/core/utils"
@@ -91,41 +92,41 @@ func SetupLocalLoadTestEnv(cfg *PerformanceConfig) (*FunctionsTest, error) {
log.Info().Msg("Creating new subscription")
subID, err := router.CreateSubscriptionWithConsumer(loadTestClient.Address())
if err != nil {
- return nil, errors.Wrap(err, "failed to create a new subscription")
+ return nil, fmt.Errorf("failed to create a new subscription: %w", err)
}
encodedSubId, err := chainlinkutils.ABIEncode(`[{"type":"uint64"}]`, subID)
if err != nil {
- return nil, errors.Wrap(err, "failed to encode subscription ID for funding")
+ return nil, fmt.Errorf("failed to encode subscription ID for funding: %w", err)
}
_, err = lt.TransferAndCall(router.Address(), big.NewInt(0).Mul(cfg.Common.Funding.SubFunds, big.NewInt(1e18)), encodedSubId)
if err != nil {
- return nil, errors.Wrap(err, "failed to transferAndCall router, LINK funding")
+ return nil, fmt.Errorf("failed to transferAndCall router, LINK funding: %w", err)
}
cfg.Common.SubscriptionID = subID
}
pKey, pubKey, err := parseEthereumPrivateKey(os.Getenv("MUMBAI_KEYS"))
if err != nil {
- return nil, errors.Wrap(err, "failed to load Ethereum private key")
+ return nil, fmt.Errorf("failed to load Ethereum private key: %w", err)
}
tpk, err := coord.GetThresholdPublicKey()
if err != nil {
- return nil, errors.Wrap(err, "failed to get Threshold public key")
+ return nil, fmt.Errorf("failed to get Threshold public key: %w", err)
}
log.Info().Hex("ThresholdPublicKeyBytesHex", tpk).Msg("Loaded coordinator keys")
donPubKey, err := coord.GetDONPublicKey()
if err != nil {
- return nil, errors.Wrap(err, "failed to get DON public key")
+ return nil, fmt.Errorf("failed to get DON public key: %w", err)
}
log.Info().Hex("DONPublicKeyHex", donPubKey).Msg("Loaded DON key")
tdh2pk, err := ParseTDH2Key(tpk)
if err != nil {
- return nil, errors.Wrap(err, "failed to unmarshal tdh2 public key")
+ return nil, fmt.Errorf("failed to unmarshal tdh2 public key: %w", err)
}
var encryptedSecrets string
if cfg.Common.Secrets != "" {
encryptedSecrets, err = EncryptS4Secrets(pKey, tdh2pk, donPubKey, cfg.Common.Secrets)
if err != nil {
- return nil, errors.Wrap(err, "failed to generate tdh2 secrets")
+ return nil, fmt.Errorf("failed to generate tdh2 secrets: %w", err)
}
slotID, slotVersion, err := UploadS4Secrets(resty.New(), &S4SecretsCfg{
GatewayURL: cfg.Common.GatewayURL,
@@ -139,7 +140,7 @@ func SetupLocalLoadTestEnv(cfg *PerformanceConfig) (*FunctionsTest, error) {
S4SetPayload: encryptedSecrets,
})
if err != nil {
- return nil, errors.Wrap(err, "failed to upload secrets to S4")
+ return nil, fmt.Errorf("failed to upload secrets to S4: %w", err)
}
cfg.Common.SecretsSlotID = slotID
cfg.Common.SecretsVersionID = slotVersion
@@ -168,13 +169,13 @@ func SetupLocalLoadTestEnv(cfg *PerformanceConfig) (*FunctionsTest, error) {
func parseEthereumPrivateKey(pk string) (*ecdsa.PrivateKey, *ecdsa.PublicKey, error) {
pKey, err := crypto.HexToECDSA(pk)
if err != nil {
- return nil, nil, errors.Wrap(err, "failed to convert Ethereum key from hex")
+ return nil, nil, fmt.Errorf("failed to convert Ethereum key from hex: %w", err)
}
publicKey := pKey.Public()
pubKey, ok := publicKey.(*ecdsa.PublicKey)
if !ok {
- return nil, nil, errors.Wrap(err, "failed to get public key from Ethereum private key")
+ return nil, nil, fmt.Errorf("failed to get public key from Ethereum private key: %w", err)
}
log.Info().Str("Address", crypto.PubkeyToAddress(*pubKey).Hex()).Msg("Parsed private key for address")
return pKey, pubKey, nil
diff --git a/integration-tests/load/log_poller/config.toml b/integration-tests/load/log_poller/config.toml
new file mode 100644
index 00000000000..2e328001943
--- /dev/null
+++ b/integration-tests/load/log_poller/config.toml
@@ -0,0 +1,22 @@
+[general]
+generator = "looped"
+contracts = 10
+events_per_tx = 10
+
+[chaos]
+experiment_count = 10
+
+[looped]
+[looped.contract]
+execution_count = 300
+
+[looped.fuzz]
+min_emit_wait_time_ms = 100
+max_emit_wait_time_ms = 500
+
+[wasp]
+[wasp.load]
+call_timeout = "3m"
+rate_limit_unit_duration = "2s"
+LPS = 30
+duration = "1m"
\ No newline at end of file
diff --git a/integration-tests/load/log_poller/log_poller_test.go b/integration-tests/load/log_poller/log_poller_test.go
new file mode 100644
index 00000000000..04366848f0e
--- /dev/null
+++ b/integration-tests/load/log_poller/log_poller_test.go
@@ -0,0 +1,25 @@
+package logpoller
+
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/accounts/abi"
+
+ "github.com/stretchr/testify/require"
+
+ lp_helpers "github.com/smartcontractkit/chainlink/integration-tests/universal/log_poller"
+)
+
+func TestLoadTestLogPoller(t *testing.T) {
+ cfg, err := lp_helpers.ReadConfig(lp_helpers.DefaultConfigFilename)
+ require.NoError(t, err)
+
+ eventsToEmit := []abi.Event{}
+ for _, event := range lp_helpers.EmitterABI.Events {
+ eventsToEmit = append(eventsToEmit, event)
+ }
+
+ cfg.General.EventsToEmit = eventsToEmit
+
+ lp_helpers.ExecuteBasicLogPollerTest(t, cfg)
+}
diff --git a/integration-tests/load/vrfv2/cmd/dashboard.go b/integration-tests/load/vrfv2/cmd/dashboard.go
index 3035da0422f..0fb7be2b78b 100644
--- a/integration-tests/load/vrfv2/cmd/dashboard.go
+++ b/integration-tests/load/vrfv2/cmd/dashboard.go
@@ -1,6 +1,8 @@
package main
import (
+ "os"
+
"github.com/K-Phoen/grabana/dashboard"
"github.com/K-Phoen/grabana/logs"
"github.com/K-Phoen/grabana/row"
@@ -8,7 +10,6 @@ import (
"github.com/K-Phoen/grabana/timeseries"
"github.com/K-Phoen/grabana/timeseries/axis"
"github.com/smartcontractkit/wasp"
- "os"
)
func main() {
diff --git a/integration-tests/load/vrfv2/config.go b/integration-tests/load/vrfv2/config.go
index ee5f3ff80dd..0a595f753c2 100644
--- a/integration-tests/load/vrfv2/config.go
+++ b/integration-tests/load/vrfv2/config.go
@@ -1,12 +1,14 @@
package loadvrfv2
import (
+ "fmt"
+ "math/big"
+ "os"
+
"github.com/pelletier/go-toml/v2"
- "github.com/pkg/errors"
"github.com/rs/zerolog/log"
+
"github.com/smartcontractkit/chainlink/v2/core/store/models"
- "math/big"
- "os"
)
const (
@@ -63,11 +65,11 @@ func ReadConfig() (*PerformanceConfig, error) {
var cfg *PerformanceConfig
d, err := os.ReadFile(DefaultConfigFilename)
if err != nil {
- return nil, errors.Wrap(err, ErrReadPerfConfig)
+ return nil, fmt.Errorf("%s, err: %w", ErrReadPerfConfig, err)
}
err = toml.Unmarshal(d, &cfg)
if err != nil {
- return nil, errors.Wrap(err, ErrUnmarshalPerfConfig)
+ return nil, fmt.Errorf("%s, err: %w", ErrUnmarshalPerfConfig, err)
}
log.Debug().Interface("PerformanceConfig", cfg).Msg("Parsed performance config")
return cfg, nil
diff --git a/integration-tests/load/vrfv2/gun.go b/integration-tests/load/vrfv2/gun.go
index d6a8977738b..8100baaa7f7 100644
--- a/integration-tests/load/vrfv2/gun.go
+++ b/integration-tests/load/vrfv2/gun.go
@@ -1,9 +1,10 @@
package loadvrfv2
import (
+ "github.com/smartcontractkit/wasp"
+
"github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2_actions"
vrfConst "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2_actions/vrfv2_constants"
- "github.com/smartcontractkit/wasp"
)
/* SingleHashGun is a gun that constantly requests randomness for one feed */
@@ -21,7 +22,7 @@ func SingleFeedGun(contracts *vrfv2_actions.VRFV2Contracts, keyHash [32]byte) *S
}
// Call implements example gun call, assertions on response bodies should be done here
-func (m *SingleHashGun) Call(l *wasp.Generator) *wasp.CallResult {
+func (m *SingleHashGun) Call(_ *wasp.Generator) *wasp.CallResult {
err := m.contracts.LoadTestConsumer.RequestRandomness(
m.keyHash,
vrfConst.SubID,
diff --git a/integration-tests/load/vrfv2/onchain_monitoring.go b/integration-tests/load/vrfv2/onchain_monitoring.go
index b4503d27fad..879c7089e16 100644
--- a/integration-tests/load/vrfv2/onchain_monitoring.go
+++ b/integration-tests/load/vrfv2/onchain_monitoring.go
@@ -1,12 +1,14 @@
package loadvrfv2
import (
- "context"
- "github.com/rs/zerolog/log"
- "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2_actions"
- "github.com/smartcontractkit/wasp"
"testing"
"time"
+
+ "github.com/rs/zerolog/log"
+ "github.com/smartcontractkit/wasp"
+
+ "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2_actions"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
/* Monitors on-chain stats of LoadConsumer and pushes them to Loki every second */
@@ -34,7 +36,7 @@ func MonitorLoadStats(t *testing.T, vrfv2Contracts *vrfv2_actions.VRFV2Contracts
}
for {
time.Sleep(1 * time.Second)
- metrics, err := vrfv2Contracts.LoadTestConsumer.GetLoadTestMetrics(context.Background())
+ metrics, err := vrfv2Contracts.LoadTestConsumer.GetLoadTestMetrics(utils.TestContext(t))
if err != nil {
log.Error().Err(err).Msg(ErrMetrics)
}
diff --git a/integration-tests/load/vrfv2/vrfv2_test.go b/integration-tests/load/vrfv2/vrfv2_test.go
index a9fb80a72ad..44325965bd7 100644
--- a/integration-tests/load/vrfv2/vrfv2_test.go
+++ b/integration-tests/load/vrfv2/vrfv2_test.go
@@ -3,9 +3,10 @@ package loadvrfv2
import (
"testing"
- "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2_actions"
"github.com/smartcontractkit/wasp"
"github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2_actions"
)
func TestVRFV2Load(t *testing.T) {
diff --git a/integration-tests/load/vrfv2/vu.go b/integration-tests/load/vrfv2/vu.go
index df05a9168e2..7eb02ae330f 100644
--- a/integration-tests/load/vrfv2/vu.go
+++ b/integration-tests/load/vrfv2/vu.go
@@ -1,13 +1,15 @@
package loadvrfv2
import (
- "github.com/pkg/errors"
+ "fmt"
+ "time"
+
+ "github.com/smartcontractkit/wasp"
+
"github.com/smartcontractkit/chainlink-testing-framework/blockchain"
"github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2_actions"
vrfConst "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2_actions/vrfv2_constants"
"github.com/smartcontractkit/chainlink/integration-tests/client"
- "github.com/smartcontractkit/wasp"
- "time"
)
/* JobVolumeVU is a "virtual user" that creates a VRFv2 job and constantly requesting new randomness only for this job instance */
@@ -54,7 +56,7 @@ func (m *JobVolumeVU) Clone(_ *wasp.Generator) wasp.VirtualUser {
func (m *JobVolumeVU) Setup(_ *wasp.Generator) error {
jobs, err := vrfv2_actions.CreateVRFV2Jobs(m.nodes, m.contracts.Coordinator, m.bc, m.minIncomingConfirmations)
if err != nil {
- return errors.Wrap(err, "failed to create VRFv2 jobs in setup")
+ return fmt.Errorf("failed to create VRFv2 jobs in setup: %w", err)
}
m.jobs = jobs
m.keyHash = jobs[0].KeyHash
diff --git a/integration-tests/load/vrfv2plus/cmd/dashboard.go b/integration-tests/load/vrfv2plus/cmd/dashboard.go
index 9a0ba682a18..049ee9ff2e9 100644
--- a/integration-tests/load/vrfv2plus/cmd/dashboard.go
+++ b/integration-tests/load/vrfv2plus/cmd/dashboard.go
@@ -1,6 +1,8 @@
package main
import (
+ "os"
+
"github.com/K-Phoen/grabana/dashboard"
"github.com/K-Phoen/grabana/logs"
"github.com/K-Phoen/grabana/row"
@@ -8,7 +10,6 @@ import (
"github.com/K-Phoen/grabana/timeseries"
"github.com/K-Phoen/grabana/timeseries/axis"
"github.com/smartcontractkit/wasp"
- "os"
)
func main() {
diff --git a/integration-tests/load/vrfv2plus/config.go b/integration-tests/load/vrfv2plus/config.go
index 50003c82865..96dbf99c6b2 100644
--- a/integration-tests/load/vrfv2plus/config.go
+++ b/integration-tests/load/vrfv2plus/config.go
@@ -2,12 +2,14 @@ package loadvrfv2plus
import (
"encoding/base64"
+ "fmt"
+ "os"
+
"github.com/pelletier/go-toml/v2"
- "github.com/pkg/errors"
"github.com/rs/zerolog/log"
+
"github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2plus/vrfv2plus_config"
"github.com/smartcontractkit/chainlink/v2/core/store/models"
- "os"
)
const (
@@ -57,8 +59,8 @@ type Funding struct {
}
type SubFunding struct {
- SubFundsLink int64 `toml:"sub_funds_link"`
- SubFundsNative int64 `toml:"sub_funds_native"`
+ SubFundsLink float64 `toml:"sub_funds_link"`
+ SubFundsNative float64 `toml:"sub_funds_native"`
}
type Soak struct {
@@ -95,18 +97,21 @@ func ReadConfig() (*PerformanceConfig, error) {
if rawConfig == "" {
d, err = os.ReadFile(DefaultConfigFilename)
if err != nil {
- return nil, errors.Wrap(err, ErrReadPerfConfig)
+ return nil, fmt.Errorf("%s, err: %w", ErrReadPerfConfig, err)
}
} else {
d, err = base64.StdEncoding.DecodeString(rawConfig)
+ if err != nil {
+ return nil, fmt.Errorf("%s, err: %w", ErrReadPerfConfig, err)
+ }
}
err = toml.Unmarshal(d, &cfg)
if err != nil {
- return nil, errors.Wrap(err, ErrUnmarshalPerfConfig)
+ return nil, fmt.Errorf("%s, err: %w", ErrUnmarshalPerfConfig, err)
}
if cfg.Soak.RandomnessRequestCountPerRequest <= cfg.Soak.RandomnessRequestCountPerRequestDeviation {
- return nil, errors.Wrap(err, ErrDeviationShouldBeLessThanOriginal)
+ return nil, fmt.Errorf("%s, err: %w", ErrDeviationShouldBeLessThanOriginal, err)
}
log.Debug().Interface("Config", cfg).Msg("Parsed config")
diff --git a/integration-tests/load/vrfv2plus/config.toml b/integration-tests/load/vrfv2plus/config.toml
index 05e22bd51e8..e3200fafe22 100644
--- a/integration-tests/load/vrfv2plus/config.toml
+++ b/integration-tests/load/vrfv2plus/config.toml
@@ -9,8 +9,8 @@ node_funds = 10
[ExistingEnvConfig]
coordinator_address = "0x27b61f155F772b291D1d9B478BeAd37B2Ae447b0"
-consumer_address = "0x087F232165D9bA1A602f148025e5D0666953F64a"
-sub_id = "52116875585187328970776211988181422347535732407068188096422095950800466618218"
+#consumer_address = "0x087F232165D9bA1A602f148025e5D0666953F64a"
+#sub_id = "52116875585187328970776211988181422347535732407068188096422095950800466618218"
key_hash = "0x787d74caea10b2b357790d5b5247c2f63d1d91572a9846f780606e4d953677ae"
create_fund_subs_and_add_consumers = true
link_address = "0x779877A7B0D9E8603169DdbD7836e478b4624789"
@@ -31,7 +31,7 @@ rate_limit_unit_duration = "3s"
rps = 1
randomness_request_count_per_request = 3 # amount of randomness requests to make per one TX request
randomness_request_count_per_request_deviation = 2 #NOTE - deviation should be less than randomness_request_count_per_request setting
-number_of_sub_to_create = 5
+number_of_sub_to_create = 1
# approx 540 RPM - 3 tx requests per second with 4 rand requests in each tx
[Stress]
@@ -39,7 +39,7 @@ rate_limit_unit_duration = "1s"
rps = 3
randomness_request_count_per_request = 4 # amount of randomness requests to make per one TX request
randomness_request_count_per_request_deviation = 0 #NOTE - deviation should be less than randomness_request_count_per_request setting
-number_of_sub_to_create = 5
+number_of_sub_to_create = 1
# approx 150 RPM - 1 tx request with 150 rand requests in each tx every 60 seconds
[Spike]
diff --git a/integration-tests/load/vrfv2plus/gun.go b/integration-tests/load/vrfv2plus/gun.go
index c9947fa32f5..8ab278b73e9 100644
--- a/integration-tests/load/vrfv2plus/gun.go
+++ b/integration-tests/load/vrfv2plus/gun.go
@@ -1,12 +1,14 @@
package loadvrfv2plus
import (
+ "math/big"
+ "math/rand"
+
"github.com/rs/zerolog"
+ "github.com/smartcontractkit/wasp"
+
"github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2plus"
"github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2plus/vrfv2plus_config"
- "github.com/smartcontractkit/wasp"
- "math/big"
- "math/rand"
)
/* SingleHashGun is a gun that constantly requests randomness for one feed */
@@ -15,7 +17,7 @@ type SingleHashGun struct {
contracts *vrfv2plus.VRFV2_5Contracts
keyHash [32]byte
subIDs []*big.Int
- vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig
+ vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig
logger zerolog.Logger
}
@@ -23,7 +25,7 @@ func NewSingleHashGun(
contracts *vrfv2plus.VRFV2_5Contracts,
keyHash [32]byte,
subIDs []*big.Int,
- vrfv2PlusConfig *vrfv2plus_config.VRFV2PlusConfig,
+ vrfv2PlusConfig vrfv2plus_config.VRFV2PlusConfig,
logger zerolog.Logger,
) *SingleHashGun {
return &SingleHashGun{
@@ -36,7 +38,7 @@ func NewSingleHashGun(
}
// Call implements example gun call, assertions on response bodies should be done here
-func (m *SingleHashGun) Call(l *wasp.Generator) *wasp.CallResult {
+func (m *SingleHashGun) Call(_ *wasp.Generator) *wasp.CallResult {
//todo - should work with multiple consumers and consumers having different keyhashes and wallets
//randomly increase/decrease randomness request count per TX
@@ -52,6 +54,7 @@ func (m *SingleHashGun) Call(l *wasp.Generator) *wasp.CallResult {
randBool(),
randomnessRequestCountPerRequest,
m.vrfv2PlusConfig,
+ m.vrfv2PlusConfig.RandomWordsFulfilledEventTimeout,
m.logger,
)
if err != nil {
diff --git a/integration-tests/load/vrfv2plus/onchain_monitoring.go b/integration-tests/load/vrfv2plus/onchain_monitoring.go
index c56d835234e..c911546af0c 100644
--- a/integration-tests/load/vrfv2plus/onchain_monitoring.go
+++ b/integration-tests/load/vrfv2plus/onchain_monitoring.go
@@ -2,11 +2,13 @@ package loadvrfv2plus
import (
"context"
- "github.com/rs/zerolog/log"
- "github.com/smartcontractkit/chainlink/integration-tests/contracts"
- "github.com/smartcontractkit/wasp"
"testing"
"time"
+
+ "github.com/rs/zerolog/log"
+ "github.com/smartcontractkit/wasp"
+
+ "github.com/smartcontractkit/chainlink/integration-tests/contracts"
)
/* Monitors on-chain stats of LoadConsumer and pushes them to Loki every second */
diff --git a/integration-tests/load/vrfv2plus/vrfv2plus_test.go b/integration-tests/load/vrfv2plus/vrfv2plus_test.go
index e619cf78fd3..e7734fee0d5 100644
--- a/integration-tests/load/vrfv2plus/vrfv2plus_test.go
+++ b/integration-tests/load/vrfv2plus/vrfv2plus_test.go
@@ -1,20 +1,22 @@
package loadvrfv2plus
import (
- "context"
- "github.com/ethereum/go-ethereum/common"
- "github.com/kelseyhightower/envconfig"
- "github.com/rs/zerolog/log"
- "github.com/smartcontractkit/chainlink-testing-framework/logging"
- "github.com/smartcontractkit/chainlink/integration-tests/testreporters"
- "github.com/smartcontractkit/wasp"
- "github.com/stretchr/testify/require"
"math/big"
"os"
"sync"
"testing"
"time"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/kelseyhightower/envconfig"
+ "github.com/rs/zerolog/log"
+ "github.com/smartcontractkit/wasp"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink-testing-framework/logging"
+ "github.com/smartcontractkit/chainlink/integration-tests/testreporters"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
+
"github.com/smartcontractkit/chainlink/integration-tests/actions"
"github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2plus"
"github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2plus/vrfv2plus_config"
@@ -27,6 +29,7 @@ var (
vrfv2PlusContracts *vrfv2plus.VRFV2_5Contracts
vrfv2PlusData *vrfv2plus.VRFV2PlusData
subIDs []*big.Int
+ eoaWalletAddress string
labels = map[string]string{
"branch": "vrfv2Plus_healthcheck",
@@ -85,6 +88,32 @@ func TestVRFV2PlusPerformance(t *testing.T) {
WithCustomCleanup(
func() {
teardown(t, vrfv2PlusContracts.LoadTestConsumers[0], lc, updatedLabels, testReporter, testType, vrfv2PlusConfig)
+ if env.EVMClient.NetworkSimulated() {
+ l.Info().
+ Str("Network Name", env.EVMClient.GetNetworkName()).
+ Msg("Network is a simulated network. Skipping fund return for Coordinator Subscriptions.")
+ } else {
+ //cancel subs and return funds to sub owner
+ for _, subID := range subIDs {
+ l.Info().
+ Str("Returning funds from SubID", subID.String()).
+ Str("Returning funds to", eoaWalletAddress).
+ Msg("Canceling subscription and returning funds to subscription owner")
+ pendingRequestsExist, err := vrfv2PlusContracts.Coordinator.PendingRequestsExist(utils.TestContext(t), subID)
+ if err != nil {
+ l.Error().Err(err).Msg("Error checking if pending requests exist")
+ }
+ if !pendingRequestsExist {
+ _, err := vrfv2PlusContracts.Coordinator.CancelSubscription(subID, common.HexToAddress(eoaWalletAddress))
+ if err != nil {
+ l.Error().Err(err).Msg("Error canceling subscription")
+ }
+ } else {
+ l.Error().Str("Sub ID", subID.String()).Msg("Pending requests exist for subscription, cannot cancel subscription and return funds")
+ }
+
+ }
+ }
}).
Build()
@@ -99,7 +128,14 @@ func TestVRFV2PlusPerformance(t *testing.T) {
require.NoError(t, err)
consumers, err = vrfv2plus.DeployVRFV2PlusConsumers(env.ContractDeployer, coordinator, 1)
require.NoError(t, err)
- subIDs, err = vrfv2plus.CreateFundSubsAndAddConsumers(env, &vrfv2PlusConfig, linkToken, coordinator, consumers, vrfv2PlusConfig.NumberOfSubToCreate)
+ subIDs, err = vrfv2plus.CreateFundSubsAndAddConsumers(
+ env,
+ vrfv2PlusConfig,
+ linkToken,
+ coordinator,
+ consumers,
+ vrfv2PlusConfig.NumberOfSubToCreate,
+ )
require.NoError(t, err)
} else {
consumer, err := env.ContractLoader.LoadVRFv2PlusLoadTestConsumer(vrfv2PlusConfig.ConsumerAddress)
@@ -141,6 +177,25 @@ func TestVRFV2PlusPerformance(t *testing.T) {
WithCustomCleanup(
func() {
teardown(t, vrfv2PlusContracts.LoadTestConsumers[0], lc, updatedLabels, testReporter, testType, vrfv2PlusConfig)
+
+ if env.EVMClient.NetworkSimulated() {
+ l.Info().
+ Str("Network Name", env.EVMClient.GetNetworkName()).
+ Msg("Network is a simulated network. Skipping fund return for Coordinator Subscriptions.")
+ } else {
+ for _, subID := range subIDs {
+ l.Info().
+ Str("Returning funds from SubID", subID.String()).
+ Str("Returning funds to", eoaWalletAddress).
+ Msg("Canceling subscription and returning funds to subscription owner")
+ _, err := vrfv2PlusContracts.Coordinator.CancelSubscription(subID, common.HexToAddress(eoaWalletAddress))
+ if err != nil {
+ l.Error().Err(err).Msg("Error canceling subscription")
+ }
+ }
+ //err = vrfv2plus.ReturnFundsForFulfilledRequests(env.EVMClient, vrfv2PlusContracts.Coordinator, l)
+ //l.Error().Err(err).Msg("Error returning funds for fulfilled requests")
+ }
if err := env.Cleanup(); err != nil {
l.Error().Err(err).Msg("Error cleaning up test environment")
}
@@ -160,19 +215,22 @@ func TestVRFV2PlusPerformance(t *testing.T) {
vrfv2PlusContracts, subIDs, vrfv2PlusData, err = vrfv2plus.SetupVRFV2_5Environment(
env,
- &vrfv2PlusConfig,
+ vrfv2PlusConfig,
linkToken,
mockETHLinkFeed,
+ //register proving key against EOA address in order to return funds to this address
+ env.EVMClient.GetDefaultWallet().Address(),
1,
vrfv2PlusConfig.NumberOfSubToCreate,
l,
)
require.NoError(t, err, "error setting up VRF v2_5 env")
}
+ eoaWalletAddress = env.EVMClient.GetDefaultWallet().Address()
l.Debug().Int("Number of Subs", len(subIDs)).Msg("Subs involved in the test")
for _, subID := range subIDs {
- subscription, err := vrfv2PlusContracts.Coordinator.GetSubscription(context.Background(), subID)
+ subscription, err := vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subID)
require.NoError(t, err, "error getting subscription information for subscription %s", subID.String())
vrfv2plus.LogSubDetails(l, subscription, subID, vrfv2PlusContracts.Coordinator)
}
@@ -186,7 +244,7 @@ func TestVRFV2PlusPerformance(t *testing.T) {
vrfv2PlusContracts,
vrfv2PlusData.KeyHash,
subIDs,
- &vrfv2PlusConfig,
+ vrfv2PlusConfig,
l,
),
Labels: labels,
diff --git a/integration-tests/migration/upgrade_version_test.go b/integration-tests/migration/upgrade_version_test.go
index bf97f43d058..c851f36ec62 100644
--- a/integration-tests/migration/upgrade_version_test.go
+++ b/integration-tests/migration/upgrade_version_test.go
@@ -1,13 +1,12 @@
package migration
import (
+ "os"
"testing"
- "github.com/smartcontractkit/chainlink-testing-framework/utils"
"github.com/stretchr/testify/require"
- "os"
-
+ "github.com/smartcontractkit/chainlink-testing-framework/utils"
"github.com/smartcontractkit/chainlink/integration-tests/docker/test_env"
)
diff --git a/integration-tests/performance/cron_test.go b/integration-tests/performance/cron_test.go
index e700a66e1f8..7e90d29221d 100644
--- a/integration-tests/performance/cron_test.go
+++ b/integration-tests/performance/cron_test.go
@@ -20,7 +20,6 @@ import (
"github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver"
mockservercfg "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver-cfg"
"github.com/smartcontractkit/chainlink-testing-framework/logging"
- "github.com/smartcontractkit/chainlink-testing-framework/utils"
"github.com/smartcontractkit/chainlink-testing-framework/networks"
@@ -44,7 +43,7 @@ func CleanupPerformanceTest(
if chainClient != nil {
chainClient.GasStats().PrintStats()
}
- err := actions.TeardownSuite(t, testEnvironment, utils.ProjectRoot, chainlinkNodes, &testReporter, zapcore.PanicLevel, chainClient)
+ err := actions.TeardownSuite(t, testEnvironment, chainlinkNodes, &testReporter, zapcore.PanicLevel, chainClient)
require.NoError(t, err, "Error tearing down environment")
}
diff --git a/integration-tests/performance/directrequest_test.go b/integration-tests/performance/directrequest_test.go
index d229f9fb3ee..1a3f1d2a010 100644
--- a/integration-tests/performance/directrequest_test.go
+++ b/integration-tests/performance/directrequest_test.go
@@ -1,7 +1,6 @@
package performance
import (
- "context"
"fmt"
"math/big"
"strings"
@@ -25,6 +24,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
"github.com/smartcontractkit/chainlink/integration-tests/testsetups"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
"github.com/google/uuid"
)
@@ -108,7 +108,7 @@ func TestDirectRequestPerformance(t *testing.T) {
gom := gomega.NewGomegaWithT(t)
gom.Eventually(func(g gomega.Gomega) {
- d, err := consumer.Data(context.Background())
+ d, err := consumer.Data(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Getting data from consumer contract shouldn't fail")
g.Expect(d).ShouldNot(gomega.BeNil(), "Expected the initial on chain data to be nil")
l.Debug().Int64("Data", d.Int64()).Msg("Found on chain")
diff --git a/integration-tests/performance/flux_test.go b/integration-tests/performance/flux_test.go
index be536450a76..18b13ab9076 100644
--- a/integration-tests/performance/flux_test.go
+++ b/integration-tests/performance/flux_test.go
@@ -1,7 +1,6 @@
package performance
import (
- "context"
"fmt"
"math/big"
"strings"
@@ -26,6 +25,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
"github.com/smartcontractkit/chainlink/integration-tests/testsetups"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
func TestFluxPerformance(t *testing.T) {
@@ -83,7 +83,7 @@ func TestFluxPerformance(t *testing.T) {
require.NoError(t, err, "Setting oracle options in the Flux Aggregator contract shouldn't fail")
err = chainClient.WaitForEvents()
require.NoError(t, err, "Waiting for event subscriptions in nodes shouldn't fail")
- oracles, err := fluxInstance.GetOracles(context.Background())
+ oracles, err := fluxInstance.GetOracles(utils.TestContext(t))
require.NoError(t, err, "Getting oracle details from the Flux aggregator contract shouldn't fail")
l.Info().Str("Oracles", strings.Join(oracles, ",")).Msg("Oracles set")
@@ -120,7 +120,7 @@ func TestFluxPerformance(t *testing.T) {
chainClient.AddHeaderEventSubscription(fluxInstance.Address(), fluxRound)
err = chainClient.WaitForEvents()
require.NoError(t, err, "Waiting for event subscriptions in nodes shouldn't fail")
- data, err := fluxInstance.GetContractData(context.Background())
+ data, err := fluxInstance.GetContractData(utils.TestContext(t))
require.NoError(t, err, "Getting contract data from flux aggregator contract shouldn't fail")
l.Info().Interface("Data", data).Msg("Round data")
require.Equal(t, int64(1e5), data.LatestRoundData.Answer.Int64(),
@@ -140,7 +140,7 @@ func TestFluxPerformance(t *testing.T) {
require.NoError(t, err, "Setting value path in mock server shouldn't fail")
err = chainClient.WaitForEvents()
require.NoError(t, err, "Waiting for event subscriptions in nodes shouldn't fail")
- data, err = fluxInstance.GetContractData(context.Background())
+ data, err = fluxInstance.GetContractData(utils.TestContext(t))
require.NoError(t, err, "Getting contract data from flux aggregator contract shouldn't fail")
require.Equal(t, int64(1e10), data.LatestRoundData.Answer.Int64(),
"Expected latest round answer to be %d, but found %d", int64(1e10), data.LatestRoundData.Answer.Int64())
@@ -153,7 +153,7 @@ func TestFluxPerformance(t *testing.T) {
l.Info().Interface("data", data).Msg("Round data")
for _, oracleAddr := range nodeAddresses {
- payment, _ := fluxInstance.WithdrawablePayment(context.Background(), oracleAddr)
+ payment, _ := fluxInstance.WithdrawablePayment(utils.TestContext(t), oracleAddr)
require.Equal(t, int64(2), payment.Int64(),
"Expected flux aggregator contract's withdrawable payment to be %d, but found %d", int64(2), payment.Int64())
}
diff --git a/integration-tests/performance/keeper_test.go b/integration-tests/performance/keeper_test.go
index cd9818f99d3..8e273a96f69 100644
--- a/integration-tests/performance/keeper_test.go
+++ b/integration-tests/performance/keeper_test.go
@@ -2,7 +2,6 @@ package performance
//revive:disable:dot-imports
import (
- "context"
"fmt"
"math/big"
"strings"
@@ -26,6 +25,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
"github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum"
"github.com/smartcontractkit/chainlink/integration-tests/testsetups"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
var keeperDefaultRegistryConfig = contracts.KeeperRegistrySettings{
@@ -74,7 +74,7 @@ func TestKeeperPerformance(t *testing.T) {
gom.Eventually(func(g gomega.Gomega) {
// Check if the upkeeps are performing multiple times by analysing their counters and checking they are greater than 10
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := consumers[i].Counter(context.Background())
+ counter, err := consumers[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i)
g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(10)),
"Expected consumer counter to be greater than 10, but got %d", counter.Int64())
@@ -84,7 +84,7 @@ func TestKeeperPerformance(t *testing.T) {
// Cancel all the registered upkeeps via the registry
for i := 0; i < len(upkeepIDs); i++ {
- err := registry.CancelUpkeep(upkeepIDs[i])
+ err = registry.CancelUpkeep(upkeepIDs[i])
require.NoError(t, err, "Could not cancel upkeep at index %d", i)
}
@@ -95,7 +95,7 @@ func TestKeeperPerformance(t *testing.T) {
for i := 0; i < len(upkeepIDs); i++ {
// Obtain the amount of times the upkeep has been executed so far
- countersAfterCancellation[i], err = consumers[i].Counter(context.Background())
+ countersAfterCancellation[i], err = consumers[i].Counter(utils.TestContext(t))
require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i)
l.Info().Int("Index", i).Int64("Upkeeps Performed", countersAfterCancellation[i].Int64()).Msg("Cancelled Upkeep")
}
@@ -103,7 +103,7 @@ func TestKeeperPerformance(t *testing.T) {
gom.Consistently(func(g gomega.Gomega) {
for i := 0; i < len(upkeepIDs); i++ {
// Expect the counter to remain constant because the upkeep was cancelled, so it shouldn't increase anymore
- latestCounter, err := consumers[i].Counter(context.Background())
+ latestCounter, err := consumers[i].Counter(utils.TestContext(t))
require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i)
g.Expect(latestCounter.Int64()).Should(gomega.Equal(countersAfterCancellation[i].Int64()),
"Expected consumer counter to remain constant at %d, but got %d",
diff --git a/integration-tests/performance/ocr_test.go b/integration-tests/performance/ocr_test.go
index e81cc91cf7f..47879cebb81 100644
--- a/integration-tests/performance/ocr_test.go
+++ b/integration-tests/performance/ocr_test.go
@@ -1,7 +1,6 @@
package performance
import (
- "context"
"fmt"
"math/big"
"strings"
@@ -25,6 +24,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
"github.com/smartcontractkit/chainlink/integration-tests/testsetups"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
func TestOCRBasic(t *testing.T) {
@@ -53,7 +53,7 @@ func TestOCRBasic(t *testing.T) {
err = actions.FundChainlinkNodes(chainlinkNodes, chainClient, big.NewFloat(.05))
require.NoError(t, err, "Error funding Chainlink nodes")
- ocrInstances, err := actions.DeployOCRContracts(1, linkTokenContract, contractDeployer, bootstrapNode, workerNodes, chainClient)
+ ocrInstances, err := actions.DeployOCRContracts(1, linkTokenContract, contractDeployer, workerNodes, chainClient)
require.NoError(t, err)
err = chainClient.WaitForEvents()
require.NoError(t, err, "Error waiting for events")
@@ -64,7 +64,7 @@ func TestOCRBasic(t *testing.T) {
err = actions.StartNewRound(1, ocrInstances, chainClient, l)
require.NoError(t, err)
- answer, err := ocrInstances[0].GetLatestAnswer(context.Background())
+ answer, err := ocrInstances[0].GetLatestAnswer(utils.TestContext(t))
require.NoError(t, err, "Getting latest answer from OCR contract shouldn't fail")
require.Equal(t, int64(5), answer.Int64(), "Expected latest answer from OCR contract to be 5 but got %d", answer.Int64())
@@ -73,7 +73,7 @@ func TestOCRBasic(t *testing.T) {
err = actions.StartNewRound(2, ocrInstances, chainClient, l)
require.NoError(t, err)
- answer, err = ocrInstances[0].GetLatestAnswer(context.Background())
+ answer, err = ocrInstances[0].GetLatestAnswer(utils.TestContext(t))
require.NoError(t, err, "Error getting latest OCR answer")
require.Equal(t, int64(10), answer.Int64(), "Expected latest answer from OCR contract to be 10 but got %d", answer.Int64())
}
diff --git a/integration-tests/performance/vrf_test.go b/integration-tests/performance/vrf_test.go
index eeaceffaaf5..7a38a454955 100644
--- a/integration-tests/performance/vrf_test.go
+++ b/integration-tests/performance/vrf_test.go
@@ -1,7 +1,6 @@
package performance
import (
- "context"
"fmt"
"math/big"
"strings"
@@ -23,6 +22,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
"github.com/smartcontractkit/chainlink/integration-tests/testsetups"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
func TestVRFBasic(t *testing.T) {
@@ -97,7 +97,7 @@ func TestVRFBasic(t *testing.T) {
encodedProvingKeys := make([][2]*big.Int, 0)
encodedProvingKeys = append(encodedProvingKeys, provingKey)
- requestHash, err := coordinator.HashOfKey(context.Background(), encodedProvingKeys[0])
+ requestHash, err := coordinator.HashOfKey(utils.TestContext(t), encodedProvingKeys[0])
require.NoError(t, err, "Getting Hash of encoded proving keys shouldn't fail")
err = consumer.RequestRandomness(requestHash, big.NewInt(1))
require.NoError(t, err, "Requesting randomness shouldn't fail")
@@ -108,7 +108,7 @@ func TestVRFBasic(t *testing.T) {
jobRuns, err := chainlinkNodes[0].MustReadRunsByJob(job.Data.ID)
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Job execution shouldn't fail")
- out, err := consumer.RandomnessOutput(context.Background())
+ out, err := consumer.RandomnessOutput(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Getting the randomness output of the consumer shouldn't fail")
// Checks that the job has actually run
g.Expect(len(jobRuns.Data)).Should(gomega.BeNumerically(">=", 1),
diff --git a/integration-tests/reorg/automation_reorg_test.go b/integration-tests/reorg/automation_reorg_test.go
index 697ae28ce3b..58cd147201e 100644
--- a/integration-tests/reorg/automation_reorg_test.go
+++ b/integration-tests/reorg/automation_reorg_test.go
@@ -2,7 +2,6 @@ package reorg
//revive:disable:dot-imports
import (
- "context"
"fmt"
"math/big"
"testing"
@@ -19,12 +18,12 @@ import (
"github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/reorg"
"github.com/smartcontractkit/chainlink-testing-framework/logging"
"github.com/smartcontractkit/chainlink-testing-framework/networks"
- "github.com/smartcontractkit/chainlink-testing-framework/utils"
"github.com/smartcontractkit/chainlink/integration-tests/actions"
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
"github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum"
+ it_utils "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
var (
@@ -133,6 +132,8 @@ func TestAutomationReorg(t *testing.T) {
}
for name, registryVersion := range registryVersions {
+ name := name
+ registryVersion := registryVersion
t.Run(name, func(t *testing.T) {
t.Parallel()
network := networks.MustGetSelectedNetworksFromEnv()[0]
@@ -167,7 +168,7 @@ func TestAutomationReorg(t *testing.T) {
// Register cleanup for any test
t.Cleanup(func() {
- err := actions.TeardownSuite(t, testEnvironment, utils.ProjectRoot, chainlinkNodes, nil, zapcore.PanicLevel, chainClient)
+ err := actions.TeardownSuite(t, testEnvironment, chainlinkNodes, nil, zapcore.PanicLevel, chainClient)
require.NoError(t, err, "Error tearing down environment")
})
@@ -209,7 +210,7 @@ func TestAutomationReorg(t *testing.T) {
gom.Eventually(func(g gomega.Gomega) {
// Check if the upkeeps are performing multiple times by analyzing their counters and checking they are greater than 5
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := consumers[i].Counter(context.Background())
+ counter, err := consumers[i].Counter(it_utils.TestContext(t))
require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i)
expect := 5
l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep ID", i).Msg("Number of upkeeps performed")
@@ -240,7 +241,7 @@ func TestAutomationReorg(t *testing.T) {
gom.Eventually(func(g gomega.Gomega) {
// Check if the upkeeps are performing multiple times by analyzing their counters and checking they reach 10
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := consumers[i].Counter(context.Background())
+ counter, err := consumers[i].Counter(it_utils.TestContext(t))
require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i)
expect := 10
l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep ID", i).Msg("Number of upkeeps performed")
@@ -250,13 +251,14 @@ func TestAutomationReorg(t *testing.T) {
}, "5m", "1s").Should(gomega.Succeed())
l.Info().Msg("Upkeep performed during unstable chain, waiting for reorg to finish")
- rc.WaitDepthReached()
+ err = rc.WaitDepthReached()
+ require.NoError(t, err)
l.Info().Msg("Reorg finished, chain should be stable now. Expecting upkeeps to keep getting performed")
gom.Eventually(func(g gomega.Gomega) {
// Check if the upkeeps are performing multiple times by analyzing their counters and checking they reach 20
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := consumers[i].Counter(context.Background())
+ counter, err := consumers[i].Counter(it_utils.TestContext(t))
require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i)
expect := 20
l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep ID", i).Msg("Number of upkeeps performed")
diff --git a/integration-tests/reorg/log_poller_maybe_reorg_test.go b/integration-tests/reorg/log_poller_maybe_reorg_test.go
new file mode 100644
index 00000000000..d319e39aa20
--- /dev/null
+++ b/integration-tests/reorg/log_poller_maybe_reorg_test.go
@@ -0,0 +1,41 @@
+package reorg
+
+import (
+	"testing"
+
+	"github.com/ethereum/go-ethereum/accounts/abi"
+
+	logpoller "github.com/smartcontractkit/chainlink/integration-tests/universal/log_poller"
+)
+
+// TestLogPollerFromEnv runs the CI log-poller test with a looped generator
+// configuration (2 contracts, 100 events/tx, finality tag enabled), emitting
+// every event defined in the emitter ABI. Base settings may be overridden via
+// environment variables through cfg.OverrideFromEnv.
+func TestLogPollerFromEnv(t *testing.T) {
+	cfg := logpoller.Config{
+		General: &logpoller.General{
+			Generator:      logpoller.GeneratorType_Looped,
+			Contracts:      2,
+			EventsPerTx:    100,
+			UseFinalityTag: true,
+		},
+		LoopedConfig: &logpoller.LoopedConfig{
+			ContractConfig: logpoller.ContractConfig{
+				ExecutionCount: 100,
+			},
+			FuzzConfig: logpoller.FuzzConfig{
+				MinEmitWaitTimeMs: 400,
+				MaxEmitWaitTimeMs: 600,
+			},
+		},
+	}
+
+	// Emit every event type the emitter contract ABI declares.
+	eventsToEmit := []abi.Event{}
+	for _, event := range logpoller.EmitterABI.Events {
+		eventsToEmit = append(eventsToEmit, event)
+	}
+	cfg.General.EventsToEmit = eventsToEmit
+
+	// t.Fatalf replaces the prior t.Errorf + t.FailNow pair (same semantics).
+	if err := cfg.OverrideFromEnv(); err != nil {
+		t.Fatalf("failed to override config from env: %v", err)
+	}
+
+	logpoller.ExecuteCILogPollerTest(t, &cfg)
+}
diff --git a/integration-tests/reorg/reorg_confirmer.go b/integration-tests/reorg/reorg_confirmer.go
index be535d2a6da..2193131680a 100644
--- a/integration-tests/reorg/reorg_confirmer.go
+++ b/integration-tests/reorg/reorg_confirmer.go
@@ -2,20 +2,21 @@ package reorg
import (
"context"
+ "fmt"
"math/big"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
- "github.com/pkg/errors"
"github.com/rs/zerolog/log"
"github.com/smartcontractkit/chainlink-testing-framework/blockchain"
"github.com/smartcontractkit/chainlink-testing-framework/k8s/chaos"
"github.com/smartcontractkit/chainlink-testing-framework/k8s/environment"
- a "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/alias"
"github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/reorg"
+
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
// The steps are:
@@ -69,7 +70,7 @@ type ReorgController struct {
// NewReorgController creates a type that can create reorg chaos and confirm reorg has happened
func NewReorgController(cfg *ReorgConfig) (*ReorgController, error) {
if len(cfg.Network.GetClients()) == 1 {
- return nil, errors.New("need at least 3 nodes to re-org")
+ return nil, fmt.Errorf("need at least 3 nodes to re-org")
}
ctx, ctxCancel := context.WithTimeout(context.Background(), cfg.Timeout)
rc := &ReorgController{
@@ -164,7 +165,7 @@ func (rc *ReorgController) VerifyReorgComplete() error {
}
}
if rc.currentVerifiedBlocks+1 < rc.ReorgDepth {
- return errors.New("Reorg depth has not met")
+ return fmt.Errorf("Reorg depth has not met")
}
return nil
}
@@ -216,7 +217,7 @@ func (rc *ReorgController) Wait() error {
if rc.complete {
return nil
}
- return errors.New("timeout waiting for reorg to complete")
+ return fmt.Errorf("timeout waiting for reorg to complete")
}
// forkNetwork stomp the network between target reorged node and the rest
@@ -231,8 +232,8 @@ func (rc *ReorgController) forkNetwork(header blockchain.NodeHeader) error {
rc.cfg.Env.Cfg.Namespace,
&chaos.Props{
DurationStr: "999h",
- FromLabels: &map[string]*string{"app": a.Str(reorg.TXNodesAppLabel)},
- ToLabels: &map[string]*string{"app": a.Str(reorg.MinerNodesAppLabel)},
+ FromLabels: &map[string]*string{"app": utils.Ptr(reorg.TXNodesAppLabel)},
+ ToLabels: &map[string]*string{"app": utils.Ptr(reorg.MinerNodesAppLabel)},
},
))
rc.chaosExperimentName = expName
diff --git a/integration-tests/reorg/reorg_test.go b/integration-tests/reorg/reorg_test.go
index f92becfa50a..d5fefdbc562 100644
--- a/integration-tests/reorg/reorg_test.go
+++ b/integration-tests/reorg/reorg_test.go
@@ -1,7 +1,6 @@
package reorg
import (
- "context"
"fmt"
"math/big"
"os"
@@ -22,15 +21,16 @@ import (
mockservercfg "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver-cfg"
"github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/reorg"
"github.com/smartcontractkit/chainlink-testing-framework/logging"
- "github.com/smartcontractkit/chainlink-testing-framework/utils"
"github.com/onsi/gomega"
"github.com/rs/zerolog/log"
+
"github.com/smartcontractkit/chainlink-testing-framework/networks"
"github.com/smartcontractkit/chainlink/integration-tests/actions"
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
+ it_utils "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
const (
@@ -85,7 +85,7 @@ func CleanupReorgTest(
if chainClient != nil {
chainClient.GasStats().PrintStats()
}
- err := actions.TeardownSuite(t, testEnvironment, utils.ProjectRoot, chainlinkNodes, nil, zapcore.PanicLevel, chainClient)
+ err := actions.TeardownSuite(t, testEnvironment, chainlinkNodes, nil, zapcore.PanicLevel, chainClient)
require.NoError(t, err, "Error tearing down environment")
}
@@ -221,7 +221,7 @@ func TestDirectRequestReorg(t *testing.T) {
gom := gomega.NewGomegaWithT(t)
gom.Eventually(func(g gomega.Gomega) {
- d, err := consumer.Data(context.Background())
+ d, err := consumer.Data(it_utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Getting data from consumer contract shouldn't fail")
g.Expect(d).ShouldNot(gomega.BeNil(), "Expected the initial on chain data to be nil")
log.Debug().Int64("Data", d.Int64()).Msg("Found on chain")
diff --git a/integration-tests/runner_helpers.go b/integration-tests/runner_helpers.go
index 43268a703ac..def2ebdc1d4 100644
--- a/integration-tests/runner_helpers.go
+++ b/integration-tests/runner_helpers.go
@@ -122,7 +122,7 @@ func collectBranchesAndTags(results chan []string, errChan chan error) {
go func() {
stdOut, stdErr, err := gh.Exec("api", fmt.Sprintf("repos/%s/branches", chainlinkRepo), "-q", ".[][\"name\"]", "--paginate")
if err != nil {
- errChan <- fmt.Errorf("%v: %s", err, stdErr.String())
+ errChan <- fmt.Errorf("%w: %s", err, stdErr.String())
}
branches := strings.Split(stdOut.String(), "\n")
cleanBranches := []string{}
@@ -139,7 +139,7 @@ func collectBranchesAndTags(results chan []string, errChan chan error) {
go func() {
stdOut, stdErr, err := gh.Exec("api", fmt.Sprintf("repos/%s/tags", chainlinkRepo), "-q", ".[][\"name\"]", "--paginate")
if err != nil {
- errChan <- fmt.Errorf("%v: %s", err, stdErr.String())
+ errChan <- fmt.Errorf("%w: %s", err, stdErr.String())
}
tags := strings.Split(stdOut.String(), "\n")
cleanTags := []string{}
diff --git a/integration-tests/smoke/automation_test.go b/integration-tests/smoke/automation_test.go
index 17373e6a95f..1a093a88159 100644
--- a/integration-tests/smoke/automation_test.go
+++ b/integration-tests/smoke/automation_test.go
@@ -1,7 +1,6 @@
package smoke
import (
- "context"
"encoding/json"
"fmt"
"math/big"
@@ -11,9 +10,8 @@ import (
"testing"
"time"
- "github.com/kelseyhightower/envconfig"
-
"github.com/ethereum/go-ethereum/common"
+ "github.com/kelseyhightower/envconfig"
"github.com/onsi/gomega"
"github.com/stretchr/testify/require"
@@ -33,7 +31,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum"
"github.com/smartcontractkit/chainlink/integration-tests/docker/test_env"
"github.com/smartcontractkit/chainlink/integration-tests/types/config/node"
- it_utils "github.com/smartcontractkit/chainlink/integration-tests/utils"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
var utilsABI = cltypes.MustGetABI(automation_utils_2_1.AutomationUtilsABI)
@@ -111,7 +109,6 @@ func SetupAutomationBasic(t *testing.T, nodeUpgrade bool) {
upgradeImage string
upgradeVersion string
err error
- testName = "basic-upkeep"
)
if nodeUpgrade {
upgradeImage = os.Getenv("UPGRADE_IMAGE")
@@ -119,7 +116,6 @@ func SetupAutomationBasic(t *testing.T, nodeUpgrade bool) {
if len(upgradeImage) == 0 || len(upgradeVersion) == 0 {
t.Fatal("UPGRADE_IMAGE and UPGRADE_VERSION must be set to upgrade nodes")
}
- testName = "node-upgrade"
}
// Use the name to determine if this is a log trigger or mercury
@@ -129,7 +125,7 @@ func SetupAutomationBasic(t *testing.T, nodeUpgrade bool) {
isMercury := isMercuryV02 || isMercuryV03
chainClient, _, contractDeployer, linkToken, registry, registrar, testEnv := setupAutomationTestDocker(
- t, testName, registryVersion, defaultOCRRegistryConfig, nodeUpgrade, isMercuryV02, isMercuryV03,
+ t, registryVersion, defaultOCRRegistryConfig, isMercuryV02, isMercuryV03,
)
consumers, upkeepIDs := actions.DeployConsumers(
@@ -173,7 +169,7 @@ func SetupAutomationBasic(t *testing.T, nodeUpgrade bool) {
gom.Eventually(func(g gomega.Gomega) {
// Check if the upkeeps are performing multiple times by analyzing their counters
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := consumers[i].Counter(context.Background())
+ counter, err := consumers[i].Counter(utils.TestContext(t))
require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i)
expect := 5
l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep Index", i).Msg("Number of upkeeps performed")
@@ -188,13 +184,14 @@ func SetupAutomationBasic(t *testing.T, nodeUpgrade bool) {
expect := 5
// Upgrade the nodes one at a time and check that the upkeeps are still being performed
for i := 0; i < 5; i++ {
- actions.UpgradeChainlinkNodeVersionsLocal(upgradeImage, upgradeVersion, testEnv.ClCluster.Nodes[i])
+ err = actions.UpgradeChainlinkNodeVersionsLocal(upgradeImage, upgradeVersion, testEnv.ClCluster.Nodes[i])
+ require.NoError(t, err, "Error when upgrading node %d", i)
time.Sleep(time.Second * 10)
expect = expect + 5
gom.Eventually(func(g gomega.Gomega) {
// Check if the upkeeps are performing multiple times by analyzing their counters and checking they are increasing by 5 in each step within 5 minutes
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := consumers[i].Counter(context.Background())
+ counter, err := consumers[i].Counter(utils.TestContext(t))
require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i)
l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep ID", i).Msg("Number of upkeeps performed")
g.Expect(counter.Int64()).Should(gomega.BeNumerically(">=", int64(expect)),
@@ -217,7 +214,7 @@ func SetupAutomationBasic(t *testing.T, nodeUpgrade bool) {
for i := 0; i < len(upkeepIDs); i++ {
// Obtain the amount of times the upkeep has been executed so far
- countersAfterCancellation[i], err = consumers[i].Counter(context.Background())
+ countersAfterCancellation[i], err = consumers[i].Counter(utils.TestContext(t))
require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i)
l.Info().Int64("Upkeep Count", countersAfterCancellation[i].Int64()).Int("Upkeep Index", i).Msg("Cancelled upkeep")
}
@@ -226,7 +223,7 @@ func SetupAutomationBasic(t *testing.T, nodeUpgrade bool) {
gom.Consistently(func(g gomega.Gomega) {
for i := 0; i < len(upkeepIDs); i++ {
// Expect the counter to remain constant (At most increase by 1 to account for stale performs) because the upkeep was cancelled
- latestCounter, err := consumers[i].Counter(context.Background())
+ latestCounter, err := consumers[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i)
g.Expect(latestCounter.Int64()).Should(gomega.BeNumerically("<=", countersAfterCancellation[i].Int64()+1),
"Expected consumer counter to remain less than or equal to %d, but got %d",
@@ -242,7 +239,7 @@ func TestSetUpkeepTriggerConfig(t *testing.T) {
l := logging.GetTestLogger(t)
chainClient, _, contractDeployer, linkToken, registry, registrar, _ := setupAutomationTestDocker(
- t, "set-trigger-config", ethereum.RegistryVersion_2_1, defaultOCRRegistryConfig, false, false, false,
+ t, ethereum.RegistryVersion_2_1, defaultOCRRegistryConfig, false, false,
)
consumers, upkeepIDs := actions.DeployConsumers(
@@ -272,7 +269,7 @@ func TestSetUpkeepTriggerConfig(t *testing.T) {
gom.Eventually(func(g gomega.Gomega) {
// Check if the upkeeps are performing multiple times by analyzing their counters
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := consumers[i].Counter(context.Background())
+ counter, err := consumers[i].Counter(utils.TestContext(t))
require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i)
expect := 5
l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep Index", i).Msg("Number of upkeeps performed")
@@ -329,7 +326,7 @@ func TestSetUpkeepTriggerConfig(t *testing.T) {
time.Sleep(10 * time.Second)
for i := 0; i < len(upkeepIDs); i++ {
// Obtain the amount of times the upkeep has been executed so far
- countersAfterSetNoMatch[i], err = consumers[i].Counter(context.Background())
+ countersAfterSetNoMatch[i], err = consumers[i].Counter(utils.TestContext(t))
require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i)
l.Info().Int64("Upkeep Count", countersAfterSetNoMatch[i].Int64()).Int("Upkeep Index", i).Msg("Upkeep")
}
@@ -339,7 +336,7 @@ func TestSetUpkeepTriggerConfig(t *testing.T) {
for i := 0; i < len(upkeepIDs); i++ {
// Expect the counter to remain constant (At most increase by 2 to account for stale performs) because the upkeep trigger config is not met
bufferCount := int64(2)
- latestCounter, err := consumers[i].Counter(context.Background())
+ latestCounter, err := consumers[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i)
g.Expect(latestCounter.Int64()).Should(gomega.BeNumerically("<=", countersAfterSetNoMatch[i].Int64()+bufferCount),
"Expected consumer counter to remain less than or equal to %d, but got %d",
@@ -375,7 +372,7 @@ func TestSetUpkeepTriggerConfig(t *testing.T) {
for i := 0; i < len(upkeepIDs); i++ {
// Obtain the amount of times the upkeep has been executed so far
- countersAfterSetMatch[i], err = consumers[i].Counter(context.Background())
+ countersAfterSetMatch[i], err = consumers[i].Counter(utils.TestContext(t))
require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i)
l.Info().Int64("Upkeep Count", countersAfterSetMatch[i].Int64()).Int("Upkeep Index", i).Msg("Upkeep")
}
@@ -394,7 +391,7 @@ func TestSetUpkeepTriggerConfig(t *testing.T) {
gom.Eventually(func(g gomega.Gomega) {
// Check if the upkeeps are performing multiple times by analyzing their counters
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := consumers[i].Counter(context.Background())
+ counter, err := consumers[i].Counter(utils.TestContext(t))
require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i)
expect := int64(5)
l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep Index", i).Msg("Number of upkeeps performed")
@@ -417,7 +414,7 @@ func TestAutomationAddFunds(t *testing.T) {
t.Run(name, func(t *testing.T) {
t.Parallel()
chainClient, _, contractDeployer, linkToken, registry, registrar, _ := setupAutomationTestDocker(
- t, "add-funds", registryVersion, defaultOCRRegistryConfig, false, false, false,
+ t, registryVersion, defaultOCRRegistryConfig, false, false,
)
consumers, upkeepIDs := actions.DeployConsumers(t, registry, registrar, linkToken, contractDeployer, chainClient, defaultAmountOfUpkeeps, big.NewInt(1), automationDefaultUpkeepGasLimit, false, false)
@@ -425,7 +422,7 @@ func TestAutomationAddFunds(t *testing.T) {
gom := gomega.NewGomegaWithT(t)
// Since the upkeep is currently underfunded, check that it doesn't get executed
gom.Consistently(func(g gomega.Gomega) {
- counter, err := consumers[0].Counter(context.Background())
+ counter, err := consumers[0].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail")
g.Expect(counter.Int64()).Should(gomega.Equal(int64(0)),
"Expected consumer counter to remain zero, but got %d", counter.Int64())
@@ -445,7 +442,7 @@ func TestAutomationAddFunds(t *testing.T) {
// Now the new upkeep should be performing because we added enough funds
gom.Eventually(func(g gomega.Gomega) {
- counter, err := consumers[0].Counter(context.Background())
+ counter, err := consumers[0].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail")
g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)),
"Expected newly registered upkeep's counter to be greater than 0, but got %d", counter.Int64())
@@ -468,7 +465,7 @@ func TestAutomationPauseUnPause(t *testing.T) {
t.Parallel()
l := logging.GetTestLogger(t)
chainClient, _, contractDeployer, linkToken, registry, registrar, _ := setupAutomationTestDocker(
- t, "pause-unpause", registryVersion, defaultOCRRegistryConfig, false, false, false,
+ t, registryVersion, defaultOCRRegistryConfig, false, false,
)
consumers, upkeepIDs := actions.DeployConsumers(t, registry, registrar, linkToken, contractDeployer, chainClient, defaultAmountOfUpkeeps, big.NewInt(automationDefaultLinkFunds), automationDefaultUpkeepGasLimit, false, false)
@@ -477,7 +474,7 @@ func TestAutomationPauseUnPause(t *testing.T) {
gom.Eventually(func(g gomega.Gomega) {
// Check if the upkeeps are performing multiple times by analyzing their counters and checking they are greater than 5
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := consumers[i].Counter(context.Background())
+ counter, err := consumers[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i)
g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(5)),
"Expected consumer counter to be greater than 5, but got %d", counter.Int64())
@@ -497,7 +494,7 @@ func TestAutomationPauseUnPause(t *testing.T) {
var countersAfterPause = make([]*big.Int, len(upkeepIDs))
for i := 0; i < len(upkeepIDs); i++ {
// Obtain the amount of times the upkeep has been executed so far
- countersAfterPause[i], err = consumers[i].Counter(context.Background())
+ countersAfterPause[i], err = consumers[i].Counter(utils.TestContext(t))
require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i)
l.Info().Int("Upkeep Index", i).Int64("Upkeeps Performed", countersAfterPause[i].Int64()).Msg("Paused Upkeep")
}
@@ -506,7 +503,7 @@ func TestAutomationPauseUnPause(t *testing.T) {
for i := 0; i < len(upkeepIDs); i++ {
// In most cases counters should remain constant, but there might be a straggling perform tx which
// gets committed later and increases counter by 1
- latestCounter, err := consumers[i].Counter(context.Background())
+ latestCounter, err := consumers[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i)
g.Expect(latestCounter.Int64()).Should(gomega.BeNumerically("<=", countersAfterPause[i].Int64()+1),
"Expected consumer counter not have increased more than %d, but got %d",
@@ -526,7 +523,7 @@ func TestAutomationPauseUnPause(t *testing.T) {
gom.Eventually(func(g gomega.Gomega) {
// Check if the upkeeps are performing multiple times by analysing their counters and checking they are greater than 5 + numbers of performing before pause
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := consumers[i].Counter(context.Background())
+ counter, err := consumers[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i)
g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", countersAfterPause[i].Int64()+1),
"Expected consumer counter to be greater than %d, but got %d", countersAfterPause[i].Int64()+1, counter.Int64())
@@ -551,7 +548,7 @@ func TestAutomationRegisterUpkeep(t *testing.T) {
t.Parallel()
l := logging.GetTestLogger(t)
chainClient, _, contractDeployer, linkToken, registry, registrar, _ := setupAutomationTestDocker(
- t, "register-upkeep", registryVersion, defaultOCRRegistryConfig, false, false, false,
+ t, registryVersion, defaultOCRRegistryConfig, false, false,
)
consumers, upkeepIDs := actions.DeployConsumers(t, registry, registrar, linkToken, contractDeployer, chainClient, defaultAmountOfUpkeeps, big.NewInt(automationDefaultLinkFunds), automationDefaultUpkeepGasLimit, false, false)
@@ -562,7 +559,7 @@ func TestAutomationRegisterUpkeep(t *testing.T) {
// store the value of their initial counters in order to compare later on that the value increased.
gom.Eventually(func(g gomega.Gomega) {
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := consumers[i].Counter(context.Background())
+ counter, err := consumers[i].Counter(utils.TestContext(t))
initialCounters[i] = counter
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i)
g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)),
@@ -582,7 +579,7 @@ func TestAutomationRegisterUpkeep(t *testing.T) {
// Test that the newly registered upkeep is also performing.
gom.Eventually(func(g gomega.Gomega) {
- counter, err := newUpkeep.Counter(context.Background())
+ counter, err := newUpkeep.Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling newly deployed upkeep's counter shouldn't fail")
g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)),
"Expected newly registered upkeep's counter to be greater than 0, but got %d", counter.Int64())
@@ -591,7 +588,7 @@ func TestAutomationRegisterUpkeep(t *testing.T) {
gom.Eventually(func(g gomega.Gomega) {
for i := 0; i < len(upkeepIDs); i++ {
- currentCounter, err := consumers[i].Counter(context.Background())
+ currentCounter, err := consumers[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail")
l.Info().
@@ -622,7 +619,7 @@ func TestAutomationPauseRegistry(t *testing.T) {
t.Run(name, func(t *testing.T) {
t.Parallel()
chainClient, _, contractDeployer, linkToken, registry, registrar, _ := setupAutomationTestDocker(
- t, "pause-registry", registryVersion, defaultOCRRegistryConfig, false, false, false,
+ t, registryVersion, defaultOCRRegistryConfig, false, false,
)
consumers, upkeepIDs := actions.DeployConsumers(t, registry, registrar, linkToken, contractDeployer, chainClient, defaultAmountOfUpkeeps, big.NewInt(automationDefaultLinkFunds), automationDefaultUpkeepGasLimit, false, false)
@@ -631,7 +628,7 @@ func TestAutomationPauseRegistry(t *testing.T) {
// Observe that the upkeeps which are initially registered are performing
gom.Eventually(func(g gomega.Gomega) {
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := consumers[i].Counter(context.Background())
+ counter, err := consumers[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i)
g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)),
"Expected consumer counter to be greater than 0, but got %d")
@@ -647,7 +644,7 @@ func TestAutomationPauseRegistry(t *testing.T) {
// Store how many times each upkeep performed once the registry was successfully paused
var countersAfterPause = make([]*big.Int, len(upkeepIDs))
for i := 0; i < len(upkeepIDs); i++ {
- countersAfterPause[i], err = consumers[i].Counter(context.Background())
+ countersAfterPause[i], err = consumers[i].Counter(utils.TestContext(t))
require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i)
}
@@ -655,7 +652,7 @@ func TestAutomationPauseRegistry(t *testing.T) {
// because they are no longer getting serviced
gom.Consistently(func(g gomega.Gomega) {
for i := 0; i < len(upkeepIDs); i++ {
- latestCounter, err := consumers[i].Counter(context.Background())
+ latestCounter, err := consumers[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i)
g.Expect(latestCounter.Int64()).Should(gomega.Equal(countersAfterPause[i].Int64()),
"Expected consumer counter to remain constant at %d, but got %d",
@@ -680,7 +677,7 @@ func TestAutomationKeeperNodesDown(t *testing.T) {
t.Parallel()
l := logging.GetTestLogger(t)
chainClient, chainlinkNodes, contractDeployer, linkToken, registry, registrar, _ := setupAutomationTestDocker(
- t, "keeper-nodes-down", registryVersion, defaultOCRRegistryConfig, false, false, false,
+ t, registryVersion, defaultOCRRegistryConfig, false, false,
)
consumers, upkeepIDs := actions.DeployConsumers(t, registry, registrar, linkToken, contractDeployer, chainClient, defaultAmountOfUpkeeps, big.NewInt(automationDefaultLinkFunds), automationDefaultUpkeepGasLimit, false, false)
@@ -692,7 +689,7 @@ func TestAutomationKeeperNodesDown(t *testing.T) {
// Watch upkeeps being performed and store their counters in order to compare them later in the test
gom.Eventually(func(g gomega.Gomega) {
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := consumers[i].Counter(context.Background())
+ counter, err := consumers[i].Counter(utils.TestContext(t))
initialCounters[i] = counter
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i)
g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)),
@@ -711,7 +708,7 @@ func TestAutomationKeeperNodesDown(t *testing.T) {
// Assert that upkeeps are still performed and their counters have increased
gom.Eventually(func(g gomega.Gomega) {
for i := 0; i < len(upkeepIDs); i++ {
- currentCounter, err := consumers[i].Counter(context.Background())
+ currentCounter, err := consumers[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i)
g.Expect(currentCounter.Int64()).Should(gomega.BeNumerically(">", initialCounters[i].Int64()),
"Expected counter to have increased from initial value of %s, but got %s",
@@ -732,7 +729,7 @@ func TestAutomationKeeperNodesDown(t *testing.T) {
// See how many times each upkeep was executed
var countersAfterNoMoreNodes = make([]*big.Int, len(upkeepIDs))
for i := 0; i < len(upkeepIDs); i++ {
- countersAfterNoMoreNodes[i], err = consumers[i].Counter(context.Background())
+ countersAfterNoMoreNodes[i], err = consumers[i].Counter(utils.TestContext(t))
require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i)
l.Info().Int("Upkeep Index", i).Int64("Performed", countersAfterNoMoreNodes[i].Int64()).Msg("Upkeeps Performed")
}
@@ -741,7 +738,7 @@ func TestAutomationKeeperNodesDown(t *testing.T) {
// all the nodes were taken down
gom.Consistently(func(g gomega.Gomega) {
for i := 0; i < len(upkeepIDs); i++ {
- latestCounter, err := consumers[i].Counter(context.Background())
+ latestCounter, err := consumers[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i)
g.Expect(latestCounter.Int64()).Should(gomega.BeNumerically("<=", countersAfterNoMoreNodes[i].Int64()+1),
"Expected consumer counter to not have increased more than %d, but got %d",
@@ -765,7 +762,7 @@ func TestAutomationPerformSimulation(t *testing.T) {
t.Run(name, func(t *testing.T) {
t.Parallel()
chainClient, _, contractDeployer, linkToken, registry, registrar, _ := setupAutomationTestDocker(
- t, "perform-simulation", registryVersion, defaultOCRRegistryConfig, false, false, false,
+ t, registryVersion, defaultOCRRegistryConfig, false, false,
)
consumersPerformance, _ := actions.DeployPerformanceConsumers(
@@ -790,7 +787,7 @@ func TestAutomationPerformSimulation(t *testing.T) {
// Initially performGas is set high, so performUpkeep reverts and no upkeep should be performed
gom.Consistently(func(g gomega.Gomega) {
// Consumer count should remain at 0
- cnt, err := consumerPerformance.GetUpkeepCount(context.Background())
+ cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's Counter shouldn't fail")
g.Expect(cnt.Int64()).Should(gomega.Equal(int64(0)),
"Expected consumer counter to remain constant at %d, but got %d", 0, cnt.Int64(),
@@ -798,14 +795,14 @@ func TestAutomationPerformSimulation(t *testing.T) {
}, "2m", "1s").Should(gomega.Succeed()) // ~1m for setup, 1m assertion
// Set performGas on consumer to be low, so that performUpkeep starts becoming successful
- err := consumerPerformance.SetPerformGasToBurn(context.Background(), big.NewInt(100000))
+ err := consumerPerformance.SetPerformGasToBurn(utils.TestContext(t), big.NewInt(100000))
require.NoError(t, err, "Perform gas should be set successfully on consumer")
err = chainClient.WaitForEvents()
require.NoError(t, err, "Error waiting for set perform gas tx")
// Upkeep should now start performing
gom.Eventually(func(g gomega.Gomega) {
- cnt, err := consumerPerformance.GetUpkeepCount(context.Background())
+ cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's Counter shouldn't fail")
g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", int64(0)),
"Expected consumer counter to be greater than 0, but got %d", cnt.Int64(),
@@ -829,7 +826,7 @@ func TestAutomationCheckPerformGasLimit(t *testing.T) {
t.Parallel()
l := logging.GetTestLogger(t)
chainClient, chainlinkNodes, contractDeployer, linkToken, registry, registrar, _ := setupAutomationTestDocker(
- t, "gas-limit", registryVersion, defaultOCRRegistryConfig, false, false, false,
+ t, registryVersion, defaultOCRRegistryConfig, false, false,
)
consumersPerformance, upkeepIDs := actions.DeployPerformanceConsumers(
@@ -855,7 +852,7 @@ func TestAutomationCheckPerformGasLimit(t *testing.T) {
// Initially performGas is set higher than defaultUpkeepGasLimit, so no upkeep should be performed
gom.Consistently(func(g gomega.Gomega) {
- cnt, err := consumerPerformance.GetUpkeepCount(context.Background())
+ cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail")
g.Expect(cnt.Int64()).Should(
gomega.Equal(int64(0)),
@@ -871,7 +868,7 @@ func TestAutomationCheckPerformGasLimit(t *testing.T) {
// Upkeep should now start performing
gom.Eventually(func(g gomega.Gomega) {
- cnt, err := consumerPerformance.GetUpkeepCount(context.Background())
+ cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail")
g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", int64(0)),
"Expected consumer counter to be greater than 0, but got %d", cnt.Int64(),
@@ -879,19 +876,19 @@ func TestAutomationCheckPerformGasLimit(t *testing.T) {
}, "2m", "1s").Should(gomega.Succeed()) // ~1m to perform once, 1m buffer
// Now increase the checkGasBurn on consumer, upkeep should stop performing
- err = consumerPerformance.SetCheckGasToBurn(context.Background(), big.NewInt(3000000))
+ err = consumerPerformance.SetCheckGasToBurn(utils.TestContext(t), big.NewInt(3000000))
require.NoError(t, err, "Check gas burn should be set successfully on consumer")
err = chainClient.WaitForEvents()
require.NoError(t, err, "Error waiting for SetCheckGasToBurn tx")
// Get existing performed count
- existingCnt, err := consumerPerformance.GetUpkeepCount(context.Background())
+ existingCnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t))
require.NoError(t, err, "Calling consumer's counter shouldn't fail")
l.Info().Int64("Upkeep counter", existingCnt.Int64()).Msg("Upkeep counter when check gas increased")
// In most cases count should remain constant, but it might increase by upto 1 due to pending perform
gom.Consistently(func(g gomega.Gomega) {
- cnt, err := consumerPerformance.GetUpkeepCount(context.Background())
+ cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail")
g.Expect(cnt.Int64()).Should(
gomega.BeNumerically("<=", existingCnt.Int64()+1),
@@ -899,7 +896,7 @@ func TestAutomationCheckPerformGasLimit(t *testing.T) {
)
}, "1m", "1s").Should(gomega.Succeed())
- existingCnt, err = consumerPerformance.GetUpkeepCount(context.Background())
+ existingCnt, err = consumerPerformance.GetUpkeepCount(utils.TestContext(t))
require.NoError(t, err, "Calling consumer's counter shouldn't fail")
existingCntInt := existingCnt.Int64()
l.Info().Int64("Upkeep counter", existingCntInt).Msg("Upkeep counter when consistently block finished")
@@ -919,7 +916,7 @@ func TestAutomationCheckPerformGasLimit(t *testing.T) {
// Upkeep should start performing again, and it should get regularly performed
gom.Eventually(func(g gomega.Gomega) {
- cnt, err := consumerPerformance.GetUpkeepCount(context.Background())
+ cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's Counter shouldn't fail")
g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", existingCntInt),
"Expected consumer counter to be greater than %d, but got %d", existingCntInt, cnt.Int64(),
@@ -943,7 +940,7 @@ func TestUpdateCheckData(t *testing.T) {
t.Parallel()
l := logging.GetTestLogger(t)
chainClient, _, contractDeployer, linkToken, registry, registrar, _ := setupAutomationTestDocker(
- t, "update-check-data", registryVersion, defaultOCRRegistryConfig, false, false, false,
+ t, registryVersion, defaultOCRRegistryConfig, false, false,
)
performDataChecker, upkeepIDs := actions.DeployPerformDataCheckerConsumers(
@@ -963,7 +960,7 @@ func TestUpdateCheckData(t *testing.T) {
gom.Consistently(func(g gomega.Gomega) {
// expect the counter to remain 0 because perform data does not match
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := performDataChecker[i].Counter(context.Background())
+ counter, err := performDataChecker[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve perform data checker"+
" for upkeep at index "+strconv.Itoa(i))
g.Expect(counter.Int64()).Should(gomega.Equal(int64(0)),
@@ -982,7 +979,7 @@ func TestUpdateCheckData(t *testing.T) {
// retrieve new check data for all upkeeps
for i := 0; i < len(upkeepIDs); i++ {
- upkeep, err := registry.GetUpkeepInfo(context.Background(), upkeepIDs[i])
+ upkeep, err := registry.GetUpkeepInfo(utils.TestContext(t), upkeepIDs[i])
require.NoError(t, err, "Failed to get upkeep info at index %d", i)
require.Equal(t, []byte(automationExpectedData), upkeep.CheckData, "Upkeep data not as expected")
}
@@ -990,7 +987,7 @@ func TestUpdateCheckData(t *testing.T) {
gom.Eventually(func(g gomega.Gomega) {
// Check if the upkeeps are performing multiple times by analysing their counters and checking they are greater than 5
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := performDataChecker[i].Counter(context.Background())
+ counter, err := performDataChecker[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve perform data checker counter"+
" for upkeep at index "+strconv.Itoa(i))
g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)),
@@ -1008,10 +1005,8 @@ type TestConfig struct {
func setupAutomationTestDocker(
t *testing.T,
- testName string,
registryVersion ethereum.KeeperRegistryVersion,
registryConfig contracts.KeeperRegistrySettings,
- statefulDb bool,
isMercuryV02 bool,
isMercuryV03 bool,
) (
@@ -1033,11 +1028,11 @@ func setupAutomationTestDocker(
// build the node config
clNodeConfig := node.NewConfig(node.NewBaseConfig())
syncInterval := models.MustMakeDuration(5 * time.Minute)
- clNodeConfig.Feature.LogPoller = it_utils.Ptr[bool](true)
- clNodeConfig.OCR2.Enabled = it_utils.Ptr[bool](true)
- clNodeConfig.Keeper.TurnLookBack = it_utils.Ptr[int64](int64(0))
+ clNodeConfig.Feature.LogPoller = utils.Ptr[bool](true)
+ clNodeConfig.OCR2.Enabled = utils.Ptr[bool](true)
+ clNodeConfig.Keeper.TurnLookBack = utils.Ptr[int64](int64(0))
clNodeConfig.Keeper.Registry.SyncInterval = &syncInterval
- clNodeConfig.Keeper.Registry.PerformGasOverhead = it_utils.Ptr[uint32](uint32(150000))
+ clNodeConfig.Keeper.Registry.PerformGasOverhead = utils.Ptr[uint32](uint32(150000))
clNodeConfig.P2P.V2.AnnounceAddresses = &[]string{"0.0.0.0:6690"}
clNodeConfig.P2P.V2.ListenAddresses = &[]string{"0.0.0.0:6690"}
@@ -1071,8 +1066,8 @@ func setupAutomationTestDocker(
var httpUrls []string
var wsUrls []string
if network.Simulated {
- httpUrls = []string{env.Geth.InternalHttpUrl}
- wsUrls = []string{env.Geth.InternalWsUrl}
+ httpUrls = []string{env.RpcProvider.PrivateHttpUrls()[0]}
+ wsUrls = []string{env.RpcProvider.PrivateWsUrsl()[0]}
} else {
httpUrls = network.HTTPURLs
wsUrls = network.URLs
@@ -1087,11 +1082,13 @@ func setupAutomationTestDocker(
if isMercuryV02 {
output := `{"chainlinkBlob":"0x0001c38d71fed6c320b90e84b6f559459814d068e2a1700adc931ca9717d4fe70000000000000000000000000000000000000000000000000000000001a80b52b4bf1233f9cb71144a253a1791b202113c4ab4a92fa1b176d684b4959666ff8200000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001004254432d5553442d415242495452554d2d544553544e4554000000000000000000000000000000000000000000000000000000000000000000000000645570be000000000000000000000000000000000000000000000000000002af2b818dc5000000000000000000000000000000000000000000000000000002af2426faf3000000000000000000000000000000000000000000000000000002af32dc209700000000000000000000000000000000000000000000000000000000012130f8df0a9745bb6ad5e2df605e158ba8ad8a33ef8a0acf9851f0f01668a3a3f2b68600000000000000000000000000000000000000000000000000000000012130f60000000000000000000000000000000000000000000000000000000000000002c4a7958dce105089cf5edb68dad7dcfe8618d7784eb397f97d5a5fade78c11a58275aebda478968e545f7e3657aba9dcbe8d44605e4c6fde3e24edd5e22c94270000000000000000000000000000000000000000000000000000000000000002459c12d33986018a8959566d145225f0c4a4e61a9a3f50361ccff397899314f0018162cf10cd89897635a0bb62a822355bd199d09f4abe76e4d05261bb44733d"}`
- env.MockAdapter.SetStringValuePath("/client", []string{http.MethodGet, http.MethodPost}, map[string]string{"Content-Type": "application/json"}, output)
+ err = env.MockAdapter.SetStringValuePath("/client", []string{http.MethodGet, http.MethodPost}, map[string]string{"Content-Type": "application/json"}, output)
+ require.NoError(t, err)
}
if isMercuryV03 {
output := `{"reports":[{"feedID":"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000","validFromTimestamp":0,"observationsTimestamp":0,"fullReport":"0x00066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000000081401000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000289ad8d367000000000000000000000000000000000000000000000000000000289acf0b38000000000000000000000000000000000000000000000000000000289b3da40000000000000000000000000000000000000000000000000000000000018ae7ce74d9fa252a8983976eab600dc7590c778d04813430841bc6e765c34cd81a168d00000000000000000000000000000000000000000000000000000000018ae7cb0000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000000000000260412b94e525ca6cedc9f544fd86f77606d52fe731a5d069dbe836a8bfc0fb8c911963b0ae7a14971f3b4621bffb802ef0605392b9a6c89c7fab1df8633a5ade00000000000000000000000000000000000000000000000000000000000000024500c2f521f83fba5efc2bf3effaaedde43d0a4adff785c1213b712a3aed0d8157642a84324db0cf9695ebd27708d4608eb0337e0dd87b0e43f0fa70c700d911"}]}`
- env.MockAdapter.SetStringValuePath("/api/v1/reports/bulk", []string{http.MethodGet, http.MethodPost}, map[string]string{"Content-Type": "application/json"}, output)
+ err = env.MockAdapter.SetStringValuePath("/api/v1/reports/bulk", []string{http.MethodGet, http.MethodPost}, map[string]string{"Content-Type": "application/json"}, output)
+ require.NoError(t, err)
}
} else {
env, err = test_env.NewCLTestEnvBuilder().
diff --git a/integration-tests/smoke/flux_test.go b/integration-tests/smoke/flux_test.go
index 8c2b3638bff..2997ff1c74a 100644
--- a/integration-tests/smoke/flux_test.go
+++ b/integration-tests/smoke/flux_test.go
@@ -1,7 +1,6 @@
package smoke
import (
- "context"
"fmt"
"math/big"
"net/http"
@@ -19,6 +18,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
"github.com/smartcontractkit/chainlink/integration-tests/docker/test_env"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
func TestFluxBasic(t *testing.T) {
@@ -74,7 +74,7 @@ func TestFluxBasic(t *testing.T) {
err = env.EVMClient.WaitForEvents()
require.NoError(t, err, "Waiting for event subscriptions in nodes shouldn't fail")
- oracles, err := fluxInstance.GetOracles(context.Background())
+ oracles, err := fluxInstance.GetOracles(utils.TestContext(t))
require.NoError(t, err, "Getting oracle details from the Flux aggregator contract shouldn't fail")
l.Info().Str("Oracles", strings.Join(oracles, ",")).Msg("Oracles set")
@@ -108,7 +108,7 @@ func TestFluxBasic(t *testing.T) {
env.EVMClient.AddHeaderEventSubscription(fluxInstance.Address(), fluxRound)
err = env.EVMClient.WaitForEvents()
require.NoError(t, err, "Waiting for event subscriptions in nodes shouldn't fail")
- data, err := fluxInstance.GetContractData(context.Background())
+ data, err := fluxInstance.GetContractData(utils.TestContext(t))
require.NoError(t, err, "Getting contract data from flux aggregator contract shouldn't fail")
require.Equal(t, int64(1e5), data.LatestRoundData.Answer.Int64(),
"Expected latest round answer to be %d, but found %d", int64(1e5), data.LatestRoundData.Answer.Int64())
@@ -127,7 +127,7 @@ func TestFluxBasic(t *testing.T) {
require.NoError(t, err, "Setting value path in mock server shouldn't fail")
err = env.EVMClient.WaitForEvents()
require.NoError(t, err, "Waiting for event subscriptions in nodes shouldn't fail")
- data, err = fluxInstance.GetContractData(context.Background())
+ data, err = fluxInstance.GetContractData(utils.TestContext(t))
require.NoError(t, err, "Getting contract data from flux aggregator contract shouldn't fail")
require.Equal(t, int64(1e10), data.LatestRoundData.Answer.Int64(),
"Expected latest round answer to be %d, but found %d", int64(1e10), data.LatestRoundData.Answer.Int64())
@@ -140,7 +140,7 @@ func TestFluxBasic(t *testing.T) {
l.Info().Interface("data", data).Msg("Round data")
for _, oracleAddr := range nodeAddresses {
- payment, _ := fluxInstance.WithdrawablePayment(context.Background(), oracleAddr)
+ payment, _ := fluxInstance.WithdrawablePayment(utils.TestContext(t), oracleAddr)
require.Equal(t, int64(2), payment.Int64(),
"Expected flux aggregator contract's withdrawable payment to be %d, but found %d", int64(2), payment.Int64())
}
diff --git a/integration-tests/smoke/forwarder_ocr_test.go b/integration-tests/smoke/forwarder_ocr_test.go
index 727b83a601a..7203e031780 100644
--- a/integration-tests/smoke/forwarder_ocr_test.go
+++ b/integration-tests/smoke/forwarder_ocr_test.go
@@ -1,7 +1,6 @@
package smoke
import (
- "context"
"math/big"
"testing"
@@ -12,6 +11,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/actions"
"github.com/smartcontractkit/chainlink/integration-tests/docker/test_env"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
func TestForwarderOCRBasic(t *testing.T) {
@@ -72,7 +72,7 @@ func TestForwarderOCRBasic(t *testing.T) {
err = env.EVMClient.WaitForEvents()
require.NoError(t, err, "Error waiting for events")
- answer, err := ocrInstances[0].GetLatestAnswer(context.Background())
+ answer, err := ocrInstances[0].GetLatestAnswer(utils.TestContext(t))
require.NoError(t, err, "Getting latest answer from OCR contract shouldn't fail")
require.Equal(t, int64(5), answer.Int64(), "Expected latest answer from OCR contract to be 5 but got %d", answer.Int64())
@@ -83,7 +83,7 @@ func TestForwarderOCRBasic(t *testing.T) {
err = env.EVMClient.WaitForEvents()
require.NoError(t, err, "Error waiting for events")
- answer, err = ocrInstances[0].GetLatestAnswer(context.Background())
+ answer, err = ocrInstances[0].GetLatestAnswer(utils.TestContext(t))
require.NoError(t, err, "Error getting latest OCR answer")
require.Equal(t, int64(10), answer.Int64(), "Expected latest answer from OCR contract to be 10 but got %d", answer.Int64())
}
diff --git a/integration-tests/smoke/forwarders_ocr2_test.go b/integration-tests/smoke/forwarders_ocr2_test.go
index baa5a781f6b..be87eb56292 100644
--- a/integration-tests/smoke/forwarders_ocr2_test.go
+++ b/integration-tests/smoke/forwarders_ocr2_test.go
@@ -1,7 +1,6 @@
package smoke
import (
- "context"
"fmt"
"math/big"
"net/http"
@@ -17,6 +16,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
"github.com/smartcontractkit/chainlink/integration-tests/docker/test_env"
"github.com/smartcontractkit/chainlink/integration-tests/types/config/node"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
func TestForwarderOCR2Basic(t *testing.T) {
@@ -92,7 +92,7 @@ func TestForwarderOCR2Basic(t *testing.T) {
err = actions.StartNewOCR2Round(1, ocrInstances, env.EVMClient, time.Minute*10, l)
require.NoError(t, err)
- answer, err := ocrInstances[0].GetLatestAnswer(context.Background())
+ answer, err := ocrInstances[0].GetLatestAnswer(utils.TestContext(t))
require.NoError(t, err, "Getting latest answer from OCRv2 contract shouldn't fail")
require.Equal(t, int64(5), answer.Int64(), "Expected latest answer from OCRw contract to be 5 but got %d", answer.Int64())
@@ -103,7 +103,7 @@ func TestForwarderOCR2Basic(t *testing.T) {
err = actions.StartNewOCR2Round(int64(i), ocrInstances, env.EVMClient, time.Minute*10, l)
require.NoError(t, err)
- answer, err = ocrInstances[0].GetLatestAnswer(context.Background())
+ answer, err = ocrInstances[0].GetLatestAnswer(utils.TestContext(t))
require.NoError(t, err, "Error getting latest OCRv2 answer")
require.Equal(t, int64(ocrRoundVal), answer.Int64(), fmt.Sprintf("Expected latest answer from OCRv2 contract to be %d but got %d", ocrRoundVal, answer.Int64()))
}
diff --git a/integration-tests/smoke/keeper_test.go b/integration-tests/smoke/keeper_test.go
index d42944fd558..b28ab1ff101 100644
--- a/integration-tests/smoke/keeper_test.go
+++ b/integration-tests/smoke/keeper_test.go
@@ -1,7 +1,6 @@
package smoke
import (
- "context"
"fmt"
"math/big"
"strconv"
@@ -23,6 +22,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum"
"github.com/smartcontractkit/chainlink/integration-tests/docker/test_env"
"github.com/smartcontractkit/chainlink/integration-tests/types/config/node"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
const (
@@ -109,7 +109,7 @@ func TestKeeperBasicSmoke(t *testing.T) {
gom.Eventually(func(g gomega.Gomega) error {
// Check if the upkeeps are performing multiple times by analyzing their counters and checking they are greater than 10
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := consumers[i].Counter(context.Background())
+ counter, err := consumers[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i)
g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(10)),
"Expected consumer counter to be greater than 10, but got %d", counter.Int64())
@@ -131,7 +131,7 @@ func TestKeeperBasicSmoke(t *testing.T) {
for i := 0; i < len(upkeepIDs); i++ {
// Obtain the amount of times the upkeep has been executed so far
- countersAfterCancellation[i], err = consumers[i].Counter(context.Background())
+ countersAfterCancellation[i], err = consumers[i].Counter(utils.TestContext(t))
require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i)
l.Info().Int("Index", i).Int64("Upkeeps Performed", countersAfterCancellation[i].Int64()).Msg("Cancelled Upkeep")
}
@@ -139,7 +139,7 @@ func TestKeeperBasicSmoke(t *testing.T) {
gom.Consistently(func(g gomega.Gomega) {
for i := 0; i < len(upkeepIDs); i++ {
// Expect the counter to remain constant because the upkeep was cancelled, so it shouldn't increase anymore
- latestCounter, err := consumers[i].Counter(context.Background())
+ latestCounter, err := consumers[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i)
g.Expect(latestCounter.Int64()).Should(gomega.Equal(countersAfterCancellation[i].Int64()),
"Expected consumer counter to remain constant at %d, but got %d",
@@ -187,11 +187,11 @@ func TestKeeperBlockCountPerTurn(t *testing.T) {
// Wait for upkeep to be performed twice by different keepers (buddies)
gom.Eventually(func(g gomega.Gomega) error {
- counter, err := consumers[0].Counter(context.Background())
+ counter, err := consumers[0].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail")
l.Info().Int64("Upkeep counter", counter.Int64()).Msg("Number of upkeeps performed")
- upkeepInfo, err := registry.GetUpkeepInfo(context.Background(), upkeepID)
+ upkeepInfo, err := registry.GetUpkeepInfo(utils.TestContext(t), upkeepID)
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Registry's getUpkeep shouldn't fail")
latestKeeper := upkeepInfo.LastKeeper
@@ -205,7 +205,7 @@ func TestKeeperBlockCountPerTurn(t *testing.T) {
}, "1m", "1s").Should(gomega.Succeed())
gom.Eventually(func(g gomega.Gomega) error {
- upkeepInfo, err := registry.GetUpkeepInfo(context.Background(), upkeepID)
+ upkeepInfo, err := registry.GetUpkeepInfo(utils.TestContext(t), upkeepID)
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Registry's getUpkeep shouldn't fail")
latestKeeper := upkeepInfo.LastKeeper
@@ -219,7 +219,7 @@ func TestKeeperBlockCountPerTurn(t *testing.T) {
// Expect no new keepers to perform for a while
gom.Consistently(func(g gomega.Gomega) {
- upkeepInfo, err := registry.GetUpkeepInfo(context.Background(), upkeepID)
+ upkeepInfo, err := registry.GetUpkeepInfo(utils.TestContext(t), upkeepID)
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Registry's getUpkeep shouldn't fail")
latestKeeper := upkeepInfo.LastKeeper
@@ -235,11 +235,11 @@ func TestKeeperBlockCountPerTurn(t *testing.T) {
// Expect a new keeper to perform
gom.Eventually(func(g gomega.Gomega) error {
- counter, err := consumers[0].Counter(context.Background())
+ counter, err := consumers[0].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail")
l.Info().Int64("Upkeep counter", counter.Int64()).Msg("Num upkeeps performed")
- upkeepInfo, err := registry.GetUpkeepInfo(context.Background(), upkeepID)
+ upkeepInfo, err := registry.GetUpkeepInfo(utils.TestContext(t), upkeepID)
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Registry's getUpkeep shouldn't fail")
latestKeeper := upkeepInfo.LastKeeper
@@ -296,7 +296,7 @@ func TestKeeperSimulation(t *testing.T) {
// Initially performGas is set high, so performUpkeep reverts and no upkeep should be performed
gom.Consistently(func(g gomega.Gomega) {
// Consumer count should remain at 0
- cnt, err := consumerPerformance.GetUpkeepCount(context.Background())
+ cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's Counter shouldn't fail")
g.Expect(cnt.Int64()).Should(
gomega.Equal(int64(0)),
@@ -304,20 +304,20 @@ func TestKeeperSimulation(t *testing.T) {
)
// Not even reverted upkeeps should be performed. Last keeper for the upkeep should be 0 address
- upkeepInfo, err := registry.GetUpkeepInfo(context.Background(), upkeepID)
+ upkeepInfo, err := registry.GetUpkeepInfo(utils.TestContext(t), upkeepID)
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Registry's getUpkeep shouldn't fail")
g.Expect(upkeepInfo.LastKeeper).Should(gomega.Equal(actions.ZeroAddress.String()), "Last keeper should be zero address")
}, "1m", "1s").Should(gomega.Succeed())
// Set performGas on consumer to be low, so that performUpkeep starts becoming successful
- err = consumerPerformance.SetPerformGasToBurn(context.Background(), big.NewInt(100000))
+ err = consumerPerformance.SetPerformGasToBurn(utils.TestContext(t), big.NewInt(100000))
require.NoError(t, err, "Error setting PerformGasToBurn")
err = chainClient.WaitForEvents()
require.NoError(t, err, "Error waiting to set PerformGasToBurn")
// Upkeep should now start performing
gom.Eventually(func(g gomega.Gomega) error {
- cnt, err := consumerPerformance.GetUpkeepCount(context.Background())
+ cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's Counter shouldn't fail")
g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", int64(0)),
"Expected consumer counter to be greater than 0, but got %d", cnt.Int64(),
@@ -368,7 +368,7 @@ func TestKeeperCheckPerformGasLimit(t *testing.T) {
// Initially performGas is set higher than defaultUpkeepGasLimit, so no upkeep should be performed
gom.Consistently(func(g gomega.Gomega) {
- cnt, err := consumerPerformance.GetUpkeepCount(context.Background())
+ cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail")
g.Expect(cnt.Int64()).Should(
gomega.Equal(int64(0)),
@@ -384,7 +384,7 @@ func TestKeeperCheckPerformGasLimit(t *testing.T) {
// Upkeep should now start performing
gom.Eventually(func(g gomega.Gomega) error {
- cnt, err := consumerPerformance.GetUpkeepCount(context.Background())
+ cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail")
g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", int64(0)),
"Expected consumer counter to be greater than 0, but got %d", cnt.Int64(),
@@ -393,13 +393,13 @@ func TestKeeperCheckPerformGasLimit(t *testing.T) {
}, "1m", "1s").Should(gomega.Succeed())
// Now increase the checkGasBurn on consumer, upkeep should stop performing
- err = consumerPerformance.SetCheckGasToBurn(context.Background(), big.NewInt(3000000))
+ err = consumerPerformance.SetCheckGasToBurn(utils.TestContext(t), big.NewInt(3000000))
require.NoError(t, err, "Error setting CheckGasToBurn")
err = chainClient.WaitForEvents()
require.NoError(t, err, "Error waiting for SetCheckGasToBurn tx")
// Get existing performed count
- existingCnt, err := consumerPerformance.GetUpkeepCount(context.Background())
+ existingCnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t))
require.NoError(t, err, "Error calling consumer's counter")
l.Info().Int64("Upkeep counter", existingCnt.Int64()).Msg("Check Gas Increased")
@@ -407,7 +407,7 @@ func TestKeeperCheckPerformGasLimit(t *testing.T) {
// gets committed later. Since every keeper node cannot have more than 1 straggling tx, it
// is sufficient to check that the upkeep count does not increase by more than 6.
gom.Consistently(func(g gomega.Gomega) {
- cnt, err := consumerPerformance.GetUpkeepCount(context.Background())
+ cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail")
g.Expect(cnt.Int64()).Should(
gomega.BeNumerically("<=", existingCnt.Int64()+numUpkeepsAllowedForStragglingTxs),
@@ -415,7 +415,7 @@ func TestKeeperCheckPerformGasLimit(t *testing.T) {
)
}, "3m", "1s").Should(gomega.Succeed())
- existingCnt, err = consumerPerformance.GetUpkeepCount(context.Background())
+ existingCnt, err = consumerPerformance.GetUpkeepCount(utils.TestContext(t))
require.NoError(t, err, "Error calling consumer's counter")
existingCntInt := existingCnt.Int64()
l.Info().Int64("Upkeep counter", existingCntInt).Msg("Upkeep counter when consistently block finished")
@@ -430,7 +430,7 @@ func TestKeeperCheckPerformGasLimit(t *testing.T) {
// Upkeep should start performing again, and it should get regularly performed
gom.Eventually(func(g gomega.Gomega) {
- cnt, err := consumerPerformance.GetUpkeepCount(context.Background())
+ cnt, err := consumerPerformance.GetUpkeepCount(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's Counter shouldn't fail")
g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", existingCntInt),
"Expected consumer counter to be greater than %d, but got %d", existingCntInt, cnt.Int64(),
@@ -478,7 +478,7 @@ func TestKeeperRegisterUpkeep(t *testing.T) {
// store the value of their initial counters in order to compare later on that the value increased.
gom.Eventually(func(g gomega.Gomega) error {
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := consumers[i].Counter(context.Background())
+ counter, err := consumers[i].Counter(utils.TestContext(t))
initialCounters[i] = counter
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter"+
" for upkeep at index "+strconv.Itoa(i))
@@ -500,7 +500,7 @@ func TestKeeperRegisterUpkeep(t *testing.T) {
// Test that the newly registered upkeep is also performing.
gom.Eventually(func(g gomega.Gomega) error {
- counter, err := newUpkeep.Counter(context.Background())
+ counter, err := newUpkeep.Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling newly deployed upkeep's counter shouldn't fail")
g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)),
"Expected newly registered upkeep's counter to be greater than 0, but got %d", counter.Int64())
@@ -510,7 +510,7 @@ func TestKeeperRegisterUpkeep(t *testing.T) {
gom.Eventually(func(g gomega.Gomega) error {
for i := 0; i < len(upkeepIDs); i++ {
- currentCounter, err := consumers[i].Counter(context.Background())
+ currentCounter, err := consumers[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail")
l.Info().
@@ -563,7 +563,7 @@ func TestKeeperAddFunds(t *testing.T) {
// Since the upkeep is currently underfunded, check that it doesn't get executed
gom.Consistently(func(g gomega.Gomega) {
- counter, err := consumers[0].Counter(context.Background())
+ counter, err := consumers[0].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail")
g.Expect(counter.Int64()).Should(gomega.Equal(int64(0)),
"Expected consumer counter to remain zero, but got %d", counter.Int64())
@@ -583,7 +583,7 @@ func TestKeeperAddFunds(t *testing.T) {
// Now the new upkeep should be performing because we added enough funds
gom.Eventually(func(g gomega.Gomega) {
- counter, err := consumers[0].Counter(context.Background())
+ counter, err := consumers[0].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail")
g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)),
"Expected newly registered upkeep's counter to be greater than 0, but got %d", counter.Int64())
@@ -628,7 +628,7 @@ func TestKeeperRemove(t *testing.T) {
// Make sure the upkeeps are running before we remove a keeper
gom.Eventually(func(g gomega.Gomega) error {
for upkeepID := 0; upkeepID < len(upkeepIDs); upkeepID++ {
- counter, err := consumers[upkeepID].Counter(context.Background())
+ counter, err := consumers[upkeepID].Counter(utils.TestContext(t))
initialCounters[upkeepID] = counter
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter"+
" for upkeep with ID "+strconv.Itoa(upkeepID))
@@ -637,7 +637,7 @@ func TestKeeperRemove(t *testing.T) {
return nil
}, "1m", "1s").Should(gomega.Succeed())
- keepers, err := registry.GetKeeperList(context.Background())
+ keepers, err := registry.GetKeeperList(utils.TestContext(t))
require.NoError(t, err, "Error getting list of Keepers")
// Remove the first keeper from the list
@@ -660,7 +660,7 @@ func TestKeeperRemove(t *testing.T) {
// The upkeeps should still perform and their counters should have increased compared to the first check
gom.Eventually(func(g gomega.Gomega) error {
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := consumers[i].Counter(context.Background())
+ counter, err := consumers[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i)
g.Expect(counter.Cmp(initialCounters[i]) == 1, "Expected consumer counter to be greater "+
"than initial counter which was %s, but got %s", initialCounters[i], counter)
@@ -705,7 +705,7 @@ func TestKeeperPauseRegistry(t *testing.T) {
// Observe that the upkeeps which are initially registered are performing
gom.Eventually(func(g gomega.Gomega) error {
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := consumers[i].Counter(context.Background())
+ counter, err := consumers[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i)
g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)),
"Expected consumer counter to be greater than 0, but got %d")
@@ -722,7 +722,7 @@ func TestKeeperPauseRegistry(t *testing.T) {
// Store how many times each upkeep performed once the registry was successfully paused
var countersAfterPause = make([]*big.Int, len(upkeepIDs))
for i := 0; i < len(upkeepIDs); i++ {
- countersAfterPause[i], err = consumers[i].Counter(context.Background())
+ countersAfterPause[i], err = consumers[i].Counter(utils.TestContext(t))
require.NoError(t, err, "Error retrieving consumer at index %d", i)
}
@@ -730,7 +730,7 @@ func TestKeeperPauseRegistry(t *testing.T) {
// because they are no longer getting serviced
gom.Consistently(func(g gomega.Gomega) {
for i := 0; i < len(upkeepIDs); i++ {
- latestCounter, err := consumers[i].Counter(context.Background())
+ latestCounter, err := consumers[i].Counter(utils.TestContext(t))
require.NoError(t, err, "Error retrieving consumer contract at index %d", i)
g.Expect(latestCounter.Int64()).Should(gomega.Equal(countersAfterPause[i].Int64()),
"Expected consumer counter to remain constant at %d, but got %d",
@@ -791,7 +791,7 @@ func TestKeeperMigrateRegistry(t *testing.T) {
// Check that the first upkeep from the first registry is performing (before being migrated)
gom.Eventually(func(g gomega.Gomega) error {
- counterBeforeMigration, err := consumers[0].Counter(context.Background())
+ counterBeforeMigration, err := consumers[0].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail")
g.Expect(counterBeforeMigration.Int64()).Should(gomega.BeNumerically(">", int64(0)),
"Expected consumer counter to be greater than 0, but got %s", counterBeforeMigration)
@@ -810,12 +810,12 @@ func TestKeeperMigrateRegistry(t *testing.T) {
err = chainClient.WaitForEvents()
require.NoError(t, err, "Error waiting to pause first registry")
- counterAfterMigration, err := consumers[0].Counter(context.Background())
+ counterAfterMigration, err := consumers[0].Counter(utils.TestContext(t))
require.NoError(t, err, "Error calling consumer's counter")
// Check that once we migrated the upkeep, the counter has increased
gom.Eventually(func(g gomega.Gomega) error {
- currentCounter, err := consumers[0].Counter(context.Background())
+ currentCounter, err := consumers[0].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail")
g.Expect(currentCounter.Int64()).Should(gomega.BeNumerically(">", counterAfterMigration.Int64()),
"Expected counter to have increased, but stayed constant at %s", counterAfterMigration)
@@ -860,7 +860,7 @@ func TestKeeperNodeDown(t *testing.T) {
// Watch upkeeps being performed and store their counters in order to compare them later in the test
gom.Eventually(func(g gomega.Gomega) error {
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := consumers[i].Counter(context.Background())
+ counter, err := consumers[i].Counter(utils.TestContext(t))
initialCounters[i] = counter
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i)
g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)),
@@ -882,7 +882,7 @@ func TestKeeperNodeDown(t *testing.T) {
// Assert that upkeeps are still performed and their counters have increased
gom.Eventually(func(g gomega.Gomega) error {
for i := 0; i < len(upkeepIDs); i++ {
- currentCounter, err := consumers[i].Counter(context.Background())
+ currentCounter, err := consumers[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i)
g.Expect(currentCounter.Int64()).Should(gomega.BeNumerically(">", initialCounters[i].Int64()),
"Expected counter to have increased from initial value of %s, but got %s",
@@ -908,7 +908,7 @@ func TestKeeperNodeDown(t *testing.T) {
// See how many times each upkeep was executed
var countersAfterNoMoreNodes = make([]*big.Int, len(upkeepIDs))
for i := 0; i < len(upkeepIDs); i++ {
- countersAfterNoMoreNodes[i], err = consumers[i].Counter(context.Background())
+ countersAfterNoMoreNodes[i], err = consumers[i].Counter(utils.TestContext(t))
require.NoError(t, err, "Error retrieving consumer counter %d", i)
l.Info().
Int("Index", i).
@@ -921,7 +921,7 @@ func TestKeeperNodeDown(t *testing.T) {
// so a +6 on the upper limit side should be sufficient.
gom.Consistently(func(g gomega.Gomega) {
for i := 0; i < len(upkeepIDs); i++ {
- latestCounter, err := consumers[i].Counter(context.Background())
+ latestCounter, err := consumers[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i)
g.Expect(latestCounter.Int64()).Should(gomega.BeNumerically("<=",
countersAfterNoMoreNodes[i].Int64()+numUpkeepsAllowedForStragglingTxs,
@@ -964,7 +964,7 @@ func TestKeeperPauseUnPauseUpkeep(t *testing.T) {
gom.Eventually(func(g gomega.Gomega) error {
// Check if the upkeeps are performing multiple times by analysing their counters and checking they are greater than 5
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := consumers[i].Counter(context.Background())
+ counter, err := consumers[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i)
g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(5)),
"Expected consumer counter to be greater than 5, but got %d", counter.Int64())
@@ -985,7 +985,7 @@ func TestKeeperPauseUnPauseUpkeep(t *testing.T) {
var countersAfterPause = make([]*big.Int, len(upkeepIDs))
for i := 0; i < len(upkeepIDs); i++ {
// Obtain the amount of times the upkeep has been executed so far
- countersAfterPause[i], err = consumers[i].Counter(context.Background())
+ countersAfterPause[i], err = consumers[i].Counter(utils.TestContext(t))
require.NoError(t, err, "Error retrieving upkeep count at index %d", i)
l.Info().
Int("Index", i).
@@ -998,7 +998,7 @@ func TestKeeperPauseUnPauseUpkeep(t *testing.T) {
// In most cases counters should remain constant, but there might be a straggling perform tx which
// gets committed later. Since every keeper node cannot have more than 1 straggling tx, it
// is sufficient to check that the upkeep count does not increase by more than 6.
- latestCounter, err := consumers[i].Counter(context.Background())
+ latestCounter, err := consumers[i].Counter(utils.TestContext(t))
require.NoError(t, err, "Error retrieving counter at index %d", i)
g.Expect(latestCounter.Int64()).Should(gomega.BeNumerically("<=", countersAfterPause[i].Int64()+numUpkeepsAllowedForStragglingTxs),
"Expected consumer counter not have increased more than %d, but got %d",
@@ -1018,7 +1018,7 @@ func TestKeeperPauseUnPauseUpkeep(t *testing.T) {
gom.Eventually(func(g gomega.Gomega) error {
// Check if the upkeeps are performing multiple times by analysing their counters and checking they are greater than 5 + numbers of performing before pause
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := consumers[i].Counter(context.Background())
+ counter, err := consumers[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter"+
" for upkeep at index %d", i)
g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(5)+countersAfterPause[i].Int64()),
@@ -1055,7 +1055,7 @@ func TestKeeperUpdateCheckData(t *testing.T) {
gom.Consistently(func(g gomega.Gomega) {
// expect the counter to remain 0 because perform data does not match
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := performDataChecker[i].Counter(context.Background())
+ counter, err := performDataChecker[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve perform data checker for upkeep at index %d", i)
g.Expect(counter.Int64()).Should(gomega.Equal(int64(0)),
"Expected perform data checker counter to be 0, but got %d", counter.Int64())
@@ -1073,7 +1073,7 @@ func TestKeeperUpdateCheckData(t *testing.T) {
// retrieve new check data for all upkeeps
for i := 0; i < len(upkeepIDs); i++ {
- upkeep, err := registry.GetUpkeepInfo(context.Background(), upkeepIDs[i])
+ upkeep, err := registry.GetUpkeepInfo(utils.TestContext(t), upkeepIDs[i])
require.NoError(t, err, "Error getting upkeep info from index %d", i)
require.Equal(t, []byte(keeperExpectedData), upkeep.CheckData, "Check data not as expected")
}
@@ -1081,7 +1081,7 @@ func TestKeeperUpdateCheckData(t *testing.T) {
gom.Eventually(func(g gomega.Gomega) error {
// Check if the upkeeps are performing multiple times by analysing their counters and checking they are greater than 5
for i := 0; i < len(upkeepIDs); i++ {
- counter, err := performDataChecker[i].Counter(context.Background())
+ counter, err := performDataChecker[i].Counter(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve perform data checker counter for upkeep at index %d", i)
g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(5)),
"Expected perform data checker counter to be greater than 5, but got %d", counter.Int64())
diff --git a/integration-tests/smoke/log_poller_test.go b/integration-tests/smoke/log_poller_test.go
new file mode 100644
index 00000000000..03a287ee6b7
--- /dev/null
+++ b/integration-tests/smoke/log_poller_test.go
@@ -0,0 +1,261 @@
+package smoke
+
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/accounts/abi"
+
+ logpoller "github.com/smartcontractkit/chainlink/integration-tests/universal/log_poller"
+)
+
+// consistency test with no network disruptions with approximate emission of 1500-1600 logs per second for ~110-120 seconds
+// 6 filters are registered
+func TestLogPollerFewFiltersFixedDepth(t *testing.T) {
+ cfg := logpoller.Config{
+ General: &logpoller.General{
+ Generator: logpoller.GeneratorType_Looped,
+ Contracts: 2,
+ EventsPerTx: 4,
+ UseFinalityTag: false,
+ },
+ LoopedConfig: &logpoller.LoopedConfig{
+ ContractConfig: logpoller.ContractConfig{
+ ExecutionCount: 100,
+ },
+ FuzzConfig: logpoller.FuzzConfig{
+ MinEmitWaitTimeMs: 200,
+ MaxEmitWaitTimeMs: 500,
+ },
+ },
+ }
+
+ eventsToEmit := []abi.Event{}
+ for _, event := range logpoller.EmitterABI.Events {
+ eventsToEmit = append(eventsToEmit, event)
+ }
+
+ cfg.General.EventsToEmit = eventsToEmit
+
+ logpoller.ExecuteBasicLogPollerTest(t, &cfg)
+}
+
+func TestLogPollerFewFiltersFinalityTag(t *testing.T) {
+ cfg := logpoller.Config{
+ General: &logpoller.General{
+ Generator: logpoller.GeneratorType_Looped,
+ Contracts: 2,
+ EventsPerTx: 4,
+ UseFinalityTag: true,
+ },
+ LoopedConfig: &logpoller.LoopedConfig{
+ ContractConfig: logpoller.ContractConfig{
+ ExecutionCount: 100,
+ },
+ FuzzConfig: logpoller.FuzzConfig{
+ MinEmitWaitTimeMs: 200,
+ MaxEmitWaitTimeMs: 500,
+ },
+ },
+ }
+
+ eventsToEmit := []abi.Event{}
+ for _, event := range logpoller.EmitterABI.Events {
+ eventsToEmit = append(eventsToEmit, event)
+ }
+
+ cfg.General.EventsToEmit = eventsToEmit
+
+ logpoller.ExecuteBasicLogPollerTest(t, &cfg)
+}
+
+// consistency test with no network disruptions with approximate emission of 1000-1100 logs per second for ~110-120 seconds
+// 900 filters are registered
+func TestLogManyFiltersPollerFixedDepth(t *testing.T) {
+ cfg := logpoller.Config{
+ General: &logpoller.General{
+ Generator: logpoller.GeneratorType_Looped,
+ Contracts: 300,
+ EventsPerTx: 3,
+ UseFinalityTag: false,
+ },
+ LoopedConfig: &logpoller.LoopedConfig{
+ ContractConfig: logpoller.ContractConfig{
+ ExecutionCount: 30,
+ },
+ FuzzConfig: logpoller.FuzzConfig{
+ MinEmitWaitTimeMs: 200,
+ MaxEmitWaitTimeMs: 500,
+ },
+ },
+ }
+
+ eventsToEmit := []abi.Event{}
+ for _, event := range logpoller.EmitterABI.Events {
+ eventsToEmit = append(eventsToEmit, event)
+ }
+
+ cfg.General.EventsToEmit = eventsToEmit
+
+ logpoller.ExecuteBasicLogPollerTest(t, &cfg)
+}
+
+func TestLogManyFiltersPollerFinalityTag(t *testing.T) {
+ cfg := logpoller.Config{
+ General: &logpoller.General{
+ Generator: logpoller.GeneratorType_Looped,
+ Contracts: 300,
+ EventsPerTx: 3,
+ UseFinalityTag: true,
+ },
+ LoopedConfig: &logpoller.LoopedConfig{
+ ContractConfig: logpoller.ContractConfig{
+ ExecutionCount: 30,
+ },
+ FuzzConfig: logpoller.FuzzConfig{
+ MinEmitWaitTimeMs: 200,
+ MaxEmitWaitTimeMs: 500,
+ },
+ },
+ }
+
+ eventsToEmit := []abi.Event{}
+ for _, event := range logpoller.EmitterABI.Events {
+ eventsToEmit = append(eventsToEmit, event)
+ }
+
+ cfg.General.EventsToEmit = eventsToEmit
+
+ logpoller.ExecuteBasicLogPollerTest(t, &cfg)
+}
+
+// consistency test that introduces random disruptions by pausing either Chainlink or Postgres containers for a random interval of 5-20 seconds
+// with approximate emission of 520-550 logs per second for ~110 seconds
+// 6 filters are registered
+func TestLogPollerWithChaosFixedDepth(t *testing.T) {
+ cfg := logpoller.Config{
+ General: &logpoller.General{
+ Generator: logpoller.GeneratorType_Looped,
+ Contracts: 2,
+ EventsPerTx: 100,
+ UseFinalityTag: false,
+ },
+ LoopedConfig: &logpoller.LoopedConfig{
+ ContractConfig: logpoller.ContractConfig{
+ ExecutionCount: 100,
+ },
+ FuzzConfig: logpoller.FuzzConfig{
+ MinEmitWaitTimeMs: 200,
+ MaxEmitWaitTimeMs: 500,
+ },
+ },
+ ChaosConfig: &logpoller.ChaosConfig{
+ ExperimentCount: 10,
+ },
+ }
+
+ eventsToEmit := []abi.Event{}
+ for _, event := range logpoller.EmitterABI.Events {
+ eventsToEmit = append(eventsToEmit, event)
+ }
+
+ cfg.General.EventsToEmit = eventsToEmit
+
+ logpoller.ExecuteBasicLogPollerTest(t, &cfg)
+}
+
+func TestLogPollerWithChaosFinalityTag(t *testing.T) {
+ cfg := logpoller.Config{
+ General: &logpoller.General{
+ Generator: logpoller.GeneratorType_Looped,
+ Contracts: 2,
+ EventsPerTx: 100,
+ UseFinalityTag: true,
+ },
+ LoopedConfig: &logpoller.LoopedConfig{
+ ContractConfig: logpoller.ContractConfig{
+ ExecutionCount: 100,
+ },
+ FuzzConfig: logpoller.FuzzConfig{
+ MinEmitWaitTimeMs: 200,
+ MaxEmitWaitTimeMs: 500,
+ },
+ },
+ ChaosConfig: &logpoller.ChaosConfig{
+ ExperimentCount: 10,
+ },
+ }
+
+ eventsToEmit := []abi.Event{}
+ for _, event := range logpoller.EmitterABI.Events {
+ eventsToEmit = append(eventsToEmit, event)
+ }
+
+ cfg.General.EventsToEmit = eventsToEmit
+
+ logpoller.ExecuteBasicLogPollerTest(t, &cfg)
+}
+
+// consistency test that registers filters after events were emitted and then triggers replay via API
+// unfortunately there is no way to make sure that logs that are indexed are only picked up by replay
+// and not by backup poller
+// with approximate emission of 24 logs per second for ~110 seconds
+// 6 filters are registered
+func TestLogPollerReplayFixedDepth(t *testing.T) {
+ cfg := logpoller.Config{
+ General: &logpoller.General{
+ Generator: logpoller.GeneratorType_Looped,
+ Contracts: 2,
+ EventsPerTx: 4,
+ UseFinalityTag: false,
+ },
+ LoopedConfig: &logpoller.LoopedConfig{
+ ContractConfig: logpoller.ContractConfig{
+ ExecutionCount: 100,
+ },
+ FuzzConfig: logpoller.FuzzConfig{
+ MinEmitWaitTimeMs: 200,
+ MaxEmitWaitTimeMs: 500,
+ },
+ },
+ }
+
+ eventsToEmit := []abi.Event{}
+ for _, event := range logpoller.EmitterABI.Events {
+ eventsToEmit = append(eventsToEmit, event)
+ }
+
+ cfg.General.EventsToEmit = eventsToEmit
+ consistencyTimeout := "5m"
+
+ logpoller.ExecuteLogPollerReplay(t, &cfg, consistencyTimeout)
+}
+
+func TestLogPollerReplayFinalityTag(t *testing.T) {
+ cfg := logpoller.Config{
+ General: &logpoller.General{
+ Generator: logpoller.GeneratorType_Looped,
+ Contracts: 2,
+ EventsPerTx: 4,
+ UseFinalityTag: true,
+ },
+ LoopedConfig: &logpoller.LoopedConfig{
+ ContractConfig: logpoller.ContractConfig{
+ ExecutionCount: 100,
+ },
+ FuzzConfig: logpoller.FuzzConfig{
+ MinEmitWaitTimeMs: 200,
+ MaxEmitWaitTimeMs: 500,
+ },
+ },
+ }
+
+ eventsToEmit := []abi.Event{}
+ for _, event := range logpoller.EmitterABI.Events {
+ eventsToEmit = append(eventsToEmit, event)
+ }
+
+ cfg.General.EventsToEmit = eventsToEmit
+ consistencyTimeout := "5m"
+
+ logpoller.ExecuteLogPollerReplay(t, &cfg, consistencyTimeout)
+}
diff --git a/integration-tests/smoke/ocr2_test.go b/integration-tests/smoke/ocr2_test.go
index 1b33cdce769..5950e9febb6 100644
--- a/integration-tests/smoke/ocr2_test.go
+++ b/integration-tests/smoke/ocr2_test.go
@@ -1,31 +1,21 @@
package smoke
import (
- "context"
"fmt"
"math/big"
"net/http"
- "strings"
"testing"
"time"
"github.com/stretchr/testify/require"
- "github.com/smartcontractkit/chainlink-testing-framework/blockchain"
- "github.com/smartcontractkit/chainlink-testing-framework/k8s/environment"
- "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/chainlink"
- "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/ethereum"
- "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver"
- mockservercfg "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/mockserver-cfg"
"github.com/smartcontractkit/chainlink-testing-framework/logging"
- "github.com/smartcontractkit/chainlink-testing-framework/networks"
"github.com/smartcontractkit/chainlink/integration-tests/actions"
- "github.com/smartcontractkit/chainlink/integration-tests/client"
- "github.com/smartcontractkit/chainlink/integration-tests/config"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
"github.com/smartcontractkit/chainlink/integration-tests/docker/test_env"
"github.com/smartcontractkit/chainlink/integration-tests/types/config/node"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
// Tests a basic OCRv2 median feed
@@ -83,7 +73,7 @@ func TestOCRv2Basic(t *testing.T) {
err = actions.StartNewOCR2Round(1, aggregatorContracts, env.EVMClient, time.Minute*5, l)
require.NoError(t, err, "Error starting new OCR2 round")
- roundData, err := aggregatorContracts[0].GetRound(context.Background(), big.NewInt(1))
+ roundData, err := aggregatorContracts[0].GetRound(utils.TestContext(t), big.NewInt(1))
require.NoError(t, err, "Getting latest answer from OCR contract shouldn't fail")
require.Equal(t, int64(5), roundData.Answer.Int64(),
"Expected latest answer from OCR contract to be 5 but got %d",
@@ -95,49 +85,10 @@ func TestOCRv2Basic(t *testing.T) {
err = actions.StartNewOCR2Round(2, aggregatorContracts, env.EVMClient, time.Minute*5, l)
require.NoError(t, err)
- roundData, err = aggregatorContracts[0].GetRound(context.Background(), big.NewInt(2))
+ roundData, err = aggregatorContracts[0].GetRound(utils.TestContext(t), big.NewInt(2))
require.NoError(t, err, "Error getting latest OCR answer")
require.Equal(t, int64(10), roundData.Answer.Int64(),
"Expected latest answer from OCR contract to be 10 but got %d",
roundData.Answer.Int64(),
)
}
-
-func setupOCR2Test(t *testing.T, forwardersEnabled bool) (
- testEnvironment *environment.Environment,
- testNetwork blockchain.EVMNetwork,
-) {
- testNetwork = networks.MustGetSelectedNetworksFromEnv()[0]
- evmConfig := ethereum.New(nil)
- if !testNetwork.Simulated {
- evmConfig = ethereum.New(ðereum.Props{
- NetworkName: testNetwork.Name,
- Simulated: testNetwork.Simulated,
- WsURLs: testNetwork.URLs,
- })
- }
-
- var toml string
- if forwardersEnabled {
- toml = client.AddNetworkDetailedConfig(config.BaseOCR2Config, config.ForwarderNetworkDetailConfig, testNetwork)
- } else {
- toml = client.AddNetworksConfig(config.BaseOCR2Config, testNetwork)
- }
-
- chainlinkChart := chainlink.New(0, map[string]interface{}{
- "replicas": 6,
- "toml": toml,
- })
-
- testEnvironment = environment.New(&environment.Config{
- NamespacePrefix: fmt.Sprintf("smoke-ocr2-%s", strings.ReplaceAll(strings.ToLower(testNetwork.Name), " ", "-")),
- Test: t,
- }).
- AddHelm(mockservercfg.New(nil)).
- AddHelm(mockserver.New(nil)).
- AddHelm(evmConfig).
- AddHelm(chainlinkChart)
- err := testEnvironment.Run()
- require.NoError(t, err, "Error running test environment")
- return testEnvironment, testNetwork
-}
diff --git a/integration-tests/smoke/ocr2vrf_test.go b/integration-tests/smoke/ocr2vrf_test.go
index 0d6a77a1157..57bd5412b14 100644
--- a/integration-tests/smoke/ocr2vrf_test.go
+++ b/integration-tests/smoke/ocr2vrf_test.go
@@ -15,7 +15,6 @@ import (
eth "github.com/smartcontractkit/chainlink-testing-framework/k8s/pkg/helm/ethereum"
"github.com/smartcontractkit/chainlink-testing-framework/logging"
"github.com/smartcontractkit/chainlink-testing-framework/networks"
- "github.com/smartcontractkit/chainlink-testing-framework/utils"
"github.com/smartcontractkit/chainlink/integration-tests/actions"
"github.com/smartcontractkit/chainlink/integration-tests/actions/ocr2vrf_actions"
@@ -23,6 +22,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/config"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
+ it_utils "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
func TestOCR2VRFRedeemModel(t *testing.T) {
@@ -44,7 +44,7 @@ func TestOCR2VRFRedeemModel(t *testing.T) {
require.NoError(t, err, "Retreiving on-chain wallet addresses for chainlink nodes shouldn't fail")
t.Cleanup(func() {
- err := actions.TeardownSuite(t, testEnvironment, utils.ProjectRoot, chainlinkNodes, nil, zapcore.ErrorLevel, chainClient)
+ err := actions.TeardownSuite(t, testEnvironment, chainlinkNodes, nil, zapcore.ErrorLevel, chainClient)
require.NoError(t, err, "Error tearing down environment")
})
@@ -80,7 +80,7 @@ func TestOCR2VRFRedeemModel(t *testing.T) {
)
for i := uint16(0); i < ocr2vrf_constants.NumberOfRandomWordsToRequest; i++ {
- randomness, err := consumerContract.GetRandomnessByRequestId(nil, requestID, big.NewInt(int64(i)))
+ randomness, err := consumerContract.GetRandomnessByRequestId(it_utils.TestContext(t), requestID, big.NewInt(int64(i)))
require.NoError(t, err)
l.Info().Interface("Random Number", randomness).Interface("Randomness Number Index", i).Msg("Randomness retrieved from Consumer contract")
require.NotEqual(t, 0, randomness.Uint64(), "Randomness retrieved from Consumer contract give an answer other than 0")
@@ -106,7 +106,7 @@ func TestOCR2VRFFulfillmentModel(t *testing.T) {
require.NoError(t, err, "Retreiving on-chain wallet addresses for chainlink nodes shouldn't fail")
t.Cleanup(func() {
- err := actions.TeardownSuite(t, testEnvironment, utils.ProjectRoot, chainlinkNodes, nil, zapcore.ErrorLevel, chainClient)
+ err := actions.TeardownSuite(t, testEnvironment, chainlinkNodes, nil, zapcore.ErrorLevel, chainClient)
require.NoError(t, err, "Error tearing down environment")
})
@@ -141,7 +141,7 @@ func TestOCR2VRFFulfillmentModel(t *testing.T) {
)
for i := uint16(0); i < ocr2vrf_constants.NumberOfRandomWordsToRequest; i++ {
- randomness, err := consumerContract.GetRandomnessByRequestId(nil, requestID, big.NewInt(int64(i)))
+ randomness, err := consumerContract.GetRandomnessByRequestId(it_utils.TestContext(t), requestID, big.NewInt(int64(i)))
require.NoError(t, err, "Error getting Randomness result from Consumer Contract")
l.Info().Interface("Random Number", randomness).Interface("Randomness Number Index", i).Msg("Randomness Fulfillment retrieved from Consumer contract")
require.NotEqual(t, 0, randomness.Uint64(), "Randomness Fulfillment retrieved from Consumer contract give an answer other than 0")
diff --git a/integration-tests/smoke/ocr_test.go b/integration-tests/smoke/ocr_test.go
index 8d71c5d08f8..45205565e21 100644
--- a/integration-tests/smoke/ocr_test.go
+++ b/integration-tests/smoke/ocr_test.go
@@ -1,7 +1,6 @@
package smoke
import (
- "context"
"math/big"
"testing"
@@ -11,6 +10,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/actions"
"github.com/smartcontractkit/chainlink/integration-tests/docker/test_env"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
func TestOCRBasic(t *testing.T) {
@@ -22,7 +22,7 @@ func TestOCRBasic(t *testing.T) {
WithGeth().
WithMockAdapter().
WithCLNodes(6).
- WithFunding(big.NewFloat(.1)).
+ WithFunding(big.NewFloat(.01)).
WithStandardCleanup().
Build()
require.NoError(t, err)
@@ -46,7 +46,7 @@ func TestOCRBasic(t *testing.T) {
err = actions.StartNewRound(1, ocrInstances, env.EVMClient, l)
require.NoError(t, err)
- answer, err := ocrInstances[0].GetLatestAnswer(context.Background())
+ answer, err := ocrInstances[0].GetLatestAnswer(utils.TestContext(t))
require.NoError(t, err, "Getting latest answer from OCR contract shouldn't fail")
require.Equal(t, int64(5), answer.Int64(), "Expected latest answer from OCR contract to be 5 but got %d", answer.Int64())
@@ -55,7 +55,7 @@ func TestOCRBasic(t *testing.T) {
err = actions.StartNewRound(2, ocrInstances, env.EVMClient, l)
require.NoError(t, err)
- answer, err = ocrInstances[0].GetLatestAnswer(context.Background())
+ answer, err = ocrInstances[0].GetLatestAnswer(utils.TestContext(t))
require.NoError(t, err, "Error getting latest OCR answer")
require.Equal(t, int64(10), answer.Int64(), "Expected latest answer from OCR contract to be 10 but got %d", answer.Int64())
}
diff --git a/integration-tests/smoke/runlog_test.go b/integration-tests/smoke/runlog_test.go
index f29cb4bc893..20389da378f 100644
--- a/integration-tests/smoke/runlog_test.go
+++ b/integration-tests/smoke/runlog_test.go
@@ -1,7 +1,6 @@
package smoke
import (
- "context"
"fmt"
"math/big"
"net/http"
@@ -16,6 +15,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/docker/test_env"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
func TestRunLogBasic(t *testing.T) {
@@ -87,7 +87,7 @@ func TestRunLogBasic(t *testing.T) {
gom := gomega.NewGomegaWithT(t)
gom.Eventually(func(g gomega.Gomega) {
- d, err := consumer.Data(context.Background())
+ d, err := consumer.Data(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Getting data from consumer contract shouldn't fail")
g.Expect(d).ShouldNot(gomega.BeNil(), "Expected the initial on chain data to be nil")
l.Debug().Int64("Data", d.Int64()).Msg("Found on chain")
diff --git a/integration-tests/smoke/vrf_test.go b/integration-tests/smoke/vrf_test.go
index 444d1ce20ee..61d2c5cdd70 100644
--- a/integration-tests/smoke/vrf_test.go
+++ b/integration-tests/smoke/vrf_test.go
@@ -1,7 +1,6 @@
package smoke
import (
- "context"
"fmt"
"math/big"
"testing"
@@ -17,6 +16,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv1"
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/docker/test_env"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
func TestVRFBasic(t *testing.T) {
@@ -81,7 +81,7 @@ func TestVRFBasic(t *testing.T) {
encodedProvingKeys := make([][2]*big.Int, 0)
encodedProvingKeys = append(encodedProvingKeys, provingKey)
- requestHash, err := contracts.Coordinator.HashOfKey(context.Background(), encodedProvingKeys[0])
+ requestHash, err := contracts.Coordinator.HashOfKey(utils.TestContext(t), encodedProvingKeys[0])
require.NoError(t, err, "Getting Hash of encoded proving keys shouldn't fail")
err = contracts.Consumer.RequestRandomness(requestHash, big.NewInt(1))
require.NoError(t, err, "Requesting randomness shouldn't fail")
@@ -92,7 +92,7 @@ func TestVRFBasic(t *testing.T) {
jobRuns, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(job.Data.ID)
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Job execution shouldn't fail")
- out, err := contracts.Consumer.RandomnessOutput(context.Background())
+ out, err := contracts.Consumer.RandomnessOutput(utils.TestContext(t))
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Getting the randomness output of the consumer shouldn't fail")
// Checks that the job has actually run
g.Expect(len(jobRuns.Data)).Should(gomega.BeNumerically(">=", 1),
diff --git a/integration-tests/smoke/vrfv2_test.go b/integration-tests/smoke/vrfv2_test.go
index c960bb6c691..714ed752a36 100644
--- a/integration-tests/smoke/vrfv2_test.go
+++ b/integration-tests/smoke/vrfv2_test.go
@@ -1,7 +1,6 @@
package smoke
import (
- "context"
"math/big"
"testing"
"time"
@@ -16,6 +15,7 @@ import (
vrfConst "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2_actions/vrfv2_constants"
"github.com/smartcontractkit/chainlink/integration-tests/docker/test_env"
"github.com/smartcontractkit/chainlink/integration-tests/types/config/node"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
func TestVRFv2Basic(t *testing.T) {
@@ -97,11 +97,11 @@ func TestVRFv2Basic(t *testing.T) {
jobRuns, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(vrfV2jobs[0].Job.Data.ID)
g.Expect(err).ShouldNot(gomega.HaveOccurred())
g.Expect(len(jobRuns.Data)).Should(gomega.BeNumerically("==", 1))
- lastRequestID, err = vrfv2Contracts.LoadTestConsumer.GetLastRequestId(context.Background())
+ lastRequestID, err = vrfv2Contracts.LoadTestConsumer.GetLastRequestId(utils.TestContext(t))
l.Debug().Interface("Last Request ID", lastRequestID).Msg("Last Request ID Received")
g.Expect(err).ShouldNot(gomega.HaveOccurred())
- status, err := vrfv2Contracts.LoadTestConsumer.GetRequestStatus(context.Background(), lastRequestID)
+ status, err := vrfv2Contracts.LoadTestConsumer.GetRequestStatus(utils.TestContext(t), lastRequestID)
g.Expect(err).ShouldNot(gomega.HaveOccurred())
g.Expect(status.Fulfilled).Should(gomega.BeTrue())
l.Debug().Interface("Fulfilment Status", status.Fulfilled).Msg("Random Words Request Fulfilment Status")
diff --git a/integration-tests/smoke/vrfv2plus_test.go b/integration-tests/smoke/vrfv2plus_test.go
index 408e5a95ed3..cfeca0a66a3 100644
--- a/integration-tests/smoke/vrfv2plus_test.go
+++ b/integration-tests/smoke/vrfv2plus_test.go
@@ -1,19 +1,19 @@
package smoke
import (
- "context"
+ "fmt"
"math/big"
"testing"
"time"
- "github.com/kelseyhightower/envconfig"
- "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrf_v2plus_upgraded_version"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
"github.com/ethereum/go-ethereum/common"
- "github.com/pkg/errors"
+ "github.com/kelseyhightower/envconfig"
"github.com/stretchr/testify/require"
"github.com/smartcontractkit/chainlink-testing-framework/logging"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrf_v2plus_upgraded_version"
"github.com/smartcontractkit/chainlink/integration-tests/actions"
"github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2plus"
@@ -46,17 +46,21 @@ func TestVRFv2Plus(t *testing.T) {
linkToken, err := actions.DeployLINKToken(env.ContractDeployer)
require.NoError(t, err, "error deploying LINK contract")
- vrfv2PlusContracts, subIDs, vrfv2PlusData, err := vrfv2plus.SetupVRFV2_5Environment(env, &vrfv2PlusConfig, linkToken, mockETHLinkFeed, 1, 1, l)
+ // register proving key against oracle address (sending key) in order to test oracleWithdraw
+ defaultWalletAddress := env.EVMClient.GetDefaultWallet().Address()
+
+ vrfv2PlusContracts, subIDs, vrfv2PlusData, err := vrfv2plus.SetupVRFV2_5Environment(env, vrfv2PlusConfig, linkToken, mockETHLinkFeed, defaultWalletAddress, 1, 1, l)
require.NoError(t, err, "error setting up VRF v2_5 env")
subID := subIDs[0]
- subscription, err := vrfv2PlusContracts.Coordinator.GetSubscription(context.Background(), subID)
+ subscription, err := vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subID)
require.NoError(t, err, "error getting subscription information")
vrfv2plus.LogSubDetails(l, subscription, subID, vrfv2PlusContracts.Coordinator)
- t.Run("VRFV2 Plus With Link Billing", func(t *testing.T) {
+ t.Run("Link Billing", func(t *testing.T) {
+ testConfig := vrfv2PlusConfig
var isNativeBilling = false
subBalanceBeforeRequest := subscription.Balance
@@ -70,14 +74,15 @@ func TestVRFv2Plus(t *testing.T) {
vrfv2PlusData,
subID,
isNativeBilling,
- vrfv2PlusConfig.RandomnessRequestCountPerRequest,
- &vrfv2PlusConfig,
+ testConfig.RandomnessRequestCountPerRequest,
+ testConfig,
+ testConfig.RandomWordsFulfilledEventTimeout,
l,
)
require.NoError(t, err, "error requesting randomness and waiting for fulfilment")
expectedSubBalanceJuels := new(big.Int).Sub(subBalanceBeforeRequest, randomWordsFulfilledEvent.Payment)
- subscription, err = vrfv2PlusContracts.Coordinator.GetSubscription(context.Background(), subID)
+ subscription, err = vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subID)
require.NoError(t, err, "error getting subscription information")
subBalanceAfterRequest := subscription.Balance
require.Equal(t, expectedSubBalanceJuels, subBalanceAfterRequest)
@@ -86,19 +91,19 @@ func TestVRFv2Plus(t *testing.T) {
require.NoError(t, err, "error reading job runs")
require.Equal(t, len(jobRunsBeforeTest.Data)+1, len(jobRuns.Data))
- status, err := vrfv2PlusContracts.LoadTestConsumers[0].GetRequestStatus(context.Background(), randomWordsFulfilledEvent.RequestId)
+ status, err := vrfv2PlusContracts.LoadTestConsumers[0].GetRequestStatus(utils.TestContext(t), randomWordsFulfilledEvent.RequestId)
require.NoError(t, err, "error getting rand request status")
require.True(t, status.Fulfilled)
l.Debug().Bool("Fulfilment Status", status.Fulfilled).Msg("Random Words Request Fulfilment Status")
- require.Equal(t, vrfv2PlusConfig.NumberOfWords, uint32(len(status.RandomWords)))
+ require.Equal(t, testConfig.NumberOfWords, uint32(len(status.RandomWords)))
for _, w := range status.RandomWords {
l.Info().Str("Output", w.String()).Msg("Randomness fulfilled")
require.Equal(t, 1, w.Cmp(big.NewInt(0)), "Expected the VRF job give an answer bigger than 0")
}
})
-
- t.Run("VRFV2 Plus With Native Billing", func(t *testing.T) {
+ t.Run("Native Billing", func(t *testing.T) {
+ testConfig := vrfv2PlusConfig
var isNativeBilling = true
subNativeTokenBalanceBeforeRequest := subscription.NativeBalance
@@ -112,13 +117,14 @@ func TestVRFv2Plus(t *testing.T) {
vrfv2PlusData,
subID,
isNativeBilling,
- vrfv2PlusConfig.RandomnessRequestCountPerRequest,
- &vrfv2PlusConfig,
+ testConfig.RandomnessRequestCountPerRequest,
+ testConfig,
+ testConfig.RandomWordsFulfilledEventTimeout,
l,
)
require.NoError(t, err, "error requesting randomness and waiting for fulfilment")
expectedSubBalanceWei := new(big.Int).Sub(subNativeTokenBalanceBeforeRequest, randomWordsFulfilledEvent.Payment)
- subscription, err = vrfv2PlusContracts.Coordinator.GetSubscription(context.Background(), subID)
+ subscription, err = vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subID)
require.NoError(t, err)
subBalanceAfterRequest := subscription.NativeBalance
require.Equal(t, expectedSubBalanceWei, subBalanceAfterRequest)
@@ -127,125 +133,457 @@ func TestVRFv2Plus(t *testing.T) {
require.NoError(t, err, "error reading job runs")
require.Equal(t, len(jobRunsBeforeTest.Data)+1, len(jobRuns.Data))
- status, err := vrfv2PlusContracts.LoadTestConsumers[0].GetRequestStatus(context.Background(), randomWordsFulfilledEvent.RequestId)
+ status, err := vrfv2PlusContracts.LoadTestConsumers[0].GetRequestStatus(utils.TestContext(t), randomWordsFulfilledEvent.RequestId)
require.NoError(t, err, "error getting rand request status")
require.True(t, status.Fulfilled)
l.Debug().Bool("Fulfilment Status", status.Fulfilled).Msg("Random Words Request Fulfilment Status")
- require.Equal(t, vrfv2PlusConfig.NumberOfWords, uint32(len(status.RandomWords)))
+ require.Equal(t, testConfig.NumberOfWords, uint32(len(status.RandomWords)))
for _, w := range status.RandomWords {
l.Info().Str("Output", w.String()).Msg("Randomness fulfilled")
require.Equal(t, 1, w.Cmp(big.NewInt(0)), "Expected the VRF job give an answer bigger than 0")
}
})
+ t.Run("Direct Funding (VRFV2PlusWrapper)", func(t *testing.T) {
+ testConfig := vrfv2PlusConfig
+ wrapperContracts, wrapperSubID, err := vrfv2plus.SetupVRFV2PlusWrapperEnvironment(
+ env,
+ testConfig,
+ linkToken,
+ mockETHLinkFeed,
+ vrfv2PlusContracts.Coordinator,
+ vrfv2PlusData.KeyHash,
+ 1,
+ )
+ require.NoError(t, err)
- wrapperContracts, wrapperSubID, err := vrfv2plus.SetupVRFV2PlusWrapperEnvironment(
- env,
- &vrfv2PlusConfig,
- linkToken,
- mockETHLinkFeed,
- vrfv2PlusContracts.Coordinator,
- vrfv2PlusData.KeyHash,
- 1,
- )
- require.NoError(t, err)
+ t.Run("Link Billing", func(t *testing.T) {
+ testConfig := vrfv2PlusConfig
+ var isNativeBilling = false
+
+ wrapperConsumerJuelsBalanceBeforeRequest, err := linkToken.BalanceOf(utils.TestContext(t), wrapperContracts.LoadTestConsumers[0].Address())
+ require.NoError(t, err, "error getting wrapper consumer balance")
+
+ wrapperSubscription, err := vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), wrapperSubID)
+ require.NoError(t, err, "error getting subscription information")
+ subBalanceBeforeRequest := wrapperSubscription.Balance
+
+ randomWordsFulfilledEvent, err := vrfv2plus.DirectFundingRequestRandomnessAndWaitForFulfillment(
+ wrapperContracts.LoadTestConsumers[0],
+ vrfv2PlusContracts.Coordinator,
+ vrfv2PlusData,
+ wrapperSubID,
+ isNativeBilling,
+ testConfig,
+ testConfig.RandomWordsFulfilledEventTimeout,
+ l,
+ )
+ require.NoError(t, err, "error requesting randomness and waiting for fulfilment")
+
+ expectedSubBalanceJuels := new(big.Int).Sub(subBalanceBeforeRequest, randomWordsFulfilledEvent.Payment)
+ wrapperSubscription, err = vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), wrapperSubID)
+ require.NoError(t, err, "error getting subscription information")
+ subBalanceAfterRequest := wrapperSubscription.Balance
+ require.Equal(t, expectedSubBalanceJuels, subBalanceAfterRequest)
+
+ consumerStatus, err := wrapperContracts.LoadTestConsumers[0].GetRequestStatus(utils.TestContext(t), randomWordsFulfilledEvent.RequestId)
+ require.NoError(t, err, "error getting rand request status")
+ require.True(t, consumerStatus.Fulfilled)
+
+ expectedWrapperConsumerJuelsBalance := new(big.Int).Sub(wrapperConsumerJuelsBalanceBeforeRequest, consumerStatus.Paid)
+
+ wrapperConsumerJuelsBalanceAfterRequest, err := linkToken.BalanceOf(utils.TestContext(t), wrapperContracts.LoadTestConsumers[0].Address())
+ require.NoError(t, err, "error getting wrapper consumer balance")
+ require.Equal(t, expectedWrapperConsumerJuelsBalance, wrapperConsumerJuelsBalanceAfterRequest)
+
+ //todo: uncomment when VRF-651 will be fixed
+ //require.Equal(t, 1, consumerStatus.Paid.Cmp(randomWordsFulfilledEvent.Payment), "Expected Consumer contract pay more than the Coordinator Sub")
+ vrfv2plus.LogFulfillmentDetailsLinkBilling(l, wrapperConsumerJuelsBalanceBeforeRequest, wrapperConsumerJuelsBalanceAfterRequest, consumerStatus, randomWordsFulfilledEvent)
+
+ require.Equal(t, testConfig.NumberOfWords, uint32(len(consumerStatus.RandomWords)))
+ for _, w := range consumerStatus.RandomWords {
+ l.Info().Str("Output", w.String()).Msg("Randomness fulfilled")
+ require.Equal(t, 1, w.Cmp(big.NewInt(0)), "Expected the VRF job give an answer bigger than 0")
+ }
+ })
+ t.Run("Native Billing", func(t *testing.T) {
+ testConfig := vrfv2PlusConfig
+ var isNativeBilling = true
+
+ wrapperConsumerBalanceBeforeRequestWei, err := env.EVMClient.BalanceAt(utils.TestContext(t), common.HexToAddress(wrapperContracts.LoadTestConsumers[0].Address()))
+ require.NoError(t, err, "error getting wrapper consumer balance")
+
+ wrapperSubscription, err := vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), wrapperSubID)
+ require.NoError(t, err, "error getting subscription information")
+ subBalanceBeforeRequest := wrapperSubscription.NativeBalance
+
+ randomWordsFulfilledEvent, err := vrfv2plus.DirectFundingRequestRandomnessAndWaitForFulfillment(
+ wrapperContracts.LoadTestConsumers[0],
+ vrfv2PlusContracts.Coordinator,
+ vrfv2PlusData,
+ wrapperSubID,
+ isNativeBilling,
+ testConfig,
+ testConfig.RandomWordsFulfilledEventTimeout,
+ l,
+ )
+ require.NoError(t, err, "error requesting randomness and waiting for fulfilment")
+
+ expectedSubBalanceWei := new(big.Int).Sub(subBalanceBeforeRequest, randomWordsFulfilledEvent.Payment)
+ wrapperSubscription, err = vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), wrapperSubID)
+ require.NoError(t, err, "error getting subscription information")
+ subBalanceAfterRequest := wrapperSubscription.NativeBalance
+ require.Equal(t, expectedSubBalanceWei, subBalanceAfterRequest)
+
+ consumerStatus, err := wrapperContracts.LoadTestConsumers[0].GetRequestStatus(utils.TestContext(t), randomWordsFulfilledEvent.RequestId)
+ require.NoError(t, err, "error getting rand request status")
+ require.True(t, consumerStatus.Fulfilled)
+
+ expectedWrapperConsumerWeiBalance := new(big.Int).Sub(wrapperConsumerBalanceBeforeRequestWei, consumerStatus.Paid)
+
+ wrapperConsumerBalanceAfterRequestWei, err := env.EVMClient.BalanceAt(utils.TestContext(t), common.HexToAddress(wrapperContracts.LoadTestConsumers[0].Address()))
+ require.NoError(t, err, "error getting wrapper consumer balance")
+ require.Equal(t, expectedWrapperConsumerWeiBalance, wrapperConsumerBalanceAfterRequestWei)
+
+ //todo: uncomment when VRF-651 will be fixed
+ //require.Equal(t, 1, consumerStatus.Paid.Cmp(randomWordsFulfilledEvent.Payment), "Expected Consumer contract pay more than the Coordinator Sub")
+ vrfv2plus.LogFulfillmentDetailsNativeBilling(l, wrapperConsumerBalanceBeforeRequestWei, wrapperConsumerBalanceAfterRequestWei, consumerStatus, randomWordsFulfilledEvent)
+
+ require.Equal(t, testConfig.NumberOfWords, uint32(len(consumerStatus.RandomWords)))
+ for _, w := range consumerStatus.RandomWords {
+ l.Info().Str("Output", w.String()).Msg("Randomness fulfilled")
+ require.Equal(t, 1, w.Cmp(big.NewInt(0)), "Expected the VRF job give an answer bigger than 0")
+ }
+ })
+ })
+ t.Run("Canceling Sub And Returning Funds", func(t *testing.T) {
+ testConfig := vrfv2PlusConfig
+ subIDsForCancelling, err := vrfv2plus.CreateFundSubsAndAddConsumers(
+ env,
+ testConfig,
+ linkToken,
+ vrfv2PlusContracts.Coordinator,
+ vrfv2PlusContracts.LoadTestConsumers,
+ 1,
+ )
+ require.NoError(t, err)
+ subIDForCancelling := subIDsForCancelling[0]
- t.Run("VRFV2 Plus With Direct Funding (VRFV2PlusWrapper) - Link Billing", func(t *testing.T) {
- var isNativeBilling = false
+ testWalletAddress, err := actions.GenerateWallet()
+ require.NoError(t, err)
+
+ testWalletBalanceNativeBeforeSubCancelling, err := env.EVMClient.BalanceAt(utils.TestContext(t), testWalletAddress)
+ require.NoError(t, err)
- wrapperConsumerJuelsBalanceBeforeRequest, err := linkToken.BalanceOf(context.Background(), wrapperContracts.LoadTestConsumers[0].Address())
- require.NoError(t, err, "error getting wrapper consumer balance")
+ testWalletBalanceLinkBeforeSubCancelling, err := linkToken.BalanceOf(utils.TestContext(t), testWalletAddress.String())
+ require.NoError(t, err)
- wrapperSubscription, err := vrfv2PlusContracts.Coordinator.GetSubscription(context.Background(), wrapperSubID)
+ subscriptionForCancelling, err := vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subIDForCancelling)
require.NoError(t, err, "error getting subscription information")
- subBalanceBeforeRequest := wrapperSubscription.Balance
- randomWordsFulfilledEvent, err := vrfv2plus.DirectFundingRequestRandomnessAndWaitForFulfillment(
- wrapperContracts.LoadTestConsumers[0],
+ subBalanceLink := subscriptionForCancelling.Balance
+ subBalanceNative := subscriptionForCancelling.NativeBalance
+ l.Info().
+ Str("Subscription Amount Native", subBalanceNative.String()).
+ Str("Subscription Amount Link", subBalanceLink.String()).
+ Str("Returning funds from SubID", subIDForCancelling.String()).
+ Str("Returning funds to", testWalletAddress.String()).
+ Msg("Canceling subscription and returning funds to subscription owner")
+ tx, err := vrfv2PlusContracts.Coordinator.CancelSubscription(subIDForCancelling, testWalletAddress)
+ require.NoError(t, err, "Error canceling subscription")
+
+ subscriptionCanceledEvent, err := vrfv2PlusContracts.Coordinator.WaitForSubscriptionCanceledEvent(subIDForCancelling, time.Second*30)
+ require.NoError(t, err, "error waiting for subscription canceled event")
+
+ cancellationTxReceipt, err := env.EVMClient.GetTxReceipt(tx.Hash())
+ require.NoError(t, err, "error getting tx cancellation Tx Receipt")
+
+ txGasUsed := new(big.Int).SetUint64(cancellationTxReceipt.GasUsed)
+ cancellationTxFeeWei := new(big.Int).Mul(txGasUsed, cancellationTxReceipt.EffectiveGasPrice)
+
+ l.Info().
+ Str("Cancellation Tx Fee Wei", cancellationTxFeeWei.String()).
+ Str("Effective Gas Price", cancellationTxReceipt.EffectiveGasPrice.String()).
+ Uint64("Gas Used", cancellationTxReceipt.GasUsed).
+ Msg("Cancellation TX Receipt")
+
+ l.Info().
+ Str("Returned Subscription Amount Native", subscriptionCanceledEvent.AmountNative.String()).
+ Str("Returned Subscription Amount Link", subscriptionCanceledEvent.AmountLink.String()).
+ Str("SubID", subscriptionCanceledEvent.SubId.String()).
+ Str("Returned to", subscriptionCanceledEvent.To.String()).
+ Msg("Subscription Canceled Event")
+
+ require.Equal(t, subBalanceNative, subscriptionCanceledEvent.AmountNative, "SubscriptionCanceled event native amount is not equal to sub amount while canceling subscription")
+ require.Equal(t, subBalanceLink, subscriptionCanceledEvent.AmountLink, "SubscriptionCanceled event LINK amount is not equal to sub amount while canceling subscription")
+
+ testWalletBalanceNativeAfterSubCancelling, err := env.EVMClient.BalanceAt(utils.TestContext(t), testWalletAddress)
+ require.NoError(t, err)
+
+ testWalletBalanceLinkAfterSubCancelling, err := linkToken.BalanceOf(utils.TestContext(t), testWalletAddress.String())
+ require.NoError(t, err)
+
+ //Verify that sub was deleted from Coordinator
+ _, err = vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subIDForCancelling)
+ require.Error(t, err, "error not occurred when trying to get deleted subscription from old Coordinator after sub migration")
+
+ subFundsReturnedNativeActual := new(big.Int).Sub(testWalletBalanceNativeAfterSubCancelling, testWalletBalanceNativeBeforeSubCancelling)
+ subFundsReturnedLinkActual := new(big.Int).Sub(testWalletBalanceLinkAfterSubCancelling, testWalletBalanceLinkBeforeSubCancelling)
+
+ subFundsReturnedNativeExpected := new(big.Int).Sub(subBalanceNative, cancellationTxFeeWei)
+ deltaSpentOnCancellationTxFee := new(big.Int).Sub(subBalanceNative, subFundsReturnedNativeActual)
+ l.Info().
+ Str("Sub Balance - Native", subBalanceNative.String()).
+ Str("Delta Spent On Cancellation Tx Fee - `NativeBalance - subFundsReturnedNativeActual`", deltaSpentOnCancellationTxFee.String()).
+ Str("Cancellation Tx Fee Wei", cancellationTxFeeWei.String()).
+ Str("Sub Funds Returned Actual - Native", subFundsReturnedNativeActual.String()).
+ Str("Sub Funds Returned Expected - `NativeBalance - cancellationTxFeeWei`", subFundsReturnedNativeExpected.String()).
+ Str("Sub Funds Returned Actual - Link", subFundsReturnedLinkActual.String()).
+ Str("Sub Balance - Link", subBalanceLink.String()).
+ Msg("Sub funds returned")
+
+ //todo - this fails on SIMULATED env as tx cost is calculated different as for testnets and it's not receipt.EffectiveGasPrice*receipt.GasUsed
+ //require.Equal(t, subFundsReturnedNativeExpected, subFundsReturnedNativeActual, "Returned funds are not equal to sub balance that was cancelled")
+ require.Equal(t, 1, testWalletBalanceNativeAfterSubCancelling.Cmp(testWalletBalanceNativeBeforeSubCancelling), "Native funds were not returned after sub cancellation")
+ require.Equal(t, 0, subBalanceLink.Cmp(subFundsReturnedLinkActual), "Returned LINK funds are not equal to sub balance that was cancelled")
+
+ })
+ t.Run("Owner Canceling Sub And Returning Funds While Having Pending Requests", func(t *testing.T) {
+ testConfig := vrfv2PlusConfig
+ //underfund subs in order rand fulfillments to fail
+ testConfig.SubscriptionFundingAmountNative = float64(0.000000000000000001) //1 Wei
+ testConfig.SubscriptionFundingAmountLink = float64(0.000000000000000001) //1 Juels
+
+ subIDsForCancelling, err := vrfv2plus.CreateFundSubsAndAddConsumers(
+ env,
+ testConfig,
+ linkToken,
vrfv2PlusContracts.Coordinator,
- vrfv2PlusData,
- wrapperSubID,
- isNativeBilling,
- &vrfv2PlusConfig,
- l,
+ vrfv2PlusContracts.LoadTestConsumers,
+ 1,
)
- require.NoError(t, err, "error requesting randomness and waiting for fulfilment")
+ require.NoError(t, err)
- expectedSubBalanceJuels := new(big.Int).Sub(subBalanceBeforeRequest, randomWordsFulfilledEvent.Payment)
- wrapperSubscription, err = vrfv2PlusContracts.Coordinator.GetSubscription(context.Background(), wrapperSubID)
+ subIDForCancelling := subIDsForCancelling[0]
+
+ subscriptionForCancelling, err := vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subIDForCancelling)
require.NoError(t, err, "error getting subscription information")
- subBalanceAfterRequest := wrapperSubscription.Balance
- require.Equal(t, expectedSubBalanceJuels, subBalanceAfterRequest)
- consumerStatus, err := wrapperContracts.LoadTestConsumers[0].GetRequestStatus(context.Background(), randomWordsFulfilledEvent.RequestId)
- require.NoError(t, err, "error getting rand request status")
- require.True(t, consumerStatus.Fulfilled)
+ vrfv2plus.LogSubDetails(l, subscriptionForCancelling, subIDForCancelling, vrfv2PlusContracts.Coordinator)
- expectedWrapperConsumerJuelsBalance := new(big.Int).Sub(wrapperConsumerJuelsBalanceBeforeRequest, consumerStatus.Paid)
+ activeSubscriptionIdsBeforeSubCancellation, err := vrfv2PlusContracts.Coordinator.GetActiveSubscriptionIds(utils.TestContext(t), big.NewInt(0), big.NewInt(0))
+ require.NoError(t, err)
- wrapperConsumerJuelsBalanceAfterRequest, err := linkToken.BalanceOf(context.Background(), wrapperContracts.LoadTestConsumers[0].Address())
- require.NoError(t, err, "error getting wrapper consumer balance")
- require.Equal(t, expectedWrapperConsumerJuelsBalance, wrapperConsumerJuelsBalanceAfterRequest)
+ require.True(t, utils.BigIntSliceContains(activeSubscriptionIdsBeforeSubCancellation, subIDForCancelling))
- //todo: uncomment when VRF-651 will be fixed
- //require.Equal(t, 1, consumerStatus.Paid.Cmp(randomWordsFulfilledEvent.Payment), "Expected Consumer contract pay more than the Coordinator Sub")
- vrfv2plus.LogFulfillmentDetailsLinkBilling(l, wrapperConsumerJuelsBalanceBeforeRequest, wrapperConsumerJuelsBalanceAfterRequest, consumerStatus, randomWordsFulfilledEvent)
+ pendingRequestsExist, err := vrfv2PlusContracts.Coordinator.PendingRequestsExist(utils.TestContext(t), subIDForCancelling)
+ require.NoError(t, err)
+ require.False(t, pendingRequestsExist, "Pending requests should not exist")
- require.Equal(t, vrfv2PlusConfig.NumberOfWords, uint32(len(consumerStatus.RandomWords)))
- for _, w := range consumerStatus.RandomWords {
- l.Info().Str("Output", w.String()).Msg("Randomness fulfilled")
- require.Equal(t, 1, w.Cmp(big.NewInt(0)), "Expected the VRF job give an answer bigger than 0")
- }
- })
+ _, err = vrfv2plus.RequestRandomnessAndWaitForFulfillment(
+ vrfv2PlusContracts.LoadTestConsumers[0],
+ vrfv2PlusContracts.Coordinator,
+ vrfv2PlusData,
+ subIDForCancelling,
+ false,
+ testConfig.RandomnessRequestCountPerRequest,
+ testConfig,
+ 5*time.Second,
+ l,
+ )
- t.Run("VRFV2 Plus With Direct Funding (VRFV2PlusWrapper) - Native Billing", func(t *testing.T) {
- var isNativeBilling = true
+ require.Error(t, err, "error should occur for waiting for fulfilment due to low sub balance")
- wrapperConsumerBalanceBeforeRequestWei, err := env.EVMClient.BalanceAt(context.Background(), common.HexToAddress(wrapperContracts.LoadTestConsumers[0].Address()))
- require.NoError(t, err, "error getting wrapper consumer balance")
+ _, err = vrfv2plus.RequestRandomnessAndWaitForFulfillment(
+ vrfv2PlusContracts.LoadTestConsumers[0],
+ vrfv2PlusContracts.Coordinator,
+ vrfv2PlusData,
+ subIDForCancelling,
+ true,
+ testConfig.RandomnessRequestCountPerRequest,
+ testConfig,
+ testConfig.RandomWordsFulfilledEventTimeout,
+ l,
+ )
+
+ require.Error(t, err, "error should occur for waiting for fulfilment due to low sub balance")
+
+ pendingRequestsExist, err = vrfv2PlusContracts.Coordinator.PendingRequestsExist(utils.TestContext(t), subIDForCancelling)
+ require.NoError(t, err)
+ require.True(t, pendingRequestsExist, "Pending requests should exist after unfulfilled rand requests due to low sub balance")
+
+ walletBalanceNativeBeforeSubCancelling, err := env.EVMClient.BalanceAt(utils.TestContext(t), common.HexToAddress(defaultWalletAddress))
+ require.NoError(t, err)
- wrapperSubscription, err := vrfv2PlusContracts.Coordinator.GetSubscription(context.Background(), wrapperSubID)
+ walletBalanceLinkBeforeSubCancelling, err := linkToken.BalanceOf(utils.TestContext(t), defaultWalletAddress)
+ require.NoError(t, err)
+
+ subscriptionForCancelling, err = vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subIDForCancelling)
require.NoError(t, err, "error getting subscription information")
- subBalanceBeforeRequest := wrapperSubscription.NativeBalance
- randomWordsFulfilledEvent, err := vrfv2plus.DirectFundingRequestRandomnessAndWaitForFulfillment(
- wrapperContracts.LoadTestConsumers[0],
+ subBalanceLink := subscriptionForCancelling.Balance
+ subBalanceNative := subscriptionForCancelling.NativeBalance
+ l.Info().
+ Str("Subscription Amount Native", subBalanceNative.String()).
+ Str("Subscription Amount Link", subBalanceLink.String()).
+ Str("Returning funds from SubID", subIDForCancelling.String()).
+ Str("Returning funds to", defaultWalletAddress).
+ Msg("Canceling subscription and returning funds to subscription owner")
+ tx, err := vrfv2PlusContracts.Coordinator.OwnerCancelSubscription(subIDForCancelling)
+ require.NoError(t, err, "Error canceling subscription")
+
+ subscriptionCanceledEvent, err := vrfv2PlusContracts.Coordinator.WaitForSubscriptionCanceledEvent(subIDForCancelling, time.Second*30)
+ require.NoError(t, err, "error waiting for subscription canceled event")
+
+ cancellationTxReceipt, err := env.EVMClient.GetTxReceipt(tx.Hash())
+ require.NoError(t, err, "error getting tx cancellation Tx Receipt")
+
+ txGasUsed := new(big.Int).SetUint64(cancellationTxReceipt.GasUsed)
+ cancellationTxFeeWei := new(big.Int).Mul(txGasUsed, cancellationTxReceipt.EffectiveGasPrice)
+
+ l.Info().
+ Str("Cancellation Tx Fee Wei", cancellationTxFeeWei.String()).
+ Str("Effective Gas Price", cancellationTxReceipt.EffectiveGasPrice.String()).
+ Uint64("Gas Used", cancellationTxReceipt.GasUsed).
+ Msg("Cancellation TX Receipt")
+
+ l.Info().
+ Str("Returned Subscription Amount Native", subscriptionCanceledEvent.AmountNative.String()).
+ Str("Returned Subscription Amount Link", subscriptionCanceledEvent.AmountLink.String()).
+ Str("SubID", subscriptionCanceledEvent.SubId.String()).
+ Str("Returned to", subscriptionCanceledEvent.To.String()).
+ Msg("Subscription Canceled Event")
+
+ require.Equal(t, subBalanceNative, subscriptionCanceledEvent.AmountNative, "SubscriptionCanceled event native amount is not equal to sub amount while canceling subscription")
+ require.Equal(t, subBalanceLink, subscriptionCanceledEvent.AmountLink, "SubscriptionCanceled event LINK amount is not equal to sub amount while canceling subscription")
+
+ walletBalanceNativeAfterSubCancelling, err := env.EVMClient.BalanceAt(utils.TestContext(t), common.HexToAddress(defaultWalletAddress))
+ require.NoError(t, err)
+
+ walletBalanceLinkAfterSubCancelling, err := linkToken.BalanceOf(utils.TestContext(t), defaultWalletAddress)
+ require.NoError(t, err)
+
+ //Verify that sub was deleted from Coordinator
+ _, err = vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subIDForCancelling)
+ fmt.Println("err", err)
+ require.Error(t, err, "error not occurred when trying to get deleted subscription from old Coordinator after sub migration")
+
+ subFundsReturnedNativeActual := new(big.Int).Sub(walletBalanceNativeAfterSubCancelling, walletBalanceNativeBeforeSubCancelling)
+ subFundsReturnedLinkActual := new(big.Int).Sub(walletBalanceLinkAfterSubCancelling, walletBalanceLinkBeforeSubCancelling)
+
+ subFundsReturnedNativeExpected := new(big.Int).Sub(subBalanceNative, cancellationTxFeeWei)
+ deltaSpentOnCancellationTxFee := new(big.Int).Sub(subBalanceNative, subFundsReturnedNativeActual)
+ l.Info().
+ Str("Sub Balance - Native", subBalanceNative.String()).
+ Str("Delta Spent On Cancellation Tx Fee - `NativeBalance - subFundsReturnedNativeActual`", deltaSpentOnCancellationTxFee.String()).
+ Str("Cancellation Tx Fee Wei", cancellationTxFeeWei.String()).
+ Str("Sub Funds Returned Actual - Native", subFundsReturnedNativeActual.String()).
+ Str("Sub Funds Returned Expected - `NativeBalance - cancellationTxFeeWei`", subFundsReturnedNativeExpected.String()).
+ Str("Sub Funds Returned Actual - Link", subFundsReturnedLinkActual.String()).
+ Str("Sub Balance - Link", subBalanceLink.String()).
+ Str("walletBalanceNativeBeforeSubCancelling", walletBalanceNativeBeforeSubCancelling.String()).
+ Str("walletBalanceNativeAfterSubCancelling", walletBalanceNativeAfterSubCancelling.String()).
+ Msg("Sub funds returned")
+
+ //todo - need to use different wallet for each test to verify exact amount of Native/LINK returned
+ //todo - as defaultWallet is used in other tests in parallel which might affect the balance
+ //require.Equal(t, 1, walletBalanceNativeAfterSubCancelling.Cmp(walletBalanceNativeBeforeSubCancelling), "Native funds were not returned after sub cancellation")
+
+ //todo - this fails on SIMULATED env as tx cost is calculated different as for testnets and it's not receipt.EffectiveGasPrice*receipt.GasUsed
+ //require.Equal(t, subFundsReturnedNativeExpected, subFundsReturnedNativeActual, "Returned funds are not equal to sub balance that was cancelled")
+ require.Equal(t, 0, subBalanceLink.Cmp(subFundsReturnedLinkActual), "Returned LINK funds are not equal to sub balance that was cancelled")
+
+ activeSubscriptionIdsAfterSubCancellation, err := vrfv2PlusContracts.Coordinator.GetActiveSubscriptionIds(utils.TestContext(t), big.NewInt(0), big.NewInt(0))
+ require.NoError(t, err, "error getting active subscription ids")
+
+ require.False(
+ t,
+ utils.BigIntSliceContains(activeSubscriptionIdsAfterSubCancellation, subIDForCancelling),
+ "Active subscription ids should not contain sub id after sub cancellation",
+ )
+ })
+ t.Run("Oracle Withdraw", func(t *testing.T) {
+ testConfig := vrfv2PlusConfig
+ subIDsForOracleWithDraw, err := vrfv2plus.CreateFundSubsAndAddConsumers(
+ env,
+ testConfig,
+ linkToken,
+ vrfv2PlusContracts.Coordinator,
+ vrfv2PlusContracts.LoadTestConsumers,
+ 1,
+ )
+ require.NoError(t, err)
+ subIDForOracleWithdraw := subIDsForOracleWithDraw[0]
+
+ fulfilledEventLink, err := vrfv2plus.RequestRandomnessAndWaitForFulfillment(
+ vrfv2PlusContracts.LoadTestConsumers[0],
vrfv2PlusContracts.Coordinator,
vrfv2PlusData,
- wrapperSubID,
- isNativeBilling,
- &vrfv2PlusConfig,
+ subIDForOracleWithdraw,
+ false,
+ testConfig.RandomnessRequestCountPerRequest,
+ testConfig,
+ testConfig.RandomWordsFulfilledEventTimeout,
l,
)
- require.NoError(t, err, "error requesting randomness and waiting for fulfilment")
+ require.NoError(t, err)
- expectedSubBalanceWei := new(big.Int).Sub(subBalanceBeforeRequest, randomWordsFulfilledEvent.Payment)
- wrapperSubscription, err = vrfv2PlusContracts.Coordinator.GetSubscription(context.Background(), wrapperSubID)
- require.NoError(t, err, "error getting subscription information")
- subBalanceAfterRequest := wrapperSubscription.NativeBalance
- require.Equal(t, expectedSubBalanceWei, subBalanceAfterRequest)
+ fulfilledEventNative, err := vrfv2plus.RequestRandomnessAndWaitForFulfillment(
+ vrfv2PlusContracts.LoadTestConsumers[0],
+ vrfv2PlusContracts.Coordinator,
+ vrfv2PlusData,
+ subIDForOracleWithdraw,
+ true,
+ testConfig.RandomnessRequestCountPerRequest,
+ testConfig,
+ testConfig.RandomWordsFulfilledEventTimeout,
+ l,
+ )
+ require.NoError(t, err)
+ amountToWithdrawLink := fulfilledEventLink.Payment
- consumerStatus, err := wrapperContracts.LoadTestConsumers[0].GetRequestStatus(context.Background(), randomWordsFulfilledEvent.RequestId)
- require.NoError(t, err, "error getting rand request status")
- require.True(t, consumerStatus.Fulfilled)
+ defaultWalletBalanceNativeBeforeOracleWithdraw, err := env.EVMClient.BalanceAt(utils.TestContext(t), common.HexToAddress(defaultWalletAddress))
+ require.NoError(t, err)
- expectedWrapperConsumerWeiBalance := new(big.Int).Sub(wrapperConsumerBalanceBeforeRequestWei, consumerStatus.Paid)
+ defaultWalletBalanceLinkBeforeOracleWithdraw, err := linkToken.BalanceOf(utils.TestContext(t), defaultWalletAddress)
+ require.NoError(t, err)
- wrapperConsumerBalanceAfterRequestWei, err := env.EVMClient.BalanceAt(context.Background(), common.HexToAddress(wrapperContracts.LoadTestConsumers[0].Address()))
- require.NoError(t, err, "error getting wrapper consumer balance")
- require.Equal(t, expectedWrapperConsumerWeiBalance, wrapperConsumerBalanceAfterRequestWei)
+ l.Info().
+ Str("Returning to", defaultWalletAddress).
+ Str("Amount", amountToWithdrawLink.String()).
+ Msg("Invoking Oracle Withdraw for LINK")
- //todo: uncomment when VRF-651 will be fixed
- //require.Equal(t, 1, consumerStatus.Paid.Cmp(randomWordsFulfilledEvent.Payment), "Expected Consumer contract pay more than the Coordinator Sub")
- vrfv2plus.LogFulfillmentDetailsNativeBilling(l, wrapperConsumerBalanceBeforeRequestWei, wrapperConsumerBalanceAfterRequestWei, consumerStatus, randomWordsFulfilledEvent)
+ err = vrfv2PlusContracts.Coordinator.OracleWithdraw(
+ common.HexToAddress(defaultWalletAddress),
+ amountToWithdrawLink,
+ )
+ require.NoError(t, err, "error withdrawing LINK from coordinator to default wallet")
+ amountToWithdrawNative := fulfilledEventNative.Payment
- require.Equal(t, vrfv2PlusConfig.NumberOfWords, uint32(len(consumerStatus.RandomWords)))
- for _, w := range consumerStatus.RandomWords {
- l.Info().Str("Output", w.String()).Msg("Randomness fulfilled")
- require.Equal(t, 1, w.Cmp(big.NewInt(0)), "Expected the VRF job give an answer bigger than 0")
- }
- })
+ l.Info().
+ Str("Returning to", defaultWalletAddress).
+ Str("Amount", amountToWithdrawNative.String()).
+ Msg("Invoking Oracle Withdraw for Native")
+
+ err = vrfv2PlusContracts.Coordinator.OracleWithdrawNative(
+ common.HexToAddress(defaultWalletAddress),
+ amountToWithdrawNative,
+ )
+ require.NoError(t, err, "error withdrawing Native tokens from coordinator to default wallet")
+
+ err = env.EVMClient.WaitForEvents()
+ require.NoError(t, err, vrfv2plus.ErrWaitTXsComplete)
+
+ defaultWalletBalanceNativeAfterOracleWithdraw, err := env.EVMClient.BalanceAt(utils.TestContext(t), common.HexToAddress(defaultWalletAddress))
+ require.NoError(t, err)
+
+ defaultWalletBalanceLinkAfterOracleWithdraw, err := linkToken.BalanceOf(utils.TestContext(t), defaultWalletAddress)
+ require.NoError(t, err)
+ //not possible to verify exact amount of Native/LINK returned as defaultWallet is used in other tests in parallel which might affect the balance
+ require.Equal(t, 1, defaultWalletBalanceNativeAfterOracleWithdraw.Cmp(defaultWalletBalanceNativeBeforeOracleWithdraw), "Native funds were not returned after oracle withdraw native")
+ require.Equal(t, 1, defaultWalletBalanceLinkAfterOracleWithdraw.Cmp(defaultWalletBalanceLinkBeforeOracleWithdraw), "LINK funds were not returned after oracle withdraw")
+ })
}
func TestVRFv2PlusMigration(t *testing.T) {
@@ -271,22 +609,25 @@ func TestVRFv2PlusMigration(t *testing.T) {
linkAddress, err := actions.DeployLINKToken(env.ContractDeployer)
require.NoError(t, err, "error deploying LINK contract")
- vrfv2PlusContracts, subIDs, vrfv2PlusData, err := vrfv2plus.SetupVRFV2_5Environment(env, &vrfv2PlusConfig, linkAddress, mockETHLinkFeedAddress, 2, 1, l)
+ nativeTokenPrimaryKeyAddress, err := env.ClCluster.NodeAPIs()[0].PrimaryEthAddress()
+ require.NoError(t, err, "error getting primary eth address")
+
+ vrfv2PlusContracts, subIDs, vrfv2PlusData, err := vrfv2plus.SetupVRFV2_5Environment(env, vrfv2PlusConfig, linkAddress, mockETHLinkFeedAddress, nativeTokenPrimaryKeyAddress, 2, 1, l)
require.NoError(t, err, "error setting up VRF v2_5 env")
subID := subIDs[0]
- subscription, err := vrfv2PlusContracts.Coordinator.GetSubscription(context.Background(), subID)
+ subscription, err := vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subID)
require.NoError(t, err, "error getting subscription information")
vrfv2plus.LogSubDetails(l, subscription, subID, vrfv2PlusContracts.Coordinator)
- activeSubIdsOldCoordinatorBeforeMigration, err := vrfv2PlusContracts.Coordinator.GetActiveSubscriptionIds(context.Background(), big.NewInt(0), big.NewInt(0))
+ activeSubIdsOldCoordinatorBeforeMigration, err := vrfv2PlusContracts.Coordinator.GetActiveSubscriptionIds(utils.TestContext(t), big.NewInt(0), big.NewInt(0))
require.NoError(t, err, "error occurred getting active sub ids")
require.Len(t, activeSubIdsOldCoordinatorBeforeMigration, 1, "Active Sub Ids length is not equal to 1")
require.Equal(t, subID, activeSubIdsOldCoordinatorBeforeMigration[0])
- oldSubscriptionBeforeMigration, err := vrfv2PlusContracts.Coordinator.GetSubscription(context.Background(), subID)
+ oldSubscriptionBeforeMigration, err := vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subID)
require.NoError(t, err, "error getting subscription information")
//Migration Process
@@ -297,7 +638,7 @@ func TestVRFv2PlusMigration(t *testing.T) {
require.NoError(t, err, vrfv2plus.ErrWaitTXsComplete)
_, err = vrfv2plus.VRFV2PlusUpgradedVersionRegisterProvingKey(vrfv2PlusData.VRFKey, vrfv2PlusData.PrimaryEthAddress, newCoordinator)
- require.NoError(t, err, errors.Wrap(err, vrfv2plus.ErrRegisteringProvingKey))
+ require.NoError(t, err, fmt.Errorf("%s, err: %w", vrfv2plus.ErrRegisteringProvingKey, err))
err = newCoordinator.SetConfig(
vrfv2PlusConfig.MinimumConfirmations,
@@ -310,6 +651,7 @@ func TestVRFv2PlusMigration(t *testing.T) {
FulfillmentFlatFeeNativePPM: vrfv2PlusConfig.FulfillmentFlatFeeNativePPM,
},
)
+ require.NoError(t, err)
err = newCoordinator.SetLINKAndLINKNativeFeed(linkAddress.Address(), mockETHLinkFeedAddress.Address())
require.NoError(t, err, vrfv2plus.ErrSetLinkNativeLinkFeed)
@@ -356,14 +698,14 @@ func TestVRFv2PlusMigration(t *testing.T) {
migratedCoordinatorLinkTotalBalanceAfterMigration, migratedCoordinatorEthTotalBalanceAfterMigration, err := vrfv2plus.GetUpgradedCoordinatorTotalBalance(newCoordinator)
require.NoError(t, err)
- migratedSubscription, err := newCoordinator.GetSubscription(context.Background(), subID)
+ migratedSubscription, err := newCoordinator.GetSubscription(utils.TestContext(t), subID)
require.NoError(t, err, "error getting subscription information")
vrfv2plus.LogSubDetailsAfterMigration(l, newCoordinator, subID, migratedSubscription)
//Verify that Coordinators were updated in Consumers
for _, consumer := range vrfv2PlusContracts.LoadTestConsumers {
- coordinatorAddressInConsumerAfterMigration, err := consumer.GetCoordinator(context.Background())
+ coordinatorAddressInConsumerAfterMigration, err := consumer.GetCoordinator(utils.TestContext(t))
require.NoError(t, err, "error getting Coordinator from Consumer contract")
require.Equal(t, newCoordinator.Address(), coordinatorAddressInConsumerAfterMigration.String())
l.Debug().
@@ -379,13 +721,13 @@ func TestVRFv2PlusMigration(t *testing.T) {
require.Equal(t, oldSubscriptionBeforeMigration.Consumers, migratedSubscription.Consumers)
//Verify that old sub was deleted from old Coordinator
- _, err = vrfv2PlusContracts.Coordinator.GetSubscription(context.Background(), subID)
+ _, err = vrfv2PlusContracts.Coordinator.GetSubscription(utils.TestContext(t), subID)
require.Error(t, err, "error not occurred when trying to get deleted subscription from old Coordinator after sub migration")
- _, err = vrfv2PlusContracts.Coordinator.GetActiveSubscriptionIds(context.Background(), big.NewInt(0), big.NewInt(0))
+ _, err = vrfv2PlusContracts.Coordinator.GetActiveSubscriptionIds(utils.TestContext(t), big.NewInt(0), big.NewInt(0))
require.Error(t, err, "error not occurred getting active sub ids. Should occur since it should revert when sub id array is empty")
- activeSubIdsMigratedCoordinator, err := newCoordinator.GetActiveSubscriptionIds(context.Background(), big.NewInt(0), big.NewInt(0))
+ activeSubIdsMigratedCoordinator, err := newCoordinator.GetActiveSubscriptionIds(utils.TestContext(t), big.NewInt(0), big.NewInt(0))
require.NoError(t, err, "error occurred getting active sub ids")
require.Len(t, activeSubIdsMigratedCoordinator, 1, "Active Sub Ids length is not equal to 1 for Migrated Coordinator after migration")
require.Equal(t, subID, activeSubIdsMigratedCoordinator[0])
@@ -396,10 +738,10 @@ func TestVRFv2PlusMigration(t *testing.T) {
expectedLinkTotalBalanceForOldCoordinator := new(big.Int).Sub(oldCoordinatorLinkTotalBalanceBeforeMigration, oldSubscriptionBeforeMigration.Balance)
expectedEthTotalBalanceForOldCoordinator := new(big.Int).Sub(oldCoordinatorEthTotalBalanceBeforeMigration, oldSubscriptionBeforeMigration.NativeBalance)
- require.Equal(t, expectedLinkTotalBalanceForMigratedCoordinator, migratedCoordinatorLinkTotalBalanceAfterMigration)
- require.Equal(t, expectedEthTotalBalanceForMigratedCoordinator, migratedCoordinatorEthTotalBalanceAfterMigration)
- require.Equal(t, expectedLinkTotalBalanceForOldCoordinator, oldCoordinatorLinkTotalBalanceAfterMigration)
- require.Equal(t, expectedEthTotalBalanceForOldCoordinator, oldCoordinatorEthTotalBalanceAfterMigration)
+ require.Equal(t, 0, expectedLinkTotalBalanceForMigratedCoordinator.Cmp(migratedCoordinatorLinkTotalBalanceAfterMigration))
+ require.Equal(t, 0, expectedEthTotalBalanceForMigratedCoordinator.Cmp(migratedCoordinatorEthTotalBalanceAfterMigration))
+ require.Equal(t, 0, expectedLinkTotalBalanceForOldCoordinator.Cmp(oldCoordinatorLinkTotalBalanceAfterMigration))
+ require.Equal(t, 0, expectedEthTotalBalanceForOldCoordinator.Cmp(oldCoordinatorEthTotalBalanceAfterMigration))
//Verify rand requests fulfills with Link Token billing
_, err = vrfv2plus.RequestRandomnessAndWaitForFulfillmentUpgraded(
@@ -408,7 +750,7 @@ func TestVRFv2PlusMigration(t *testing.T) {
vrfv2PlusData,
subID,
false,
- &vrfv2PlusConfig,
+ vrfv2PlusConfig,
l,
)
require.NoError(t, err, "error requesting randomness and waiting for fulfilment")
@@ -420,7 +762,7 @@ func TestVRFv2PlusMigration(t *testing.T) {
vrfv2PlusData,
subID,
true,
- &vrfv2PlusConfig,
+ vrfv2PlusConfig,
l,
)
require.NoError(t, err, "error requesting randomness and waiting for fulfilment")
diff --git a/integration-tests/soak/ocr_test.go b/integration-tests/soak/ocr_test.go
index b2375f13ac2..9973c23808e 100644
--- a/integration-tests/soak/ocr_test.go
+++ b/integration-tests/soak/ocr_test.go
@@ -16,6 +16,7 @@ func TestOCRSoak(t *testing.T) {
// Use this variable to pass in any custom EVM specific TOML values to your Chainlink nodes
customNetworkTOML := ``
// Uncomment below for debugging TOML issues on the node
+ // network := networks.MustGetSelectedNetworksFromEnv()[0]
// fmt.Println("Using Chainlink TOML\n---------------------")
// fmt.Println(client.AddNetworkDetailedConfig(config.BaseOCRP2PV1Config, customNetworkTOML, network))
// fmt.Println("---------------------")
diff --git a/integration-tests/testreporters/keeper_benchmark.go b/integration-tests/testreporters/keeper_benchmark.go
index c800eb37be2..e9f2eaad7c5 100644
--- a/integration-tests/testreporters/keeper_benchmark.go
+++ b/integration-tests/testreporters/keeper_benchmark.go
@@ -183,7 +183,7 @@ func (k *KeeperBenchmarkTestReporter) WriteReport(folderLocation string) error {
}
for contractIndex, report := range k.Reports {
- avg, median, ninetyPct, ninetyNinePct, max := intListStats(report.AllCheckDelays)
+ avg, median, ninetyPct, ninetyNinePct, max = intListStats(report.AllCheckDelays)
err = keeperReportWriter.Write([]string{
fmt.Sprint(contractIndex),
report.RegistryAddress,
@@ -305,6 +305,8 @@ func (k *KeeperBenchmarkTestReporter) SendSlackNotification(t *testing.T, slackC
}
// intListStats helper calculates some statistics on an int list: avg, median, 90pct, 99pct, max
+//
+//nolint:revive
func intListStats(in []int64) (float64, int64, int64, int64, int64) {
length := len(in)
if length == 0 {
diff --git a/integration-tests/testreporters/ocr.go b/integration-tests/testreporters/ocr.go
index a04718ea228..abbb261fa74 100644
--- a/integration-tests/testreporters/ocr.go
+++ b/integration-tests/testreporters/ocr.go
@@ -67,9 +67,7 @@ func (e *OCRRoundState) Time() time.Time {
// CSV returns a CSV representation of the test state and all events
func (e *OCRRoundState) CSV() [][]string {
rows := [][]string{{e.StartTime.Format("2006-01-02 15:04:05.00 MST"), fmt.Sprintf("Expecting new Answer: %d", e.Answer)}}
- for _, anomaly := range e.anomalies {
- rows = append(rows, anomaly)
- }
+ rows = append(rows, e.anomalies...)
return rows
}
diff --git a/integration-tests/testreporters/profile.go b/integration-tests/testreporters/profile.go
index 9ac7713e94d..ab9dec138e4 100644
--- a/integration-tests/testreporters/profile.go
+++ b/integration-tests/testreporters/profile.go
@@ -54,7 +54,7 @@ func (c *ChainlinkProfileTestReporter) WriteReport(folderLocation string) error
}
// SendNotification hasn't been implemented for this test
-func (c *ChainlinkProfileTestReporter) SendSlackNotification(t *testing.T, slackClient *slack.Client) error {
+func (c *ChainlinkProfileTestReporter) SendSlackNotification(_ *testing.T, _ *slack.Client) error {
log.Warn().Msg("No Slack notification integration for Chainlink profile tests")
return nil
}
diff --git a/integration-tests/testreporters/vrfv2plus.go b/integration-tests/testreporters/vrfv2plus.go
index 83d4678dfdd..38220ca8821 100644
--- a/integration-tests/testreporters/vrfv2plus.go
+++ b/integration-tests/testreporters/vrfv2plus.go
@@ -2,12 +2,13 @@ package testreporters
import (
"fmt"
- "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2plus/vrfv2plus_config"
"math/big"
"os"
"testing"
"time"
+ "github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2plus/vrfv2plus_config"
+
"github.com/slack-go/slack"
"github.com/smartcontractkit/chainlink-testing-framework/testreporters"
@@ -53,7 +54,7 @@ func (o *VRFV2PlusTestReporter) SendSlackNotification(t *testing.T, slackClient
headerText = fmt.Sprintf(":x: VRF %s Test FAILED :x:", o.TestType)
}
- messageBlocks := testreporters.SlackNotifyBlocks(headerText, fmt.Sprintf("%s", os.Getenv("SELECTED_NETWORKS")), []string{
+ messageBlocks := testreporters.SlackNotifyBlocks(headerText, os.Getenv("SELECTED_NETWORKS"), []string{
fmt.Sprintf(
"Summary\n"+
"Perf Test Type: %s\n"+
diff --git a/integration-tests/testsetups/keeper_benchmark.go b/integration-tests/testsetups/keeper_benchmark.go
index f786cca9bb5..bb6c582c137 100644
--- a/integration-tests/testsetups/keeper_benchmark.go
+++ b/integration-tests/testsetups/keeper_benchmark.go
@@ -37,6 +37,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
"github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum"
"github.com/smartcontractkit/chainlink/integration-tests/testreporters"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
// KeeperBenchmarkTest builds a test to check that chainlink nodes are able to upkeep a specified amount of Upkeep
@@ -229,7 +230,7 @@ func (k *KeeperBenchmarkTest) Run() {
"NumberOfRegistries": len(k.keeperRegistries),
}
inputs := k.Inputs
- startingBlock, err := k.chainClient.LatestBlockNumber(context.Background())
+ startingBlock, err := k.chainClient.LatestBlockNumber(utils.TestContext(k.t))
require.NoError(k.t, err, "Error getting latest block number")
k.startingBlock = big.NewInt(0).SetUint64(startingBlock)
startTime := time.Now()
@@ -305,7 +306,7 @@ func (k *KeeperBenchmarkTest) Run() {
err = fmt.Errorf("initial error") // to ensure our for loop runs at least once
)
for err != nil { // This RPC call can possibly time out or otherwise die. Failure is not an option, keep retrying to get our stats.
- ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ ctx, cancel := context.WithTimeout(utils.TestContext(k.t), timeout)
logs, err = k.chainClient.FilterLogs(ctx, filterQuery)
cancel()
if err != nil {
@@ -407,12 +408,13 @@ func (k *KeeperBenchmarkTest) observeUpkeepEvents() {
FromBlock: k.startingBlock,
}
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ ctx, cancel := context.WithTimeout(utils.TestContext(k.t), 5*time.Second)
sub, err := k.chainClient.SubscribeFilterLogs(ctx, filterQuery, eventLogs)
cancel()
require.NoError(k.t, err, "Subscribing to upkeep performed events log shouldn't fail")
interruption := make(chan os.Signal, 1)
+ //nolint:staticcheck //ignore SA1016 we need to send the os.Kill signal
signal.Notify(interruption, os.Kill, os.Interrupt, syscall.SIGTERM)
go func() {
@@ -429,7 +431,7 @@ func (k *KeeperBenchmarkTest) observeUpkeepEvents() {
Str("Backoff", backoff.String()).
Msg("Error while subscribing to Keeper Event Logs. Resubscribing...")
- ctx, cancel := context.WithTimeout(context.Background(), backoff)
+ ctx, cancel := context.WithTimeout(utils.TestContext(k.t), backoff)
sub, err = k.chainClient.SubscribeFilterLogs(ctx, filterQuery, eventLogs)
cancel()
if err != nil {
diff --git a/integration-tests/testsetups/ocr.go b/integration-tests/testsetups/ocr.go
index ee8116f3f99..3fb9dd9844a 100644
--- a/integration-tests/testsetups/ocr.go
+++ b/integration-tests/testsetups/ocr.go
@@ -42,6 +42,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/config"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
"github.com/smartcontractkit/chainlink/integration-tests/testreporters"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
const (
@@ -163,7 +164,7 @@ func (o *OCRSoakTest) DeployEnvironment(customChainlinkNetworkTOML string) {
}
// LoadEnvironment loads an existing test environment using the provided URLs
-func (o *OCRSoakTest) LoadEnvironment(chainlinkURLs []string, chainURL, mockServerURL string) {
+func (o *OCRSoakTest) LoadEnvironment(chainlinkURLs []string, mockServerURL string) {
var (
network = networks.MustGetSelectedNetworksFromEnv()[0]
err error
@@ -241,7 +242,6 @@ func (o *OCRSoakTest) Setup() {
o.Inputs.NumberOfContracts,
linkTokenContract,
contractDeployer,
- o.bootstrapNode,
o.workerNodes,
o.chainClient,
)
@@ -258,7 +258,7 @@ func (o *OCRSoakTest) Setup() {
// Run starts the OCR soak test
func (o *OCRSoakTest) Run() {
- ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ ctx, cancel := context.WithTimeout(utils.TestContext(o.t), time.Second*5)
latestBlockNum, err := o.chainClient.LatestBlockNumber(ctx)
cancel()
require.NoError(o.t, err, "Error getting current block number")
@@ -343,7 +343,7 @@ func (o *OCRSoakTest) SaveState() error {
if err != nil {
return err
}
- // #nosec G306 - let everyone read
+ //nolint:gosec // G306 - let everyone read
if err = os.WriteFile(saveFileLocation, data, 0644); err != nil {
return err
}
@@ -468,6 +468,7 @@ func (o *OCRSoakTest) Interrupted() bool {
func (o *OCRSoakTest) testLoop(testDuration time.Duration, newValue int) {
endTest := time.After(testDuration)
interruption := make(chan os.Signal, 1)
+ //nolint:staticcheck //ignore SA1016 we need to send the os.Kill signal
signal.Notify(interruption, os.Kill, os.Interrupt, syscall.SIGTERM)
lastValue := 0
newRoundTrigger := time.NewTimer(0) // Want to trigger a new round ASAP
@@ -558,7 +559,7 @@ func (o *OCRSoakTest) setFilterQuery() {
// WARNING: Should only be used for observation and logging. This is not a reliable way to collect events.
func (o *OCRSoakTest) observeOCREvents() error {
eventLogs := make(chan types.Log)
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ ctx, cancel := context.WithTimeout(utils.TestContext(o.t), 5*time.Second)
eventSub, err := o.chainClient.SubscribeFilterLogs(ctx, o.filterQuery, eventLogs)
cancel()
if err != nil {
@@ -592,7 +593,7 @@ func (o *OCRSoakTest) observeOCREvents() error {
Str("Backoff", backoff.String()).
Interface("Query", o.filterQuery).
Msg("Error while subscribed to OCR Logs. Resubscribing")
- ctx, cancel = context.WithTimeout(context.Background(), backoff)
+ ctx, cancel = context.WithTimeout(utils.TestContext(o.t), backoff)
eventSub, err = o.chainClient.SubscribeFilterLogs(ctx, o.filterQuery, eventLogs)
cancel()
if err != nil {
@@ -645,12 +646,12 @@ func (o *OCRSoakTest) collectEvents() error {
timeout := time.Second * 15
o.log.Info().Interface("Filter Query", o.filterQuery).Str("Timeout", timeout.String()).Msg("Retrieving on-chain events")
- ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ ctx, cancel := context.WithTimeout(utils.TestContext(o.t), timeout)
contractEvents, err := o.chainClient.FilterLogs(ctx, o.filterQuery)
cancel()
for err != nil {
o.log.Info().Interface("Filter Query", o.filterQuery).Str("Timeout", timeout.String()).Msg("Retrieving on-chain events")
- ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ ctx, cancel := context.WithTimeout(utils.TestContext(o.t), timeout)
contractEvents, err = o.chainClient.FilterLogs(ctx, o.filterQuery)
cancel()
if err != nil {
diff --git a/integration-tests/testsetups/vrfv2.go b/integration-tests/testsetups/vrfv2.go
index 194c7ff4e6c..8c5fde72168 100644
--- a/integration-tests/testsetups/vrfv2.go
+++ b/integration-tests/testsetups/vrfv2.go
@@ -22,6 +22,7 @@ import (
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
"github.com/smartcontractkit/chainlink/integration-tests/testreporters"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
)
// VRFV2SoakTest defines a typical VRFV2 soak test
@@ -87,7 +88,8 @@ func (v *VRFV2SoakTest) Run(t *testing.T) {
Msg("Starting VRFV2 Soak Test")
// set the requests to only run for a certain amount of time
- testContext, testCancel := context.WithTimeout(context.Background(), v.Inputs.TestDuration)
+ ctx := utils.TestContext(t)
+ testContext, testCancel := context.WithTimeout(ctx, v.Inputs.TestDuration)
defer testCancel()
v.NumberOfRandRequests = 0
@@ -126,7 +128,7 @@ func (v *VRFV2SoakTest) Run(t *testing.T) {
//todo - need to find better way for this
time.Sleep(1 * time.Minute)
- loadTestMetrics, err := v.Inputs.ConsumerContract.GetLoadTestMetrics(nil)
+ loadTestMetrics, err := v.Inputs.ConsumerContract.GetLoadTestMetrics(ctx)
if err != nil {
l.Error().Err(err).Msg("Error Occurred when getting Load Test Metrics from Consumer contract")
}
diff --git a/integration-tests/types/envcommon/common.go b/integration-tests/types/envcommon/common.go
index 607c481f33f..bdabcaf96b0 100644
--- a/integration-tests/types/envcommon/common.go
+++ b/integration-tests/types/envcommon/common.go
@@ -2,7 +2,7 @@ package envcommon
import (
"encoding/json"
- "io/ioutil"
+ "io"
"os"
)
@@ -12,7 +12,7 @@ func ParseJSONFile(path string, v any) error {
return err
}
defer jsonFile.Close()
- b, _ := ioutil.ReadAll(jsonFile)
+ b, _ := io.ReadAll(jsonFile)
err = json.Unmarshal(b, v)
if err != nil {
return err
diff --git a/integration-tests/universal/log_poller/config.go b/integration-tests/universal/log_poller/config.go
new file mode 100644
index 00000000000..78a0da46bc6
--- /dev/null
+++ b/integration-tests/universal/log_poller/config.go
@@ -0,0 +1,249 @@
+package logpoller
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+
+ "cosmossdk.io/errors"
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/pelletier/go-toml/v2"
+ "github.com/rs/zerolog/log"
+
+ "github.com/smartcontractkit/chainlink/v2/core/store/models"
+)
+
+const (
+ DefaultConfigFilename = "config.toml"
+
+ ErrReadPerfConfig = "failed to read TOML config for performance tests"
+ ErrUnmarshalPerfConfig = "failed to unmarshal TOML config for performance tests"
+)
+
+type GeneratorType = string
+
+const (
+ GeneratorType_WASP = "wasp"
+ GeneratorType_Looped = "looped"
+)
+
+type Config struct {
+ General *General `toml:"general"`
+ ChaosConfig *ChaosConfig `toml:"chaos"`
+ Wasp *WaspConfig `toml:"wasp"`
+ LoopedConfig *LoopedConfig `toml:"looped"`
+}
+
+type LoopedConfig struct {
+ ContractConfig `toml:"contract"`
+ FuzzConfig `toml:"fuzz"`
+}
+
+type ContractConfig struct {
+ ExecutionCount int `toml:"execution_count"`
+}
+
+type FuzzConfig struct {
+ MinEmitWaitTimeMs int `toml:"min_emit_wait_time_ms"`
+ MaxEmitWaitTimeMs int `toml:"max_emit_wait_time_ms"`
+}
+
+type General struct {
+ Generator string `toml:"generator"`
+ EventsToEmit []abi.Event `toml:"-"`
+ Contracts int `toml:"contracts"`
+ EventsPerTx int `toml:"events_per_tx"`
+ UseFinalityTag bool `toml:"use_finality_tag"`
+}
+
+type ChaosConfig struct {
+ ExperimentCount int `toml:"experiment_count"`
+}
+
+type WaspConfig struct {
+ Load *Load `toml:"load"`
+}
+
+type Load struct {
+ RPS int64 `toml:"rps"`
+ LPS int64 `toml:"lps"`
+ RateLimitUnitDuration *models.Duration `toml:"rate_limit_unit_duration"`
+ Duration *models.Duration `toml:"duration"`
+ CallTimeout *models.Duration `toml:"call_timeout"`
+}
+
+func ReadConfig(configName string) (*Config, error) {
+ var cfg *Config
+ d, err := os.ReadFile(configName)
+ if err != nil {
+ return nil, errors.Wrap(err, ErrReadPerfConfig)
+ }
+ err = toml.Unmarshal(d, &cfg)
+ if err != nil {
+ return nil, errors.Wrap(err, ErrUnmarshalPerfConfig)
+ }
+
+ if err := cfg.validate(); err != nil {
+ return nil, err
+ }
+
+ log.Debug().Interface("Config", cfg).Msg("Parsed config")
+ return cfg, nil
+}
+
+func (c *Config) OverrideFromEnv() error {
+ if contr := os.Getenv("CONTRACTS"); contr != "" {
+ c.General.Contracts = mustParseInt(contr)
+ }
+
+ if eventsPerTx := os.Getenv("EVENTS_PER_TX"); eventsPerTx != "" {
+ c.General.EventsPerTx = mustParseInt(eventsPerTx)
+ }
+
+ if useFinalityTag := os.Getenv("USE_FINALITY_TAG"); useFinalityTag != "" {
+ c.General.UseFinalityTag = mustParseBool(useFinalityTag)
+ }
+
+ if duration := os.Getenv("LOAD_DURATION"); duration != "" {
+ d, err := models.ParseDuration(duration)
+ if err != nil {
+ return err
+ }
+
+ if c.General.Generator == GeneratorType_WASP {
+ c.Wasp.Load.Duration = &d
+ } else {
+ // this is completely arbitrary; practice shows that even with these values
+ // the test executes much longer than specified, probably due to network latency
+ c.LoopedConfig.FuzzConfig.MinEmitWaitTimeMs = 400
+ c.LoopedConfig.FuzzConfig.MaxEmitWaitTimeMs = 600
+ // divide by 4 based on past runs, but we should do it in a better way
+ c.LoopedConfig.ContractConfig.ExecutionCount = int(d.Duration().Seconds() / 4)
+ }
+ }
+
+ return nil
+}
+
+func (c *Config) validate() error {
+ if c.General == nil {
+ return fmt.Errorf("General config is nil")
+ }
+
+ err := c.General.validate()
+ if err != nil {
+ return fmt.Errorf("General config validation failed: %w", err)
+ }
+
+ switch c.General.Generator {
+ case GeneratorType_WASP:
+ if c.Wasp == nil {
+ return fmt.Errorf("wasp config is nil")
+ }
+ if c.Wasp.Load == nil {
+ return fmt.Errorf("wasp load config is nil")
+ }
+
+ err = c.Wasp.validate()
+ if err != nil {
+ return fmt.Errorf("wasp config validation failed: %w", err)
+ }
+ case GeneratorType_Looped:
+ if c.LoopedConfig == nil {
+ return fmt.Errorf("looped config is nil")
+ }
+
+ err = c.LoopedConfig.validate()
+ if err != nil {
+ return fmt.Errorf("looped config validation failed: %w", err)
+ }
+ default:
+ return fmt.Errorf("unknown generator type: %s", c.General.Generator)
+ }
+
+ return nil
+}
+
+func (g *General) validate() error {
+ if g.Generator == "" {
+ return fmt.Errorf("generator is empty")
+ }
+
+ if g.Contracts == 0 {
+ return fmt.Errorf("contracts is 0, but must be > 0")
+ }
+
+ if g.EventsPerTx == 0 {
+ return fmt.Errorf("events_per_tx is 0, but must be > 0")
+ }
+
+ return nil
+}
+
+func (w *WaspConfig) validate() error {
+ if w.Load == nil {
+ return fmt.Errorf("Load config is nil")
+ }
+
+ err := w.Load.validate()
+ if err != nil {
+ return fmt.Errorf("Load config validation failed: %w", err)
+ }
+
+ return nil
+}
+
+func (l *Load) validate() error {
+ if l.RPS == 0 && l.LPS == 0 {
+ return fmt.Errorf("either RPS or LPS needs to be set")
+ }
+
+ if l.RPS != 0 && l.LPS != 0 {
+ return fmt.Errorf("only one of RPS or LPS can be set")
+ }
+
+ if l.Duration == nil {
+ return fmt.Errorf("duration is nil")
+ }
+
+ if l.CallTimeout == nil {
+ return fmt.Errorf("call_timeout is nil")
+ }
+ if l.RateLimitUnitDuration == nil {
+ return fmt.Errorf("rate_limit_unit_duration is nil")
+ }
+
+ return nil
+}
+
+func (l *LoopedConfig) validate() error {
+ if l.ExecutionCount == 0 {
+ return fmt.Errorf("execution_count is 0, but must be > 0")
+ }
+
+ if l.MinEmitWaitTimeMs == 0 {
+ return fmt.Errorf("min_emit_wait_time_ms is 0, but must be > 0")
+ }
+
+ if l.MaxEmitWaitTimeMs == 0 {
+ return fmt.Errorf("max_emit_wait_time_ms is 0, but must be > 0")
+ }
+
+ return nil
+}
+
+func mustParseInt(s string) int {
+ i, err := strconv.Atoi(s)
+ if err != nil {
+ panic(err)
+ }
+ return i
+}
+
+func mustParseBool(s string) bool {
+ b, err := strconv.ParseBool(s)
+ if err != nil {
+ panic(err)
+ }
+ return b
+}
diff --git a/integration-tests/universal/log_poller/gun.go b/integration-tests/universal/log_poller/gun.go
new file mode 100644
index 00000000000..39286f1b53e
--- /dev/null
+++ b/integration-tests/universal/log_poller/gun.go
@@ -0,0 +1,79 @@
+package logpoller
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/rs/zerolog"
+
+ "github.com/smartcontractkit/wasp"
+
+ "github.com/smartcontractkit/chainlink/integration-tests/contracts"
+)
+
+/* LogEmitterGun is a gun that constantly emits logs from a contract */
+type LogEmitterGun struct {
+ contract *contracts.LogEmitter
+ eventsToEmit []abi.Event
+ logger zerolog.Logger
+ eventsPerTx int
+}
+
+type Counter struct {
+ mu *sync.Mutex
+ value int
+}
+
+func NewLogEmitterGun(
+ contract *contracts.LogEmitter,
+ eventsToEmit []abi.Event,
+ eventsPerTx int,
+ logger zerolog.Logger,
+) *LogEmitterGun {
+ return &LogEmitterGun{
+ contract: contract,
+ eventsToEmit: eventsToEmit,
+ eventsPerTx: eventsPerTx,
+ logger: logger,
+ }
+}
+
+func (m *LogEmitterGun) Call(l *wasp.Generator) *wasp.CallResult {
+ localCounter := 0
+ logEmitter := (*m.contract)
+ address := logEmitter.Address()
+ for _, event := range m.eventsToEmit {
+ m.logger.Debug().Str("Emitter address", address.String()).Str("Event type", event.Name).Msg("Emitting log from emitter")
+ var err error
+ switch event.Name {
+ case "Log1":
+ _, err = logEmitter.EmitLogInts(getIntSlice(m.eventsPerTx))
+ case "Log2":
+ _, err = logEmitter.EmitLogIntsIndexed(getIntSlice(m.eventsPerTx))
+ case "Log3":
+ _, err = logEmitter.EmitLogStrings(getStringSlice(m.eventsPerTx))
+ default:
+ err = fmt.Errorf("unknown event name: %s", event.Name)
+ }
+
+ if err != nil {
+ return &wasp.CallResult{Error: err.Error(), Failed: true}
+ }
+ localCounter++
+ }
+
+ // NOTE(review): the shared counter is read-modify-written under its mutex below; verify this is sufficient vs. an atomic.
+ if counter, ok := l.InputSharedData().(*Counter); ok {
+ counter.mu.Lock()
+ defer counter.mu.Unlock()
+ counter.value += localCounter
+ } else {
+ return &wasp.CallResult{
+ Error: "SharedData did not contain a Counter",
+ Failed: true,
+ }
+ }
+
+ return &wasp.CallResult{}
+}
diff --git a/integration-tests/universal/log_poller/helpers.go b/integration-tests/universal/log_poller/helpers.go
new file mode 100644
index 00000000000..08ceb4a7be4
--- /dev/null
+++ b/integration-tests/universal/log_poller/helpers.go
@@ -0,0 +1,1121 @@
+package logpoller
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "math/big"
+ "math/rand"
+ "sort"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ geth "github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/common"
+ geth_types "github.com/ethereum/go-ethereum/core/types"
+ "github.com/jmoiron/sqlx"
+ "github.com/rs/zerolog"
+ "github.com/scylladb/go-reflectx"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/wasp"
+
+ "github.com/smartcontractkit/chainlink-testing-framework/blockchain"
+ ctf_test_env "github.com/smartcontractkit/chainlink-testing-framework/docker/test_env"
+ "github.com/smartcontractkit/chainlink-testing-framework/logging"
+ "github.com/smartcontractkit/chainlink-testing-framework/networks"
+
+ evmcfg "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ cltypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/automation_utils_2_1"
+ le "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/log_emitter"
+ core_logger "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/pg"
+ "github.com/smartcontractkit/chainlink/v2/core/store/models"
+
+ "github.com/smartcontractkit/chainlink/integration-tests/actions"
+ "github.com/smartcontractkit/chainlink/integration-tests/client"
+
+ "github.com/smartcontractkit/chainlink/integration-tests/contracts"
+ "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum"
+ "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env"
+ "github.com/smartcontractkit/chainlink/integration-tests/types/config/node"
+
+ it_utils "github.com/smartcontractkit/chainlink/integration-tests/utils"
+)
+
+var (
+ EmitterABI, _ = abi.JSON(strings.NewReader(le.LogEmitterABI))
+ automationUtilsABI = cltypes.MustGetABI(automation_utils_2_1.AutomationUtilsABI)
+ bytes0 = [32]byte{
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ } // bytes representation of 0x0000000000000000000000000000000000000000000000000000000000000000
+
+)
+
+var registerSingleTopicFilter = func(registry contracts.KeeperRegistry, upkeepID *big.Int, emitterAddress common.Address, topic common.Hash) error {
+ logTriggerConfigStruct := automation_utils_2_1.LogTriggerConfig{
+ ContractAddress: emitterAddress,
+ FilterSelector: 0,
+ Topic0: topic,
+ Topic1: bytes0,
+ Topic2: bytes0,
+ Topic3: bytes0,
+ }
+ encodedLogTriggerConfig, err := automationUtilsABI.Methods["_logTriggerConfig"].Inputs.Pack(&logTriggerConfigStruct)
+ if err != nil {
+ return err
+ }
+
+ err = registry.SetUpkeepTriggerConfig(upkeepID, encodedLogTriggerConfig)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Currently unused (November 8, 2023); might be useful in the near future, so keeping it here for now.
+// this is not really possible, log trigger doesn't support multiple topics, even if log poller does
+// var registerMultipleTopicsFilter = func(registry contracts.KeeperRegistry, upkeepID *big.Int, emitterAddress common.Address, topics []abi.Event) error {
+// if len(topics) > 4 {
+// return errors.New("Cannot register more than 4 topics")
+// }
+
+// var getTopic = func(topics []abi.Event, i int) common.Hash {
+// if i > len(topics)-1 {
+// return bytes0
+// }
+
+// return topics[i].ID
+// }
+
+// var getFilterSelector = func(topics []abi.Event) (uint8, error) {
+// switch len(topics) {
+// case 0:
+// return 0, errors.New("Cannot register filter with 0 topics")
+// case 1:
+// return 0, nil
+// case 2:
+// return 1, nil
+// case 3:
+// return 3, nil
+// case 4:
+// return 7, nil
+// default:
+// return 0, errors.New("Cannot register filter with more than 4 topics")
+// }
+// }
+
+// filterSelector, err := getFilterSelector(topics)
+// if err != nil {
+// return err
+// }
+
+// logTriggerConfigStruct := automation_utils_2_1.LogTriggerConfig{
+// ContractAddress: emitterAddress,
+// FilterSelector: filterSelector,
+// Topic0: getTopic(topics, 0),
+// Topic1: getTopic(topics, 1),
+// Topic2: getTopic(topics, 2),
+// Topic3: getTopic(topics, 3),
+// }
+// encodedLogTriggerConfig, err := automationUtilsABI.Methods["_logTriggerConfig"].Inputs.Pack(&logTriggerConfigStruct)
+// if err != nil {
+// return err
+// }
+
+// err = registry.SetUpkeepTriggerConfig(upkeepID, encodedLogTriggerConfig)
+// if err != nil {
+// return err
+// }
+
+// return nil
+// }
+
+func NewOrm(logger core_logger.SugaredLogger, chainID *big.Int, postgresDb *ctf_test_env.PostgresDb) (*logpoller.DbORM, *sqlx.DB, error) {
+ dsn := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=disable", "127.0.0.1", postgresDb.ExternalPort, postgresDb.User, postgresDb.Password, postgresDb.DbName)
+ db, err := sqlx.Open("postgres", dsn)
+ if err != nil {
+ return nil, db, err
+ }
+
+ db.MapperFunc(reflectx.CamelToSnakeASCII)
+ return logpoller.NewORM(chainID, db, logger, pg.NewQConfig(false)), db, nil
+}
+
+type ExpectedFilter struct {
+ emitterAddress common.Address
+ topic common.Hash
+}
+
+func getExpectedFilters(logEmitters []*contracts.LogEmitter, cfg *Config) []ExpectedFilter {
+ expectedFilters := make([]ExpectedFilter, 0)
+ for _, emitter := range logEmitters {
+ for _, event := range cfg.General.EventsToEmit {
+ expectedFilters = append(expectedFilters, ExpectedFilter{
+ emitterAddress: (*emitter).Address(),
+ topic: event.ID,
+ })
+ }
+ }
+
+ return expectedFilters
+}
+
+// nodeHasExpectedFilters connects to a single CL node's database and verifies
+// that every expected (emitter address, topic) pair matches at least one
+// filter registered by the node's log poller. Returns an error describing the
+// first missing filter, or (true, nil) when all are present.
+// NOTE(review): only Addresses[0] and EventSigs[0] of each known filter are
+// compared, which assumes both slices are non-empty and single-valued —
+// confirm against how these filters are registered.
+var nodeHasExpectedFilters = func(expectedFilters []ExpectedFilter, logger core_logger.SugaredLogger, chainID *big.Int, postgresDb *ctf_test_env.PostgresDb) (bool, error) {
+ orm, db, err := NewOrm(logger, chainID, postgresDb)
+ if err != nil {
+ return false, err
+ }
+
+ defer db.Close()
+ knownFilters, err := orm.LoadFilters()
+ if err != nil {
+ return false, err
+ }
+
+ // Every expected filter must be found among the node's known filters.
+ for _, expectedFilter := range expectedFilters {
+ filterFound := false
+ for _, knownFilter := range knownFilters {
+ if bytes.Equal(expectedFilter.emitterAddress.Bytes(), knownFilter.Addresses[0].Bytes()) && bytes.Equal(expectedFilter.topic.Bytes(), knownFilter.EventSigs[0].Bytes()) {
+ filterFound = true
+ break
+ }
+ }
+
+ if !filterFound {
+ return false, fmt.Errorf("no filter found for emitter %s and topic %s", expectedFilter.emitterAddress.String(), expectedFilter.topic.Hex())
+ }
+ }
+
+ return true, nil
+}
+
+// randomWait sleeps for a uniformly random duration in
+// [minMilliseconds, maxMilliseconds] milliseconds (inclusive).
+var randomWait = func(minMilliseconds, maxMilliseconds int) {
+	// Use a locally seeded RNG. The previous code called
+	// rand.New(rand.NewSource(...)) and discarded the result, which neither
+	// seeded the global source nor influenced the rand.Intn call below.
+	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
+	randomMilliseconds := rng.Intn(maxMilliseconds-minMilliseconds+1) + minMilliseconds
+	time.Sleep(time.Duration(randomMilliseconds) * time.Millisecond)
+}
+
+// LogEmitterChannel is the per-emitter result sent back by emitEvents:
+// either the number of logs emitted successfully, or the first error hit.
+type LogEmitterChannel struct {
+ // logsEmitted is the total number of logs this emitter sent.
+ logsEmitted int
+ // err is non-nil when emission was aborted by an error.
+ err error
+ // unused
+ // currentIndex int
+}
+
+// getIntSlice returns the slice [0, 1, ..., length-1].
+func getIntSlice(length int) []int {
+	out := make([]int, length)
+	for i := range out {
+		out[i] = i
+	}
+
+	return out
+}
+
+// getStringSlice returns a slice of the given length in which every element
+// is the fixed payload "amazing event".
+func getStringSlice(length int) []string {
+	out := make([]string, length)
+	for i := range out {
+		out[i] = "amazing event"
+	}
+
+	return out
+}
+
+// emitEvents emits cfg.LoopedConfig.ExecutionCount rounds of every configured
+// event type from a single log emitter contract, sleeping a random interval
+// between transactions. It sends exactly one LogEmitterChannel result on
+// `results` — the first error, or the total log count on success — unless the
+// context was already cancelled when it started (then nothing is sent).
+var emitEvents = func(ctx context.Context, l zerolog.Logger, logEmitter *contracts.LogEmitter, cfg *Config, wg *sync.WaitGroup, results chan LogEmitterChannel) {
+	// Release the WaitGroup on every exit path. Previously wg.Done() was
+	// deferred inside the `default` branch only, so an already-cancelled
+	// context skipped the defer registration, leaking a WaitGroup count and
+	// deadlocking the caller's wg.Wait().
+	defer wg.Done()
+
+	address := (*logEmitter).Address().String()
+	localCounter := 0
+	// NOTE(review): the context is inspected only once, before emission
+	// starts; cancellation mid-run does not interrupt the loops below.
+	select {
+	case <-ctx.Done():
+		l.Warn().Str("Emitter address", address).Msg("Context cancelled, not emitting events")
+		return
+	default:
+		for i := 0; i < cfg.LoopedConfig.ExecutionCount; i++ {
+			for _, event := range cfg.General.EventsToEmit {
+				l.Debug().Str("Emitter address", address).Str("Event type", event.Name).Str("index", fmt.Sprintf("%d/%d", (i+1), cfg.LoopedConfig.ExecutionCount)).Msg("Emitting log from emitter")
+				var err error
+				switch event.Name {
+				case "Log1":
+					_, err = (*logEmitter).EmitLogInts(getIntSlice(cfg.General.EventsPerTx))
+				case "Log2":
+					_, err = (*logEmitter).EmitLogIntsIndexed(getIntSlice(cfg.General.EventsPerTx))
+				case "Log3":
+					_, err = (*logEmitter).EmitLogStrings(getStringSlice(cfg.General.EventsPerTx))
+				default:
+					err = fmt.Errorf("unknown event name: %s", event.Name)
+				}
+
+				if err != nil {
+					results <- LogEmitterChannel{
+						logsEmitted: 0,
+						err:         err,
+					}
+					return
+				}
+				localCounter += cfg.General.EventsPerTx
+
+				// Random pause between transactions, per fuzz config.
+				randomWait(cfg.LoopedConfig.FuzzConfig.MinEmitWaitTimeMs, cfg.LoopedConfig.FuzzConfig.MaxEmitWaitTimeMs)
+			}
+
+			if (i+1)%10 == 0 {
+				l.Info().Str("Emitter address", address).Str("Index", fmt.Sprintf("%d/%d", i+1, cfg.LoopedConfig.ExecutionCount)).Msg("Emitted all three events")
+			}
+		}
+
+		l.Info().Str("Emitter address", address).Int("Total logs emitted", localCounter).Msg("Finished emitting events")
+
+		results <- LogEmitterChannel{
+			logsEmitted: localCounter,
+			err:         nil,
+		}
+	}
+}
+
+// chainHasFinalisedEndBlock reports whether the chain's latest finalized block
+// header is strictly past endBlock+1, logging progress on each poll.
+// NOTE(review): the +1 plus the strict `>` make this wait until finalization
+// is two blocks beyond endBlock, which is further than the log message
+// ("move beyond end block") suggests — confirm the extra margin is intended.
+var chainHasFinalisedEndBlock = func(l zerolog.Logger, evmClient blockchain.EVMClient, endBlock int64) (bool, error) {
+ effectiveEndBlock := endBlock + 1
+ lastFinalisedBlockHeader, err := evmClient.GetLatestFinalizedBlockHeader(context.Background())
+ if err != nil {
+ return false, err
+ }
+
+ l.Info().Int64("Last finalised block header", lastFinalisedBlockHeader.Number.Int64()).Int64("End block", effectiveEndBlock).Int64("Blocks left till end block", effectiveEndBlock-lastFinalisedBlockHeader.Number.Int64()).Msg("Waiting for the finalized block to move beyond end block")
+
+ return lastFinalisedBlockHeader.Number.Int64() > effectiveEndBlock, nil
+}
+
+// logPollerHasFinalisedEndBlock checks whether every worker CL node (all but
+// the bootstrap node at index 0) has a log poller "latest block" whose
+// finalized block number is past endBlock. It queries each node's DB
+// concurrently and returns true only when every worker node has finalised the
+// end block; the first DB error encountered is returned, if any.
+var logPollerHasFinalisedEndBlock = func(endBlock int64, chainID *big.Int, l zerolog.Logger, coreLogger core_logger.SugaredLogger, nodes *test_env.ClCluster) (bool, error) {
+	wg := &sync.WaitGroup{}
+
+	// Per-node query outcome.
+	type boolQueryResult struct {
+		nodeName     string
+		hasFinalised bool
+		err          error
+	}
+
+	// Buffered for one result per worker node so senders never block.
+	endBlockCh := make(chan boolQueryResult, len(nodes.Nodes)-1)
+	ctx, cancelFn := context.WithCancel(context.Background())
+
+	for i := 1; i < len(nodes.Nodes); i++ {
+		wg.Add(1)
+
+		go func(clNode *test_env.ClNode, r chan boolQueryResult) {
+			defer wg.Done()
+			select {
+			case <-ctx.Done():
+				return
+			default:
+				orm, db, err := NewOrm(coreLogger, chainID, clNode.PostgresDb)
+				if err != nil {
+					// Report and stop. Previously execution fell through here
+					// and dereferenced a nil ORM.
+					r <- boolQueryResult{nodeName: clNode.ContainerName, hasFinalised: false, err: err}
+					return
+				}
+
+				defer db.Close()
+
+				latestBlock, err := orm.SelectLatestBlock()
+				if err != nil {
+					// Same missing-return defect as above.
+					r <- boolQueryResult{nodeName: clNode.ContainerName, hasFinalised: false, err: err}
+					return
+				}
+
+				r <- boolQueryResult{nodeName: clNode.ContainerName, hasFinalised: latestBlock.FinalizedBlockNumber > endBlock, err: nil}
+			}
+		}(nodes.Nodes[i], endBlockCh)
+	}
+
+	var err error
+	allFinalisedCh := make(chan bool, 1)
+
+	// Aggregate per-node results; sends exactly one value on allFinalisedCh.
+	go func() {
+		foundMap := make(map[string]bool)
+		for r := range endBlockCh {
+			if r.err != nil {
+				err = r.err
+				cancelFn()
+				// Unblock the final receive below. Previously nothing was
+				// sent on the error path and the function deadlocked on
+				// <-allFinalisedCh.
+				allFinalisedCh <- false
+				return
+			}
+
+			foundMap[r.nodeName] = r.hasFinalised
+			if r.hasFinalised {
+				l.Info().Str("Node name", r.nodeName).Msg("CL node has finalised end block")
+			} else {
+				l.Warn().Str("Node name", r.nodeName).Msg("CL node has not finalised end block yet")
+			}
+
+			if len(foundMap) == len(nodes.Nodes)-1 {
+				allFinalised := true
+				for _, v := range foundMap {
+					if !v {
+						allFinalised = false
+						break
+					}
+				}
+
+				allFinalisedCh <- allFinalised
+				return
+			}
+		}
+	}()
+
+	wg.Wait()
+	close(endBlockCh)
+
+	// The send on allFinalisedCh happens-before this receive, so reading err
+	// here is race-free.
+	return <-allFinalisedCh, err
+}
+
+// clNodesHaveExpectedLogCount queries every worker CL node's log poller DB for
+// logs matching the expected filters in [startBlock, endBlock] and reports
+// whether each node holds at least expectedLogCount logs. Returns true only if
+// all worker nodes do; the first DB error encountered is returned, if any.
+var clNodesHaveExpectedLogCount = func(startBlock, endBlock int64, chainID *big.Int, expectedLogCount int, expectedFilters []ExpectedFilter, l zerolog.Logger, coreLogger core_logger.SugaredLogger, nodes *test_env.ClCluster) (bool, error) {
+	wg := &sync.WaitGroup{}
+
+	// Per-node query outcome.
+	type logQueryResult struct {
+		nodeName         string
+		logCount         int
+		hasExpectedCount bool
+		err              error
+	}
+
+	// Buffered for one result per worker node so senders never block.
+	queryCh := make(chan logQueryResult, len(nodes.Nodes)-1)
+	ctx, cancelFn := context.WithCancel(context.Background())
+
+	for i := 1; i < len(nodes.Nodes); i++ {
+		wg.Add(1)
+
+		go func(clNode *test_env.ClNode, r chan logQueryResult) {
+			defer wg.Done()
+			select {
+			case <-ctx.Done():
+				return
+			default:
+				orm, db, err := NewOrm(coreLogger, chainID, clNode.PostgresDb)
+				if err != nil {
+					// Report and stop. Previously execution continued with a
+					// nil ORM after sending the error.
+					r <- logQueryResult{nodeName: clNode.ContainerName, logCount: 0, hasExpectedCount: false, err: err}
+					return
+				}
+
+				defer db.Close()
+				foundLogsCount := 0
+
+				// Sum the matching logs across all expected filters.
+				for _, filter := range expectedFilters {
+					logs, err := orm.SelectLogs(startBlock, endBlock, filter.emitterAddress, filter.topic)
+					if err != nil {
+						// Same missing-return defect as above.
+						r <- logQueryResult{nodeName: clNode.ContainerName, logCount: 0, hasExpectedCount: false, err: err}
+						return
+					}
+
+					foundLogsCount += len(logs)
+				}
+
+				// Success path: err is nil here by construction (previously
+				// the stale `err` variable was forwarded).
+				r <- logQueryResult{nodeName: clNode.ContainerName, logCount: foundLogsCount, hasExpectedCount: foundLogsCount >= expectedLogCount, err: nil}
+			}
+		}(nodes.Nodes[i], queryCh)
+	}
+
+	var err error
+	allFoundCh := make(chan bool, 1)
+
+	// Aggregate per-node results; sends exactly one value on allFoundCh.
+	go func() {
+		foundMap := make(map[string]bool)
+		for r := range queryCh {
+			if r.err != nil {
+				err = r.err
+				cancelFn()
+				// Unblock the final receive below. Previously nothing was
+				// sent on the error path and the function deadlocked on
+				// <-allFoundCh.
+				allFoundCh <- false
+				return
+			}
+
+			foundMap[r.nodeName] = r.hasExpectedCount
+			if r.hasExpectedCount {
+				l.Info().Str("Node name", r.nodeName).Int("Logs count", r.logCount).Msg("Expected log count found in CL node")
+			} else {
+				l.Warn().Str("Node name", r.nodeName).Str("Found/Expected logs", fmt.Sprintf("%d/%d", r.logCount, expectedLogCount)).Int("Missing logs", expectedLogCount-r.logCount).Msg("Too low log count found in CL node")
+			}
+
+			if len(foundMap) == len(nodes.Nodes)-1 {
+				allFound := true
+				for _, found := range foundMap {
+					if !found {
+						allFound = false
+						break
+					}
+				}
+
+				allFoundCh <- allFound
+				return
+			}
+		}
+	}()
+
+	wg.Wait()
+	close(queryCh)
+
+	return <-allFoundCh, err
+}
+
+// MissingLogs maps a CL node's container name to the EVM logs its log poller
+// failed to pick up.
+type MissingLogs map[string][]geth_types.Log
+
+// IsEmpty reports whether no node is missing any logs.
+func (m *MissingLogs) IsEmpty() bool {
+	for _, nodeLogs := range *m {
+		if len(nodeLogs) != 0 {
+			return false
+		}
+	}
+
+	return true
+}
+
+// getMissingLogs fetches, for every worker CL node, all log poller logs for
+// the configured emitters/events in [startBlock, endBlock], fetches the same
+// logs directly from the EVM node, and returns a per-node map of the EVM logs
+// that each node's log poller is missing.
+var getMissingLogs = func(startBlock, endBlock int64, logEmitters []*contracts.LogEmitter, evmClient blockchain.EVMClient, clnodeCluster *test_env.ClCluster, l zerolog.Logger, coreLogger core_logger.SugaredLogger, cfg *Config) (MissingLogs, error) {
+	wg := &sync.WaitGroup{}
+
+	// Per-node DB query outcome.
+	type dbQueryResult struct {
+		err      error
+		nodeName string
+		logs     []logpoller.Log
+	}
+
+	ctx, cancelFn := context.WithCancel(context.Background())
+	// Buffered for one result per worker node so senders never block.
+	resultCh := make(chan dbQueryResult, len(clnodeCluster.Nodes)-1)
+
+	for i := 1; i < len(clnodeCluster.Nodes); i++ {
+		wg.Add(1)
+
+		go func(ctx context.Context, i int, r chan dbQueryResult) {
+			defer wg.Done()
+			select {
+			case <-ctx.Done():
+				l.Warn().Msg("Context cancelled. Terminating fetching logs from log poller's DB")
+				return
+			default:
+				nodeName := clnodeCluster.Nodes[i].ContainerName
+
+				l.Info().Str("Node name", nodeName).Msg("Fetching log poller logs")
+				orm, db, err := NewOrm(coreLogger, evmClient.GetChainID(), clnodeCluster.Nodes[i].PostgresDb)
+				if err != nil {
+					// Report and stop. Previously execution continued with a
+					// nil ORM after sending the error.
+					r <- dbQueryResult{err: err, nodeName: nodeName, logs: []logpoller.Log{}}
+					return
+				}
+
+				defer db.Close()
+				logs := make([]logpoller.Log, 0)
+
+				for j := 0; j < len(logEmitters); j++ {
+					address := (*logEmitters[j]).Address()
+
+					for _, event := range cfg.General.EventsToEmit {
+						l.Debug().Str("Event name", event.Name).Str("Emitter address", address.String()).Msg("Fetching single emitter's logs")
+						result, err := orm.SelectLogs(startBlock, endBlock, address, event.ID)
+						if err != nil {
+							// Same missing-return defect as above.
+							r <- dbQueryResult{err: err, nodeName: nodeName, logs: []logpoller.Log{}}
+							return
+						}
+
+						sort.Slice(result, func(i, j int) bool {
+							return result[i].BlockNumber < result[j].BlockNumber
+						})
+
+						logs = append(logs, result...)
+
+						l.Debug().Str("Event name", event.Name).Str("Emitter address", address.String()).Int("Log count", len(result)).Msg("Logs found per node")
+					}
+				}
+
+				l.Warn().Int("Count", len(logs)).Str("Node name", nodeName).Msg("Fetched log poller logs")
+
+				r <- dbQueryResult{err: nil, nodeName: nodeName, logs: logs}
+			}
+		}(ctx, i, resultCh)
+	}
+
+	allLogPollerLogs := make(map[string][]logpoller.Log)
+	missingLogs := map[string][]geth_types.Log{}
+	var dbError error
+
+	// Aggregate DB results. aggrDone lets us wait for the aggregator before
+	// reading allLogPollerLogs below — previously that read raced with the
+	// aggregator's writes.
+	aggrDone := make(chan struct{})
+	go func() {
+		defer close(aggrDone)
+		for r := range resultCh {
+			if r.err != nil {
+				l.Err(r.err).Str("Node name", r.nodeName).Msg("Error fetching logs from log poller's DB")
+				dbError = r.err
+				cancelFn()
+				return
+			}
+			allLogPollerLogs[r.nodeName] = r.logs
+		}
+	}()
+
+	wg.Wait()
+	close(resultCh)
+	<-aggrDone
+
+	if dbError != nil {
+		return nil, dbError
+	}
+
+	allLogsInEVMNode, err := getEVMLogs(startBlock, endBlock, logEmitters, evmClient, l, cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	wg = &sync.WaitGroup{}
+
+	type missingLogResult struct {
+		nodeName string
+		logs     []geth_types.Log
+	}
+
+	l.Info().Msg("Started comparison of logs from EVM node and CL nodes. This may take a while if there's a lot of logs")
+	missingCh := make(chan missingLogResult, len(clnodeCluster.Nodes)-1)
+	evmLogCount := len(allLogsInEVMNode)
+	for i := 1; i < len(clnodeCluster.Nodes); i++ {
+		wg.Add(1)
+
+		go func(i int, result chan missingLogResult) {
+			defer wg.Done()
+			nodeName := clnodeCluster.Nodes[i].ContainerName
+			l.Info().Str("Node name", nodeName).Str("Progress", fmt.Sprintf("0/%d", evmLogCount)).Msg("Comparing single CL node's logs with EVM logs")
+
+			missing := make([]geth_types.Log, 0)
+			for i, evmLog := range allLogsInEVMNode {
+				logFound := false
+				for _, logPollerLog := range allLogPollerLogs[nodeName] {
+					if logPollerLog.BlockNumber == int64(evmLog.BlockNumber) && logPollerLog.TxHash == evmLog.TxHash && bytes.Equal(logPollerLog.Data, evmLog.Data) && logPollerLog.LogIndex == int64(evmLog.Index) &&
+						logPollerLog.Address == evmLog.Address && logPollerLog.BlockHash == evmLog.BlockHash && bytes.Equal(logPollerLog.Topics[0][:], evmLog.Topics[0].Bytes()) {
+						logFound = true
+						// Stop scanning this node's logs once matched
+						// (previously `continue`, which kept scanning).
+						break
+					}
+				}
+
+				if i%10000 == 0 && i != 0 {
+					l.Info().Str("Node name", nodeName).Str("Progress", fmt.Sprintf("%d/%d", i, evmLogCount)).Msg("Comparing single CL node's logs with EVM logs")
+				}
+
+				if !logFound {
+					missing = append(missing, evmLog)
+				}
+			}
+
+			if len(missing) > 0 {
+				// Typo fix: "EMV" -> "EVM".
+				l.Warn().Int("Count", len(missing)).Str("Node name", nodeName).Msg("Some EVM logs were missing from CL node")
+			} else {
+				l.Info().Str("Node name", nodeName).Msg("All EVM logs were found in CL node")
+			}
+
+			result <- missingLogResult{nodeName: nodeName, logs: missing}
+		}(i, missingCh)
+	}
+
+	wg.Wait()
+	close(missingCh)
+
+	for v := range missingCh {
+		if len(v.logs) > 0 {
+			missingLogs[v.nodeName] = v.logs
+		}
+	}
+
+	expectedTotalLogsEmitted := getExpectedLogCount(cfg)
+	if int64(len(allLogsInEVMNode)) != expectedTotalLogsEmitted {
+		// Actual count first, expected second (the values were swapped).
+		l.Warn().Str("Actual/Expected", fmt.Sprintf("%d/%d", len(allLogsInEVMNode), expectedTotalLogsEmitted)).Msg("Some of the test logs were not found in EVM node. This is a bug in the test")
+	}
+
+	return missingLogs, nil
+}
+
+// printMissingLogsByType aggregates the missing logs by event name (resolved
+// from each log's first topic against the configured events) and emits one
+// warning per event type with its total count.
+var printMissingLogsByType = func(missingLogs map[string][]geth_types.Log, l zerolog.Logger, cfg *Config) {
+	// Resolve a topic hash to a human-readable event name.
+	eventName := func(topic common.Hash) string {
+		for _, event := range cfg.General.EventsToEmit {
+			if event.ID == topic {
+				return event.Name
+			}
+		}
+
+		return "Unknown event"
+	}
+
+	counts := make(map[string]int)
+	for _, nodeLogs := range missingLogs {
+		for _, missingLog := range nodeLogs {
+			counts[eventName(missingLog.Topics[0])]++
+		}
+	}
+
+	for name, count := range counts {
+		l.Warn().Str("Event name", name).Int("Missing count", count).Msg("Missing logs by type")
+	}
+}
+
+// getEVMLogs queries the EVM node directly (evmClient.FilterLogs) for every
+// (emitter address, event topic) pair in [startBlock, endBlock] and returns
+// all results concatenated; each per-pair batch is sorted by block number
+// before being appended.
+var getEVMLogs = func(startBlock, endBlock int64, logEmitters []*contracts.LogEmitter, evmClient blockchain.EVMClient, l zerolog.Logger, cfg *Config) ([]geth_types.Log, error) {
+ allLogsInEVMNode := make([]geth_types.Log, 0)
+ for j := 0; j < len(logEmitters); j++ {
+ address := (*logEmitters[j]).Address()
+ for _, event := range cfg.General.EventsToEmit {
+ l.Debug().Str("Event name", event.Name).Str("Emitter address", address.String()).Msg("Fetching logs from EVM node")
+ logsInEVMNode, err := evmClient.FilterLogs(context.Background(), geth.FilterQuery{
+ Addresses: []common.Address{(address)},
+ Topics: [][]common.Hash{{event.ID}},
+ FromBlock: big.NewInt(startBlock),
+ ToBlock: big.NewInt(endBlock),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ sort.Slice(logsInEVMNode, func(i, j int) bool {
+ return logsInEVMNode[i].BlockNumber < logsInEVMNode[j].BlockNumber
+ })
+
+ allLogsInEVMNode = append(allLogsInEVMNode, logsInEVMNode...)
+ l.Debug().Str("Event name", event.Name).Str("Emitter address", address.String()).Int("Log count", len(logsInEVMNode)).Msg("Logs found in EVM node")
+ }
+ }
+
+ l.Warn().Int("Count", len(allLogsInEVMNode)).Msg("Logs in EVM node")
+
+ return allLogsInEVMNode, nil
+}
+
+// executeGenerator dispatches to the WASP-based or looped load generator
+// based on cfg.General.Generator and returns the total number of logs emitted.
+func executeGenerator(t *testing.T, cfg *Config, logEmitters []*contracts.LogEmitter) (int, error) {
+	switch cfg.General.Generator {
+	case GeneratorType_WASP:
+		return runWaspGenerator(t, cfg, logEmitters)
+	default:
+		return runLoopedGenerator(t, cfg, logEmitters)
+	}
+}
+
+// runWaspGenerator drives log emission with the WASP load generator: one RPS
+// generator per emitter contract, with the per-generator rate derived from
+// either cfg.Wasp.Load.LPS or cfg.Wasp.Load.RPS. Returns the total number of
+// logs emitted as tracked by the shared Counter.
+func runWaspGenerator(t *testing.T, cfg *Config, logEmitters []*contracts.LogEmitter) (int, error) {
+ l := logging.GetTestLogger(t)
+
+ var RPSprime int64
+
+ // If LPS is set, derive the per-generator rate from contract count,
+ // events per transaction and the number of event types.
+ if cfg.Wasp.Load.LPS > 0 {
+ RPSprime = cfg.Wasp.Load.LPS / int64(cfg.General.Contracts) / int64(cfg.General.EventsPerTx) / int64(len(cfg.General.EventsToEmit))
+
+ if RPSprime < 1 {
+ return 0, fmt.Errorf("invalid load configuration, effective RPS would have been zero. Adjust LPS, contracts count, events per tx or events to emit")
+ }
+ }
+
+ // if RPS is set simply split it between contracts
+ // NOTE(review): when both LPS and RPS are set, RPS silently wins.
+ if cfg.Wasp.Load.RPS > 0 {
+ RPSprime = cfg.Wasp.Load.RPS / int64(cfg.General.Contracts)
+ }
+
+ // Shared counter so all generators can report a combined total.
+ counter := &Counter{
+ mu: &sync.Mutex{},
+ value: 0,
+ }
+
+ p := wasp.NewProfile()
+
+ for _, logEmitter := range logEmitters {
+ g, err := wasp.NewGenerator(&wasp.Config{
+ T: t,
+ LoadType: wasp.RPS,
+ GenName: fmt.Sprintf("log_poller_gen_%s", (*logEmitter).Address().String()),
+ RateLimitUnitDuration: cfg.Wasp.Load.RateLimitUnitDuration.Duration(),
+ CallTimeout: cfg.Wasp.Load.CallTimeout.Duration(),
+ Schedule: wasp.Plain(
+ RPSprime,
+ cfg.Wasp.Load.Duration.Duration(),
+ ),
+ Gun: NewLogEmitterGun(
+ logEmitter,
+ cfg.General.EventsToEmit,
+ cfg.General.EventsPerTx,
+ l,
+ ),
+ SharedData: counter,
+ })
+ p.Add(g, err)
+ }
+
+ _, err := p.Run(true)
+
+ if err != nil {
+ return 0, err
+ }
+
+ return counter.value, nil
+}
+
+// runLoopedGenerator emits events from all log emitters concurrently (one
+// goroutine per emitter) and returns the total number of logs emitted, or the
+// first emission error. On error the shared context is cancelled so emitters
+// that have not started yet stop early.
+func runLoopedGenerator(t *testing.T, cfg *Config, logEmitters []*contracts.LogEmitter) (int, error) {
+	l := logging.GetTestLogger(t)
+
+	wg := &sync.WaitGroup{}
+	emitterCh := make(chan LogEmitterChannel, len(logEmitters))
+
+	ctx, cancelFn := context.WithCancel(context.Background())
+	defer cancelFn()
+
+	for i := 0; i < len(logEmitters); i++ {
+		wg.Add(1)
+		go emitEvents(ctx, l, logEmitters[i], cfg, wg, emitterCh)
+	}
+
+	var emitErr error
+	total := 0
+
+	// Drain results as they arrive; readerDone lets us wait for the reader
+	// before touching total/emitErr. The previous implementation received a
+	// fixed len(logEmitters) values from a second channel, which deadlocked
+	// whenever an emitter errored (the reader returned early) or a cancelled
+	// emitter never sent a result.
+	readerDone := make(chan struct{})
+	go func() {
+		defer close(readerDone)
+		for result := range emitterCh {
+			if result.err != nil {
+				if emitErr == nil {
+					emitErr = result.err
+				}
+				cancelFn()
+				continue
+			}
+			total += result.logsEmitted
+		}
+	}()
+
+	wg.Wait()
+	close(emitterCh)
+	<-readerDone
+
+	if emitErr != nil {
+		return 0, emitErr
+	}
+
+	return total, nil
+}
+
+// getExpectedLogCount computes how many logs the configured generator is
+// expected to emit in total. For WASP it is rate * duration (RPS additionally
+// scaled by events per tx); for the looped generator it is
+// eventTypes * executions * contracts * eventsPerTx.
+func getExpectedLogCount(cfg *Config) int64 {
+ if cfg.General.Generator == GeneratorType_WASP {
+ if cfg.Wasp.Load.RPS != 0 {
+ return cfg.Wasp.Load.RPS * int64(cfg.Wasp.Load.Duration.Duration().Seconds()) * int64(cfg.General.EventsPerTx)
+ }
+ return cfg.Wasp.Load.LPS * int64(cfg.Wasp.Load.Duration.Duration().Seconds())
+ }
+
+ return int64(len(cfg.General.EventsToEmit) * cfg.LoopedConfig.ExecutionCount * cfg.General.Contracts * cfg.General.EventsPerTx)
+}
+
+// chaosPauseSyncFn pauses one randomly chosen component — either a random
+// worker CL node or that node's Postgres DB (the bootstrap node at index 0 is
+// never picked) — for a random 5-19 second interval, blocking until it is
+// unpaused, and returns any error from the pause.
+var chaosPauseSyncFn = func(l zerolog.Logger, testEnv *test_env.CLClusterTestEnv) error {
+	// Local RNG; the previous rand.New(...) call discarded its result and had
+	// no effect on the global source it went on to use.
+	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
+	pauseNode := rng.Intn(2) == 0
+
+	// Index 0 is excluded, so the bootstrap node is never paused.
+	randomNode := testEnv.ClCluster.Nodes[rng.Intn(len(testEnv.ClCluster.Nodes)-1)+1]
+	var component ctf_test_env.EnvComponent
+
+	if pauseNode {
+		component = randomNode.EnvComponent
+	} else {
+		component = randomNode.PostgresDb.EnvComponent
+	}
+
+	pauseTimeSec := rng.Intn(20-5) + 5
+	l.Info().Str("Container", component.ContainerName).Int("Pause time", pauseTimeSec).Msg("Pausing component")
+	pauseTimeDur := time.Duration(pauseTimeSec) * time.Second
+	err := component.ChaosPause(l, pauseTimeDur)
+	if err != nil {
+		// Don't claim the component was unpaused when the pause itself
+		// failed (previously the success message was logged regardless).
+		return err
+	}
+	l.Info().Str("Container", component.ContainerName).Msg("Component unpaused")
+
+	return nil
+}
+
+// executeChaosExperiment runs cfg.ChaosConfig.ExperimentCount pause
+// experiments (guardChan keeps at most one in flight at a time) and reports
+// the outcome exactly once on errorCh: the first experiment error, or nil
+// after all experiments complete. Returns immediately; all work happens in
+// goroutines.
+var executeChaosExperiment = func(l zerolog.Logger, testEnv *test_env.CLClusterTestEnv, cfg *Config, errorCh chan error) {
+ // Nothing to do when chaos is disabled — still signal completion.
+ if cfg.ChaosConfig == nil || cfg.ChaosConfig.ExperimentCount == 0 {
+ errorCh <- nil
+ return
+ }
+
+ chaosChan := make(chan error, cfg.ChaosConfig.ExperimentCount)
+
+ wg := &sync.WaitGroup{}
+
+ go func() {
+ // if we wanted to have more than 1 container paused, we'd need to make sure we aren't trying to pause an already paused one
+ guardChan := make(chan struct{}, 1)
+
+ for i := 0; i < cfg.ChaosConfig.ExperimentCount; i++ {
+ i := i
+ wg.Add(1)
+ guardChan <- struct{}{}
+ go func() {
+ defer func() {
+ <-guardChan
+ wg.Done()
+ l.Info().Str("Current/Total", fmt.Sprintf("%d/%d", i, cfg.ChaosConfig.ExperimentCount)).Msg("Done with experiment")
+ }()
+ chaosChan <- chaosPauseSyncFn(l, testEnv)
+ }()
+ }
+
+ wg.Wait()
+
+ close(chaosChan)
+ }()
+
+ go func() {
+ for err := range chaosChan {
+ // This will receive errors until chaosChan is closed
+ if err != nil {
+ // If an error is encountered, log it, send it to the error channel, and return from the function
+ l.Err(err).Msg("Error encountered during chaos experiment")
+ errorCh <- err
+ return // Return on actual error
+ }
+ // No need for an else block here, because if err is nil (which happens when the channel is closed),
+ // the loop will exit and the following log and nil send will execute.
+ }
+
+ // After the loop exits, which it will do when chaosChan is closed, log that all experiments are finished.
+ l.Info().Msg("All chaos experiments finished")
+ errorCh <- nil // Only send nil once, after all errors have been handled and the channel is closed
+ }()
+}
+
+// GetFinalityDepth returns the hard-coded finality depth for a supported
+// chain ID, or an error for an unknown chain.
+var GetFinalityDepth = func(chainId int64) (int64, error) {
+	depths := map[int64]int64{
+		11155111: 50,  // Ethereum Sepolia
+		80001:    500, // Polygon Mumbai
+		1337:     10,  // Simulated network
+	}
+
+	depth, known := depths[chainId]
+	if !known {
+		return 0, fmt.Errorf("no known finality depth for chain %d", chainId)
+	}
+
+	return depth, nil
+}
+
+// GetEndBlockToWaitFor returns the block height that must be reached before
+// endBlock can be considered final: endBlock+1 when finality tags are in use,
+// otherwise endBlock plus the chain's fixed finality depth.
+var GetEndBlockToWaitFor = func(endBlock, chainId int64, cfg *Config) (int64, error) {
+	if cfg.General.UseFinalityTag {
+		return endBlock + 1, nil
+	}
+
+	depth, err := GetFinalityDepth(chainId)
+	if err != nil {
+		return 0, err
+	}
+
+	return endBlock + depth, nil
+}
+
+// Defaults for the automation/keeper side of the log poller test setup.
+const (
+ automationDefaultUpkeepGasLimit = uint32(2500000)
+ automationDefaultLinkFunds = int64(9e18)
+ automationDefaultUpkeepsToDeploy = 10
+ automationExpectedData = "abcdef"
+ defaultAmountOfUpkeeps = 2
+)
+
+var (
+ // defaultOCRRegistryConfig is the keeper registry configuration used when
+ // deploying the registry in setupLogPollerTestDocker.
+ defaultOCRRegistryConfig = contracts.KeeperRegistrySettings{
+ PaymentPremiumPPB: uint32(200000000),
+ FlatFeeMicroLINK: uint32(0),
+ BlockCountPerTurn: big.NewInt(10),
+ CheckGasLimit: uint32(2500000),
+ StalenessSeconds: big.NewInt(90000),
+ GasCeilingMultiplier: uint16(1),
+ MinUpkeepSpend: big.NewInt(0),
+ MaxPerformGas: uint32(5000000),
+ FallbackGasPrice: big.NewInt(2e11),
+ FallbackLinkPrice: big.NewInt(2e18),
+ MaxCheckDataSize: uint32(5000),
+ MaxPerformDataSize: uint32(5000),
+ }
+
+ // automationDefaultRegistryConfig is the configuration applied via
+ // registry.SetConfig in setupLogPollerTestDocker.
+ // NOTE(review): field-for-field identical to defaultOCRRegistryConfig.
+ // Kept as a separate literal (rather than aliasing) because the structs
+ // hold *big.Int fields that would otherwise be shared — consider
+ // deduplicating deliberately if they are meant to stay in sync.
+ automationDefaultRegistryConfig = contracts.KeeperRegistrySettings{
+ PaymentPremiumPPB: uint32(200000000),
+ FlatFeeMicroLINK: uint32(0),
+ BlockCountPerTurn: big.NewInt(10),
+ CheckGasLimit: uint32(2500000),
+ StalenessSeconds: big.NewInt(90000),
+ GasCeilingMultiplier: uint16(1),
+ MinUpkeepSpend: big.NewInt(0),
+ MaxPerformGas: uint32(5000000),
+ FallbackGasPrice: big.NewInt(2e11),
+ FallbackLinkPrice: big.NewInt(2e18),
+ MaxCheckDataSize: uint32(5000),
+ MaxPerformDataSize: uint32(5000),
+ }
+)
+
+// setupLogPollerTestDocker spins up a complete dockerized test environment for
+// log poller tests: a private PoS Ethereum network, a 5-node CL cluster (node
+// 0 acts as bootstrap), a LINK token (deployed or loaded depending on chain),
+// and an automation registry + registrar configured for OCR keeper jobs.
+// The test fails (via require) on any setup error rather than returning one.
+func setupLogPollerTestDocker(
+ t *testing.T,
+ registryVersion ethereum.KeeperRegistryVersion,
+ registryConfig contracts.KeeperRegistrySettings,
+ upkeepsNeeded int,
+ lpPollingInterval time.Duration,
+ finalityTagEnabled bool,
+) (
+ blockchain.EVMClient,
+ []*client.ChainlinkClient,
+ contracts.ContractDeployer,
+ contracts.LinkToken,
+ contracts.KeeperRegistry,
+ contracts.KeeperRegistrar,
+ *test_env.CLClusterTestEnv,
+) {
+ l := logging.GetTestLogger(t)
+ // Add registry version to config
+ registryConfig.RegistryVersion = registryVersion
+ network := networks.MustGetSelectedNetworksFromEnv()[0]
+
+ finalityDepth, err := GetFinalityDepth(network.ChainID)
+ require.NoError(t, err, "Error getting finality depth")
+
+ // build the node config
+ clNodeConfig := node.NewConfig(node.NewBaseConfig())
+ syncInterval := models.MustMakeDuration(5 * time.Minute)
+ clNodeConfig.Feature.LogPoller = it_utils.Ptr[bool](true)
+ clNodeConfig.OCR2.Enabled = it_utils.Ptr[bool](true)
+ clNodeConfig.Keeper.TurnLookBack = it_utils.Ptr[int64](int64(0))
+ clNodeConfig.Keeper.Registry.SyncInterval = &syncInterval
+ clNodeConfig.Keeper.Registry.PerformGasOverhead = it_utils.Ptr[uint32](uint32(150000))
+ clNodeConfig.P2P.V2.Enabled = it_utils.Ptr[bool](true)
+ clNodeConfig.P2P.V2.AnnounceAddresses = &[]string{"0.0.0.0:6690"}
+ clNodeConfig.P2P.V2.ListenAddresses = &[]string{"0.0.0.0:6690"}
+
+ //launch the environment
+ var env *test_env.CLClusterTestEnv
+ chainlinkNodeFunding := 0.5
+ l.Debug().Msgf("Funding amount: %f", chainlinkNodeFunding)
+ clNodesCount := 5
+
+ // Per-chain log poller settings applied to every node's EVM config.
+ var logPolllerSettingsFn = func(chain *evmcfg.Chain) *evmcfg.Chain {
+ chain.LogPollInterval = models.MustNewDuration(lpPollingInterval)
+ chain.FinalityDepth = it_utils.Ptr[uint32](uint32(finalityDepth))
+ chain.FinalityTagEnabled = it_utils.Ptr[bool](finalityTagEnabled)
+ return chain
+ }
+
+ // Keep the EVM client's view of finality consistent with the node config.
+ var evmClientSettingsFn = func(network *blockchain.EVMNetwork) *blockchain.EVMNetwork {
+ network.FinalityDepth = uint64(finalityDepth)
+ network.FinalityTag = finalityTagEnabled
+ return network
+ }
+
+ // Private PoS chain (Prysm consensus + Geth execution) with short slots so
+ // finalization happens quickly enough for the test.
+ ethBuilder := ctf_test_env.NewEthereumNetworkBuilder()
+ cfg, err := ethBuilder.
+ WithConsensusType(ctf_test_env.ConsensusType_PoS).
+ WithConsensusLayer(ctf_test_env.ConsensusLayer_Prysm).
+ WithExecutionLayer(ctf_test_env.ExecutionLayer_Geth).
+ WithBeaconChainConfig(ctf_test_env.BeaconChainConfig{
+ SecondsPerSlot: 8,
+ SlotsPerEpoch: 2,
+ }).
+ Build()
+ require.NoError(t, err, "Error building ethereum network config")
+
+ env, err = test_env.NewCLTestEnvBuilder().
+ WithTestLogger(t).
+ WithPrivateEthereumNetwork(cfg).
+ WithCLNodes(clNodesCount).
+ WithCLNodeConfig(clNodeConfig).
+ WithFunding(big.NewFloat(chainlinkNodeFunding)).
+ WithChainOptions(logPolllerSettingsFn).
+ EVMClientNetworkOptions(evmClientSettingsFn).
+ WithStandardCleanup().
+ Build()
+ require.NoError(t, err, "Error deploying test environment")
+
+ env.ParallelTransactions(true)
+ nodeClients := env.ClCluster.NodeAPIs()
+ // Node 0 is the bootstrap node; the rest are OCR workers.
+ workerNodes := nodeClients[1:]
+
+ var linkToken contracts.LinkToken
+
+ // Deploy LINK on the simulated chain, otherwise load the canonical
+ // testnet deployment.
+ switch network.ChainID {
+ // Simulated
+ case 1337:
+ linkToken, err = env.ContractDeployer.DeployLinkTokenContract()
+ // Ethereum Sepolia
+ case 11155111:
+ linkToken, err = env.ContractLoader.LoadLINKToken("0x779877A7B0D9E8603169DdbD7836e478b4624789")
+ // Polygon Mumbai
+ case 80001:
+ linkToken, err = env.ContractLoader.LoadLINKToken("0x326C977E6efc84E512bB9C30f76E30c160eD06FB")
+ default:
+ panic("Not implemented")
+ }
+ require.NoError(t, err, "Error loading/deploying LINK token")
+
+ linkBalance, err := env.EVMClient.BalanceAt(context.Background(), common.HexToAddress(linkToken.Address()))
+ require.NoError(t, err, "Error getting LINK balance")
+
+ l.Info().Str("Balance", big.NewInt(0).Div(linkBalance, big.NewInt(1e18)).String()).Msg("LINK balance")
+ // Require 9 LINK per upkeep before proceeding.
+ minLinkBalanceSingleNode := big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(9))
+ minLinkBalance := big.NewInt(0).Mul(minLinkBalanceSingleNode, big.NewInt(int64(upkeepsNeeded)))
+ if minLinkBalance.Cmp(linkBalance) < 0 {
+ require.FailNowf(t, "Not enough LINK", "Not enough LINK to run the test. Need at least %s", big.NewInt(0).Div(minLinkBalance, big.NewInt(1e18)).String())
+ }
+
+ registry, registrar := actions.DeployAutoOCRRegistryAndRegistrar(
+ t,
+ registryVersion,
+ registryConfig,
+ linkToken,
+ env.ContractDeployer,
+ env.EVMClient,
+ )
+
+ // Fund the registry with LINK
+ err = linkToken.Transfer(registry.Address(), big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(int64(defaultAmountOfUpkeeps))))
+ require.NoError(t, err, "Funding keeper registry contract shouldn't fail")
+
+ err = actions.CreateOCRKeeperJobsLocal(l, nodeClients, registry.Address(), network.ChainID, 0, registryVersion)
+ require.NoError(t, err, "Error creating OCR Keeper Jobs")
+ ocrConfig, err := actions.BuildAutoOCR2ConfigVarsLocal(l, workerNodes, registryConfig, registrar.Address(), 30*time.Second, registry.RegistryOwnerAddress())
+ require.NoError(t, err, "Error building OCR config vars")
+ // NOTE(review): SetConfig uses the package-level
+ // automationDefaultRegistryConfig rather than the registryConfig
+ // parameter — confirm that is intended.
+ err = registry.SetConfig(automationDefaultRegistryConfig, ocrConfig)
+ require.NoError(t, err, "Registry config should be set successfully")
+ require.NoError(t, env.EVMClient.WaitForEvents(), "Waiting for config to be set")
+
+ return env.EVMClient, nodeClients, env.ContractDeployer, linkToken, registry, registrar, env
+}
diff --git a/integration-tests/universal/log_poller/scenarios.go b/integration-tests/universal/log_poller/scenarios.go
new file mode 100644
index 00000000000..886547d46e1
--- /dev/null
+++ b/integration-tests/universal/log_poller/scenarios.go
@@ -0,0 +1,496 @@
+package logpoller
+
+import (
+ "fmt"
+ "math/big"
+ "testing"
+ "time"
+
+ "github.com/onsi/gomega"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink-testing-framework/logging"
+ "github.com/smartcontractkit/chainlink/integration-tests/actions"
+ "github.com/smartcontractkit/chainlink/integration-tests/contracts"
+ "github.com/smartcontractkit/chainlink/integration-tests/contracts/ethereum"
+ "github.com/smartcontractkit/chainlink/integration-tests/utils"
+ core_logger "github.com/smartcontractkit/chainlink/v2/core/logger"
+)
+
+func ExecuteBasicLogPollerTest(t *testing.T, cfg *Config) {
+ l := logging.GetTestLogger(t)
+ coreLogger := core_logger.TestLogger(t) //needed by ORM ¯\_(ツ)_/¯
+
+ if cfg.General.EventsToEmit == nil || len(cfg.General.EventsToEmit) == 0 {
+ l.Warn().Msg("No events to emit specified, using all events from log emitter contract")
+ for _, event := range EmitterABI.Events {
+ cfg.General.EventsToEmit = append(cfg.General.EventsToEmit, event)
+ }
+ }
+
+ l.Info().Msg("Starting basic log poller test")
+
+ var (
+ err error
+ upKeepsNeeded = cfg.General.Contracts * len(cfg.General.EventsToEmit)
+ )
+
+ chainClient, _, contractDeployer, linkToken, registry, registrar, testEnv := setupLogPollerTestDocker(
+ t, ethereum.RegistryVersion_2_1, defaultOCRRegistryConfig, upKeepsNeeded, time.Duration(500*time.Millisecond), cfg.General.UseFinalityTag,
+ )
+
+ _, upkeepIDs := actions.DeployConsumers(
+ t,
+ registry,
+ registrar,
+ linkToken,
+ contractDeployer,
+ chainClient,
+ upKeepsNeeded,
+ big.NewInt(automationDefaultLinkFunds),
+ automationDefaultUpkeepGasLimit,
+ true,
+ false,
+ )
+
+ // Deploy Log Emitter contracts
+ logEmitters := make([]*contracts.LogEmitter, 0)
+ for i := 0; i < cfg.General.Contracts; i++ {
+ logEmitter, err := testEnv.ContractDeployer.DeployLogEmitterContract()
+ logEmitters = append(logEmitters, &logEmitter)
+ require.NoError(t, err, "Error deploying log emitter contract")
+ l.Info().Str("Contract address", logEmitter.Address().Hex()).Msg("Log emitter contract deployed")
+ time.Sleep(200 * time.Millisecond)
+ }
+
+ // Register log triggered upkeep for each combination of log emitter contract and event signature (topic)
+ // We need to register a separate upkeep for each event signature, because log trigger doesn't support multiple topics (even if log poller does)
+ for i := 0; i < len(upkeepIDs); i++ {
+ emitterAddress := (*logEmitters[i%cfg.General.Contracts]).Address()
+ upkeepID := upkeepIDs[i]
+ topicId := cfg.General.EventsToEmit[i%len(cfg.General.EventsToEmit)].ID
+
+ l.Info().Int("Upkeep id", int(upkeepID.Int64())).Str("Emitter address", emitterAddress.String()).Str("Topic", topicId.Hex()).Msg("Registering log trigger for log emitter")
+ err = registerSingleTopicFilter(registry, upkeepID, emitterAddress, topicId)
+ randomWait(50, 200)
+ require.NoError(t, err, "Error registering log trigger for log emitter")
+ }
+
+ err = chainClient.WaitForEvents()
+ require.NoError(t, err, "Error encountered when waiting for setting trigger config for upkeeps")
+
+ // Make sure that all nodes have expected filters registered before starting to emit events
+ expectedFilters := getExpectedFilters(logEmitters, cfg)
+ gom := gomega.NewGomegaWithT(t)
+ gom.Eventually(func(g gomega.Gomega) {
+ for i := 1; i < len(testEnv.ClCluster.Nodes); i++ {
+ nodeName := testEnv.ClCluster.Nodes[i].ContainerName
+ l.Info().Str("Node name", nodeName).Msg("Fetching filters from log poller's DB")
+
+ hasFilters, err := nodeHasExpectedFilters(expectedFilters, coreLogger, testEnv.EVMClient.GetChainID(), testEnv.ClCluster.Nodes[i].PostgresDb)
+ if err != nil {
+ l.Warn().Err(err).Msg("Error checking if node has expected filters. Retrying...")
+ return
+ }
+
+ g.Expect(hasFilters).To(gomega.BeTrue(), "Not all expected filters were found in the DB")
+ }
+ }, "30s", "1s").Should(gomega.Succeed())
+ l.Info().Msg("All nodes have expected filters registered")
+ l.Info().Int("Count", len(expectedFilters)).Msg("Expected filters count")
+
+ // Save block number before starting to emit events, so that we can later use it when querying logs
+ sb, err := testEnv.EVMClient.LatestBlockNumber(utils.TestContext(t))
+ require.NoError(t, err, "Error getting latest block number")
+ startBlock := int64(sb)
+
+ l.Info().Msg("STARTING EVENT EMISSION")
+ startTime := time.Now()
+
+ // Start chaos experiments by randomly pausing random containers (Chainlink nodes or their DBs)
+ chaosDoneCh := make(chan error, 1)
+ go func() {
+ executeChaosExperiment(l, testEnv, cfg, chaosDoneCh)
+ }()
+
+ totalLogsEmitted, err := executeGenerator(t, cfg, logEmitters)
+ endTime := time.Now()
+ require.NoError(t, err, "Error executing event generator")
+
+ expectedLogsEmitted := getExpectedLogCount(cfg)
+ duration := int(endTime.Sub(startTime).Seconds())
+ l.Info().Int("Total logs emitted", totalLogsEmitted).Int64("Expected total logs emitted", expectedLogsEmitted).Str("Duration", fmt.Sprintf("%d sec", duration)).Str("LPS", fmt.Sprintf("%d/sec", totalLogsEmitted/duration)).Msg("FINISHED EVENT EMISSION")
+
+ // Save block number after finishing to emit events, so that we can later use it when querying logs
+ eb, err := testEnv.EVMClient.LatestBlockNumber(utils.TestContext(t))
+ require.NoError(t, err, "Error getting latest block number")
+
+ endBlock, err := GetEndBlockToWaitFor(int64(eb), testEnv.EVMClient.GetChainID().Int64(), cfg)
+ require.NoError(t, err, "Error getting end block to wait for")
+
+ l.Info().Msg("Waiting before proceeding with test until all chaos experiments finish")
+ chaosError := <-chaosDoneCh
+ require.NoError(t, chaosError, "Error encountered during chaos experiment")
+
+ // Wait until last block in which events were emitted has been finalised
+ // how long should we wait here until all logs are processed? wait for block X to be processed by all nodes?
+ waitDuration := "15m"
+ l.Warn().Str("Duration", waitDuration).Msg("Waiting for logs to be processed by all nodes and for chain to advance beyond finality")
+
+ gom.Eventually(func(g gomega.Gomega) {
+ hasAdvanced, err := chainHasFinalisedEndBlock(l, testEnv.EVMClient, endBlock)
+ if err != nil {
+ l.Warn().Err(err).Msg("Error checking if chain has advanced beyond finality. Retrying...")
+ }
+ g.Expect(hasAdvanced).To(gomega.BeTrue(), "Chain has not advanced beyond finality")
+ }, waitDuration, "30s").Should(gomega.Succeed())
+
+ l.Warn().Str("Duration", "1m").Msg("Waiting for all CL nodes to have end block finalised")
+ gom.Eventually(func(g gomega.Gomega) {
+ hasFinalised, err := logPollerHasFinalisedEndBlock(endBlock, testEnv.EVMClient.GetChainID(), l, coreLogger, testEnv.ClCluster)
+ if err != nil {
+ l.Warn().Err(err).Msg("Error checking if nodes have finalised end block. Retrying...")
+ }
+ g.Expect(hasFinalised).To(gomega.BeTrue(), "Some nodes have not finalised end block")
+ }, "1m", "30s").Should(gomega.Succeed())
+
+ gom.Eventually(func(g gomega.Gomega) {
+ logCountMatches, err := clNodesHaveExpectedLogCount(startBlock, endBlock, testEnv.EVMClient.GetChainID(), totalLogsEmitted, expectedFilters, l, coreLogger, testEnv.ClCluster)
+ if err != nil {
+ l.Warn().Err(err).Msg("Error checking if CL nodes have expected log count. Retrying...")
+ }
+ g.Expect(logCountMatches).To(gomega.BeTrue(), "Not all CL nodes have expected log count")
+ }, waitDuration, "5s").Should(gomega.Succeed())
+
+ // Wait until all CL nodes have exactly the same logs emitted by test contracts as the EVM node has
+ logConsistencyWaitDuration := "1m"
+ l.Warn().Str("Duration", logConsistencyWaitDuration).Msg("Waiting for CL nodes to have all the logs that EVM node has")
+
+ gom.Eventually(func(g gomega.Gomega) {
+ missingLogs, err := getMissingLogs(startBlock, endBlock, logEmitters, testEnv.EVMClient, testEnv.ClCluster, l, coreLogger, cfg)
+ if err != nil {
+ l.Warn().Err(err).Msg("Error getting missing logs. Retrying...")
+ }
+
+ if !missingLogs.IsEmpty() {
+ printMissingLogsByType(missingLogs, l, cfg)
+ }
+ g.Expect(missingLogs.IsEmpty()).To(gomega.BeTrue(), "Some CL nodes were missing logs")
+ }, logConsistencyWaitDuration, "5s").Should(gomega.Succeed())
+}
+
+func ExecuteLogPollerReplay(t *testing.T, cfg *Config, consistencyTimeout string) {
+ l := logging.GetTestLogger(t)
+ coreLogger := core_logger.TestLogger(t) //needed by ORM ¯\_(ツ)_/¯
+
+ if cfg.General.EventsToEmit == nil || len(cfg.General.EventsToEmit) == 0 {
+ l.Warn().Msg("No events to emit specified, using all events from log emitter contract")
+ for _, event := range EmitterABI.Events {
+ cfg.General.EventsToEmit = append(cfg.General.EventsToEmit, event)
+ }
+ }
+
+ l.Info().Msg("Starting replay log poller test")
+
+ var (
+ err error
+ upKeepsNeeded = cfg.General.Contracts * len(cfg.General.EventsToEmit)
+ )
+
+ // we set blockBackfillDepth to 0, to make sure nothing will be backfilled and won't interfere with our test
+ chainClient, _, contractDeployer, linkToken, registry, registrar, testEnv := setupLogPollerTestDocker(
+ t, ethereum.RegistryVersion_2_1, defaultOCRRegistryConfig, upKeepsNeeded, time.Duration(1000*time.Millisecond), cfg.General.UseFinalityTag)
+
+ _, upkeepIDs := actions.DeployConsumers(
+ t,
+ registry,
+ registrar,
+ linkToken,
+ contractDeployer,
+ chainClient,
+ upKeepsNeeded,
+ big.NewInt(automationDefaultLinkFunds),
+ automationDefaultUpkeepGasLimit,
+ true,
+ false,
+ )
+
+ // Deploy Log Emitter contracts
+ logEmitters := make([]*contracts.LogEmitter, 0)
+ for i := 0; i < cfg.General.Contracts; i++ {
+ logEmitter, err := testEnv.ContractDeployer.DeployLogEmitterContract()
+ logEmitters = append(logEmitters, &logEmitter)
+ require.NoError(t, err, "Error deploying log emitter contract")
+ l.Info().Str("Contract address", logEmitter.Address().Hex()).Msg("Log emitter contract deployed")
+ time.Sleep(200 * time.Millisecond)
+ }
+
+ // wait for contracts to be uploaded to chain, TODO: could make this wait fluent
+ time.Sleep(5 * time.Second)
+
+ // Save block number before starting to emit events, so that we can later use it when querying logs
+ sb, err := testEnv.EVMClient.LatestBlockNumber(utils.TestContext(t))
+ require.NoError(t, err, "Error getting latest block number")
+ startBlock := int64(sb)
+
+ l.Info().Msg("STARTING EVENT EMISSION")
+ startTime := time.Now()
+ totalLogsEmitted, err := executeGenerator(t, cfg, logEmitters)
+ endTime := time.Now()
+ require.NoError(t, err, "Error executing event generator")
+ expectedLogsEmitted := getExpectedLogCount(cfg)
+ duration := int(endTime.Sub(startTime).Seconds())
+ l.Info().Int("Total logs emitted", totalLogsEmitted).Int64("Expected total logs emitted", expectedLogsEmitted).Str("Duration", fmt.Sprintf("%d sec", duration)).Str("LPS", fmt.Sprintf("%d/sec", totalLogsEmitted/duration)).Msg("FINISHED EVENT EMISSION")
+
+ // Save block number after finishing to emit events, so that we can later use it when querying logs
+ eb, err := testEnv.EVMClient.LatestBlockNumber(utils.TestContext(t))
+ require.NoError(t, err, "Error getting latest block number")
+
+ endBlock, err := GetEndBlockToWaitFor(int64(eb), testEnv.EVMClient.GetChainID().Int64(), cfg)
+ require.NoError(t, err, "Error getting end block to wait for")
+
+ // Lets make sure no logs are in DB yet
+ expectedFilters := getExpectedFilters(logEmitters, cfg)
+ logCountMatches, err := clNodesHaveExpectedLogCount(startBlock, endBlock, testEnv.EVMClient.GetChainID(), 0, expectedFilters, l, coreLogger, testEnv.ClCluster)
+ require.NoError(t, err, "Error checking if CL nodes have expected log count")
+ require.True(t, logCountMatches, "Some CL nodes already had logs in DB")
+ l.Info().Msg("No logs were saved by CL nodes yet, as expected. Proceeding.")
+
+ // Register log triggered upkeep for each combination of log emitter contract and event signature (topic)
+ // We need to register a separate upkeep for each event signature, because log trigger doesn't support multiple topics (even if log poller does)
+ for i := 0; i < len(upkeepIDs); i++ {
+ emitterAddress := (*logEmitters[i%cfg.General.Contracts]).Address()
+ upkeepID := upkeepIDs[i]
+ topicId := cfg.General.EventsToEmit[i%len(cfg.General.EventsToEmit)].ID
+
+ l.Info().Int("Upkeep id", int(upkeepID.Int64())).Str("Emitter address", emitterAddress.String()).Str("Topic", topicId.Hex()).Msg("Registering log trigger for log emitter")
+ err = registerSingleTopicFilter(registry, upkeepID, emitterAddress, topicId)
+ require.NoError(t, err, "Error registering log trigger for log emitter")
+ }
+
+ err = chainClient.WaitForEvents()
+ require.NoError(t, err, "Error encountered when waiting for setting trigger config for upkeeps")
+
+ // Make sure that all nodes have expected filters registered before starting to emit events
+ gom := gomega.NewGomegaWithT(t)
+ gom.Eventually(func(g gomega.Gomega) {
+ for i := 1; i < len(testEnv.ClCluster.Nodes); i++ {
+ nodeName := testEnv.ClCluster.Nodes[i].ContainerName
+ l.Info().Str("Node name", nodeName).Msg("Fetching filters from log poller's DB")
+
+ hasFilters, err := nodeHasExpectedFilters(expectedFilters, coreLogger, testEnv.EVMClient.GetChainID(), testEnv.ClCluster.Nodes[i].PostgresDb)
+ if err != nil {
+ l.Warn().Err(err).Msg("Error checking if node has expected filters. Retrying...")
+ return
+ }
+
+ g.Expect(hasFilters).To(gomega.BeTrue(), "Not all expected filters were found in the DB")
+ }
+ }, "30s", "1s").Should(gomega.Succeed())
+ l.Info().Msg("All nodes have expected filters registered")
+ l.Info().Int("Count", len(expectedFilters)).Msg("Expected filters count")
+
+ l.Warn().Str("Duration", "1m").Msg("Waiting for all CL nodes to have end block finalised")
+ gom.Eventually(func(g gomega.Gomega) {
+ hasFinalised, err := logPollerHasFinalisedEndBlock(endBlock, testEnv.EVMClient.GetChainID(), l, coreLogger, testEnv.ClCluster)
+ if err != nil {
+ l.Warn().Err(err).Msg("Error checking if nodes have finalised end block. Retrying...")
+ }
+ g.Expect(hasFinalised).To(gomega.BeTrue(), "Some nodes have not finalised end block")
+ }, "1m", "30s").Should(gomega.Succeed())
+
+ // Trigger replay
+ l.Info().Msg("Triggering log poller's replay")
+ for i := 1; i < len(testEnv.ClCluster.Nodes); i++ {
+ nodeName := testEnv.ClCluster.Nodes[i].ContainerName
+ response, _, err := testEnv.ClCluster.Nodes[i].API.ReplayLogPollerFromBlock(startBlock, testEnv.EVMClient.GetChainID().Int64())
+ require.NoError(t, err, "Error triggering log poller's replay on node %s", nodeName)
+ require.Equal(t, "Replay started", response.Data.Attributes.Message, "Unexpected response message from log poller's replay")
+ }
+
+ l.Warn().Str("Duration", consistencyTimeout).Msg("Waiting for replay logs to be processed by all nodes")
+
+ gom.Eventually(func(g gomega.Gomega) {
+ logCountMatches, err := clNodesHaveExpectedLogCount(startBlock, endBlock, testEnv.EVMClient.GetChainID(), totalLogsEmitted, expectedFilters, l, coreLogger, testEnv.ClCluster)
+ if err != nil {
+ l.Warn().Err(err).Msg("Error checking if CL nodes have expected log count. Retrying...")
+ }
+ g.Expect(logCountMatches).To(gomega.BeTrue(), "Not all CL nodes have expected log count")
+ }, consistencyTimeout, "30s").Should(gomega.Succeed())
+
+ // Wait until all CL nodes have exactly the same logs emitted by test contracts as the EVM node has
+ l.Warn().Str("Duration", consistencyTimeout).Msg("Waiting for CL nodes to have all the logs that EVM node has")
+
+ gom.Eventually(func(g gomega.Gomega) {
+ missingLogs, err := getMissingLogs(startBlock, endBlock, logEmitters, testEnv.EVMClient, testEnv.ClCluster, l, coreLogger, cfg)
+ if err != nil {
+ l.Warn().Err(err).Msg("Error getting missing logs. Retrying...")
+ }
+
+ if !missingLogs.IsEmpty() {
+ printMissingLogsByType(missingLogs, l, cfg)
+ }
+ g.Expect(missingLogs.IsEmpty()).To(gomega.BeTrue(), "Some CL nodes were missing logs")
+ }, consistencyTimeout, "10s").Should(gomega.Succeed())
+}
+
+type FinalityBlockFn = func(chainId int64, endBlock int64) (int64, error)
+
+func ExecuteCILogPollerTest(t *testing.T, cfg *Config) {
+ l := logging.GetTestLogger(t)
+ coreLogger := core_logger.TestLogger(t) //needed by ORM ¯\_(ツ)_/¯
+
+ if cfg.General.EventsToEmit == nil || len(cfg.General.EventsToEmit) == 0 {
+ l.Warn().Msg("No events to emit specified, using all events from log emitter contract")
+ for _, event := range EmitterABI.Events {
+ cfg.General.EventsToEmit = append(cfg.General.EventsToEmit, event)
+ }
+ }
+
+ l.Info().Msg("Starting CI log poller test")
+
+ var (
+ err error
+ upKeepsNeeded = cfg.General.Contracts * len(cfg.General.EventsToEmit)
+ )
+
+ chainClient, _, contractDeployer, linkToken, registry, registrar, testEnv := setupLogPollerTestDocker(
+ t, ethereum.RegistryVersion_2_1, defaultOCRRegistryConfig, upKeepsNeeded, time.Duration(1000*time.Millisecond), cfg.General.UseFinalityTag,
+ )
+
+ _, upkeepIDs := actions.DeployConsumers(
+ t,
+ registry,
+ registrar,
+ linkToken,
+ contractDeployer,
+ chainClient,
+ upKeepsNeeded,
+ big.NewInt(automationDefaultLinkFunds),
+ automationDefaultUpkeepGasLimit,
+ true,
+ false,
+ )
+
+ // Deploy Log Emitter contracts
+ logEmitters := make([]*contracts.LogEmitter, 0)
+ for i := 0; i < cfg.General.Contracts; i++ {
+ logEmitter, err := testEnv.ContractDeployer.DeployLogEmitterContract()
+ logEmitters = append(logEmitters, &logEmitter)
+ require.NoError(t, err, "Error deploying log emitter contract")
+ l.Info().Str("Contract address", logEmitter.Address().Hex()).Msg("Log emitter contract deployed")
+ time.Sleep(200 * time.Millisecond)
+ }
+
+ // Register log triggered upkeep for each combination of log emitter contract and event signature (topic)
+ // We need to register a separate upkeep for each event signature, because log trigger doesn't support multiple topics (even if log poller does)
+ for i := 0; i < len(upkeepIDs); i++ {
+ emitterAddress := (*logEmitters[i%cfg.General.Contracts]).Address()
+ upkeepID := upkeepIDs[i]
+ topicId := cfg.General.EventsToEmit[i%len(cfg.General.EventsToEmit)].ID
+
+ l.Info().Int("Upkeep id", int(upkeepID.Int64())).Str("Emitter address", emitterAddress.String()).Str("Topic", topicId.Hex()).Msg("Registering log trigger for log emitter")
+ err = registerSingleTopicFilter(registry, upkeepID, emitterAddress, topicId)
+ randomWait(50, 200)
+ require.NoError(t, err, "Error registering log trigger for log emitter")
+ }
+
+ err = chainClient.WaitForEvents()
+ require.NoError(t, err, "Error encountered when waiting for setting trigger config for upkeeps")
+
+ // Make sure that all nodes have expected filters registered before starting to emit events
+ expectedFilters := getExpectedFilters(logEmitters, cfg)
+ gom := gomega.NewGomegaWithT(t)
+ gom.Eventually(func(g gomega.Gomega) {
+ for i := 1; i < len(testEnv.ClCluster.Nodes); i++ {
+ nodeName := testEnv.ClCluster.Nodes[i].ContainerName
+ l.Info().Str("Node name", nodeName).Msg("Fetching filters from log poller's DB")
+
+ hasFilters, err := nodeHasExpectedFilters(expectedFilters, coreLogger, testEnv.EVMClient.GetChainID(), testEnv.ClCluster.Nodes[i].PostgresDb)
+ if err != nil {
+ l.Warn().Err(err).Msg("Error checking if node has expected filters. Retrying...")
+ return
+ }
+
+ g.Expect(hasFilters).To(gomega.BeTrue(), "Not all expected filters were found in the DB")
+ }
+ }, "1m", "1s").Should(gomega.Succeed())
+ l.Info().Msg("All nodes have expected filters registered")
+ l.Info().Int("Count", len(expectedFilters)).Msg("Expected filters count")
+
+ // Save block number before starting to emit events, so that we can later use it when querying logs
+ sb, err := testEnv.EVMClient.LatestBlockNumber(utils.TestContext(t))
+ require.NoError(t, err, "Error getting latest block number")
+ startBlock := int64(sb)
+
+ l.Info().Msg("STARTING EVENT EMISSION")
+ startTime := time.Now()
+
+ // Start chaos experiments by randomly pausing random containers (Chainlink nodes or their DBs)
+ chaosDoneCh := make(chan error, 1)
+ go func() {
+ executeChaosExperiment(l, testEnv, cfg, chaosDoneCh)
+ }()
+
+ totalLogsEmitted, err := executeGenerator(t, cfg, logEmitters)
+ endTime := time.Now()
+ require.NoError(t, err, "Error executing event generator")
+
+ expectedLogsEmitted := getExpectedLogCount(cfg)
+ duration := int(endTime.Sub(startTime).Seconds())
+ l.Info().Int("Total logs emitted", totalLogsEmitted).Int64("Expected total logs emitted", expectedLogsEmitted).Str("Duration", fmt.Sprintf("%d sec", duration)).Str("LPS", fmt.Sprintf("%d/sec", totalLogsEmitted/duration)).Msg("FINISHED EVENT EMISSION")
+
+ // Save block number after finishing to emit events, so that we can later use it when querying logs
+ eb, err := testEnv.EVMClient.LatestBlockNumber(utils.TestContext(t))
+ require.NoError(t, err, "Error getting latest block number")
+
+ endBlock, err := GetEndBlockToWaitFor(int64(eb), testEnv.EVMClient.GetChainID().Int64(), cfg)
+ require.NoError(t, err, "Error getting end block to wait for")
+
+ l.Info().Msg("Waiting before proceeding with test until all chaos experiments finish")
+ chaosError := <-chaosDoneCh
+ require.NoError(t, chaosError, "Error encountered during chaos experiment")
+
+ // Wait until last block in which events were emitted has been finalised (with buffer)
+ waitDuration := "45m"
+ l.Warn().Str("Duration", waitDuration).Msg("Waiting for chain to advance beyond finality")
+
+ gom.Eventually(func(g gomega.Gomega) {
+ hasAdvanced, err := chainHasFinalisedEndBlock(l, testEnv.EVMClient, endBlock)
+ if err != nil {
+ l.Warn().Err(err).Msg("Error checking if chain has advanced beyond finality. Retrying...")
+ }
+ g.Expect(hasAdvanced).To(gomega.BeTrue(), "Chain has not advanced beyond finality")
+ }, waitDuration, "30s").Should(gomega.Succeed())
+
+ l.Warn().Str("Duration", waitDuration).Msg("Waiting for all CL nodes to have end block finalised")
+ gom.Eventually(func(g gomega.Gomega) {
+ hasFinalised, err := logPollerHasFinalisedEndBlock(endBlock, testEnv.EVMClient.GetChainID(), l, coreLogger, testEnv.ClCluster)
+ if err != nil {
+ l.Warn().Err(err).Msg("Error checking if nodes have finalised end block. Retrying...")
+ }
+ g.Expect(hasFinalised).To(gomega.BeTrue(), "Some nodes have not finalised end block")
+ }, waitDuration, "30s").Should(gomega.Succeed())
+
+ // Wait until all CL nodes have exactly the same logs emitted by test contracts as the EVM node has
+ logConsistencyWaitDuration := "10m"
+ l.Warn().Str("Duration", logConsistencyWaitDuration).Msg("Waiting for CL nodes to have all the logs that EVM node has")
+
+ gom.Eventually(func(g gomega.Gomega) {
+ missingLogs, err := getMissingLogs(startBlock, endBlock, logEmitters, testEnv.EVMClient, testEnv.ClCluster, l, coreLogger, cfg)
+ if err != nil {
+ l.Warn().Err(err).Msg("Error getting missing logs. Retrying...")
+ }
+
+ if !missingLogs.IsEmpty() {
+ printMissingLogsByType(missingLogs, l, cfg)
+ }
+ g.Expect(missingLogs.IsEmpty()).To(gomega.BeTrue(), "Some CL nodes were missing logs")
+ }, logConsistencyWaitDuration, "20s").Should(gomega.Succeed())
+
+ evmLogs, _ := getEVMLogs(startBlock, endBlock, logEmitters, testEnv.EVMClient, l, cfg)
+
+ if totalLogsEmitted != len(evmLogs) {
+ l.Warn().Int("Total logs emitted", totalLogsEmitted).Int("Total logs in EVM", len(evmLogs)).Msg("Test passed, but total logs emitted does not match total logs in EVM")
+ }
+}
diff --git a/integration-tests/utils/cl_node_jobs.go b/integration-tests/utils/cl_node_jobs.go
index 16b0c167cfe..65dc6e4e392 100644
--- a/integration-tests/utils/cl_node_jobs.go
+++ b/integration-tests/utils/cl_node_jobs.go
@@ -10,13 +10,14 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/google/uuid"
"github.com/lib/pq"
+ "gopkg.in/guregu/null.v4"
+
coreClient "github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/v2/core/services/job"
"github.com/smartcontractkit/chainlink/v2/core/store/models"
- "gopkg.in/guregu/null.v4"
)
-func BuildBootstrapSpec(verifierAddr common.Address, chainID int64, fromBlock uint64, feedId [32]byte) *coreClient.OCR2TaskJobSpec {
+func BuildBootstrapSpec(verifierAddr common.Address, chainID int64, feedId [32]byte) *coreClient.OCR2TaskJobSpec {
hash := common.BytesToHash(feedId[:])
return &coreClient.OCR2TaskJobSpec{
Name: fmt.Sprintf("bootstrap-%s", uuid.NewString()),
diff --git a/integration-tests/utils/common.go b/integration-tests/utils/common.go
index c8243097a7d..5ef3209c920 100644
--- a/integration-tests/utils/common.go
+++ b/integration-tests/utils/common.go
@@ -1,7 +1,10 @@
package utils
import (
+ "context"
+ "math/big"
"net"
+ "testing"
"github.com/smartcontractkit/chainlink/v2/core/store/models"
)
@@ -23,3 +26,29 @@ func MustIP(s string) *net.IP {
}
return &ip
}
+
+func BigIntSliceContains(slice []*big.Int, b *big.Int) bool {
+ for _, a := range slice {
+ if b.Cmp(a) == 0 {
+ return true
+ }
+ }
+ return false
+}
+
+// TestContext returns a context with the test's deadline, if available.
+func TestContext(tb testing.TB) context.Context {
+ ctx := context.Background()
+ var cancel func()
+ switch t := tb.(type) {
+ case *testing.T:
+ if d, ok := t.Deadline(); ok {
+ ctx, cancel = context.WithDeadline(ctx, d)
+ }
+ }
+ if cancel == nil {
+ ctx, cancel = context.WithCancel(ctx)
+ }
+ tb.Cleanup(cancel)
+ return ctx
+}
diff --git a/integration-tests/utils/log.go b/integration-tests/utils/log.go
deleted file mode 100644
index 499be8002d4..00000000000
--- a/integration-tests/utils/log.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package utils
-
-import (
- "github.com/rs/zerolog"
- "github.com/rs/zerolog/log"
- "os"
-)
-
-func SetupCoreDockerEnvLogger() {
- lvlStr := os.Getenv("CORE_DOCKER_ENV_LOG_LEVEL")
- if lvlStr == "" {
- lvlStr = "info"
- }
- lvl, err := zerolog.ParseLevel(lvlStr)
- if err != nil {
- panic(err)
- }
- log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}).Level(lvl)
-}
diff --git a/integration-tests/utils/templates/secrets.go b/integration-tests/utils/templates/secrets.go
index f81287e871f..45edf0d0127 100644
--- a/integration-tests/utils/templates/secrets.go
+++ b/integration-tests/utils/templates/secrets.go
@@ -2,6 +2,7 @@ package templates
import (
"github.com/google/uuid"
+
"github.com/smartcontractkit/chainlink-testing-framework/utils/templates"
)
diff --git a/operator_ui/TAG b/operator_ui/TAG
index e08ca072670..3b63cc3addb 100644
--- a/operator_ui/TAG
+++ b/operator_ui/TAG
@@ -1 +1 @@
-v0.8.0-e10948a
+v0.8.0-2f868c3
diff --git a/operator_ui/check.sh b/operator_ui/check.sh
index 614afd4b07f..9e738218088 100755
--- a/operator_ui/check.sh
+++ b/operator_ui/check.sh
@@ -26,12 +26,13 @@ else
echo "$latest_tag" >"$tag_file"
echo "Tag updated $current_tag -> $latest_tag"
if [ "$CI" ]; then
- echo "current_tag=$current_tag" >> $GITHUB_OUTPUT
- echo "latest_tag=$latest_tag" >> $GITHUB_OUTPUT
- # See https://github.com/peter-evans/create-pull-request/blob/main/docs/examples.md#setting-the-pull-request-body-from-a-file
- body="${body//'%'/'%25'}"
- body="${body//$'\n'/'%0A'}"
- body="${body//$'\r'/'%0D'}"
- echo "body=$body" >> $GITHUB_OUTPUT
+ echo "current_tag=$current_tag" >>$GITHUB_OUTPUT
+ echo "latest_tag=$latest_tag" >>$GITHUB_OUTPUT
+
+ # See https://github.com/orgs/community/discussions/26288#discussioncomment-3876281
+ delimiter="$(openssl rand -hex 8)"
+ echo "body<<${delimiter}" >>"${GITHUB_OUTPUT}"
+ echo "$body" >>"${GITHUB_OUTPUT}"
+ echo "${delimiter}" >>"${GITHUB_OUTPUT}"
fi
fi
diff --git a/plugins/chainlink.Dockerfile b/plugins/chainlink.Dockerfile
index 001ee30bf74..f07fab48122 100644
--- a/plugins/chainlink.Dockerfile
+++ b/plugins/chainlink.Dockerfile
@@ -19,6 +19,9 @@ RUN make install-chainlink
# Build LOOP Plugins
RUN make install-median
+# Install medianpoc binary
+RUN make install-medianpoc
+
RUN go list -m -f "{{.Dir}}" github.com/smartcontractkit/chainlink-solana | xargs -I % ln -s % /chainlink-solana
RUN mkdir /chainlink-starknet
RUN go list -m -f "{{.Dir}}" github.com/smartcontractkit/chainlink-starknet/relayer | xargs -I % ln -s % /chainlink-starknet/relayer
@@ -49,6 +52,7 @@ RUN curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \
&& apt-get clean all
COPY --from=buildgo /go/bin/chainlink /usr/local/bin/
+COPY --from=buildgo /go/bin/chainlink-medianpoc /usr/local/bin/
COPY --from=buildgo /go/bin/chainlink-median /usr/local/bin/
ENV CL_MEDIAN_CMD chainlink-median
diff --git a/plugins/cmd/chainlink-medianpoc/main.go b/plugins/cmd/chainlink-medianpoc/main.go
new file mode 100644
index 00000000000..325de6538fa
--- /dev/null
+++ b/plugins/cmd/chainlink-medianpoc/main.go
@@ -0,0 +1,42 @@
+package main
+
+import (
+ "github.com/hashicorp/go-plugin"
+
+ "github.com/smartcontractkit/chainlink-relay/pkg/loop"
+ "github.com/smartcontractkit/chainlink-relay/pkg/loop/reportingplugins"
+ "github.com/smartcontractkit/chainlink-relay/pkg/types"
+ "github.com/smartcontractkit/chainlink/v2/plugins/medianpoc"
+)
+
+const (
+ loggerName = "PluginMedianPoc"
+)
+
+func main() {
+ s := loop.MustNewStartedServer(loggerName)
+ defer s.Stop()
+
+ p := medianpoc.NewPlugin(s.Logger)
+ defer s.Logger.ErrorIfFn(p.Close, "Failed to close")
+
+ s.MustRegister(p)
+
+ stop := make(chan struct{})
+ defer close(stop)
+
+ plugin.Serve(&plugin.ServeConfig{
+ HandshakeConfig: reportingplugins.ReportingPluginHandshakeConfig(),
+ Plugins: map[string]plugin.Plugin{
+ reportingplugins.PluginServiceName: &reportingplugins.GRPCService[types.MedianProvider]{
+ PluginServer: p,
+ BrokerConfig: loop.BrokerConfig{
+ Logger: s.Logger,
+ StopCh: stop,
+ GRPCOpts: s.GRPCOpts,
+ },
+ },
+ },
+ GRPCServer: s.GRPCOpts.NewServer,
+ })
+}
diff --git a/plugins/medianpoc/data_source.go b/plugins/medianpoc/data_source.go
new file mode 100644
index 00000000000..7b20f1e5eb3
--- /dev/null
+++ b/plugins/medianpoc/data_source.go
@@ -0,0 +1,79 @@
+package medianpoc
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math/big"
+ "sync"
+ "time"
+
+ ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+
+ "github.com/smartcontractkit/chainlink-relay/pkg/logger"
+ "github.com/smartcontractkit/chainlink-relay/pkg/types"
+ "github.com/smartcontractkit/chainlink/v2/core/bridges"
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
+)
+
+type DataSource struct {
+ pipelineRunner types.PipelineRunnerService
+ spec string
+ lggr logger.Logger
+
+ current bridges.BridgeMetaData
+ mu sync.RWMutex
+}
+
+func (d *DataSource) Observe(ctx context.Context, reportTimestamp ocrtypes.ReportTimestamp) (*big.Int, error) {
+ md, err := bridges.MarshalBridgeMetaData(d.currentAnswer())
+ if err != nil {
+ d.lggr.Warnw("unable to attach metadata for run", "err", err)
+ }
+
+ // NOTE: job metadata is automatically attached by the pipeline runner service
+ vars := types.Vars{
+ Vars: map[string]interface{}{
+ "jobRun": md,
+ },
+ }
+
+ results, err := d.pipelineRunner.ExecuteRun(ctx, d.spec, vars, types.Options{})
+ if err != nil {
+ return nil, err
+ }
+
+ finalResults := results.FinalResults()
+ if len(finalResults) == 0 {
+ return nil, errors.New("pipeline execution failed: not enough results")
+ }
+
+ finalResult := finalResults[0]
+ if finalResult.Error != nil {
+ return nil, fmt.Errorf("pipeline execution failed: %w", finalResult.Error)
+ }
+
+ asDecimal, err := utils.ToDecimal(finalResult.Value)
+ if err != nil {
+ return nil, errors.New("cannot convert observation to decimal")
+ }
+
+ resultAsBigInt := asDecimal.BigInt()
+ d.updateAnswer(resultAsBigInt)
+ return resultAsBigInt, nil
+}
+
+func (d *DataSource) currentAnswer() (*big.Int, *big.Int) {
+ d.mu.RLock()
+ defer d.mu.RUnlock()
+ return d.current.LatestAnswer, d.current.UpdatedAt
+}
+
+func (d *DataSource) updateAnswer(latestAnswer *big.Int) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ d.current = bridges.BridgeMetaData{
+ LatestAnswer: latestAnswer,
+ UpdatedAt: big.NewInt(time.Now().Unix()),
+ }
+}
diff --git a/plugins/medianpoc/data_source_test.go b/plugins/medianpoc/data_source_test.go
new file mode 100644
index 00000000000..e9a7945cee4
--- /dev/null
+++ b/plugins/medianpoc/data_source_test.go
@@ -0,0 +1,115 @@
+package medianpoc
+
+import (
+ "context"
+ "errors"
+ "math/big"
+ "testing"
+
+ ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+
+ "github.com/smartcontractkit/chainlink-relay/pkg/types"
+)
+
+// mockPipelineRunner is a test double for types.PipelineRunnerService; it
+// records the arguments of the last ExecuteRun call and returns the canned
+// results/err configured on the struct.
+type mockPipelineRunner struct {
+ results types.TaskResults
+ err error
+ spec string
+ vars types.Vars
+ options types.Options
+}
+
+// ExecuteRun captures spec/vars/options for later assertions and returns the
+// preconfigured results and error.
+func (m *mockPipelineRunner) ExecuteRun(ctx context.Context, spec string, vars types.Vars, options types.Options) (types.TaskResults, error) {
+ m.spec = spec
+ m.vars = vars
+ m.options = options
+ return m.results, m.err
+}
+
+// TestDataSource covers the happy path: the terminal task value becomes the
+// observation, the spec is passed through to the runner, and the answer is
+// cached on the DataSource.
+func TestDataSource(t *testing.T) {
+ lggr := logger.TestLogger(t)
+ expect := int64(3)
+ // Two results are supplied so the test also exercises selection of the
+ // terminal value over the non-terminal one.
+ pr := &mockPipelineRunner{
+ results: types.TaskResults{
+ {
+ TaskValue: types.TaskValue{
+ Value: expect,
+ Error: nil,
+ IsTerminal: true,
+ },
+ Index: 2,
+ },
+ {
+ TaskValue: types.TaskValue{
+ Value: int(4),
+ Error: nil,
+ IsTerminal: false,
+ },
+ Index: 1,
+ },
+ },
+ }
+ spec := "SPEC"
+ ds := &DataSource{
+ pipelineRunner: pr,
+ spec: spec,
+ lggr: lggr,
+ }
+ res, err := ds.Observe(context.Background(), ocrtypes.ReportTimestamp{})
+ require.NoError(t, err)
+ assert.Equal(t, big.NewInt(expect), res)
+ assert.Equal(t, spec, pr.spec)
+ assert.Equal(t, big.NewInt(expect), ds.current.LatestAnswer)
+}
+
+// TestDataSource_ResultErrors checks that an error carried by the final task
+// result is propagated by Observe.
+func TestDataSource_ResultErrors(t *testing.T) {
+ lggr := logger.TestLogger(t)
+ pr := &mockPipelineRunner{
+ results: types.TaskResults{
+ {
+ TaskValue: types.TaskValue{
+ Error: errors.New("something went wrong"),
+ IsTerminal: true,
+ },
+ Index: 0,
+ },
+ },
+ }
+ spec := "SPEC"
+ ds := &DataSource{
+ pipelineRunner: pr,
+ spec: spec,
+ lggr: lggr,
+ }
+ _, err := ds.Observe(context.Background(), ocrtypes.ReportTimestamp{})
+ assert.ErrorContains(t, err, "something went wrong")
+}
+
+// TestDataSource_ResultNotAnInt checks that a final value that cannot be
+// converted to a decimal yields the conversion error from Observe.
+func TestDataSource_ResultNotAnInt(t *testing.T) {
+ lggr := logger.TestLogger(t)
+
+ expect := "string-result"
+ pr := &mockPipelineRunner{
+ results: types.TaskResults{
+ {
+ TaskValue: types.TaskValue{
+ Value: expect,
+ IsTerminal: true,
+ },
+ Index: 0,
+ },
+ },
+ }
+ spec := "SPEC"
+ ds := &DataSource{
+ pipelineRunner: pr,
+ spec: spec,
+ lggr: lggr,
+ }
+ _, err := ds.Observe(context.Background(), ocrtypes.ReportTimestamp{})
+ assert.ErrorContains(t, err, "cannot convert observation to decimal")
+}
diff --git a/plugins/medianpoc/plugin.go b/plugins/medianpoc/plugin.go
new file mode 100644
index 00000000000..ceea1eb84f5
--- /dev/null
+++ b/plugins/medianpoc/plugin.go
@@ -0,0 +1,126 @@
+package medianpoc
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/smartcontractkit/libocr/offchainreporting2/reportingplugin/median"
+
+ ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+
+ "github.com/smartcontractkit/chainlink-relay/pkg/logger"
+ "github.com/smartcontractkit/chainlink-relay/pkg/loop"
+ "github.com/smartcontractkit/chainlink-relay/pkg/loop/reportingplugins"
+ "github.com/smartcontractkit/chainlink-relay/pkg/services"
+ "github.com/smartcontractkit/chainlink-relay/pkg/types"
+ "github.com/smartcontractkit/chainlink/v2/core/utils"
+)
+
+// NewPlugin constructs the median PoC LOOP plugin with the given logger.
+func NewPlugin(lggr logger.Logger) *Plugin {
+ return &Plugin{
+ Plugin: loop.Plugin{Logger: lggr},
+ MedianProviderServer: reportingplugins.MedianProviderServer{},
+ stop: make(utils.StopChan),
+ }
+}
+
+// Plugin is the median PoC LOOP plugin. It embeds loop.Plugin for lifecycle
+// management and reportingplugins.MedianProviderServer for provider serving.
+// NOTE(review): stop is initialized in NewPlugin but not referenced in this
+// file — confirm it is used elsewhere or remove it.
+type Plugin struct {
+ loop.Plugin
+ stop utils.StopChan
+ reportingplugins.MedianProviderServer
+}
+
+// jsonConfig models the plugin's JSON config: a map of named pipeline specs.
+type jsonConfig struct {
+ Pipelines map[string]string `json:"pipelines"`
+}
+
+// defaultPipeline returns the pipeline registered under the reserved
+// "__DEFAULT_PIPELINE__" key.
+func (j jsonConfig) defaultPipeline() (string, error) {
+ return j.getPipeline("__DEFAULT_PIPELINE__")
+}
+
+// getPipeline looks up a pipeline spec by key, erroring when absent.
+func (j jsonConfig) getPipeline(key string) (string, error) {
+ v, ok := j.Pipelines[key]
+ if ok {
+ return v, nil
+ }
+ return "", fmt.Errorf("no pipeline found for %s", key)
+}
+
+// NewReportingPluginFactory builds the median factory and wraps it in a
+// service that is registered as a sub-service of the plugin, tying its
+// lifecycle to the plugin's.
+func (p *Plugin) NewReportingPluginFactory(
+ ctx context.Context,
+ config types.ReportingPluginServiceConfig,
+ provider types.MedianProvider,
+ pipelineRunner types.PipelineRunnerService,
+ telemetry types.TelemetryClient,
+ errorLog types.ErrorLog,
+) (types.ReportingPluginFactory, error) {
+ f, err := p.newFactory(ctx, config, provider, pipelineRunner, telemetry, errorLog)
+ if err != nil {
+ return nil, err
+ }
+ s := &reportingPluginFactoryService{lggr: p.Logger, ReportingPluginFactory: f}
+ p.SubService(s)
+ return s, nil
+}
+
+// newFactory parses the JSON plugin config and assembles a
+// median.NumericalMedianFactory. The config must provide both the default
+// pipeline and a "juelsPerFeeCoinPipeline"; each becomes a DataSource.
+// NOTE(review): telemetry and errorLog are accepted but unused in this body
+// — confirm whether they are intended to be wired in later.
+func (p *Plugin) newFactory(ctx context.Context, config types.ReportingPluginServiceConfig, provider types.MedianProvider, pipelineRunner types.PipelineRunnerService, telemetry types.TelemetryClient, errorLog types.ErrorLog) (*median.NumericalMedianFactory, error) {
+ jc := &jsonConfig{}
+ err := json.Unmarshal([]byte(config.PluginConfig), jc)
+ if err != nil {
+ return nil, err
+ }
+
+ // Data source for the primary (default) observation pipeline.
+ dp, err := jc.defaultPipeline()
+ if err != nil {
+ return nil, err
+ }
+ ds := &DataSource{
+ pipelineRunner: pipelineRunner,
+ spec: dp,
+ lggr: p.Logger,
+ }
+
+ // Data source for the juels-per-fee-coin conversion pipeline.
+ jfp, err := jc.getPipeline("juelsPerFeeCoinPipeline")
+ if err != nil {
+ return nil, err
+ }
+ jds := &DataSource{
+ pipelineRunner: pipelineRunner,
+ spec: jfp,
+ lggr: p.Logger,
+ }
+ factory := &median.NumericalMedianFactory{
+ ContractTransmitter: provider.MedianContract(),
+ DataSource: ds,
+ JuelsPerFeeCoinDataSource: jds,
+ // NOTE(review): the callback passed to NewOCRWrapper discards its
+ // message — confirm errors are intentionally not persisted here.
+ Logger: logger.NewOCRWrapper(
+ p.Logger,
+ true,
+ func(msg string) {},
+ ),
+ OnchainConfigCodec: provider.OnchainConfigCodec(),
+ ReportCodec: provider.ReportCodec(),
+ }
+ return factory, nil
+}
+
+// reportingPluginFactoryService adapts an ocrtypes.ReportingPluginFactory to
+// the services lifecycle interface; Start and Close only flip the embedded
+// state machine and perform no real work.
+type reportingPluginFactoryService struct {
+ services.StateMachine
+ lggr logger.Logger
+ ocrtypes.ReportingPluginFactory
+}
+
+// Name reports the wrapped logger's name as the service name.
+func (r *reportingPluginFactoryService) Name() string { return r.lggr.Name() }
+
+// Start transitions the state machine to started; no-op otherwise.
+func (r *reportingPluginFactoryService) Start(ctx context.Context) error {
+ return r.StartOnce("ReportingPluginFactory", func() error { return nil })
+}
+
+// Close transitions the state machine to stopped; no-op otherwise.
+func (r *reportingPluginFactoryService) Close() error {
+ return r.StopOnce("ReportingPluginFactory", func() error { return nil })
+}
+
+// HealthReport maps the service name to its current health state.
+func (r *reportingPluginFactoryService) HealthReport() map[string]error {
+ return map[string]error{r.Name(): r.Healthy()}
+}
diff --git a/plugins/medianpoc/plugin_test.go b/plugins/medianpoc/plugin_test.go
new file mode 100644
index 00000000000..74a0695c6c9
--- /dev/null
+++ b/plugins/medianpoc/plugin_test.go
@@ -0,0 +1,105 @@
+package medianpoc
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+
+ "github.com/smartcontractkit/libocr/offchainreporting2/reportingplugin/median"
+
+ "github.com/smartcontractkit/chainlink-relay/pkg/types"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+)
+
+// The mocks below embed the interfaces they stand in for, so only the
+// methods the test exercises need implementations; calling any other method
+// would panic on the nil embedded interface.
+type mockErrorLog struct {
+ types.ErrorLog
+}
+
+type mockOffchainConfigDigester struct {
+ ocrtypes.OffchainConfigDigester
+}
+
+type mockContractTransmitter struct {
+ ocrtypes.ContractTransmitter
+}
+
+type mockContractConfigTracker struct {
+ ocrtypes.ContractConfigTracker
+}
+
+type mockReportCodec struct {
+ median.ReportCodec
+}
+
+type mockMedianContract struct {
+ median.MedianContract
+}
+
+type mockOnchainConfigCodec struct {
+ median.OnchainConfigCodec
+}
+
+// provider is a stub median provider assembled from the mocks above; it is
+// passed where newFactory expects a types.MedianProvider.
+type provider struct {
+ types.Service
+}
+
+func (p provider) OffchainConfigDigester() ocrtypes.OffchainConfigDigester {
+ return mockOffchainConfigDigester{}
+}
+
+func (p provider) ContractTransmitter() ocrtypes.ContractTransmitter {
+ return mockContractTransmitter{}
+}
+
+func (p provider) ContractConfigTracker() ocrtypes.ContractConfigTracker {
+ return mockContractConfigTracker{}
+}
+
+func (p provider) ReportCodec() median.ReportCodec {
+ return mockReportCodec{}
+}
+
+func (p provider) MedianContract() median.MedianContract {
+ return mockMedianContract{}
+}
+
+func (p provider) OnchainConfigCodec() median.OnchainConfigCodec {
+ return mockOnchainConfigCodec{}
+}
+
+// TestNewPlugin verifies that newFactory wires the configured pipeline specs
+// into the factory's primary and juels-per-fee-coin data sources.
+func TestNewPlugin(t *testing.T) {
+ lggr := logger.TestLogger(t)
+ p := NewPlugin(lggr)
+
+ defaultSpec := "default-spec"
+ juelsPerFeeCoinSpec := "jpfc-spec"
+ config := types.ReportingPluginServiceConfig{
+ PluginConfig: fmt.Sprintf(
+ `{"pipelines": {"__DEFAULT_PIPELINE__": "%s", "juelsPerFeeCoinPipeline": "%s"}}`,
+ defaultSpec,
+ juelsPerFeeCoinSpec,
+ ),
+ }
+ pr := &mockPipelineRunner{}
+ prov := provider{}
+
+ f, err := p.newFactory(
+ context.Background(),
+ config,
+ prov,
+ pr,
+ nil,
+ mockErrorLog{},
+ )
+ require.NoError(t, err)
+
+ ds := f.DataSource.(*DataSource)
+ assert.Equal(t, defaultSpec, ds.spec)
+ jpfcDs := f.JuelsPerFeeCoinDataSource.(*DataSource)
+ assert.Equal(t, juelsPerFeeCoinSpec, jpfcDs.spec)
+}
diff --git a/testdata/scripts/node/validate/default.txtar b/testdata/scripts/node/validate/default.txtar
index 189476bfa84..8a3b1af96fa 100644
--- a/testdata/scripts/node/validate/default.txtar
+++ b/testdata/scripts/node/validate/default.txtar
@@ -73,6 +73,7 @@ MaxAgeDays = 0
MaxBackups = 1
[WebServer]
+AuthenticationMethod = 'local'
AllowOrigins = 'http://localhost:3000,http://localhost:6688'
BridgeResponseURL = ''
BridgeCacheTTL = '0s'
@@ -85,6 +86,25 @@ HTTPMaxSize = '32.77kb'
StartTimeout = '15s'
ListenIP = '0.0.0.0'
+[WebServer.LDAP]
+ServerTLS = true
+SessionTimeout = '15m0s'
+QueryTimeout = '2m0s'
+BaseUserAttr = 'uid'
+BaseDN = ''
+UsersDN = 'ou=users'
+GroupsDN = 'ou=groups'
+ActiveAttribute = ''
+ActiveAttributeAllowedValue = ''
+AdminUserGroupCN = 'NodeAdmins'
+EditUserGroupCN = 'NodeEditors'
+RunUserGroupCN = 'NodeRunners'
+ReadUserGroupCN = 'NodeReadOnly'
+UserApiTokenEnabled = false
+UserAPITokenDuration = '240h0m0s'
+UpstreamSyncInterval = '0s'
+UpstreamSyncRateLimit = '2m0s'
+
[WebServer.MFA]
RPID = ''
RPOrigin = ''
diff --git a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar
index 593aa0b21d0..31fded1b423 100644
--- a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar
+++ b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar
@@ -117,6 +117,7 @@ MaxAgeDays = 0
MaxBackups = 1
[WebServer]
+AuthenticationMethod = 'local'
AllowOrigins = 'http://localhost:3000,http://localhost:6688'
BridgeResponseURL = ''
BridgeCacheTTL = '0s'
@@ -129,6 +130,25 @@ HTTPMaxSize = '32.77kb'
StartTimeout = '15s'
ListenIP = '0.0.0.0'
+[WebServer.LDAP]
+ServerTLS = true
+SessionTimeout = '15m0s'
+QueryTimeout = '2m0s'
+BaseUserAttr = 'uid'
+BaseDN = ''
+UsersDN = 'ou=users'
+GroupsDN = 'ou=groups'
+ActiveAttribute = ''
+ActiveAttributeAllowedValue = ''
+AdminUserGroupCN = 'NodeAdmins'
+EditUserGroupCN = 'NodeEditors'
+RunUserGroupCN = 'NodeRunners'
+ReadUserGroupCN = 'NodeReadOnly'
+UserApiTokenEnabled = false
+UserAPITokenDuration = '240h0m0s'
+UpstreamSyncInterval = '0s'
+UpstreamSyncRateLimit = '2m0s'
+
[WebServer.MFA]
RPID = ''
RPOrigin = ''
diff --git a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar
index 7b8aa5e3836..78fc976912c 100644
--- a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar
+++ b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar
@@ -117,6 +117,7 @@ MaxAgeDays = 0
MaxBackups = 1
[WebServer]
+AuthenticationMethod = 'local'
AllowOrigins = 'http://localhost:3000,http://localhost:6688'
BridgeResponseURL = ''
BridgeCacheTTL = '0s'
@@ -129,6 +130,25 @@ HTTPMaxSize = '32.77kb'
StartTimeout = '15s'
ListenIP = '0.0.0.0'
+[WebServer.LDAP]
+ServerTLS = true
+SessionTimeout = '15m0s'
+QueryTimeout = '2m0s'
+BaseUserAttr = 'uid'
+BaseDN = ''
+UsersDN = 'ou=users'
+GroupsDN = 'ou=groups'
+ActiveAttribute = ''
+ActiveAttributeAllowedValue = ''
+AdminUserGroupCN = 'NodeAdmins'
+EditUserGroupCN = 'NodeEditors'
+RunUserGroupCN = 'NodeRunners'
+ReadUserGroupCN = 'NodeReadOnly'
+UserApiTokenEnabled = false
+UserAPITokenDuration = '240h0m0s'
+UpstreamSyncInterval = '0s'
+UpstreamSyncRateLimit = '2m0s'
+
[WebServer.MFA]
RPID = ''
RPOrigin = ''
diff --git a/testdata/scripts/node/validate/disk-based-logging.txtar b/testdata/scripts/node/validate/disk-based-logging.txtar
index ef6548619e1..226a7bbb3b4 100644
--- a/testdata/scripts/node/validate/disk-based-logging.txtar
+++ b/testdata/scripts/node/validate/disk-based-logging.txtar
@@ -117,6 +117,7 @@ MaxAgeDays = 0
MaxBackups = 1
[WebServer]
+AuthenticationMethod = 'local'
AllowOrigins = 'http://localhost:3000,http://localhost:6688'
BridgeResponseURL = ''
BridgeCacheTTL = '0s'
@@ -129,6 +130,25 @@ HTTPMaxSize = '32.77kb'
StartTimeout = '15s'
ListenIP = '0.0.0.0'
+[WebServer.LDAP]
+ServerTLS = true
+SessionTimeout = '15m0s'
+QueryTimeout = '2m0s'
+BaseUserAttr = 'uid'
+BaseDN = ''
+UsersDN = 'ou=users'
+GroupsDN = 'ou=groups'
+ActiveAttribute = ''
+ActiveAttributeAllowedValue = ''
+AdminUserGroupCN = 'NodeAdmins'
+EditUserGroupCN = 'NodeEditors'
+RunUserGroupCN = 'NodeRunners'
+ReadUserGroupCN = 'NodeReadOnly'
+UserApiTokenEnabled = false
+UserAPITokenDuration = '240h0m0s'
+UpstreamSyncInterval = '0s'
+UpstreamSyncRateLimit = '2m0s'
+
[WebServer.MFA]
RPID = ''
RPOrigin = ''
diff --git a/testdata/scripts/node/validate/invalid.txtar b/testdata/scripts/node/validate/invalid.txtar
index 87b877bc882..5cd3d567467 100644
--- a/testdata/scripts/node/validate/invalid.txtar
+++ b/testdata/scripts/node/validate/invalid.txtar
@@ -107,6 +107,7 @@ MaxAgeDays = 0
MaxBackups = 1
[WebServer]
+AuthenticationMethod = 'local'
AllowOrigins = 'http://localhost:3000,http://localhost:6688'
BridgeResponseURL = ''
BridgeCacheTTL = '0s'
@@ -119,6 +120,25 @@ HTTPMaxSize = '32.77kb'
StartTimeout = '15s'
ListenIP = '0.0.0.0'
+[WebServer.LDAP]
+ServerTLS = true
+SessionTimeout = '15m0s'
+QueryTimeout = '2m0s'
+BaseUserAttr = 'uid'
+BaseDN = ''
+UsersDN = 'ou=users'
+GroupsDN = 'ou=groups'
+ActiveAttribute = ''
+ActiveAttributeAllowedValue = ''
+AdminUserGroupCN = 'NodeAdmins'
+EditUserGroupCN = 'NodeEditors'
+RunUserGroupCN = 'NodeRunners'
+ReadUserGroupCN = 'NodeReadOnly'
+UserApiTokenEnabled = false
+UserAPITokenDuration = '240h0m0s'
+UpstreamSyncInterval = '0s'
+UpstreamSyncRateLimit = '2m0s'
+
[WebServer.MFA]
RPID = ''
RPOrigin = ''
diff --git a/testdata/scripts/node/validate/valid.txtar b/testdata/scripts/node/validate/valid.txtar
index c607da10644..fd24150b587 100644
--- a/testdata/scripts/node/validate/valid.txtar
+++ b/testdata/scripts/node/validate/valid.txtar
@@ -114,6 +114,7 @@ MaxAgeDays = 0
MaxBackups = 1
[WebServer]
+AuthenticationMethod = 'local'
AllowOrigins = 'http://localhost:3000,http://localhost:6688'
BridgeResponseURL = ''
BridgeCacheTTL = '0s'
@@ -126,6 +127,25 @@ HTTPMaxSize = '32.77kb'
StartTimeout = '15s'
ListenIP = '0.0.0.0'
+[WebServer.LDAP]
+ServerTLS = true
+SessionTimeout = '15m0s'
+QueryTimeout = '2m0s'
+BaseUserAttr = 'uid'
+BaseDN = ''
+UsersDN = 'ou=users'
+GroupsDN = 'ou=groups'
+ActiveAttribute = ''
+ActiveAttributeAllowedValue = ''
+AdminUserGroupCN = 'NodeAdmins'
+EditUserGroupCN = 'NodeEditors'
+RunUserGroupCN = 'NodeRunners'
+ReadUserGroupCN = 'NodeReadOnly'
+UserApiTokenEnabled = false
+UserAPITokenDuration = '240h0m0s'
+UpstreamSyncInterval = '0s'
+UpstreamSyncRateLimit = '2m0s'
+
[WebServer.MFA]
RPID = ''
RPOrigin = ''
diff --git a/testdata/scripts/node/validate/warnings.txtar b/testdata/scripts/node/validate/warnings.txtar
index ee7926f8f5f..828d953da9a 100644
--- a/testdata/scripts/node/validate/warnings.txtar
+++ b/testdata/scripts/node/validate/warnings.txtar
@@ -110,6 +110,7 @@ MaxAgeDays = 0
MaxBackups = 1
[WebServer]
+AuthenticationMethod = 'local'
AllowOrigins = 'http://localhost:3000,http://localhost:6688'
BridgeResponseURL = ''
BridgeCacheTTL = '0s'
@@ -122,6 +123,25 @@ HTTPMaxSize = '32.77kb'
StartTimeout = '15s'
ListenIP = '0.0.0.0'
+[WebServer.LDAP]
+ServerTLS = true
+SessionTimeout = '15m0s'
+QueryTimeout = '2m0s'
+BaseUserAttr = 'uid'
+BaseDN = ''
+UsersDN = 'ou=users'
+GroupsDN = 'ou=groups'
+ActiveAttribute = ''
+ActiveAttributeAllowedValue = ''
+AdminUserGroupCN = 'NodeAdmins'
+EditUserGroupCN = 'NodeEditors'
+RunUserGroupCN = 'NodeRunners'
+ReadUserGroupCN = 'NodeReadOnly'
+UserApiTokenEnabled = false
+UserAPITokenDuration = '240h0m0s'
+UpstreamSyncInterval = '0s'
+UpstreamSyncRateLimit = '2m0s'
+
[WebServer.MFA]
RPID = ''
RPOrigin = ''
diff --git a/tools/flakeytests/cmd/runner/main.go b/tools/flakeytests/cmd/runner/main.go
index 601832a8375..f38179f502b 100644
--- a/tools/flakeytests/cmd/runner/main.go
+++ b/tools/flakeytests/cmd/runner/main.go
@@ -20,6 +20,7 @@ func main() {
ghEventPath := flag.String("gh_event_path", "", "path to associated gh event")
ghEventName := flag.String("gh_event_name", "", "type of associated gh event")
ghRepo := flag.String("gh_repo", "", "name of gh repository")
+ ghRunID := flag.String("gh_run_id", "", "run id of the gh workflow")
flag.Parse()
if *grafanaHost == "" {
@@ -47,7 +48,7 @@ func main() {
readers = append(readers, r)
}
- ctx := flakeytests.GetGithubMetadata(*ghRepo, *ghEventName, *ghSHA, *ghEventPath)
+ ctx := flakeytests.GetGithubMetadata(*ghRepo, *ghEventName, *ghSHA, *ghEventPath, *ghRunID)
rep := flakeytests.NewLokiReporter(*grafanaHost, *grafanaAuth, *command, ctx)
r := flakeytests.NewRunner(readers, rep, numReruns)
err := r.Run()
diff --git a/tools/flakeytests/coverage.txt b/tools/flakeytests/coverage.txt
deleted file mode 100644
index 91640016fe2..00000000000
--- a/tools/flakeytests/coverage.txt
+++ /dev/null
@@ -1,93 +0,0 @@
-mode: atomic
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:50.103,54.38 4 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:54.38,55.24 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:55.24,62.18 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:62.18,64.5 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:65.4,65.46 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:72.2,73.16 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:73.16,75.3 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:77.2,90.16 3 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:93.63,95.16 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:95.16,97.3 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:99.2,101.16 3 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:101.16,103.3 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:104.2,110.16 4 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:110.16,112.3 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:112.8,112.52 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:112.52,114.18 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:114.18,116.4 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:117.3,117.83 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:119.2,119.12 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:122.81,124.16 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:124.16,126.3 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:128.2,128.31 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/reporter.go:131.77,133.2 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:40.79,44.30 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:44.31,44.32 0 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:46.2,52.3 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:61.75,70.2 8 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:81.45,85.2 3 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:87.75,89.28 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:89.28,91.16 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:91.16,93.19 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:93.19,94.13 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:99.4,99.42 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:99.42,100.13 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:103.4,104.18 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:104.18,106.5 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:110.4,110.39 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:110.39,111.13 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:114.4,114.20 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:115.16,116.32 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:116.32,118.6 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:119.5,119.31 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:120.18,121.38 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:121.38,122.33 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:122.33,124.7 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:125.6,125.32 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:130.3,130.33 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:130.33,132.4 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:134.2,134.19 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:141.106,144.38 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:144.38,146.27 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:146.27,148.4 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:150.3,151.36 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:151.36,155.18 3 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:155.18,161.55 3 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:161.55,162.14 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:164.5,164.32 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:167.4,168.18 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:168.18,170.5 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:172.4,172.25 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:172.25,174.22 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:174.22,175.37 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:175.37,177.7 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:178.6,178.42 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:184.2,184.29 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:187.30,189.16 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:189.16,191.3 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:193.2,194.16 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:194.16,196.3 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:198.2,198.30 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:198.30,200.3 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:200.8,202.3 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/runner.go:204.2,204.43 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/utils.go:12.74,15.25 3 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/utils.go:15.25,17.10 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/utils.go:17.10,19.4 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/utils.go:21.3,21.10 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/utils.go:24.2,25.9 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/utils.go:25.9,27.3 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/utils.go:29.2,29.16 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/utils.go:32.88,34.16 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/utils.go:34.16,36.17 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/utils.go:36.17,38.4 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/utils.go:40.3,41.17 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/utils.go:41.17,43.4 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/utils.go:45.3,46.17 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/utils.go:46.17,48.4 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/utils.go:51.2,52.19 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/utils.go:53.22,56.17 3 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/utils.go:56.17,58.4 1 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/utils.go:60.3,61.19 2 0
-github.com/smartcontractkit/chainlink/v2/tools/flakeytests/utils.go:62.10,63.19 1 0
diff --git a/tools/flakeytests/reporter.go b/tools/flakeytests/reporter.go
index db3890b5c7b..6696ec29a40 100644
--- a/tools/flakeytests/reporter.go
+++ b/tools/flakeytests/reporter.go
@@ -37,6 +37,7 @@ type Context struct {
PullRequestURL string `json:"pull_request_url,omitempty"`
Repository string `json:"repository"`
Type string `json:"event_type"`
+ RunURL string `json:"run_url,omitempty"`
}
type LokiReporter struct {
diff --git a/tools/flakeytests/utils.go b/tools/flakeytests/utils.go
index 18ab43980b3..d2326c47262 100644
--- a/tools/flakeytests/utils.go
+++ b/tools/flakeytests/utils.go
@@ -2,6 +2,7 @@ package flakeytests
import (
"encoding/json"
+ "fmt"
"io"
"log"
"os"
@@ -29,26 +30,20 @@ func DigString(mp map[string]interface{}, path []string) (string, error) {
return vs, nil
}
-func GetGithubMetadata(repo string, eventName string, sha string, path string) Context {
- event := map[string]interface{}{}
- if path != "" {
- r, err := os.Open(path)
- if err != nil {
- log.Fatalf("Error reading gh event at path: %s", path)
- }
-
- d, err := io.ReadAll(r)
- if err != nil {
- log.Fatal("Error reading gh event into string")
- }
+// getGithubMetadata builds a reporting Context from a GH event payload read
+// from e. For pull_request events the commit SHA is taken from the event's
+// pull_request.head.sha rather than the sha argument.
+func getGithubMetadata(repo string, eventName string, sha string, e io.Reader, runID string) Context {
+ d, err := io.ReadAll(e)
+ if err != nil {
+ log.Fatal("Error reading gh event into string")
+ }
- err = json.Unmarshal(d, &event)
- if err != nil {
- log.Fatalf("Error unmarshaling gh event at path: %s", path)
- }
+ event := map[string]interface{}{}
+ err = json.Unmarshal(d, &event)
+ if err != nil {
+ // NOTE(review): Fatalf with no format verbs, and the message still
+ // mentions a path that is no longer available here — prefer log.Fatal
+ // with a message that matches the new io.Reader-based signature.
+ log.Fatalf("Error unmarshaling gh event at path")
+ }
 }
- basicCtx := &Context{Repository: repo, CommitSHA: sha, Type: eventName}
+ runURL := fmt.Sprintf("github.com/%s/actions/runs/%s", repo, runID)
+ basicCtx := &Context{Repository: repo, CommitSHA: sha, Type: eventName, RunURL: runURL}
 switch eventName {
 case "pull_request":
 prURL := ""
@@ -58,8 +53,27 @@ func GetGithubMetadata(repo string, eventName string, sha string, path string) C
 }
 basicCtx.PullRequestURL = prURL
+
+ // For pull request events, the $GITHUB_SHA variable doesn't actually
+ // contain the sha for the latest commit, as documented here:
+ // https://stackoverflow.com/a/68068674
+ var newSha string
+ s, err := DigString(event, []string{"pull_request", "head", "sha"})
+ if err == nil {
+ newSha = s
+ }
+
+ // NOTE(review): when the event lacks pull_request.head.sha, newSha stays
+ // "" and the incoming sha argument is discarded — confirm an empty
+ // CommitSHA is preferable to falling back to the argument.
+ basicCtx.CommitSHA = newSha
 return *basicCtx
 default:
 return *basicCtx
 }
}
+
+// GetGithubMetadata opens the GH event file at path and derives the
+// reporting Context from its contents.
+// NOTE(review): the opened file is never closed; tolerable in this
+// short-lived CLI, but a deferred Close would be tidier.
+func GetGithubMetadata(repo string, eventName string, sha string, path string, runID string) Context {
+ event, err := os.Open(path)
+ if err != nil {
+ log.Fatalf("Error reading gh event at path: %s", path)
+ }
+ return getGithubMetadata(repo, eventName, sha, event, runID)
+}
diff --git a/tools/flakeytests/utils_test.go b/tools/flakeytests/utils_test.go
index d3ef8eb602d..6ea912d11d4 100644
--- a/tools/flakeytests/utils_test.go
+++ b/tools/flakeytests/utils_test.go
@@ -1,6 +1,8 @@
package flakeytests
import (
+ "fmt"
+ "strings"
"testing"
"github.com/stretchr/testify/assert"
@@ -17,3 +19,31 @@ func TestDigString(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, "some-url", out)
}
+
+// prEventTemplate is a minimal pull_request event payload carrying the head
+// sha and PR html link that getGithubMetadata extracts.
+var prEventTemplate = `
+{
+ "pull_request": {
+ "head": {
+ "sha": "%s"
+ },
+ "_links": {
+ "html": {
+ "href": "%s"
+ }
+ }
+ }
+}
+`
+
+// TestGetGithubMetadata checks both branches: a non-PR event keeps the sha
+// argument, while a pull_request event overrides it with head.sha and picks
+// up the PR URL; the run URL is derived from repo and runID in both cases.
+func TestGetGithubMetadata(t *testing.T) {
+ repo, eventName, sha, event, runID := "chainlink", "merge_group", "a-sha", `{}`, "1234"
+ expectedRunURL := fmt.Sprintf("github.com/%s/actions/runs/%s", repo, runID)
+ ctx := getGithubMetadata(repo, eventName, sha, strings.NewReader(event), runID)
+ assert.Equal(t, Context{Repository: repo, CommitSHA: sha, Type: eventName, RunURL: expectedRunURL}, ctx)
+
+ // The sha argument is deliberately a dummy value here to prove the PR
+ // path replaces it with the event's head.sha.
+ anotherSha, eventName, url := "another-sha", "pull_request", "a-url"
+ event = fmt.Sprintf(prEventTemplate, anotherSha, url)
+ sha = "302eb05d592132309b264e316f443f1ceb81b6c3"
+ ctx = getGithubMetadata(repo, eventName, sha, strings.NewReader(event), runID)
+ assert.Equal(t, Context{Repository: repo, CommitSHA: anotherSha, Type: eventName, PullRequestURL: url, RunURL: expectedRunURL}, ctx)
+}